1 Index: linux-2.6.18-53.1.14/include/linux/ext3_fs.h
2 ===================================================================
3 --- linux-2.6.18-53.1.14.orig/include/linux/ext3_fs.h
4 +++ linux-2.6.18-53.1.14/include/linux/ext3_fs.h
6 #define ext3_debug(f, a...) do {} while (0)
9 +#define EXT3_MULTIBLOCK_ALLOCATOR 1
11 +#define EXT3_MB_HINT_MERGE 1 /* prefer goal again. length */
12 +#define EXT3_MB_HINT_RESERVED 2 /* blocks already reserved */
13 +#define EXT3_MB_HINT_METADATA 4 /* metadata is being allocated */
14 +#define EXT3_MB_HINT_FIRST 8 /* first blocks in the file */
15 +#define EXT3_MB_HINT_BEST 16 /* search for the best chunk */
16 +#define EXT3_MB_HINT_DATA 32 /* data is being allocated */
17 +#define EXT3_MB_HINT_NOPREALLOC 64 /* don't preallocate (for tails) */
18 +#define EXT3_MB_HINT_GROUP_ALLOC 128 /* allocate for locality group */
19 +#define EXT3_MB_HINT_GOAL_ONLY 256 /* allocate goal blocks or none */
21 +struct ext3_allocation_request {
22 + struct inode *inode; /* target inode for block we're allocating */
23 + unsigned long logical; /* logical block in target inode */
24 + unsigned long goal; /* phys. target (a hint) */
25 + unsigned long lleft; /* the closest logical allocated block to the left */
26 + unsigned long pleft; /* phys. block for ^^^ */
27 + unsigned long lright; /* the closest logical allocated block to the right */
28 + unsigned long pright; /* phys. block for ^^^ */
29 + unsigned long len; /* how many blocks we want to allocate */
30 + unsigned long flags; /* flags. see above EXT3_MB_HINT_* */
34 * Special inodes numbers
36 @@ -398,6 +422,14 @@ struct ext3_inode {
37 #define ext3_find_first_zero_bit ext2_find_first_zero_bit
38 #define ext3_find_next_zero_bit ext2_find_next_zero_bit
40 +#ifndef ext2_find_next_le_bit
41 +#ifdef __LITTLE_ENDIAN
42 +#define ext2_find_next_le_bit(addr, size, off) find_next_bit((addr), (size), (off))
44 +#error "mballoc needs a patch for big-endian systems - CFS bug 10634"
45 +#endif /* __LITTLE_ENDIAN */
46 +#endif /* !ext2_find_next_le_bit */
49 * Maximal mount counts between two filesystem checks
51 @@ -799,6 +831,20 @@ extern unsigned long ext3_count_dirs (st
52 extern void ext3_check_inodes_bitmap (struct super_block *);
53 extern unsigned long ext3_count_free (struct buffer_head *, unsigned);
56 +extern long ext3_mb_stats;
57 +extern long ext3_mb_max_to_scan;
58 +extern int ext3_mb_init(struct super_block *, int);
59 +extern int ext3_mb_release(struct super_block *);
60 +extern unsigned long ext3_mb_new_blocks(handle_t *, struct ext3_allocation_request *, int *);
61 +extern int ext3_mb_reserve_blocks(struct super_block *, int);
62 +extern void ext3_mb_release_blocks(struct super_block *, int);
64 +extern void ext3_mb_discard_inode_preallocations(struct inode *);
65 +extern int __init init_ext3_mb_proc(void);
66 +extern void exit_ext3_mb_proc(void);
67 +extern void ext3_mb_free_blocks(handle_t *, struct inode *, unsigned long, unsigned long, int, int *);
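For orientation, a minimal sketch of how a caller might drive this interface (illustrative only: 'handle', 'inode', 'iblock' and 'goal' come from the caller's context, and the error convention of returning 0 with *errp set is assumed by analogy with the old ext3_new_blocks() interface):

	struct ext3_allocation_request ar;
	unsigned long block;
	int err;

	memset(&ar, 0, sizeof(ar));
	ar.inode = inode;               /* file we are allocating for */
	ar.logical = iblock;            /* logical block to be mapped */
	ar.goal = goal;                 /* preferred physical block (a hint) */
	ar.len = 1;                     /* ask for a single block */
	ar.flags = EXT3_MB_HINT_DATA;   /* data, not metadata */

	block = ext3_mb_new_blocks(handle, &ar, &err);
	if (block == 0)
		goto fail;              /* assumed failure convention: err is set */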
71 int ext3_forget(handle_t *handle, int is_metadata, struct inode *inode,
72 @@ -843,6 +889,10 @@ extern int ext3_group_extend(struct supe
73 ext3_fsblk_t n_blocks_count);
76 +extern struct proc_dir_entry *proc_root_ext3;
77 +extern int __init init_ext3_proc(void);
78 +extern void exit_ext3_proc(void);
80 extern void ext3_error (struct super_block *, const char *, const char *, ...)
81 __attribute__ ((format (printf, 3, 4)));
82 extern void __ext3_std_error (struct super_block *, const char *, int);
83 Index: linux-2.6.18-53.1.14/include/linux/ext3_fs_sb.h
84 ===================================================================
85 --- linux-2.6.18-53.1.14.orig/include/linux/ext3_fs_sb.h
86 +++ linux-2.6.18-53.1.14/include/linux/ext3_fs_sb.h
87 @@ -88,6 +88,61 @@ struct ext3_sb_info {
88 unsigned long s_ext_blocks;
89 unsigned long s_ext_extents;
92 + /* for buddy allocator */
93 + struct ext3_group_info ***s_group_info;
94 + struct inode *s_buddy_cache;
95 + long s_blocks_reserved;
96 + spinlock_t s_reserve_lock;
97 + struct list_head s_active_transaction;
98 + struct list_head s_closed_transaction;
99 + struct list_head s_committed_transaction;
100 + spinlock_t s_md_lock;
101 + tid_t s_last_transaction;
102 + unsigned short *s_mb_offsets, *s_mb_maxs;
105 + unsigned long s_mb_factor;
106 + unsigned long s_stripe;
107 + unsigned long s_mb_stream_request;
108 + unsigned long s_mb_max_to_scan;
109 + unsigned long s_mb_min_to_scan;
110 + unsigned long s_mb_max_groups_to_scan;
111 + unsigned long s_mb_stats;
112 + unsigned long s_mb_order2_reqs;
114 + /* history to debug policy */
115 + struct ext3_mb_history *s_mb_history;
116 + int s_mb_history_cur;
117 + int s_mb_history_max;
118 + int s_mb_history_num;
119 + struct proc_dir_entry *s_dev_proc;
120 + spinlock_t s_mb_history_lock;
121 + int s_mb_history_filter;
123 + /* stats for buddy allocator */
124 + spinlock_t s_mb_pa_lock;
125 + atomic_t s_bal_reqs; /* number of reqs with len > 1 */
126 + atomic_t s_bal_success; /* we found long enough chunks */
127 + atomic_t s_bal_allocated; /* in blocks */
128 + atomic_t s_bal_ex_scanned; /* total extents scanned */
129 + atomic_t s_bal_goals; /* goal hits */
130 + atomic_t s_bal_breaks; /* too long searches */
131 + atomic_t s_bal_2orders; /* 2^order hits */
132 + spinlock_t s_bal_lock;
133 + unsigned long s_mb_buddies_generated;
134 + unsigned long long s_mb_generation_time;
135 + atomic_t s_mb_lost_chunks;
136 + atomic_t s_mb_preallocated;
137 + atomic_t s_mb_discarded;
139 + /* locality groups */
140 + struct ext3_locality_group *s_locality_groups;
144 +#define EXT3_GROUP_INFO(sb, group) \
145 + EXT3_SB(sb)->s_group_info[(group) >> EXT3_DESC_PER_BLOCK_BITS(sb)] \
146 + [(group) & (EXT3_DESC_PER_BLOCK(sb) - 1)]
148 #endif /* _LINUX_EXT3_FS_SB */
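The EXT3_GROUP_INFO() macro above is a two-level table lookup. A standalone illustration of the index split, assuming 4 KB blocks and 32-byte group descriptors (so 128 descriptors per block, i.e. EXT3_DESC_PER_BLOCK_BITS == 7):

#include <stdio.h>

/* hypothetical values for a 4 KB-block filesystem:
 * group descriptors are 32 bytes, so 4096/32 = 128 fit per block */
#define DESC_PER_BLOCK       128
#define DESC_PER_BLOCK_BITS  7

int main(void)
{
	unsigned group = 300;                           /* any group number */
	unsigned outer = group >> DESC_PER_BLOCK_BITS;  /* 300/128 = 2 */
	unsigned inner = group & (DESC_PER_BLOCK - 1);  /* 300%128 = 44 */

	printf("group %u -> s_group_info[%u][%u]\n", group, outer, inner);
	return 0;
}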
149 Index: linux-2.6.18-53.1.14/fs/ext3/super.c
150 ===================================================================
151 --- linux-2.6.18-53.1.14.orig/fs/ext3/super.c
152 +++ linux-2.6.18-53.1.14/fs/ext3/super.c
153 @@ -391,6 +391,7 @@ static void ext3_put_super (struct super
154 struct ext3_super_block *es = sbi->s_es;
157 + ext3_mb_release(sb);
158 ext3_ext_release(sb);
159 ext3_xattr_put_super(sb);
160 journal_destroy(sbi->s_journal);
161 @@ -433,6 +434,8 @@ static void ext3_put_super (struct super
162 invalidate_bdev(sbi->journal_bdev, 0);
163 ext3_blkdev_remove(sbi);
165 + remove_proc_entry(sb->s_id, proc_root_ext3);
166 + sbi->s_dev_proc = NULL;
167 sb->s_fs_info = NULL;
170 @@ -458,6 +461,8 @@ static struct inode *ext3_alloc_inode(st
171 ei->vfs_inode.i_version = 1;
173 memset(&ei->i_cached_extent, 0, sizeof(ei->i_cached_extent));
174 + INIT_LIST_HEAD(&ei->i_prealloc_list);
175 + spin_lock_init(&ei->i_prealloc_lock);
176 return &ei->vfs_inode;
179 @@ -1454,6 +1459,13 @@ static int ext3_fill_super (struct super
180 sbi->s_mount_opt = 0;
181 sbi->s_resuid = EXT3_DEF_RESUID;
182 sbi->s_resgid = EXT3_DEF_RESGID;
183 + sbi->s_dev_proc = proc_mkdir(sb->s_id, proc_root_ext3);
184 + if (sbi->s_dev_proc == NULL) {
185 + printk(KERN_ERR "EXT3-fs: Unable to create %s\n", sb->s_id);
186 + sb->s_fs_info = NULL;
193 @@ -1857,6 +1869,8 @@ failed_mount:
194 ext3_blkdev_remove(sbi);
197 + remove_proc_entry(sb->s_id, proc_root_ext3);
198 + sbi->s_dev_proc = NULL;
199 sb->s_fs_info = NULL;
202 @@ -2782,9 +2796,46 @@ static struct file_system_type ext3_fs_t
203 .fs_flags = FS_REQUIRES_DEV,
206 +#define EXT3_ROOT "ext3"
207 +struct proc_dir_entry *proc_root_ext3;
209 +int __init init_ext3_proc(void)
213 + if ((ret = init_ext3_mb_proc()))
216 + proc_root_ext3 = proc_mkdir(EXT3_ROOT, proc_root_fs);
217 + if (proc_root_ext3 == NULL) {
218 + printk(KERN_ERR "EXT3-fs: Unable to create %s\n", EXT3_ROOT);
226 + exit_ext3_mb_proc();
231 +void exit_ext3_proc(void)
233 + exit_ext3_mb_proc();
234 + remove_proc_entry(EXT3_ROOT, proc_root_fs);
237 static int __init init_ext3_fs(void)
239 - int err = init_ext3_xattr();
242 + err = init_ext3_proc();
246 + err = init_ext3_xattr();
249 err = init_inodecache();
250 @@ -2806,6 +2858,7 @@ static void __exit exit_ext3_fs(void)
251 unregister_filesystem(&ext3_fs_type);
252 destroy_inodecache();
257 int ext3_map_inode_page(struct inode *inode, struct page *page,
258 Index: linux-2.6.18-53.1.14/fs/ext3/mballoc.c
259 ===================================================================
261 +++ linux-2.6.18-53.1.14/fs/ext3/mballoc.c
264 + * Copyright (c) 2003-2006, Cluster File Systems, Inc, info@clusterfs.com
265 + * Written by Alex Tomas <alex@clusterfs.com>
267 + * This program is free software; you can redistribute it and/or modify
268 + * it under the terms of the GNU General Public License version 2 as
269 + * published by the Free Software Foundation.
271 + * This program is distributed in the hope that it will be useful,
272 + * but WITHOUT ANY WARRANTY; without even the implied warranty of
273 + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
274 + * GNU General Public License for more details.
276 + * You should have received a copy of the GNU General Public License
277 + * along with this program; if not, write to the Free Software
278 + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-
283 + * mballoc.c contains the multiblocks allocation routines
286 +#include <linux/time.h>
287 +#include <linux/fs.h>
288 +#include <linux/namei.h>
289 +#include <linux/ext3_jbd.h>
290 +#include <linux/jbd.h>
291 +#include <linux/ext3_fs.h>
292 +#include <linux/quotaops.h>
293 +#include <linux/buffer_head.h>
294 +#include <linux/module.h>
295 +#include <linux/swap.h>
296 +#include <linux/proc_fs.h>
297 +#include <linux/pagemap.h>
298 +#include <linux/seq_file.h>
299 +#include <linux/version.h>
303 + * - test ext3_ext_search_left() and ext3_ext_search_right()
304 + * - search for metadata in few groups
307 + * - normalization should take into account whether file is still open
308 + * - discard preallocations if no free space left (policy?)
309 + * - don't normalize tails
311 + * - reservation for superuser
314 + * - bitmap read-ahead (proposed by Oleg Drokin aka green)
315 + * - track min/max extents in each group for better group selection
316 + * - mb_mark_used() may allocate chunk right after splitting buddy
317 + * - tree of groups sorted by number of free blocks
322 + * mballoc operates on the following data:
324 + * - in-core buddy (actually includes buddy and bitmap)
325 + * - preallocation descriptors (PAs)
327 + * there are two types of preallocations:
329 + * assigned to a specific inode and can be used for this inode only.
330 + * it describes part of inode's space preallocated to specific
331 + * physical blocks. any block from that preallocation can be used
332 + * independently. the descriptor just tracks the number of blocks left
333 + * unused. so, before taking some block from the descriptor, one must
334 + * make sure the corresponding logical block isn't allocated yet. this
335 + * also means that freeing any block within descriptor's range
336 + * must discard all preallocated blocks.
338 + * assigned to specific locality group which does not translate to
339 + * permanent set of inodes: inode can join and leave group. space
340 + * from this type of preallocation can be used for any inode. thus
341 + * it's consumed from the beginning to the end.
343 + * relation between them can be expressed as:
344 + * in-core buddy = on-disk bitmap + preallocation descriptors
346 + * this means the blocks mballoc considers used are:
347 + * - allocated blocks (persistent)
348 + * - preallocated blocks (non-persistent)
350 + * consistency in mballoc world means that at any time a block is either
351 + * free or used in ALL structures. notice: "any time" should not be read
352 + * literally -- time is discrete and delimited by locks.
354 + * to keep it simple, we don't use block numbers, instead we count number of
355 + * blocks: how many blocks marked used/free in on-disk bitmap, buddy and PA.
357 + * all operations can be expressed as:
358 + * - init buddy: buddy = on-disk + PAs
359 + * - new PA: buddy += N; PA = N
360 + * - use inode PA: on-disk += N; PA -= N
361 + * - discard inode PA buddy -= on-disk - PA; PA = 0
362 + * - use locality group PA on-disk += N; PA -= N
363 + * - discard locality group PA buddy -= PA; PA = 0
364 + * note: 'buddy -= on-disk - PA' is used to show that on-disk bitmap
365 + * is used in real operation because we can't know actual used
366 + * bits from PA, only from on-disk bitmap
368 + * if we follow this strict logic, then all operations above should be atomic.
369 + * given some of them can block, we'd have to use something like semaphores
370 + * killing performance on high-end SMP hardware. let's try to relax it using
371 + * the following knowledge:
372 + * 1) if buddy is referenced, it's already initialized
373 + * 2) while block is used in buddy and the buddy is referenced,
374 + * nobody can re-allocate that block
375 + * 3) we work on bitmaps and '+' actually means 'set bits'. if on-disk has a
376 + * bit set and a PA claims the same block, it's OK. IOW, one can set a bit in the
377 + * on-disk bitmap if the buddy has the same bit set and/or a PA covers the corresponding
380 + * so, now we're building a concurrency table:
383 + * blocks for PA are allocated in the buddy, buddy must be referenced
384 + * until PA is linked to allocation group to avoid concurrent buddy init
386 + * we need to make sure that either on-disk bitmap or PA has uptodate data
387 + * given (3) we care that PA-=N operation doesn't interfere with init
388 + * - discard inode PA
389 + * the simplest way would be to have buddy initialized by the discard
390 + * - use locality group PA
391 + * again PA-=N must be serialized with init
392 + * - discard locality group PA
393 + * the simplest way would be to have buddy initialized by the discard
396 + * i_truncate_mutex serializes them
397 + * - discard inode PA
398 + * discard process must wait until PA isn't used by another process
399 + * - use locality group PA
400 + * some mutex should serialize them
401 + * - discard locality group PA
402 + * discard process must wait until PA isn't used by another process
405 + * i_truncate_mutex or another mutex should serialize them
406 + * - discard inode PA
407 + * discard process must wait until PA isn't used by another process
408 + * - use locality group PA
409 + * nothing wrong here -- they're different PAs covering different blocks
410 + * - discard locality group PA
411 + * discard process must wait until PA isn't used by another process
413 + * now we're ready to draw a few conclusions:
414 + * - while a PA is referenced, no discard is possible
415 + * - a PA stays referenced until its block is marked in the on-disk bitmap
416 + * - PA changes only after on-disk bitmap
417 + * - discard must not compete with init. either init is done before
418 + * any discard or they're serialized somehow
419 + * - buddy init as sum of on-disk bitmap and PAs is done atomically
421 + * a special case is when we've used a PA down to empty. no need to modify the buddy
422 + * in this case, but we should care about concurrent init
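A toy userspace model of the counting rules listed above (not the kernel structures); it keeps the invariant "in-core buddy = on-disk bitmap + preallocations" across new/use/discard, with discard modeled by its net counting effect:

#include <assert.h>
#include <stdio.h>

/* toy per-group counters mirroring the accounting rules above */
struct group { int ondisk_used, pa_reserved, buddy_used; };

static void new_pa(struct group *g, int n)       { g->buddy_used += n; g->pa_reserved += n; }
static void use_inode_pa(struct group *g, int n) { g->ondisk_used += n; g->pa_reserved -= n; }
static void discard_pa(struct group *g)          { g->buddy_used -= g->pa_reserved; g->pa_reserved = 0; }

int main(void)
{
	struct group g = { .ondisk_used = 10, .pa_reserved = 0, .buddy_used = 10 };

	new_pa(&g, 8);        /* preallocate 8 blocks: buddy 18, PA 8, disk 10 */
	use_inode_pa(&g, 3);  /* write 3 of them:      buddy 18, PA 5, disk 13 */
	discard_pa(&g);       /* drop the rest:        buddy 13, PA 0, disk 13 */

	/* invariant: in-core buddy = on-disk bitmap + preallocations */
	assert(g.buddy_used == g.ondisk_used + g.pa_reserved);
	printf("buddy=%d disk=%d pa=%d\n", g.buddy_used, g.ondisk_used, g.pa_reserved);
	return 0;
}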
427 + * Logic in few words:
432 + * mark bits in on-disk bitmap
435 + * - use preallocation:
436 + * find proper PA (per-inode or group)
438 + * mark bits in on-disk bitmap
444 + * mark bits in on-disk bitmap
447 + * - discard preallocations in group:
449 + * move them onto local list
450 + * load on-disk bitmap
452 + * remove PA from object (inode or locality group)
453 + * mark free blocks in-core
455 + * - discard inode's preallocations:
462 + * - bitlock on a group (group)
463 + * - object (inode/locality) (object)
464 + * - per-pa lock (pa)
471 + * - find and use pa:
474 + * - release consumed pa:
479 + * - generate in-core bitmap:
483 + * - discard all for given object (inode, locality group):
488 + * - discard all for given group:
497 + * with AGGRESSIVE_CHECK allocator runs consistency checks over
498 + * structures. these checks slow things down a lot
500 +#define AGGRESSIVE_CHECK__
503 + * with DOUBLE_CHECK defined mballoc creates persistent in-core
504 + * bitmaps, maintains and uses them to check for double allocations
506 +#define DOUBLE_CHECK__
512 +#define mb_debug(fmt,a...) printk(fmt, ##a)
514 +#define mb_debug(fmt,a...)
518 + * with EXT3_MB_HISTORY mballoc stores last N allocations in memory
519 + * and you can monitor it in /proc/fs/ext3/<dev>/mb_history
521 +#define EXT3_MB_HISTORY
522 +#define EXT3_MB_HISTORY_ALLOC 1 /* allocation */
523 +#define EXT3_MB_HISTORY_PREALLOC 2 /* preallocated blocks used */
524 +#define EXT3_MB_HISTORY_DISCARD 4 /* preallocation discarded */
525 +#define EXT3_MB_HISTORY_FREE 8 /* free */
527 +#define EXT3_MB_HISTORY_DEFAULT (EXT3_MB_HISTORY_ALLOC | \
528 + EXT3_MB_HISTORY_PREALLOC | \
529 + EXT3_MB_HISTORY_DISCARD | \
530 + EXT3_MB_HISTORY_FREE)
533 + * How long mballoc can look for a best extent (in found extents)
535 +#define MB_DEFAULT_MAX_TO_SCAN 200
538 + * How long mballoc must look for a best extent
540 +#define MB_DEFAULT_MIN_TO_SCAN 10
543 + * How many groups mballoc will scan looking for the best chunk
545 +#define MB_DEFAULT_MAX_GROUPS_TO_SCAN 5
548 + * with 'ext3_mb_stats' allocator will collect stats that will be
549 + * shown at umount. The collecting costs though!
551 +#define MB_DEFAULT_STATS 1
554 + * files smaller than MB_DEFAULT_STREAM_THRESHOLD are served
555 + * by the stream allocator, whose purpose is to pack requests
556 + * as close to each other as possible to produce smooth I/O traffic
558 +#define MB_DEFAULT_STREAM_THRESHOLD 16 /* 64K */
561 + * for which requests use 2^N search using buddies
563 +#define MB_DEFAULT_ORDER2_REQS 8
566 + * default stripe size = 1MB
568 +#define MB_DEFAULT_STRIPE 256
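For scale: assuming 4 KB filesystem blocks, MB_DEFAULT_STREAM_THRESHOLD of 16 blocks is the 64 KB mentioned in the comment and MB_DEFAULT_STRIPE of 256 blocks is 1 MB; with 1 KB blocks the same constants would mean 16 KB and 256 KB.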
570 +static kmem_cache_t *ext3_pspace_cachep = NULL;
572 +#ifdef EXT3_BB_MAX_BLOCKS
573 +#undef EXT3_BB_MAX_BLOCKS
575 +#define EXT3_BB_MAX_BLOCKS 30
577 +struct ext3_free_metadata {
579 + unsigned short num;
580 + unsigned short blocks[EXT3_BB_MAX_BLOCKS];
581 + struct list_head list;
584 +struct ext3_group_info {
585 + unsigned long bb_state;
586 + unsigned long bb_tid;
587 + struct ext3_free_metadata *bb_md_cur;
588 + unsigned short bb_first_free;
589 + unsigned short bb_free;
590 + unsigned short bb_fragments;
591 + struct list_head bb_prealloc_list;
595 + unsigned short bb_counters[];
598 +#define EXT3_GROUP_INFO_NEED_INIT_BIT 0
599 +#define EXT3_GROUP_INFO_LOCKED_BIT 1
601 +#define EXT3_MB_GRP_NEED_INIT(grp) \
602 + (test_bit(EXT3_GROUP_INFO_NEED_INIT_BIT, &(grp)->bb_state))
605 +struct ext3_prealloc_space {
606 + struct list_head pa_inode_list;
607 + struct list_head pa_group_list;
609 + struct list_head pa_tmp_list;
610 + struct rcu_head pa_rcu;
612 + spinlock_t pa_lock;
614 + unsigned pa_deleted;
615 + unsigned long pa_pstart; /* phys. block */
616 + unsigned long pa_lstart; /* log. block */
617 + unsigned short pa_len; /* len of preallocated chunk */
618 + unsigned short pa_free; /* how many blocks are free */
619 + unsigned short pa_linear; /* consumed in one direction
620 + * strictly, for group prealloc */
621 + spinlock_t *pa_obj_lock;
622 + struct inode *pa_inode; /* hack, for history only */
626 +struct ext3_free_extent {
627 + unsigned long fe_logical;
628 + unsigned long fe_start;
629 + unsigned long fe_group;
630 + unsigned long fe_len;
635 + * we try to group all related changes together
636 + * so that writeback can flush/allocate them together as well
638 +struct ext3_locality_group {
639 + /* for allocator */
640 + struct semaphore lg_sem; /* to serialize allocates */
641 + struct list_head lg_prealloc_list;/* list of preallocations */
642 + spinlock_t lg_prealloc_lock;
645 +struct ext3_allocation_context {
646 + struct inode *ac_inode;
647 + struct super_block *ac_sb;
649 + /* original request */
650 + struct ext3_free_extent ac_o_ex;
652 + /* goal request (after normalization) */
653 + struct ext3_free_extent ac_g_ex;
655 + /* the best found extent */
656 + struct ext3_free_extent ac_b_ex;
658 + /* copy of the best found extent taken before preallocation efforts */
659 + struct ext3_free_extent ac_f_ex;
661 + /* number of iterations done. we have to track to limit searching */
662 + unsigned long ac_ex_scanned;
663 + __u16 ac_groups_scanned;
667 + __u16 ac_flags; /* allocation hints */
671 + __u8 ac_2order; /* if request is to allocate 2^N blocks and
672 + * N > 0, the field stores N, otherwise 0 */
673 + __u8 ac_op; /* operation, for history only */
674 + struct page *ac_bitmap_page;
675 + struct page *ac_buddy_page;
676 + struct ext3_prealloc_space *ac_pa;
677 + struct ext3_locality_group *ac_lg;
680 +#define AC_STATUS_CONTINUE 1
681 +#define AC_STATUS_FOUND 2
682 +#define AC_STATUS_BREAK 3
684 +struct ext3_mb_history {
685 + struct ext3_free_extent orig; /* orig allocation */
686 + struct ext3_free_extent goal; /* goal allocation */
687 + struct ext3_free_extent result; /* result allocation */
690 + __u16 found; /* how many extents have been found */
691 + __u16 groups; /* how many groups have been scanned */
692 + __u16 tail; /* what tail broke some buddy */
693 + __u16 buddy; /* buddy the tail ^^^ broke */
695 + __u8 cr:3; /* which phase the result extent was found at */
701 + struct page *bd_buddy_page;
703 + struct page *bd_bitmap_page;
705 + struct ext3_group_info *bd_info;
706 + struct super_block *bd_sb;
708 + unsigned bd_blkbits;
710 +#define EXT3_MB_BITMAP(e3b) ((e3b)->bd_bitmap)
711 +#define EXT3_MB_BUDDY(e3b) ((e3b)->bd_buddy)
713 +#ifndef EXT3_MB_HISTORY
714 +#define ext3_mb_store_history(ac)
716 +static void ext3_mb_store_history(struct ext3_allocation_context *ac);
719 +#define in_range(b, first, len) ((b) >= (first) && (b) <= (first) + (len) - 1)
721 +int ext3_create (struct inode *, struct dentry *, int, struct nameidata *);
722 +struct buffer_head * read_block_bitmap(struct super_block *, unsigned int);
723 +unsigned long ext3_new_blocks_old(handle_t *handle, struct inode *inode,
724 + unsigned long goal, unsigned long *count, int *errp);
725 +void ext3_mb_release_blocks(struct super_block *, int);
726 +void ext3_mb_poll_new_transaction(struct super_block *, handle_t *);
727 +void ext3_mb_free_committed_blocks(struct super_block *);
728 +void ext3_mb_generate_from_pa(struct super_block *sb, void *bitmap, int group);
729 +void ext3_mb_free_consumed_preallocations(struct ext3_allocation_context *ac);
730 +void ext3_mb_return_to_preallocation(struct inode *inode, struct ext3_buddy *e3b,
731 + sector_t block, int count);
732 +void ext3_mb_show_ac(struct ext3_allocation_context *ac);
733 +void ext3_mb_check_with_pa(struct ext3_buddy *e3b, int first, int count);
734 +void ext3_mb_put_pa(struct ext3_allocation_context *, struct super_block *, struct ext3_prealloc_space *pa);
735 +int ext3_mb_init_per_dev_proc(struct super_block *sb);
736 +int ext3_mb_destroy_per_dev_proc(struct super_block *sb);
739 + * Calculate the block group number and offset, given a block number
741 +static void ext3_get_group_no_and_offset(struct super_block *sb,
742 + unsigned long blocknr,
743 + unsigned long *blockgrpp,
744 + unsigned long *offsetp)
746 + struct ext3_super_block *es = EXT3_SB(sb)->s_es;
747 + unsigned long offset;
749 + blocknr = blocknr - le32_to_cpu(es->s_first_data_block);
750 + offset = blocknr % EXT3_BLOCKS_PER_GROUP(sb);
751 + blocknr = blocknr / EXT3_BLOCKS_PER_GROUP(sb);
755 + *blockgrpp = blocknr;
760 +ext3_lock_group(struct super_block *sb, int group)
762 + bit_spin_lock(EXT3_GROUP_INFO_LOCKED_BIT,
763 + &EXT3_GROUP_INFO(sb, group)->bb_state);
767 +ext3_unlock_group(struct super_block *sb, int group)
769 + bit_spin_unlock(EXT3_GROUP_INFO_LOCKED_BIT,
770 + &EXT3_GROUP_INFO(sb, group)->bb_state);
774 +ext3_is_group_locked(struct super_block *sb, int group)
776 + return bit_spin_is_locked(EXT3_GROUP_INFO_LOCKED_BIT,
777 + &EXT3_GROUP_INFO(sb, group)->bb_state);
780 +unsigned long ext3_grp_offs_to_block(struct super_block *sb,
781 + struct ext3_free_extent *fex)
783 + unsigned long block;
785 + block = (unsigned long) fex->fe_group * EXT3_BLOCKS_PER_GROUP(sb)
787 + + le32_to_cpu(EXT3_SB(sb)->s_es->s_first_data_block);
791 +#if BITS_PER_LONG == 64
792 +#define mb_correct_addr_and_bit(bit,addr) \
794 + bit += ((unsigned long) addr & 7UL) << 3; \
795 + addr = (void *) ((unsigned long) addr & ~7UL); \
797 +#elif BITS_PER_LONG == 32
798 +#define mb_correct_addr_and_bit(bit,addr) \
800 + bit += ((unsigned long) addr & 3UL) << 3; \
801 + addr = (void *) ((unsigned long) addr & ~3UL); \
804 +#error "how many bits you are?!"
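What mb_correct_addr_and_bit() does, restated as a standalone sketch of the 64-bit variant: stray low bytes of the address are folded into the bit index so the ext2_*_bit() helpers always see a long-aligned base address:

#include <stdio.h>
#include <stdint.h>

/* userspace re-statement of the 64-bit mb_correct_addr_and_bit() */
static void correct_addr_and_bit(int *bit, void **addr)
{
	*bit += ((uintptr_t)*addr & 7UL) << 3;      /* 8 bits per stray byte */
	*addr = (void *)((uintptr_t)*addr & ~7UL);  /* round base down to a long */
}

int main(void)
{
	unsigned long base[2] = { 0, 0 };        /* 8-byte aligned backing store */
	void *addr = (unsigned char *)base + 3;  /* deliberately misaligned by 3 */
	int bit = 5;

	correct_addr_and_bit(&bit, &addr);
	/* on a 64-bit host: bit becomes 3*8 + 5 = 29, addr drops back to 'base' */
	printf("bit=%d aligned=%d\n", bit, addr == (void *)base);
	return 0;
}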
807 +static inline int mb_test_bit(int bit, void *addr)
809 + mb_correct_addr_and_bit(bit,addr);
810 + return ext2_test_bit(bit, addr);
813 +static inline void mb_set_bit(int bit, void *addr)
815 + mb_correct_addr_and_bit(bit,addr);
816 + ext2_set_bit(bit, addr);
819 +static inline void mb_set_bit_atomic(spinlock_t *lock, int bit, void *addr)
821 + mb_correct_addr_and_bit(bit,addr);
822 + ext2_set_bit_atomic(lock, bit, addr);
825 +static inline void mb_clear_bit(int bit, void *addr)
827 + mb_correct_addr_and_bit(bit,addr);
828 + ext2_clear_bit(bit, addr);
831 +static inline void mb_clear_bit_atomic(spinlock_t *lock, int bit, void *addr)
833 + mb_correct_addr_and_bit(bit,addr);
834 + ext2_clear_bit_atomic(lock, bit, addr);
837 +static inline int mb_find_next_zero_bit(void *addr, int max, int start)
840 +#if BITS_PER_LONG == 64
841 + fix = ((unsigned long) addr & 7UL) << 3;
842 + addr = (void *) ((unsigned long) addr & ~7UL);
843 +#elif BITS_PER_LONG == 32
844 + fix = ((unsigned long) addr & 3UL) << 3;
845 + addr = (void *) ((unsigned long) addr & ~3UL);
847 +#error "how many bits you are?!"
851 + return ext2_find_next_zero_bit(addr, max, start) - fix;
854 +static inline int mb_find_next_bit(void *addr, int max, int start)
857 +#if BITS_PER_LONG == 64
858 + fix = ((unsigned long) addr & 7UL) << 3;
859 + addr = (void *) ((unsigned long) addr & ~7UL);
860 +#elif BITS_PER_LONG == 32
861 + fix = ((unsigned long) addr & 3UL) << 3;
862 + addr = (void *) ((unsigned long) addr & ~3UL);
864 +#error "how many bits you are?!"
871 + return find_next_bit(addr, max, start) - fix;
875 +static inline void *mb_find_buddy(struct ext3_buddy *e3b, int order, int *max)
879 + BUG_ON(EXT3_MB_BITMAP(e3b) == EXT3_MB_BUDDY(e3b));
880 + BUG_ON(max == NULL);
882 + if (order > e3b->bd_blkbits + 1) {
887 + /* at order 0 we see each particular block */
888 + *max = 1 << (e3b->bd_blkbits + 3);
890 + return EXT3_MB_BITMAP(e3b);
892 + bb = EXT3_MB_BUDDY(e3b) + EXT3_SB(e3b->bd_sb)->s_mb_offsets[order];
893 + *max = EXT3_SB(e3b->bd_sb)->s_mb_maxs[order];
899 +void mb_free_blocks_double(struct inode *inode, struct ext3_buddy *e3b,
900 + int first, int count)
903 + struct super_block *sb = e3b->bd_sb;
905 + if (unlikely(e3b->bd_info->bb_bitmap == NULL))
907 + BUG_ON(!ext3_is_group_locked(sb, e3b->bd_group));
908 + for (i = 0; i < count; i++) {
909 + if (!mb_test_bit(first + i, e3b->bd_info->bb_bitmap)) {
910 + unsigned long blocknr;
911 + blocknr = e3b->bd_group * EXT3_BLOCKS_PER_GROUP(sb);
912 + blocknr += first + i;
914 + le32_to_cpu(EXT3_SB(sb)->s_es->s_first_data_block);
916 + ext3_error(sb, __FUNCTION__, "double-free of inode"
917 + " %lu's block %lu(bit %u in group %u)\n",
918 + inode ? inode->i_ino : 0, blocknr,
919 + first + i, e3b->bd_group);
921 + mb_clear_bit(first + i, e3b->bd_info->bb_bitmap);
925 +void mb_mark_used_double(struct ext3_buddy *e3b, int first, int count)
928 + if (unlikely(e3b->bd_info->bb_bitmap == NULL))
930 + BUG_ON(!ext3_is_group_locked(e3b->bd_sb, e3b->bd_group));
931 + for (i = 0; i < count; i++) {
932 + BUG_ON(mb_test_bit(first + i, e3b->bd_info->bb_bitmap));
933 + mb_set_bit(first + i, e3b->bd_info->bb_bitmap);
937 +void mb_cmp_bitmaps(struct ext3_buddy *e3b, void *bitmap)
939 + if (memcmp(e3b->bd_info->bb_bitmap, bitmap, e3b->bd_sb->s_blocksize)) {
940 + unsigned char *b1, *b2;
942 + b1 = (unsigned char *) e3b->bd_info->bb_bitmap;
943 + b2 = (unsigned char *) bitmap;
944 + for (i = 0; i < e3b->bd_sb->s_blocksize; i++) {
945 + if (b1[i] != b2[i]) {
946 + printk("corruption in group %u at byte %u(%u): "
947 + "%x in copy != %x on disk/prealloc\n",
948 + e3b->bd_group, i, i * 8, b1[i], b2[i]);
956 +#define mb_free_blocks_double(a,b,c,d)
957 +#define mb_mark_used_double(a,b,c)
958 +#define mb_cmp_bitmaps(a,b)
961 +#ifdef AGGRESSIVE_CHECK
963 +#define MB_CHECK_ASSERT(assert) \
966 + printk (KERN_EMERG \
967 + "Assertion failure in %s() at %s:%d: \"%s\"\n", \
968 + function, file, line, # assert); \
973 +static int __mb_check_buddy(struct ext3_buddy *e3b, char *file,
974 + const char *function, int line)
976 + struct super_block *sb = e3b->bd_sb;
977 + int order = e3b->bd_blkbits + 1;
978 + int max, max2, i, j, k, count;
979 + struct ext3_group_info *grp;
980 + int fragments = 0, fstart;
981 + struct list_head *cur;
982 + void *buddy, *buddy2;
984 + if (!test_opt(sb, MBALLOC))
988 + static int mb_check_counter = 0;
989 + if (mb_check_counter++ % 100 != 0)
993 + while (order > 1) {
994 + buddy = mb_find_buddy(e3b, order, &max);
995 + MB_CHECK_ASSERT(buddy);
996 + buddy2 = mb_find_buddy(e3b, order - 1, &max2);
997 + MB_CHECK_ASSERT(buddy2);
998 + MB_CHECK_ASSERT(buddy != buddy2);
999 + MB_CHECK_ASSERT(max * 2 == max2);
1002 + for (i = 0; i < max; i++) {
1004 + if (mb_test_bit(i, buddy)) {
1005 + /* only single bit in buddy2 may be 1 */
1006 + if (!mb_test_bit(i << 1, buddy2))
1007 + MB_CHECK_ASSERT(mb_test_bit((i<<1)+1, buddy2));
1008 + else if (!mb_test_bit((i << 1) + 1, buddy2))
1009 + MB_CHECK_ASSERT(mb_test_bit(i << 1, buddy2));
1013 + /* both bits in buddy2 must be 1 */
1014 + MB_CHECK_ASSERT(mb_test_bit(i << 1, buddy2));
1015 + MB_CHECK_ASSERT(mb_test_bit((i << 1) + 1, buddy2));
1017 + for (j = 0; j < (1 << order); j++) {
1018 + k = (i * (1 << order)) + j;
1019 + MB_CHECK_ASSERT(!mb_test_bit(k, EXT3_MB_BITMAP(e3b)));
1023 + MB_CHECK_ASSERT(e3b->bd_info->bb_counters[order] == count);
1028 + buddy = mb_find_buddy(e3b, 0, &max);
1029 + for (i = 0; i < max; i++) {
1030 + if (!mb_test_bit(i, buddy)) {
1031 + MB_CHECK_ASSERT(i >= e3b->bd_info->bb_first_free);
1032 + if (fstart == -1) {
1039 + /* check used bits only */
1040 + for (j = 0; j < e3b->bd_blkbits + 1; j++) {
1041 + buddy2 = mb_find_buddy(e3b, j, &max2);
1043 + MB_CHECK_ASSERT(k < max2);
1044 + MB_CHECK_ASSERT(mb_test_bit(k, buddy2));
1047 + MB_CHECK_ASSERT(!EXT3_MB_GRP_NEED_INIT(e3b->bd_info));
1048 + MB_CHECK_ASSERT(e3b->bd_info->bb_fragments == fragments);
1050 + grp = EXT3_GROUP_INFO(sb, e3b->bd_group);
1051 + buddy = mb_find_buddy(e3b, 0, &max);
1052 + list_for_each(cur, &grp->bb_prealloc_list) {
1053 + unsigned long groupnr;
1054 + struct ext3_prealloc_space *pa;
1055 + pa = list_entry(cur, struct ext3_prealloc_space, group_list);
1056 + ext3_get_group_no_and_offset(sb, pa->pstart, &groupnr, &k);
1057 + MB_CHECK_ASSERT(groupnr == e3b->bd_group);
1058 + for (i = 0; i < pa->len; i++)
1059 + MB_CHECK_ASSERT(mb_test_bit(k + i, buddy));
1063 +#undef MB_CHECK_ASSERT
1064 +#define mb_check_buddy(e3b) __mb_check_buddy(e3b,__FILE__,__FUNCTION__,__LINE__)
1066 +#define mb_check_buddy(e3b)
1069 +/* find most significant bit */
1070 +static int inline fmsb(unsigned short word)
1084 + } while (word != 0);
1090 +ext3_mb_mark_free_simple(struct super_block *sb, void *buddy, unsigned first,
1091 + int len, struct ext3_group_info *grp)
1093 + struct ext3_sb_info *sbi = EXT3_SB(sb);
1094 + unsigned short min, max, chunk, border;
1096 + BUG_ON(len >= EXT3_BLOCKS_PER_GROUP(sb));
1098 + border = 2 << sb->s_blocksize_bits;
1101 + /* find how many blocks can be covered since this position */
1102 + max = ffs(first | border) - 1;
1104 + /* find how many blocks of power 2 we need to mark */
1111 + /* mark multiblock chunks only */
1112 + grp->bb_counters[min]++;
1114 + mb_clear_bit(first >> min, buddy + sbi->s_mb_offsets[min]);
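The loop in ext3_mb_mark_free_simple() splits a free range into the largest power-of-two chunks that keep 'first' aligned, then accounts each chunk at its buddy order. A standalone sketch of just that decomposition (the kernel version also caps the chunk via the 'border' limit):

#include <stdio.h>

/* print the power-of-two decomposition ext3_mb_mark_free_simple() walks:
 * each chunk is the largest 2^k such that 'first' is a multiple of 2^k
 * and 2^k does not exceed the remaining length (a sketch of the idea) */
static void decompose(unsigned first, unsigned len)
{
	while (len > 0) {
		unsigned max = first ? (first & -first) : ~0u;  /* alignment limit */
		unsigned chunk = 1;

		while ((chunk << 1) <= len && (chunk << 1) <= max)
			chunk <<= 1;

		printf("  chunk at %u, order %u (%u blocks)\n",
		       first, __builtin_ctz(chunk), chunk);
		first += chunk;
		len -= chunk;
	}
}

int main(void)
{
	decompose(5, 13);	/* e.g. blocks 5..17 free in a group */
	return 0;
}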
1122 +ext3_mb_generate_buddy(struct super_block *sb, void *buddy, void *bitmap,
1125 + struct ext3_group_info *grp = EXT3_GROUP_INFO(sb, group);
1126 + unsigned short max = EXT3_BLOCKS_PER_GROUP(sb);
1127 + unsigned short i = 0, first, len;
1128 + unsigned free = 0, fragments = 0;
1129 + unsigned long long period = get_cycles();
1131 + /* initialize buddy from bitmap which is aggregation
1132 + * of on-disk bitmap and preallocations */
1133 + i = mb_find_next_zero_bit(bitmap, max, 0);
1134 + grp->bb_first_free = i;
1138 + i = ext2_find_next_le_bit(bitmap, max, i);
1144 + ext3_mb_mark_free_simple(sb, buddy, first, len, grp);
1146 + grp->bb_counters[0]++;
1148 + i = mb_find_next_zero_bit(bitmap, max, i);
1150 + grp->bb_fragments = fragments;
1152 + if (free != grp->bb_free) {
1153 + printk("EXT3-fs: group %u: %u blocks in bitmap, %u in gd\n",
1154 + group, free, grp->bb_free);
1155 + grp->bb_free = free;
1158 + clear_bit(EXT3_GROUP_INFO_NEED_INIT_BIT, &grp->bb_state);
1160 + period = get_cycles() - period;
1161 + spin_lock(&EXT3_SB(sb)->s_bal_lock);
1162 + EXT3_SB(sb)->s_mb_buddies_generated++;
1163 + EXT3_SB(sb)->s_mb_generation_time += period;
1164 + spin_unlock(&EXT3_SB(sb)->s_bal_lock);
1167 +static int ext3_mb_init_cache(struct page *page, char *incore)
1169 + int blocksize, blocks_per_page, groups_per_page;
1170 + int err = 0, i, first_group, first_block;
1171 + struct super_block *sb;
1172 + struct buffer_head *bhs;
1173 + struct buffer_head **bh;
1174 + struct inode *inode;
1175 + char *data, *bitmap;
1177 + mb_debug("init page %lu\n", page->index);
1179 + inode = page->mapping->host;
1181 + blocksize = 1 << inode->i_blkbits;
1182 + blocks_per_page = PAGE_CACHE_SIZE / blocksize;
1184 + groups_per_page = blocks_per_page >> 1;
1185 + if (groups_per_page == 0)
1186 + groups_per_page = 1;
1188 + /* allocate buffer_heads to read bitmaps */
1189 + if (groups_per_page > 1) {
1191 + i = sizeof(struct buffer_head *) * groups_per_page;
1192 + bh = kmalloc(i, GFP_NOFS);
1199 + first_group = page->index * blocks_per_page / 2;
1201 + /* read all groups the page covers into the cache */
1202 + for (i = 0; i < groups_per_page; i++) {
1203 + struct ext3_group_desc * desc;
1205 + if (first_group + i >= EXT3_SB(sb)->s_groups_count)
1209 + desc = ext3_get_group_desc(sb, first_group + i, NULL);
1214 + bh[i] = sb_getblk(sb, le32_to_cpu(desc->bg_block_bitmap));
1215 + if (bh[i] == NULL)
1218 + if (buffer_uptodate(bh[i]))
1221 + lock_buffer(bh[i]);
1222 + if (buffer_uptodate(bh[i])) {
1223 + unlock_buffer(bh[i]);
1228 + bh[i]->b_end_io = end_buffer_read_sync;
1229 + submit_bh(READ, bh[i]);
1230 + mb_debug("read bitmap for group %u\n", first_group + i);
1233 + /* wait for I/O completion */
1234 + for (i = 0; i < groups_per_page && bh[i]; i++)
1235 + wait_on_buffer(bh[i]);
1238 + for (i = 0; i < groups_per_page && bh[i]; i++)
1239 + if (!buffer_uptodate(bh[i]))
1242 + first_block = page->index * blocks_per_page;
1243 + for (i = 0; i < blocks_per_page; i++) {
1246 + group = (first_block + i) >> 1;
1247 + if (group >= EXT3_SB(sb)->s_groups_count)
1250 + data = page_address(page) + (i * blocksize);
1251 + bitmap = bh[group - first_group]->b_data;
1253 + if ((first_block + i) & 1) {
1254 + /* this is block of buddy */
1255 + BUG_ON(incore == NULL);
1256 + mb_debug("put buddy for group %u in page %lu/%x\n",
1257 + group, page->index, i * blocksize);
1258 + memset(data, 0xff, blocksize);
1259 + EXT3_GROUP_INFO(sb, group)->bb_fragments = 0;
1260 + memset(EXT3_GROUP_INFO(sb, group)->bb_counters, 0,
1261 + sizeof(unsigned short)*(sb->s_blocksize_bits+2));
1262 + ext3_mb_generate_buddy(sb, data, incore, group);
1265 + /* this is block of bitmap */
1266 + BUG_ON(incore != NULL);
1267 + mb_debug("put bitmap for group %u in page %lu/%x\n",
1268 + group, page->index, i * blocksize);
1270 + /* see comments in ext3_mb_put_pa() */
1271 + ext3_lock_group(sb, group);
1272 + memcpy(data, bitmap, blocksize);
1274 + /* mark all preallocated blocks used in in-core bitmap */
1275 + ext3_mb_generate_from_pa(sb, data, group);
1276 + ext3_unlock_group(sb, group);
1281 + SetPageUptodate(page);
1285 + for (i = 0; i < groups_per_page && bh[i]; i++)
1293 +static int ext3_mb_load_buddy(struct super_block *sb, int group,
1294 + struct ext3_buddy *e3b)
1296 + struct ext3_sb_info *sbi = EXT3_SB(sb);
1297 + struct inode *inode = sbi->s_buddy_cache;
1298 + int blocks_per_page, block, pnum, poff;
1299 + struct page *page;
1301 + mb_debug("load group %u\n", group);
1303 + blocks_per_page = PAGE_CACHE_SIZE / sb->s_blocksize;
1305 + e3b->bd_blkbits = sb->s_blocksize_bits;
1306 + e3b->bd_info = EXT3_GROUP_INFO(sb, group);
1308 + e3b->bd_group = group;
1309 + e3b->bd_buddy_page = NULL;
1310 + e3b->bd_bitmap_page = NULL;
1312 + block = group * 2;
1313 + pnum = block / blocks_per_page;
1314 + poff = block % blocks_per_page;
1316 + /* we could use find_or_create_page(), but it locks the page,
1317 + * which we'd like to avoid in the fast path ... */
1318 + page = find_get_page(inode->i_mapping, pnum);
1319 + if (page == NULL || !PageUptodate(page)) {
1321 + page_cache_release(page);
1322 + page = find_or_create_page(inode->i_mapping, pnum, GFP_NOFS);
1324 + BUG_ON(page->mapping != inode->i_mapping);
1325 + if (!PageUptodate(page)) {
1326 + ext3_mb_init_cache(page, NULL);
1327 + mb_cmp_bitmaps(e3b, page_address(page) +
1328 + (poff * sb->s_blocksize));
1330 + unlock_page(page);
1333 + if (page == NULL || !PageUptodate(page))
1335 + e3b->bd_bitmap_page = page;
1336 + e3b->bd_bitmap = page_address(page) + (poff * sb->s_blocksize);
1337 + mark_page_accessed(page);
1340 + pnum = block / blocks_per_page;
1341 + poff = block % blocks_per_page;
1343 + page = find_get_page(inode->i_mapping, pnum);
1344 + if (page == NULL || !PageUptodate(page)) {
1346 + page_cache_release(page);
1347 + page = find_or_create_page(inode->i_mapping, pnum, GFP_NOFS);
1349 + BUG_ON(page->mapping != inode->i_mapping);
1350 + if (!PageUptodate(page))
1351 + ext3_mb_init_cache(page, e3b->bd_bitmap);
1353 + unlock_page(page);
1356 + if (page == NULL || !PageUptodate(page))
1358 + e3b->bd_buddy_page = page;
1359 + e3b->bd_buddy = page_address(page) + (poff * sb->s_blocksize);
1360 + mark_page_accessed(page);
1362 + BUG_ON(e3b->bd_bitmap_page == NULL);
1363 + BUG_ON(e3b->bd_buddy_page == NULL);
1368 + if (e3b->bd_bitmap_page)
1369 + page_cache_release(e3b->bd_bitmap_page);
1370 + if (e3b->bd_buddy_page)
1371 + page_cache_release(e3b->bd_buddy_page);
1372 + e3b->bd_buddy = NULL;
1373 + e3b->bd_bitmap = NULL;
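How a group maps into the s_buddy_cache page cache: each group owns two consecutive blocks, the bitmap at block group*2 and the buddy at group*2 + 1, which ext3_mb_load_buddy() converts to a page number and in-page offset. A standalone sketch, assuming 1 KB filesystem blocks and 4 KB pages:

#include <stdio.h>

int main(void)
{
	unsigned blocksize = 1024;                     /* assumed block size */
	unsigned blocks_per_page = 4096 / blocksize;   /* PAGE_CACHE_SIZE / blocksize */
	unsigned group = 37;

	unsigned bitmap_block = group * 2;
	unsigned buddy_block  = group * 2 + 1;

	printf("bitmap: page %u offset %u\n",
	       bitmap_block / blocks_per_page,
	       (bitmap_block % blocks_per_page) * blocksize);
	printf("buddy : page %u offset %u\n",
	       buddy_block / blocks_per_page,
	       (buddy_block % blocks_per_page) * blocksize);
	return 0;
}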
1377 +static void ext3_mb_release_desc(struct ext3_buddy *e3b)
1379 + if (e3b->bd_bitmap_page)
1380 + page_cache_release(e3b->bd_bitmap_page);
1381 + if (e3b->bd_buddy_page)
1382 + page_cache_release(e3b->bd_buddy_page);
1386 +static int mb_find_order_for_block(struct ext3_buddy *e3b, int block)
1391 + BUG_ON(EXT3_MB_BITMAP(e3b) == EXT3_MB_BUDDY(e3b));
1392 + BUG_ON(block >= (1 << (e3b->bd_blkbits + 3)));
1394 + bb = EXT3_MB_BUDDY(e3b);
1395 + while (order <= e3b->bd_blkbits + 1) {
1396 + block = block >> 1;
1397 + if (!mb_test_bit(block, bb)) {
1398 + /* this block is part of buddy of order 'order' */
1401 + bb += 1 << (e3b->bd_blkbits - order);
1407 +static inline void mb_clear_bits(spinlock_t *lock, void *bm, int cur, int len)
1412 + while (cur < len) {
1413 + if ((cur & 31) == 0 && (len - cur) >= 32) {
1414 + /* fast path: clear whole word at once */
1415 + addr = bm + (cur >> 3);
1420 + mb_clear_bit_atomic(lock, cur, bm);
1425 +static inline void mb_set_bits(spinlock_t *lock, void *bm, int cur, int len)
1430 + while (cur < len) {
1431 + if ((cur & 31) == 0 && (len - cur) >= 32) {
1432 + /* fast path: set whole word at once */
1433 + addr = bm + (cur >> 3);
1434 + *addr = 0xffffffff;
1438 + mb_set_bit_atomic(lock, cur, bm);
1443 +static int mb_free_blocks(struct inode *inode, struct ext3_buddy *e3b,
1444 + int first, int count)
1446 + int block = 0, max = 0, order;
1447 + void *buddy, *buddy2;
1448 + struct super_block *sb = e3b->bd_sb;
1450 + BUG_ON(first + count > (sb->s_blocksize << 3));
1451 + BUG_ON(!ext3_is_group_locked(sb, e3b->bd_group));
1452 + mb_check_buddy(e3b);
1453 + mb_free_blocks_double(inode, e3b, first, count);
1455 + e3b->bd_info->bb_free += count;
1456 + if (first < e3b->bd_info->bb_first_free)
1457 + e3b->bd_info->bb_first_free = first;
1459 + /* let's maintain fragments counter */
1461 + block = !mb_test_bit(first - 1, EXT3_MB_BITMAP(e3b));
1462 + if (first + count < EXT3_SB(sb)->s_mb_maxs[0])
1463 + max = !mb_test_bit(first + count, EXT3_MB_BITMAP(e3b));
1465 + e3b->bd_info->bb_fragments--;
1466 + else if (!block && !max)
1467 + e3b->bd_info->bb_fragments++;
1469 + /* let's maintain buddy itself */
1470 + while (count-- > 0) {
1474 + if (!mb_test_bit(block, EXT3_MB_BITMAP(e3b))) {
1475 + unsigned long blocknr;
1476 + blocknr = e3b->bd_group * EXT3_BLOCKS_PER_GROUP(sb);
1479 + le32_to_cpu(EXT3_SB(sb)->s_es->s_first_data_block);
1481 + ext3_error(sb, __FUNCTION__, "double-free of inode"
1482 + " %lu's block %lu(bit %u in group %u)\n",
1483 + inode ? inode->i_ino : 0, blocknr, block,
1486 + mb_clear_bit(block, EXT3_MB_BITMAP(e3b));
1487 + e3b->bd_info->bb_counters[order]++;
1489 + /* start of the buddy */
1490 + buddy = mb_find_buddy(e3b, order, &max);
1494 + if (mb_test_bit(block, buddy) ||
1495 + mb_test_bit(block + 1, buddy))
1498 + /* both the buddies are free, try to coalesce them */
1499 + buddy2 = mb_find_buddy(e3b, order + 1, &max);
1505 + /* for special purposes, we don't set
1506 + * free bits in bitmap */
1507 + mb_set_bit(block, buddy);
1508 + mb_set_bit(block + 1, buddy);
1510 + e3b->bd_info->bb_counters[order]--;
1511 + e3b->bd_info->bb_counters[order]--;
1513 + block = block >> 1;
1515 + e3b->bd_info->bb_counters[order]++;
1517 + mb_clear_bit(block, buddy2);
1521 + mb_check_buddy(e3b);
1526 +static int mb_find_extent(struct ext3_buddy *e3b, int order, int block,
1527 + int needed, struct ext3_free_extent *ex)
1529 + int next = block, max, ord;
1532 + BUG_ON(!ext3_is_group_locked(e3b->bd_sb, e3b->bd_group));
1533 + BUG_ON(ex == NULL);
1535 + buddy = mb_find_buddy(e3b, order, &max);
1536 + BUG_ON(buddy == NULL);
1537 + BUG_ON(block >= max);
1538 + if (mb_test_bit(block, buddy)) {
1545 + if (likely(order == 0)) {
1546 + /* find actual order */
1547 + order = mb_find_order_for_block(e3b, block);
1548 + block = block >> order;
1551 + ex->fe_len = 1 << order;
1552 + ex->fe_start = block << order;
1553 + ex->fe_group = e3b->bd_group;
1555 + /* calc difference from given start */
1556 + next = next - ex->fe_start;
1557 + ex->fe_len -= next;
1558 + ex->fe_start += next;
1560 + while (needed > ex->fe_len && (buddy = mb_find_buddy(e3b, order, &max))) {
1562 + if (block + 1 >= max)
1565 + next = (block + 1) * (1 << order);
1566 + if (mb_test_bit(next, EXT3_MB_BITMAP(e3b)))
1569 + ord = mb_find_order_for_block(e3b, next);
1572 + block = next >> order;
1573 + ex->fe_len += 1 << order;
1576 + BUG_ON(ex->fe_start + ex->fe_len > (1 << (e3b->bd_blkbits + 3)));
1577 + return ex->fe_len;
1580 +static int mb_mark_used(struct ext3_buddy *e3b, struct ext3_free_extent *ex)
1582 + int ord, mlen = 0, max = 0, cur;
1583 + int start = ex->fe_start;
1584 + int len = ex->fe_len;
1589 + BUG_ON(start + len > (e3b->bd_sb->s_blocksize << 3));
1590 + BUG_ON(e3b->bd_group != ex->fe_group);
1591 + BUG_ON(!ext3_is_group_locked(e3b->bd_sb, e3b->bd_group));
1592 + mb_check_buddy(e3b);
1593 + mb_mark_used_double(e3b, start, len);
1595 + e3b->bd_info->bb_free -= len;
1596 + if (e3b->bd_info->bb_first_free == start)
1597 + e3b->bd_info->bb_first_free += len;
1599 + /* let's maintain fragments counter */
1601 + mlen = !mb_test_bit(start - 1, EXT3_MB_BITMAP(e3b));
1602 + if (start + len < EXT3_SB(e3b->bd_sb)->s_mb_maxs[0])
1603 + max = !mb_test_bit(start + len, EXT3_MB_BITMAP(e3b));
1605 + e3b->bd_info->bb_fragments++;
1606 + else if (!mlen && !max)
1607 + e3b->bd_info->bb_fragments--;
1609 + /* let's maintain buddy itself */
1611 + ord = mb_find_order_for_block(e3b, start);
1613 + if (((start >> ord) << ord) == start && len >= (1 << ord)) {
1614 + /* the whole chunk may be allocated at once! */
1616 + buddy = mb_find_buddy(e3b, ord, &max);
1617 + BUG_ON((start >> ord) >= max);
1618 + mb_set_bit(start >> ord, buddy);
1619 + e3b->bd_info->bb_counters[ord]--;
1626 + /* store for history */
1628 + ret = len | (ord << 16);
1630 + /* we have to split large buddy */
1632 + buddy = mb_find_buddy(e3b, ord, &max);
1633 + mb_set_bit(start >> ord, buddy);
1634 + e3b->bd_info->bb_counters[ord]--;
1637 + cur = (start >> ord) & ~1U;
1638 + buddy = mb_find_buddy(e3b, ord, &max);
1639 + mb_clear_bit(cur, buddy);
1640 + mb_clear_bit(cur + 1, buddy);
1641 + e3b->bd_info->bb_counters[ord]++;
1642 + e3b->bd_info->bb_counters[ord]++;
1645 + mb_set_bits(sb_bgl_lock(EXT3_SB(e3b->bd_sb), ex->fe_group),
1646 + EXT3_MB_BITMAP(e3b), ex->fe_start, len0);
1647 + mb_check_buddy(e3b);
1653 + * Must be called under group lock!
1655 +static void ext3_mb_use_best_found(struct ext3_allocation_context *ac,
1656 + struct ext3_buddy *e3b)
1658 + unsigned long ret;
1660 + BUG_ON(ac->ac_b_ex.fe_group != e3b->bd_group);
1661 + BUG_ON(ac->ac_status == AC_STATUS_FOUND);
1663 + ac->ac_b_ex.fe_len = min(ac->ac_b_ex.fe_len, ac->ac_g_ex.fe_len);
1664 + ac->ac_b_ex.fe_logical = ac->ac_g_ex.fe_logical;
1665 + ret = mb_mark_used(e3b, &ac->ac_b_ex);
1667 + /* preallocation can change ac_b_ex, thus we store actually
1668 + * allocated blocks for history */
1669 + ac->ac_f_ex = ac->ac_b_ex;
1671 + ac->ac_status = AC_STATUS_FOUND;
1672 + ac->ac_tail = ret & 0xffff;
1673 + ac->ac_buddy = ret >> 16;
1675 + /* XXXXXXX: SUCH A HORRIBLE **CK */
1676 + ac->ac_bitmap_page = e3b->bd_bitmap_page;
1677 + get_page(ac->ac_bitmap_page);
1678 + ac->ac_buddy_page = e3b->bd_buddy_page;
1679 + get_page(ac->ac_buddy_page);
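The value mb_mark_used() returns is a small packed record for the mb_history log: the low 16 bits hold the tail length that forced a buddy split and the upper bits hold the order of the buddy that was split, which ext3_mb_use_best_found() unpacks into ac_tail and ac_buddy. A standalone illustration:

#include <stdio.h>

int main(void)
{
	/* example: a 3-block tail broke an order-5 buddy */
	unsigned long len = 3, ord = 5;
	unsigned long ret = len | (ord << 16);

	printf("packed=0x%lx tail=%lu buddy_order=%lu\n",
	       ret, ret & 0xffff, ret >> 16);
	return 0;
}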
1683 + * regular allocator, for general purposes allocation
1686 +void ext3_mb_check_limits(struct ext3_allocation_context *ac,
1687 + struct ext3_buddy *e3b,
1690 + struct ext3_sb_info *sbi = EXT3_SB(ac->ac_sb);
1691 + struct ext3_free_extent *bex = &ac->ac_b_ex;
1692 + struct ext3_free_extent *gex = &ac->ac_g_ex;
1693 + struct ext3_free_extent ex;
1697 + * We don't want to scan for a whole year
1699 + if (ac->ac_found > sbi->s_mb_max_to_scan &&
1700 + !(ac->ac_flags & EXT3_MB_HINT_FIRST)) {
1701 + ac->ac_status = AC_STATUS_BREAK;
1706 + * Haven't found good chunk so far, let's continue
1708 + if (bex->fe_len < gex->fe_len)
1711 + if ((finish_group || ac->ac_found > sbi->s_mb_min_to_scan)
1712 + && bex->fe_group == e3b->bd_group) {
1713 + /* recheck chunk's availability - we don't know
1714 + * when it was found (within this lock-unlock
1715 + * period or not) */
1716 + max = mb_find_extent(e3b, 0, bex->fe_start, gex->fe_len, &ex);
1717 + if (max >= gex->fe_len) {
1718 + ext3_mb_use_best_found(ac, e3b);
1725 + * The routine checks whether the found extent is good enough. If it is,
1726 + * then the extent gets marked used and a flag is set in the context
1727 + * to stop scanning. Otherwise, the extent is compared with the
1728 + * previously found extent and, if the new one is better, it's stored
1729 + * in the context. Later, the best found extent will be used if
1730 + * mballoc can't find a good enough extent.
1732 + * FIXME: real allocation policy is to be designed yet!
1734 +static void ext3_mb_measure_extent(struct ext3_allocation_context *ac,
1735 + struct ext3_free_extent *ex,
1736 + struct ext3_buddy *e3b)
1738 + struct ext3_free_extent *bex = &ac->ac_b_ex;
1739 + struct ext3_free_extent *gex = &ac->ac_g_ex;
1741 + BUG_ON(ex->fe_len <= 0);
1742 + BUG_ON(ex->fe_len >= EXT3_BLOCKS_PER_GROUP(ac->ac_sb));
1743 + BUG_ON(ex->fe_start >= EXT3_BLOCKS_PER_GROUP(ac->ac_sb));
1744 + BUG_ON(ac->ac_status != AC_STATUS_CONTINUE);
1749 + * The special case - take what you catch first
1751 + if (unlikely(ac->ac_flags & EXT3_MB_HINT_FIRST)) {
1753 + ext3_mb_use_best_found(ac, e3b);
1758 + * Let's check whether the chunk is good enough
1760 + if (ex->fe_len == gex->fe_len) {
1762 + ext3_mb_use_best_found(ac, e3b);
1767 + * If this is first found extent, just store it in the context
1769 + if (bex->fe_len == 0) {
1775 + * If new found extent is better, store it in the context
1777 + if (bex->fe_len < gex->fe_len) {
1778 + /* if the request isn't satisfied, any found extent
1779 + * larger than previous best one is better */
1780 + if (ex->fe_len > bex->fe_len)
1782 + } else if (ex->fe_len > gex->fe_len) {
1783 + /* if the request is satisfied, then we try to find
1784 + * an extent that still satisfy the request, but is
1785 + * smaller than previous one */
1789 + ext3_mb_check_limits(ac, e3b, 0);
1792 +static int ext3_mb_try_best_found(struct ext3_allocation_context *ac,
1793 + struct ext3_buddy *e3b)
1795 + struct ext3_free_extent ex = ac->ac_b_ex;
1796 + int group = ex.fe_group, max, err;
1798 + BUG_ON(ex.fe_len <= 0);
1799 + err = ext3_mb_load_buddy(ac->ac_sb, group, e3b);
1803 + ext3_lock_group(ac->ac_sb, group);
1804 + max = mb_find_extent(e3b, 0, ex.fe_start, ex.fe_len, &ex);
1808 + ext3_mb_use_best_found(ac, e3b);
1811 + ext3_unlock_group(ac->ac_sb, group);
1812 + ext3_mb_release_desc(e3b);
1817 +static int ext3_mb_find_by_goal(struct ext3_allocation_context *ac,
1818 + struct ext3_buddy *e3b)
1820 + int group = ac->ac_g_ex.fe_group, max, err;
1821 + struct ext3_sb_info *sbi = EXT3_SB(ac->ac_sb);
1822 + struct ext3_super_block *es = sbi->s_es;
1823 + struct ext3_free_extent ex;
1825 + err = ext3_mb_load_buddy(ac->ac_sb, group, e3b);
1829 + ext3_lock_group(ac->ac_sb, group);
1830 + max = mb_find_extent(e3b, 0, ac->ac_g_ex.fe_start,
1831 + ac->ac_g_ex.fe_len, &ex);
1833 + if (max >= ac->ac_g_ex.fe_len && ac->ac_g_ex.fe_len == sbi->s_stripe) {
1834 + unsigned long start;
1835 + start = (e3b->bd_group * EXT3_BLOCKS_PER_GROUP(ac->ac_sb) +
1836 + ex.fe_start + le32_to_cpu(es->s_first_data_block));
1837 + if (start % sbi->s_stripe == 0) {
1840 + ext3_mb_use_best_found(ac, e3b);
1842 + } else if (max >= ac->ac_g_ex.fe_len) {
1843 + BUG_ON(ex.fe_len <= 0);
1844 + BUG_ON(ex.fe_group != ac->ac_g_ex.fe_group);
1845 + BUG_ON(ex.fe_start != ac->ac_g_ex.fe_start);
1848 + ext3_mb_use_best_found(ac, e3b);
1849 + } else if (max > 0 && (ac->ac_flags & EXT3_MB_HINT_MERGE)) {
1850 + /* Sometimes, the caller may want to merge even a small
1851 + * number of blocks into an existing extent */
1852 + BUG_ON(ex.fe_len <= 0);
1853 + BUG_ON(ex.fe_group != ac->ac_g_ex.fe_group);
1854 + BUG_ON(ex.fe_start != ac->ac_g_ex.fe_start);
1857 + ext3_mb_use_best_found(ac, e3b);
1859 + ext3_unlock_group(ac->ac_sb, group);
1860 + ext3_mb_release_desc(e3b);
1866 + * The routine scans buddy structures (not bitmap!) from given order
1867 + * to max order and tries to find big enough chunk to satisfy the req
1869 +static void ext3_mb_simple_scan_group(struct ext3_allocation_context *ac,
1870 + struct ext3_buddy *e3b)
1872 + struct super_block *sb = ac->ac_sb;
1873 + struct ext3_group_info *grp = e3b->bd_info;
1877 + BUG_ON(ac->ac_2order <= 0);
1878 + for (i = ac->ac_2order; i <= sb->s_blocksize_bits + 1; i++) {
1879 + if (grp->bb_counters[i] == 0)
1882 + buddy = mb_find_buddy(e3b, i, &max);
1883 + BUG_ON(buddy == NULL);
1885 + k = mb_find_next_zero_bit(buddy, max, 0);
1890 + ac->ac_b_ex.fe_len = 1 << i;
1891 + ac->ac_b_ex.fe_start = k << i;
1892 + ac->ac_b_ex.fe_group = e3b->bd_group;
1894 + ext3_mb_use_best_found(ac, e3b);
1896 + BUG_ON(ac->ac_b_ex.fe_len != ac->ac_g_ex.fe_len);
1898 + if (EXT3_SB(sb)->s_mb_stats)
1899 + atomic_inc(&EXT3_SB(sb)->s_bal_2orders);
1906 + * The routine scans the group and measures all found extents.
1907 + * In order to optimize scanning, caller must pass number of
1908 + * free blocks in the group, so the routine can know upper limit.
1910 +static void ext3_mb_complex_scan_group(struct ext3_allocation_context *ac,
1911 + struct ext3_buddy *e3b)
1913 + struct super_block *sb = ac->ac_sb;
1914 + void *bitmap = EXT3_MB_BITMAP(e3b);
1915 + struct ext3_free_extent ex;
1918 + free = e3b->bd_info->bb_free;
1919 + BUG_ON(free <= 0);
1921 + i = e3b->bd_info->bb_first_free;
1923 + while (free && ac->ac_status == AC_STATUS_CONTINUE) {
1924 + i = mb_find_next_zero_bit(bitmap, EXT3_BLOCKS_PER_GROUP(sb), i);
1925 + if (i >= EXT3_BLOCKS_PER_GROUP(sb)) {
1926 + BUG_ON(free != 0);
1930 + mb_find_extent(e3b, 0, i, ac->ac_g_ex.fe_len, &ex);
1931 + BUG_ON(ex.fe_len <= 0);
1932 + BUG_ON(free < ex.fe_len);
1934 + ext3_mb_measure_extent(ac, &ex, e3b);
1937 + free -= ex.fe_len;
1940 + ext3_mb_check_limits(ac, e3b, 1);
1944 + * This is a special case for storage like raid5
1945 + * we try to find stripe-aligned chunks for stripe-size requests
1947 +static void ext3_mb_scan_aligned(struct ext3_allocation_context *ac,
1948 + struct ext3_buddy *e3b)
1950 + struct super_block *sb = ac->ac_sb;
1951 + struct ext3_sb_info *sbi = EXT3_SB(sb);
1952 + void *bitmap = EXT3_MB_BITMAP(e3b);
1953 + struct ext3_free_extent ex;
1954 + unsigned long i, max;
1956 + BUG_ON(sbi->s_stripe == 0);
1958 + /* find first stripe-aligned block */
1959 + i = e3b->bd_group * EXT3_BLOCKS_PER_GROUP(sb)
1960 + + le32_to_cpu(sbi->s_es->s_first_data_block);
1961 + i = ((i + sbi->s_stripe - 1) / sbi->s_stripe) * sbi->s_stripe;
1962 + i = (i - le32_to_cpu(sbi->s_es->s_first_data_block))
1963 + % EXT3_BLOCKS_PER_GROUP(sb);
1965 + while (i < EXT3_BLOCKS_PER_GROUP(sb)) {
1966 + if (!mb_test_bit(i, bitmap)) {
1967 + max = mb_find_extent(e3b, 0, i, sbi->s_stripe, &ex);
1968 + if (max >= sbi->s_stripe) {
1971 + ext3_mb_use_best_found(ac, e3b);
1975 + i += sbi->s_stripe;
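The arithmetic at the top of ext3_mb_scan_aligned() converts the group start to an absolute block number, rounds up to the next stripe multiple, and converts back to a group-relative offset. A standalone sketch with example geometry (a 1 KB-block filesystem, so s_first_data_block is 1, 8192 blocks per group, a 256-block stripe):

#include <stdio.h>

int main(void)
{
	unsigned long blocks_per_group = 8192;
	unsigned long first_data_block = 1;     /* 1 on a 1 KB-block fs, 0 on 4 KB */
	unsigned long stripe = 256;             /* MB_DEFAULT_STRIPE */
	unsigned long group = 3;

	unsigned long abs_start = group * blocks_per_group + first_data_block;
	unsigned long aligned = ((abs_start + stripe - 1) / stripe) * stripe;
	unsigned long offset = (aligned - first_data_block) % blocks_per_group;

	printf("group %lu: first stripe-aligned block is group offset %lu\n",
	       group, offset);
	return 0;
}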
1979 +static int ext3_mb_good_group(struct ext3_allocation_context *ac,
1980 + int group, int cr)
1982 + struct ext3_group_info *grp = EXT3_GROUP_INFO(ac->ac_sb, group);
1983 + unsigned free, fragments, i, bits;
1985 + BUG_ON(cr < 0 || cr >= 4);
1986 + BUG_ON(EXT3_MB_GRP_NEED_INIT(grp));
1988 + free = grp->bb_free;
1989 + fragments = grp->bb_fragments;
1992 + if (fragments == 0)
1997 + BUG_ON(ac->ac_2order == 0);
1998 + bits = ac->ac_sb->s_blocksize_bits + 1;
1999 + for (i = ac->ac_2order; i <= bits; i++)
2000 + if (grp->bb_counters[i] > 0)
2004 + if ((free / fragments) >= ac->ac_g_ex.fe_len)
2008 + if (free >= ac->ac_g_ex.fe_len)
2020 +int ext3_mb_regular_allocator(struct ext3_allocation_context *ac)
2022 + int group, i, cr, err = 0;
2023 + struct ext3_sb_info *sbi;
2024 + struct super_block *sb;
2025 + struct ext3_buddy e3b;
2028 + sbi = EXT3_SB(sb);
2029 + BUG_ON(ac->ac_status == AC_STATUS_FOUND);
2031 + /* first, try the goal */
2032 + err = ext3_mb_find_by_goal(ac, &e3b);
2033 + if (err || ac->ac_status == AC_STATUS_FOUND)
2036 + if (unlikely(ac->ac_flags & EXT3_MB_HINT_GOAL_ONLY))
2039 + i = ffs(ac->ac_g_ex.fe_len);
2040 + ac->ac_2order = 0;
2041 + if (i >= sbi->s_mb_order2_reqs) {
2043 + if ((ac->ac_g_ex.fe_len & (~(1 << i))) == 0)
2044 + ac->ac_2order = i;
2047 + group = ac->ac_g_ex.fe_group;
2049 + /* Let's just scan groups to find more or less suitable blocks */
2050 + cr = ac->ac_2order ? 0 : 1;
2052 + for (; cr < 4 && ac->ac_status == AC_STATUS_CONTINUE; cr++) {
2053 + ac->ac_criteria = cr;
2054 + for (i = 0; i < EXT3_SB(sb)->s_groups_count; group++, i++) {
2055 + struct ext3_group_info *grp;
2057 + if (group == EXT3_SB(sb)->s_groups_count)
2060 + /* quick check to skip empty groups */
2061 + grp = EXT3_GROUP_INFO(ac->ac_sb, group);
2062 + if (grp->bb_free == 0)
2065 + if (EXT3_MB_GRP_NEED_INIT(EXT3_GROUP_INFO(sb, group))) {
2066 + /* we need full data about the group
2067 + * to make a good selection */
2068 + err = ext3_mb_load_buddy(sb, group, &e3b);
2071 + ext3_mb_release_desc(&e3b);
2074 + /* check whether the group is good for our criteria */
2075 + if (!ext3_mb_good_group(ac, group, cr))
2078 + err = ext3_mb_load_buddy(sb, group, &e3b);
2082 + ext3_lock_group(sb, group);
2083 + if (!ext3_mb_good_group(ac, group, cr)) {
2084 + /* someone did allocation from this group */
2085 + ext3_unlock_group(sb, group);
2086 + ext3_mb_release_desc(&e3b);
2090 + ac->ac_groups_scanned++;
2092 + ext3_mb_simple_scan_group(ac, &e3b);
2093 + else if (cr == 1 && ac->ac_g_ex.fe_len == sbi->s_stripe)
2094 + ext3_mb_scan_aligned(ac, &e3b);
2096 + ext3_mb_complex_scan_group(ac, &e3b);
2098 + ext3_unlock_group(sb, group);
2099 + ext3_mb_release_desc(&e3b);
2101 + if (ac->ac_status != AC_STATUS_CONTINUE)
2106 + if (ac->ac_b_ex.fe_len > 0 && ac->ac_status != AC_STATUS_FOUND &&
2107 + !(ac->ac_flags & EXT3_MB_HINT_FIRST)) {
2109 + * We've been searching too long. Let's try to allocate
2110 + * the best chunk we've found so far
2113 + ext3_mb_try_best_found(ac, &e3b);
2114 + if (ac->ac_status != AC_STATUS_FOUND) {
2116 + * Someone more lucky has already allocated it.
2117 + * The only thing we can do is just take first
2119 + printk(KERN_DEBUG "EXT3-fs: someone won our chunk\n");
2121 + ac->ac_b_ex.fe_group = 0;
2122 + ac->ac_b_ex.fe_start = 0;
2123 + ac->ac_b_ex.fe_len = 0;
2124 + ac->ac_status = AC_STATUS_CONTINUE;
2125 + ac->ac_flags |= EXT3_MB_HINT_FIRST;
2127 + atomic_inc(&sbi->s_mb_lost_chunks);
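The ac_2order setup near the top of ext3_mb_regular_allocator() decides whether the request qualifies for the cr==0 power-of-two buddy scan; part of that computation is elided in the hunk above, so here is a minimal userspace sketch of the intended test, assuming the conventional ffs()-based form:

#include <stdio.h>

/* a request is eligible for the fast 2^N buddy scan only when its length
 * is an exact power of two with its set bit at or above s_mb_order2_reqs;
 * returns the order on success, 0 otherwise (sketch, not the kernel code) */
static int order2_request(unsigned len, unsigned order2_reqs)
{
	unsigned i = __builtin_ffs(len);	/* 1-based index of lowest set bit */

	if (i < order2_reqs)
		return 0;
	i--;
	return (len & ~(1U << i)) == 0 ? i : 0;
}

int main(void)
{
	printf("len 256 -> order %d\n", order2_request(256, 8));  /* 8 */
	printf("len 384 -> order %d\n", order2_request(384, 8));  /* 0: not a power of two */
	printf("len  64 -> order %d\n", order2_request(64, 8));   /* 0: below the threshold */
	return 0;
}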
2135 +#ifdef EXT3_MB_HISTORY
2136 +struct ext3_mb_proc_session {
2137 + struct ext3_mb_history *history;
2138 + struct super_block *sb;
2143 +static void *ext3_mb_history_skip_empty(struct ext3_mb_proc_session *s,
2144 + struct ext3_mb_history *hs,
2147 + if (hs == s->history + s->max)
2149 + if (!first && hs == s->history + s->start)
2151 + while (hs->orig.fe_len == 0) {
2153 + if (hs == s->history + s->max)
2155 + if (hs == s->history + s->start)
2161 +static void *ext3_mb_seq_history_start(struct seq_file *seq, loff_t *pos)
2163 + struct ext3_mb_proc_session *s = seq->private;
2164 + struct ext3_mb_history *hs;
2168 + return SEQ_START_TOKEN;
2169 + hs = ext3_mb_history_skip_empty(s, s->history + s->start, 1);
2172 + while (--l && (hs = ext3_mb_history_skip_empty(s, ++hs, 0)) != NULL);
2176 +static void *ext3_mb_seq_history_next(struct seq_file *seq, void *v, loff_t *pos)
2178 + struct ext3_mb_proc_session *s = seq->private;
2179 + struct ext3_mb_history *hs = v;
2182 + if (v == SEQ_START_TOKEN)
2183 + return ext3_mb_history_skip_empty(s, s->history + s->start, 1);
2185 + return ext3_mb_history_skip_empty(s, ++hs, 0);
2188 +static int ext3_mb_seq_history_show(struct seq_file *seq, void *v)
2190 + char buf[25], buf2[25], buf3[25], *fmt;
2191 + struct ext3_mb_history *hs = v;
2193 + if (v == SEQ_START_TOKEN) {
2194 + seq_printf(seq, "%-5s %-8s %-23s %-23s %-23s %-5s "
2195 + "%-5s %-2s %-5s %-5s %-5s %-6s\n",
2196 + "pid", "inode", "original", "goal", "result","found",
2197 + "grps", "cr", "flags", "merge", "tail", "broken");
2201 + if (hs->op == EXT3_MB_HISTORY_ALLOC) {
2202 + fmt = "%-5u %-8u %-23s %-23s %-23s %-5u %-5u %-2u "
2203 + "%-5u %-5s %-5u %-6u\n";
2204 + sprintf(buf2, "%lu/%lu/%lu@%lu", hs->result.fe_group,
2205 + hs->result.fe_start, hs->result.fe_len,
2206 + hs->result.fe_logical);
2207 + sprintf(buf, "%lu/%lu/%lu@%lu", hs->orig.fe_group,
2208 + hs->orig.fe_start, hs->orig.fe_len,
2209 + hs->orig.fe_logical);
2210 + sprintf(buf3, "%lu/%lu/%lu@%lu", hs->goal.fe_group,
2211 + hs->goal.fe_start, hs->goal.fe_len,
2212 + hs->goal.fe_logical);
2213 + seq_printf(seq, fmt, hs->pid, hs->ino, buf, buf3, buf2,
2214 + hs->found, hs->groups, hs->cr, hs->flags,
2215 + hs->merged ? "M" : "", hs->tail,
2216 + hs->buddy ? 1 << hs->buddy : 0);
2217 + } else if (hs->op == EXT3_MB_HISTORY_PREALLOC) {
2218 + fmt = "%-5u %-8u %-23s %-23s %-23s\n";
2219 + sprintf(buf2, "%lu/%lu/%lu@%lu", hs->result.fe_group,
2220 + hs->result.fe_start, hs->result.fe_len,
2221 + hs->result.fe_logical);
2222 + sprintf(buf, "%lu/%lu/%lu@%lu", hs->orig.fe_group,
2223 + hs->orig.fe_start, hs->orig.fe_len,
2224 + hs->orig.fe_logical);
2225 + seq_printf(seq, fmt, hs->pid, hs->ino, buf, "", buf2);
2226 + } else if (hs->op == EXT3_MB_HISTORY_DISCARD) {
2227 + sprintf(buf2, "%lu/%lu/%lu", hs->result.fe_group,
2228 + hs->result.fe_start, hs->result.fe_len);
2229 + seq_printf(seq, "%-5u %-8u %-23s discard\n",
2230 + hs->pid, hs->ino, buf2);
2231 + } else if (hs->op == EXT3_MB_HISTORY_FREE) {
2232 + sprintf(buf2, "%lu/%lu/%lu", hs->result.fe_group,
2233 + hs->result.fe_start, hs->result.fe_len);
2234 + seq_printf(seq, "%-5u %-8u %-23s free\n",
2235 + hs->pid, hs->ino, buf2);
2240 +static void ext3_mb_seq_history_stop(struct seq_file *seq, void *v)
2244 +static struct seq_operations ext3_mb_seq_history_ops = {
2245 + .start = ext3_mb_seq_history_start,
2246 + .next = ext3_mb_seq_history_next,
2247 + .stop = ext3_mb_seq_history_stop,
2248 + .show = ext3_mb_seq_history_show,
2251 +static int ext3_mb_seq_history_open(struct inode *inode, struct file *file)
2253 + struct super_block *sb = PDE(inode)->data;
2254 + struct ext3_sb_info *sbi = EXT3_SB(sb);
2255 + struct ext3_mb_proc_session *s;
2258 + s = kmalloc(sizeof(*s), GFP_KERNEL);
2262 + size = sizeof(struct ext3_mb_history) * sbi->s_mb_history_max;
2263 + s->history = kmalloc(size, GFP_KERNEL);
2264 + if (s->history == NULL) {
2269 + spin_lock(&sbi->s_mb_history_lock);
2270 + memcpy(s->history, sbi->s_mb_history, size);
2271 + s->max = sbi->s_mb_history_max;
2272 + s->start = sbi->s_mb_history_cur % s->max;
2273 + spin_unlock(&sbi->s_mb_history_lock);
2275 + rc = seq_open(file, &ext3_mb_seq_history_ops);
2277 + struct seq_file *m = (struct seq_file *)file->private_data;
2280 + kfree(s->history);
2287 +static int ext3_mb_seq_history_release(struct inode *inode, struct file *file)
2289 + struct seq_file *seq = (struct seq_file *)file->private_data;
2290 + struct ext3_mb_proc_session *s = seq->private;
2291 + kfree(s->history);
2293 + return seq_release(inode, file);
2296 +static ssize_t ext3_mb_seq_history_write(struct file *file,
2297 + const char __user *buffer,
2298 + size_t count, loff_t *ppos)
2300 + struct seq_file *seq = (struct seq_file *)file->private_data;
2301 + struct ext3_mb_proc_session *s = seq->private;
2302 + struct super_block *sb = s->sb;
2306 + if (count >= sizeof(str)) {
2307 + printk(KERN_ERR "EXT3-fs: %s string too long, max %u bytes\n",
2308 + "mb_history", (int)sizeof(str));
2309 + return -EOVERFLOW;
2312 + if (copy_from_user(str, buffer, count))
2315 + value = simple_strtol(str, NULL, 0);
2318 + EXT3_SB(sb)->s_mb_history_filter = value;
2323 +static struct file_operations ext3_mb_seq_history_fops = {
2324 + .owner = THIS_MODULE,
2325 + .open = ext3_mb_seq_history_open,
2327 + .write = ext3_mb_seq_history_write,
2328 + .llseek = seq_lseek,
2329 + .release = ext3_mb_seq_history_release,
2332 +static void *ext3_mb_seq_groups_start(struct seq_file *seq, loff_t *pos)
2334 + struct super_block *sb = seq->private;
2335 + struct ext3_sb_info *sbi = EXT3_SB(sb);
2338 + if (*pos < 0 || *pos >= sbi->s_groups_count)
2342 + return (void *) group;
2345 +static void *ext3_mb_seq_groups_next(struct seq_file *seq, void *v, loff_t *pos)
2347 + struct super_block *sb = seq->private;
2348 + struct ext3_sb_info *sbi = EXT3_SB(sb);
2352 + if (*pos < 0 || *pos >= sbi->s_groups_count)
2355 +	return (void *) group;
2358 +static int ext3_mb_seq_groups_show(struct seq_file *seq, void *v)
2360 + struct super_block *sb = seq->private;
2361 + long group = (long) v;
2363 + struct ext3_buddy e3b;
2365 + struct ext3_group_info info;
2366 + unsigned short counters[16];
2371 + seq_printf(seq, "#%-5s: %-5s %-5s %-5s "
2372 + "[ %-5s %-5s %-5s %-5s %-5s %-5s %-5s "
2373 + "%-5s %-5s %-5s %-5s %-5s %-5s %-5s ]\n",
2374 + "group", "free", "frags", "first",
2375 + "2^0", "2^1", "2^2", "2^3", "2^4", "2^5","2^6",
2376 + "2^7", "2^8", "2^9", "2^10", "2^11", "2^12", "2^13");
2378 + i = (sb->s_blocksize_bits + 2) * sizeof(sg.info.bb_counters[0]) +
2379 + sizeof(struct ext3_group_info);
2380 + err = ext3_mb_load_buddy(sb, group, &e3b);
2382 + seq_printf(seq, "#%-5lu: I/O error\n", group);
2385 + ext3_lock_group(sb, group);
2386 + memcpy(&sg, EXT3_GROUP_INFO(sb, group), i);
2387 + ext3_unlock_group(sb, group);
2388 + ext3_mb_release_desc(&e3b);
2390 + seq_printf(seq, "#%-5lu: %-5u %-5u %-5u [", group, sg.info.bb_free,
2391 + sg.info.bb_fragments, sg.info.bb_first_free);
2392 + for (i = 0; i <= 13; i++)
2393 + seq_printf(seq, " %-5u", i <= sb->s_blocksize_bits + 1 ?
2394 + sg.info.bb_counters[i] : 0);
2395 + seq_printf(seq, " ]\n");
2400 +static void ext3_mb_seq_groups_stop(struct seq_file *seq, void *v)
2404 +static struct seq_operations ext3_mb_seq_groups_ops = {
2405 + .start = ext3_mb_seq_groups_start,
2406 + .next = ext3_mb_seq_groups_next,
2407 + .stop = ext3_mb_seq_groups_stop,
2408 + .show = ext3_mb_seq_groups_show,
2411 +static int ext3_mb_seq_groups_open(struct inode *inode, struct file *file)
2413 + struct super_block *sb = PDE(inode)->data;
2416 + rc = seq_open(file, &ext3_mb_seq_groups_ops);
2418 + struct seq_file *m = (struct seq_file *)file->private_data;
2425 +static struct file_operations ext3_mb_seq_groups_fops = {
2426 + .owner = THIS_MODULE,
2427 + .open = ext3_mb_seq_groups_open,
2429 + .llseek = seq_lseek,
2430 + .release = seq_release,
2433 +static void ext3_mb_history_release(struct super_block *sb)
2435 + struct ext3_sb_info *sbi = EXT3_SB(sb);
2437 + remove_proc_entry("mb_groups", sbi->s_dev_proc);
2438 + remove_proc_entry("mb_history", sbi->s_dev_proc);
2440 + if (sbi->s_mb_history)
2441 + kfree(sbi->s_mb_history);
2444 +static void ext3_mb_history_init(struct super_block *sb)
2446 + struct ext3_sb_info *sbi = EXT3_SB(sb);
2449 + if (sbi->s_dev_proc != NULL) {
2450 + struct proc_dir_entry *p;
2451 + p = create_proc_entry("mb_history", S_IRUGO, sbi->s_dev_proc);
2453 + p->proc_fops = &ext3_mb_seq_history_fops;
2456 + p = create_proc_entry("mb_groups", S_IRUGO, sbi->s_dev_proc);
2458 + p->proc_fops = &ext3_mb_seq_groups_fops;
2463 + sbi->s_mb_history_max = 1000;
2464 + sbi->s_mb_history_cur = 0;
2465 + spin_lock_init(&sbi->s_mb_history_lock);
2466 + i = sbi->s_mb_history_max * sizeof(struct ext3_mb_history);
2467 + sbi->s_mb_history = kmalloc(i, GFP_KERNEL);
2468 + if (likely(sbi->s_mb_history != NULL))
2469 + memset(sbi->s_mb_history, 0, i);
2470 +	/* if we can't allocate history, then we simply won't use it */
2474 +ext3_mb_store_history(struct ext3_allocation_context *ac)
2476 + struct ext3_sb_info *sbi = EXT3_SB(ac->ac_sb);
2477 + struct ext3_mb_history h;
2479 + if (unlikely(sbi->s_mb_history == NULL))
2482 + if (!(ac->ac_op & sbi->s_mb_history_filter))
2486 + h.pid = current->pid;
2487 + h.ino = ac->ac_inode ? ac->ac_inode->i_ino : 0;
2488 + h.orig = ac->ac_o_ex;
2489 + h.result = ac->ac_b_ex;
2490 + h.flags = ac->ac_flags;
2492 + if (ac->ac_op == EXT3_MB_HISTORY_ALLOC) {
2493 + if (ac->ac_g_ex.fe_start == ac->ac_b_ex.fe_start &&
2494 + ac->ac_g_ex.fe_group == ac->ac_b_ex.fe_group)
2496 + h.goal = ac->ac_g_ex;
2497 + h.result = ac->ac_f_ex;
2500 + spin_lock(&sbi->s_mb_history_lock);
2501 + memcpy(sbi->s_mb_history + sbi->s_mb_history_cur, &h, sizeof(h));
2502 + if (++sbi->s_mb_history_cur >= sbi->s_mb_history_max)
2503 + sbi->s_mb_history_cur = 0;
2504 + spin_unlock(&sbi->s_mb_history_lock);
2508 +#define ext3_mb_history_release(sb)
2509 +#define ext3_mb_history_init(sb)
2512 +int ext3_mb_init_backend(struct super_block *sb)
2514 + struct ext3_sb_info *sbi = EXT3_SB(sb);
2515 + int i, j, len, metalen;
2516 + int num_meta_group_infos =
2517 + (sbi->s_groups_count + EXT3_DESC_PER_BLOCK(sb) - 1) >>
2518 + EXT3_DESC_PER_BLOCK_BITS(sb);
2519 + struct ext3_group_info **meta_group_info;
2521 + /* An 8TB filesystem with 64-bit pointers requires a 4096 byte
2522 + * kmalloc. A 128kb malloc should suffice for a 256TB filesystem.
2523 + * So a two level scheme suffices for now. */
2524 + sbi->s_group_info = kmalloc(sizeof(*sbi->s_group_info) *
2525 + num_meta_group_infos, GFP_KERNEL);
2526 + if (sbi->s_group_info == NULL) {
2527 + printk(KERN_ERR "EXT3-fs: can't allocate buddy meta group\n");
2530 + sbi->s_buddy_cache = new_inode(sb);
2531 + if (sbi->s_buddy_cache == NULL) {
2532 + printk(KERN_ERR "EXT3-fs: can't get new inode\n");
2535 + EXT3_I(sbi->s_buddy_cache)->i_disksize = 0;
2537 + metalen = sizeof(*meta_group_info) << EXT3_DESC_PER_BLOCK_BITS(sb);
2538 + for (i = 0; i < num_meta_group_infos; i++) {
2539 + if ((i + 1) == num_meta_group_infos)
2540 + metalen = sizeof(*meta_group_info) *
2541 + (sbi->s_groups_count -
2542 + (i << EXT3_DESC_PER_BLOCK_BITS(sb)));
2543 + meta_group_info = kmalloc(metalen, GFP_KERNEL);
2544 + if (meta_group_info == NULL) {
2545 + printk(KERN_ERR "EXT3-fs: can't allocate mem for a "
2547 + goto err_freemeta;
2549 + sbi->s_group_info[i] = meta_group_info;
2553 +	 * calculate the needed size. if the bb_counters size changes,
2554 +	 * don't forget to update ext3_mb_generate_buddy()
2556 + len = sizeof(struct ext3_group_info);
2557 + len += sizeof(unsigned short) * (sb->s_blocksize_bits + 2);
2558 + for (i = 0; i < sbi->s_groups_count; i++) {
2559 + struct ext3_group_desc * desc;
2562 + sbi->s_group_info[i >> EXT3_DESC_PER_BLOCK_BITS(sb)];
2563 + j = i & (EXT3_DESC_PER_BLOCK(sb) - 1);
2565 + meta_group_info[j] = kmalloc(len, GFP_KERNEL);
2566 + if (meta_group_info[j] == NULL) {
2567 + printk(KERN_ERR "EXT3-fs: can't allocate buddy mem\n");
2569 + goto err_freebuddy;
2571 + desc = ext3_get_group_desc(sb, i, NULL);
2572 + if (desc == NULL) {
2573 +			printk(KERN_ERR "EXT3-fs: can't read descriptor %u\n", i);
2574 + goto err_freebuddy;
2576 + memset(meta_group_info[j], 0, len);
2577 + set_bit(EXT3_GROUP_INFO_NEED_INIT_BIT,
2578 + &meta_group_info[j]->bb_state);
2580 + /* initialize bb_free to be able to skip
2581 + * empty groups without initialization */
2582 + meta_group_info[j]->bb_free =
2583 + le16_to_cpu(desc->bg_free_blocks_count);
2585 + INIT_LIST_HEAD(&meta_group_info[j]->bb_prealloc_list);
2587 +#ifdef DOUBLE_CHECK
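+		/* under DOUBLE_CHECK keep a private copy of the on-disk block
+		 * bitmap so the in-core buddy bookkeeping can be cross-checked
+		 * against it */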
2589 + struct buffer_head *bh;
2590 + meta_group_info[j]->bb_bitmap =
2591 + kmalloc(sb->s_blocksize, GFP_KERNEL);
2592 + BUG_ON(meta_group_info[j]->bb_bitmap == NULL);
2593 + bh = read_block_bitmap(sb, i);
2594 + BUG_ON(bh == NULL);
2595 + memcpy(meta_group_info[j]->bb_bitmap, bh->b_data,
2607 + kfree(EXT3_GROUP_INFO(sb, i));
2610 + i = num_meta_group_infos;
2613 + kfree(sbi->s_group_info[i]);
2614 + iput(sbi->s_buddy_cache);
2616 + kfree(sbi->s_group_info);
2620 +int ext3_mb_init(struct super_block *sb, int needs_recovery)
2622 + struct ext3_sb_info *sbi = EXT3_SB(sb);
2623 + unsigned i, offset, max;
2625 + if (!test_opt(sb, MBALLOC))
2628 + i = (sb->s_blocksize_bits + 2) * sizeof(unsigned short);
2630 + sbi->s_mb_offsets = kmalloc(i, GFP_KERNEL);
2631 + if (sbi->s_mb_offsets == NULL) {
2632 + clear_opt(sbi->s_mount_opt, MBALLOC);
2635 + sbi->s_mb_maxs = kmalloc(i, GFP_KERNEL);
2636 + if (sbi->s_mb_maxs == NULL) {
2637 + clear_opt(sbi->s_mount_opt, MBALLOC);
2638 +		kfree(sbi->s_mb_offsets);
2642 + /* order 0 is regular bitmap */
2643 + sbi->s_mb_maxs[0] = sb->s_blocksize << 3;
2644 + sbi->s_mb_offsets[0] = 0;
2648 + max = sb->s_blocksize << 2;
2650 + sbi->s_mb_offsets[i] = offset;
2651 + sbi->s_mb_maxs[i] = max;
2652 + offset += 1 << (sb->s_blocksize_bits - i);
2655 + } while (i <= sb->s_blocksize_bits + 1);
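+	/* s_mb_offsets[i] is where the order-i buddy bitmap starts within a
+	 * group's buddy area and s_mb_maxs[i] is the number of bits it holds */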
2657 + /* init file for buddy data */
2658 + if ((i = ext3_mb_init_backend(sb))) {
2659 + clear_opt(sbi->s_mount_opt, MBALLOC);
2660 + kfree(sbi->s_mb_offsets);
2661 + kfree(sbi->s_mb_maxs);
2665 + spin_lock_init(&sbi->s_md_lock);
2666 + INIT_LIST_HEAD(&sbi->s_active_transaction);
2667 + INIT_LIST_HEAD(&sbi->s_closed_transaction);
2668 + INIT_LIST_HEAD(&sbi->s_committed_transaction);
2669 + spin_lock_init(&sbi->s_bal_lock);
2671 + sbi->s_mb_max_to_scan = MB_DEFAULT_MAX_TO_SCAN;
2672 + sbi->s_mb_min_to_scan = MB_DEFAULT_MIN_TO_SCAN;
2673 + sbi->s_mb_max_groups_to_scan = MB_DEFAULT_MAX_GROUPS_TO_SCAN;
2674 + sbi->s_mb_stats = MB_DEFAULT_STATS;
2675 + sbi->s_mb_stream_request = MB_DEFAULT_STREAM_THRESHOLD;
2676 + sbi->s_mb_order2_reqs = MB_DEFAULT_ORDER2_REQS;
2677 + sbi->s_mb_history_filter = EXT3_MB_HISTORY_DEFAULT;
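+	/* one locality group per possible CPU: small allocations issued on the
+	 * same CPU share a preallocation area (see ext3_mb_group_or_file()) */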
2679 + i = sizeof(struct ext3_locality_group) * num_possible_cpus();
2680 + sbi->s_locality_groups = kmalloc(i, GFP_NOFS);
2681 + if (sbi->s_locality_groups == NULL) {
2682 + clear_opt(sbi->s_mount_opt, MBALLOC);
2683 + kfree(sbi->s_mb_offsets);
2684 + kfree(sbi->s_mb_maxs);
2687 + for (i = 0; i < num_possible_cpus(); i++) {
2688 + struct ext3_locality_group *lg;
2689 + lg = &sbi->s_locality_groups[i];
2690 + sema_init(&lg->lg_sem, 1);
2691 + INIT_LIST_HEAD(&lg->lg_prealloc_list);
2692 + spin_lock_init(&lg->lg_prealloc_lock);
2695 + ext3_mb_init_per_dev_proc(sb);
2696 + ext3_mb_history_init(sb);
2698 + printk("EXT3-fs: mballoc enabled\n");
2702 +void ext3_mb_cleanup_pa(struct ext3_group_info *grp)
2704 + struct ext3_prealloc_space *pa;
2705 + struct list_head *cur, *tmp;
2708 + list_for_each_safe(cur, tmp, &grp->bb_prealloc_list) {
2709 + pa = list_entry(cur, struct ext3_prealloc_space, pa_group_list);
2710 + list_del_rcu(&pa->pa_group_list);
2715 + mb_debug("mballoc: %u PAs left\n", count);
2719 +int ext3_mb_release(struct super_block *sb)
2721 + struct ext3_sb_info *sbi = EXT3_SB(sb);
2722 + int i, num_meta_group_infos;
2724 + if (!test_opt(sb, MBALLOC))
2727 + /* release freed, non-committed blocks */
2728 + spin_lock(&sbi->s_md_lock);
2729 + list_splice_init(&sbi->s_closed_transaction,
2730 + &sbi->s_committed_transaction);
2731 + list_splice_init(&sbi->s_active_transaction,
2732 + &sbi->s_committed_transaction);
2733 + spin_unlock(&sbi->s_md_lock);
2734 + ext3_mb_free_committed_blocks(sb);
2736 + if (sbi->s_group_info) {
2737 + for (i = 0; i < sbi->s_groups_count; i++) {
2738 +#ifdef DOUBLE_CHECK
2739 + if (EXT3_GROUP_INFO(sb, i)->bb_bitmap)
2740 + kfree(EXT3_GROUP_INFO(sb, i)->bb_bitmap);
2742 + ext3_mb_cleanup_pa(EXT3_GROUP_INFO(sb, i));
2743 + kfree(EXT3_GROUP_INFO(sb, i));
2745 + num_meta_group_infos = (sbi->s_groups_count +
2746 + EXT3_DESC_PER_BLOCK(sb) - 1) >>
2747 + EXT3_DESC_PER_BLOCK_BITS(sb);
2748 + for (i = 0; i < num_meta_group_infos; i++)
2749 + kfree(sbi->s_group_info[i]);
2750 + kfree(sbi->s_group_info);
2752 + if (sbi->s_mb_offsets)
2753 + kfree(sbi->s_mb_offsets);
2754 + if (sbi->s_mb_maxs)
2755 + kfree(sbi->s_mb_maxs);
2756 + if (sbi->s_buddy_cache)
2757 + iput(sbi->s_buddy_cache);
2758 + if (sbi->s_mb_stats) {
2759 + printk("EXT3-fs: mballoc: %u blocks %u reqs (%u success)\n",
2760 + atomic_read(&sbi->s_bal_allocated),
2761 + atomic_read(&sbi->s_bal_reqs),
2762 + atomic_read(&sbi->s_bal_success));
2763 + printk("EXT3-fs: mballoc: %u extents scanned, %u goal hits, "
2764 + "%u 2^N hits, %u breaks, %u lost\n",
2765 + atomic_read(&sbi->s_bal_ex_scanned),
2766 + atomic_read(&sbi->s_bal_goals),
2767 + atomic_read(&sbi->s_bal_2orders),
2768 + atomic_read(&sbi->s_bal_breaks),
2769 + atomic_read(&sbi->s_mb_lost_chunks));
2770 + printk("EXT3-fs: mballoc: %lu generated and it took %Lu\n",
2771 + sbi->s_mb_buddies_generated++,
2772 + sbi->s_mb_generation_time);
2773 + printk("EXT3-fs: mballoc: %u preallocated, %u discarded\n",
2774 + atomic_read(&sbi->s_mb_preallocated),
2775 + atomic_read(&sbi->s_mb_discarded));
2778 + if (sbi->s_locality_groups)
2779 + kfree(sbi->s_locality_groups);
2781 + ext3_mb_history_release(sb);
2782 + ext3_mb_destroy_per_dev_proc(sb);
2787 +void ext3_mb_free_committed_blocks(struct super_block *sb)
2789 + struct ext3_sb_info *sbi = EXT3_SB(sb);
2790 + int err, i, count = 0, count2 = 0;
2791 + struct ext3_free_metadata *md;
2792 + struct ext3_buddy e3b;
2794 + if (list_empty(&sbi->s_committed_transaction))
2797 +	/* there are committed blocks still to be freed */
2799 + /* get next array of blocks */
2801 + spin_lock(&sbi->s_md_lock);
2802 + if (!list_empty(&sbi->s_committed_transaction)) {
2803 + md = list_entry(sbi->s_committed_transaction.next,
2804 + struct ext3_free_metadata, list);
2805 + list_del(&md->list);
2807 + spin_unlock(&sbi->s_md_lock);
2812 + mb_debug("gonna free %u blocks in group %u (0x%p):",
2813 + md->num, md->group, md);
2815 + err = ext3_mb_load_buddy(sb, md->group, &e3b);
2816 + /* we expect to find existing buddy because it's pinned */
2819 + /* there are blocks to put in buddy to make them really free */
2822 + ext3_lock_group(sb, md->group);
2823 + for (i = 0; i < md->num; i++) {
2824 + mb_debug(" %u", md->blocks[i]);
2825 + err = mb_free_blocks(NULL, &e3b, md->blocks[i], 1);
2829 + ext3_unlock_group(sb, md->group);
2831 + /* balance refcounts from ext3_mb_free_metadata() */
2832 + page_cache_release(e3b.bd_buddy_page);
2833 + page_cache_release(e3b.bd_bitmap_page);
2836 + ext3_mb_release_desc(&e3b);
2840 + mb_debug("freed %u blocks in %u structures\n", count, count2);
2843 +#define EXT3_MB_STATS_NAME "stats"
2844 +#define EXT3_MB_MAX_TO_SCAN_NAME "max_to_scan"
2845 +#define EXT3_MB_MIN_TO_SCAN_NAME "min_to_scan"
2846 +#define EXT3_MB_ORDER2_REQ "order2_req"
2847 +#define EXT3_MB_STREAM_REQ "stream_req"
2849 +static int ext3_mb_stats_read(char *page, char **start, off_t off,
2850 + int count, int *eof, void *data)
2852 + struct ext3_sb_info *sbi = data;
2859 + len = sprintf(page, "%ld\n", sbi->s_mb_stats);
2864 +static int ext3_mb_stats_write(struct file *file, const char *buffer,
2865 + unsigned long count, void *data)
2867 + struct ext3_sb_info *sbi = data;
2870 + if (count >= sizeof(str)) {
2871 + printk(KERN_ERR "EXT3-fs: %s string too long, max %u bytes\n",
2872 + EXT3_MB_STATS_NAME, (int)sizeof(str));
2873 + return -EOVERFLOW;
2876 + if (copy_from_user(str, buffer, count))
2879 +	/* zero disables stats, any non-zero value enables them */
2880 + sbi->s_mb_stats = (simple_strtol(str, NULL, 0) != 0);
2884 +static int ext3_mb_max_to_scan_read(char *page, char **start, off_t off,
2885 + int count, int *eof, void *data)
2887 + struct ext3_sb_info *sbi = data;
2894 + len = sprintf(page, "%ld\n", sbi->s_mb_max_to_scan);
2899 +static int ext3_mb_max_to_scan_write(struct file *file, const char *buffer,
2900 + unsigned long count, void *data)
2902 + struct ext3_sb_info *sbi = data;
2906 + if (count >= sizeof(str)) {
2907 + printk(KERN_ERR "EXT3-fs: %s string too long, max %u bytes\n",
2908 + EXT3_MB_MAX_TO_SCAN_NAME, (int)sizeof(str));
2909 + return -EOVERFLOW;
2912 + if (copy_from_user(str, buffer, count))
2915 +	/* parse the requested value */
2916 + value = simple_strtol(str, NULL, 0);
2920 + sbi->s_mb_max_to_scan = value;
2925 +static int ext3_mb_min_to_scan_read(char *page, char **start, off_t off,
2926 + int count, int *eof, void *data)
2928 + struct ext3_sb_info *sbi = data;
2935 + len = sprintf(page, "%ld\n", sbi->s_mb_min_to_scan);
2940 +static int ext3_mb_order2_req_write(struct file *file, const char *buffer,
2941 + unsigned long count, void *data)
2943 + struct ext3_sb_info *sbi = data;
2947 + if (count >= sizeof(str)) {
2948 + printk(KERN_ERR "EXT3-fs: %s string too long, max %u bytes\n",
2949 +			EXT3_MB_ORDER2_REQ, (int)sizeof(str));
2950 + return -EOVERFLOW;
2953 + if (copy_from_user(str, buffer, count))
2956 +	/* parse the requested value */
2957 + value = simple_strtol(str, NULL, 0);
2961 + sbi->s_mb_order2_reqs = value;
2966 +static int ext3_mb_order2_req_read(char *page, char **start, off_t off,
2967 + int count, int *eof, void *data)
2969 + struct ext3_sb_info *sbi = data;
2976 + len = sprintf(page, "%ld\n", sbi->s_mb_order2_reqs);
2981 +static int ext3_mb_min_to_scan_write(struct file *file, const char *buffer,
2982 + unsigned long count, void *data)
2984 + struct ext3_sb_info *sbi = data;
2988 + if (count >= sizeof(str)) {
2989 + printk(KERN_ERR "EXT3-fs: %s string too long, max %u bytes\n",
2990 + EXT3_MB_MIN_TO_SCAN_NAME, (int)sizeof(str));
2991 + return -EOVERFLOW;
2994 + if (copy_from_user(str, buffer, count))
2997 +	/* parse the requested value */
2998 + value = simple_strtol(str, NULL, 0);
3002 + sbi->s_mb_min_to_scan = value;
3007 +static int ext3_mb_stream_req_read(char *page, char **start, off_t off,
3008 + int count, int *eof, void *data)
3010 + struct ext3_sb_info *sbi = data;
3017 + len = sprintf(page, "%ld\n", sbi->s_mb_stream_request);
3022 +static int ext3_mb_stream_req_write(struct file *file, const char *buffer,
3023 + unsigned long count, void *data)
3025 + struct ext3_sb_info *sbi = data;
3029 + if (count >= sizeof(str)) {
3030 + printk(KERN_ERR "EXT3-fs: %s string too long, max %u bytes\n",
3031 + EXT3_MB_STREAM_REQ, (int)sizeof(str));
3032 + return -EOVERFLOW;
3035 + if (copy_from_user(str, buffer, count))
3038 +	/* parse the requested value */
3039 + value = simple_strtol(str, NULL, 0);
3043 + sbi->s_mb_stream_request = value;
3048 +int ext3_mb_init_per_dev_proc(struct super_block *sb)
3050 + struct ext3_sb_info *sbi = EXT3_SB(sb);
3051 + mode_t mode = S_IFREG | S_IRUGO | S_IWUSR;
3052 + struct proc_dir_entry *proc;
3055 + name = EXT3_MB_STATS_NAME;
3056 + proc = create_proc_entry(name, mode, sbi->s_dev_proc);
3060 + proc->read_proc = ext3_mb_stats_read;
3061 + proc->write_proc = ext3_mb_stats_write;
3063 + name = EXT3_MB_MAX_TO_SCAN_NAME;
3064 + proc = create_proc_entry(name, mode, sbi->s_dev_proc);
3068 + proc->read_proc = ext3_mb_max_to_scan_read;
3069 + proc->write_proc = ext3_mb_max_to_scan_write;
3071 + name = EXT3_MB_MIN_TO_SCAN_NAME;
3072 + proc = create_proc_entry(name, mode, sbi->s_dev_proc);
3076 + proc->read_proc = ext3_mb_min_to_scan_read;
3077 + proc->write_proc = ext3_mb_min_to_scan_write;
3079 + name = EXT3_MB_ORDER2_REQ;
3080 + proc = create_proc_entry(name, mode, sbi->s_dev_proc);
3084 + proc->read_proc = ext3_mb_order2_req_read;
3085 + proc->write_proc = ext3_mb_order2_req_write;
3087 + name = EXT3_MB_STREAM_REQ;
3088 + proc = create_proc_entry(name, mode, sbi->s_dev_proc);
3092 + proc->read_proc = ext3_mb_stream_req_read;
3093 + proc->write_proc = ext3_mb_stream_req_write;
3098 + printk(KERN_ERR "EXT3-fs: Unable to create %s\n", name);
3099 + remove_proc_entry(EXT3_MB_STREAM_REQ, sbi->s_dev_proc);
3100 + remove_proc_entry(EXT3_MB_ORDER2_REQ, sbi->s_dev_proc);
3101 + remove_proc_entry(EXT3_MB_MIN_TO_SCAN_NAME, sbi->s_dev_proc);
3102 + remove_proc_entry(EXT3_MB_MAX_TO_SCAN_NAME, sbi->s_dev_proc);
3103 + remove_proc_entry(EXT3_MB_STATS_NAME, sbi->s_dev_proc);
3108 +int ext3_mb_destroy_per_dev_proc(struct super_block *sb)
3110 + struct ext3_sb_info *sbi = EXT3_SB(sb);
3112 + if (sbi->s_dev_proc == NULL)
3115 + remove_proc_entry(EXT3_MB_STREAM_REQ, sbi->s_dev_proc);
3116 + remove_proc_entry(EXT3_MB_ORDER2_REQ, sbi->s_dev_proc);
3117 + remove_proc_entry(EXT3_MB_MIN_TO_SCAN_NAME, sbi->s_dev_proc);
3118 + remove_proc_entry(EXT3_MB_MAX_TO_SCAN_NAME, sbi->s_dev_proc);
3119 + remove_proc_entry(EXT3_MB_STATS_NAME, sbi->s_dev_proc);
3124 +int __init init_ext3_mb_proc(void)
3126 + ext3_pspace_cachep =
3127 + kmem_cache_create("ext3_prealloc_space",
3128 + sizeof(struct ext3_prealloc_space),
3129 + 0, SLAB_RECLAIM_ACCOUNT, NULL, NULL);
3130 + if (ext3_pspace_cachep == NULL)
3136 +void exit_ext3_mb_proc(void)
3138 + /* XXX: synchronize_rcu(); */
3139 + kmem_cache_destroy(ext3_pspace_cachep);
3144 + * Check quota and mark the chosen space (ac->ac_b_ex) non-free in bitmaps
3145 + * Returns 0 on success or an error code
3147 +int ext3_mb_mark_diskspace_used(struct ext3_allocation_context *ac, handle_t *handle)
3149 + struct buffer_head *bitmap_bh = NULL;
3150 + struct ext3_super_block *es;
3151 + struct ext3_group_desc *gdp;
3152 + struct buffer_head *gdp_bh;
3153 + struct ext3_sb_info *sbi;
3154 + struct super_block *sb;
3158 + BUG_ON(ac->ac_status != AC_STATUS_FOUND);
3159 + BUG_ON(ac->ac_b_ex.fe_len <= 0);
3162 + sbi = EXT3_SB(sb);
3165 +	ext3_debug("using block group %lu(%d)\n", ac->ac_b_ex.fe_group,
3166 + gdp->bg_free_blocks_count);
3169 + bitmap_bh = read_block_bitmap(sb, ac->ac_b_ex.fe_group);
3173 + err = ext3_journal_get_write_access(handle, bitmap_bh);
3178 + gdp = ext3_get_group_desc(sb, ac->ac_b_ex.fe_group, &gdp_bh);
3182 + err = ext3_journal_get_write_access(handle, gdp_bh);
3186 + block = ac->ac_b_ex.fe_group * EXT3_BLOCKS_PER_GROUP(sb)
3187 + + ac->ac_b_ex.fe_start
3188 + + le32_to_cpu(es->s_first_data_block);
3190 + if (block == le32_to_cpu(gdp->bg_block_bitmap) ||
3191 + block == le32_to_cpu(gdp->bg_inode_bitmap) ||
3192 + in_range(block, le32_to_cpu(gdp->bg_inode_table),
3193 + EXT3_SB(sb)->s_itb_per_group))
3194 + ext3_error(sb, __FUNCTION__,
3195 + "Allocating block in system zone - block = %lu",
3196 + (unsigned long) block);
3197 +#ifdef AGGRESSIVE_CHECK
3200 + for (i = 0; i < ac->ac_b_ex.fe_len; i++) {
3201 + BUG_ON(mb_test_bit(ac->ac_b_ex.fe_start + i,
3202 + bitmap_bh->b_data));
3206 + mb_set_bits(sb_bgl_lock(sbi, ac->ac_b_ex.fe_group), bitmap_bh->b_data,
3207 + ac->ac_b_ex.fe_start, ac->ac_b_ex.fe_len);
3209 + spin_lock(sb_bgl_lock(sbi, ac->ac_b_ex.fe_group));
3210 + gdp->bg_free_blocks_count =
3211 + cpu_to_le16(le16_to_cpu(gdp->bg_free_blocks_count)
3212 + - ac->ac_b_ex.fe_len);
3213 + spin_unlock(sb_bgl_lock(sbi, ac->ac_b_ex.fe_group));
3214 + percpu_counter_mod(&sbi->s_freeblocks_counter, - ac->ac_b_ex.fe_len);
3216 + err = ext3_journal_dirty_metadata(handle, bitmap_bh);
3219 + err = ext3_journal_dirty_metadata(handle, gdp_bh);
3223 + brelse(bitmap_bh);
3228 + * here we normalize the request for a locality group
3229 + * XXX: should we try to preallocate more than the group has now?
3231 +void ext3_mb_normalize_group_request(struct ext3_allocation_context *ac)
3233 + struct super_block *sb = ac->ac_sb;
3234 + struct ext3_locality_group *lg = ac->ac_lg;
3236 + BUG_ON(lg == NULL);
3237 + if (EXT3_SB(sb)->s_stripe)
3238 + ac->ac_g_ex.fe_len = EXT3_SB(sb)->s_stripe;
3240 + ac->ac_g_ex.fe_len = (1024 * 1024) >> sb->s_blocksize_bits;
3242 + mb_debug("#%u: goal %u blocks for locality group\n",
3243 + current->pid, ac->ac_g_ex.fe_len);
3247 + * Normalization means making request better in terms of
3248 + * size and alignment
3250 +void ext3_mb_normalize_request(struct ext3_allocation_context *ac,
3251 + struct ext3_allocation_request *ar)
3253 + struct ext3_inode_info *ei = EXT3_I(ac->ac_inode);
3254 + loff_t start, end, size, orig_size, orig_start;
3255 + struct list_head *cur;
3258 +	/* only normalize data requests; metadata requests
3259 +	   do not need preallocation */
3260 + if (!(ac->ac_flags & EXT3_MB_HINT_DATA))
3263 +	/* sometimes the caller may want exact blocks */
3264 + if (unlikely(ac->ac_flags & EXT3_MB_HINT_GOAL_ONLY))
3267 + /* caller may indicate that preallocation isn't
3268 + * required (it's a tail, for example) */
3269 + if (ac->ac_flags & EXT3_MB_HINT_NOPREALLOC)
3272 + if (ac->ac_flags & EXT3_MB_HINT_GROUP_ALLOC)
3273 + return ext3_mb_normalize_group_request(ac);
3275 + bsbits = ac->ac_sb->s_blocksize_bits;
3277 +	/* first, determine what the file size would be
3278 +	 * if the current request were allocated */
3279 + size = ac->ac_o_ex.fe_logical + ac->ac_o_ex.fe_len;
3280 + size = size << bsbits;
3281 + if (size < i_size_read(ac->ac_inode))
3282 + size = i_size_read(ac->ac_inode);
3284 + /* max available blocks in a free group */
3285 + max = EXT3_BLOCKS_PER_GROUP(ac->ac_sb) - 1 - 1
3286 + - EXT3_SB(ac->ac_sb)->s_itb_per_group;
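+/* NRL_CHECK_SIZE: true if the request fits within `size', or if no group
+ * could hold a `size' window anyway (max is the largest extent per group) */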
3288 +#define NRL_CHECK_SIZE(req,size,max,bits) \
3289 + (req <= (size) || max <= ((size) >> bits))
3291 + /* first, try to predict filesize */
3292 + /* XXX: should this table be tunable? */
3294 + if (size <= 16 * 1024) {
3296 + } else if (size <= 32 * 1024) {
3298 + } else if (size <= 64 * 1024) {
3300 + } else if (size <= 128 * 1024) {
3301 + size = 128 * 1024;
3302 + } else if (size <= 256 * 1024) {
3303 + size = 256 * 1024;
3304 + } else if (size <= 512 * 1024) {
3305 + size = 512 * 1024;
3306 + } else if (size <= 1024 * 1024) {
3307 + size = 1024 * 1024;
3308 + } else if (NRL_CHECK_SIZE(size, 4 * 1024 * 1024, max, bsbits)) {
3309 + start = ac->ac_o_ex.fe_logical << bsbits;
3310 + start = (start / (1024 * 1024)) * (1024 * 1024);
3311 + size = 1024 * 1024;
3312 + } else if (NRL_CHECK_SIZE(size, 8 * 1024 * 1024, max, bsbits)) {
3313 + start = ac->ac_o_ex.fe_logical << bsbits;
3314 + start = (start / (4 * (1024 * 1024))) * 4 * (1024 * 1024);
3315 + size = 4 * 1024 * 1024;
3316 + } else if(NRL_CHECK_SIZE(ac->ac_o_ex.fe_len,(8<<20)>>bsbits,max,bsbits)){
3317 + start = ac->ac_o_ex.fe_logical;
3318 + start = start << bsbits;
3319 + start = (start / (8 * (1024 * 1024))) * 8 * (1024 * 1024);
3320 + size = 8 * 1024 * 1024;
3322 + start = ac->ac_o_ex.fe_logical;
3323 + start = start << bsbits;
3324 + size = ac->ac_o_ex.fe_len << bsbits;
3326 + orig_size = size = size >> bsbits;
3327 + orig_start = start = start >> bsbits;
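+	/* start and size are now expressed in filesystem blocks, not bytes */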
3329 + /* don't cover already allocated blocks in selected range */
3330 + if (ar->pleft && start <= ar->lleft) {
3331 + size -= ar->lleft + 1 - start;
3332 + start = ar->lleft + 1;
3334 + if (ar->pright && start + size - 1 >= ar->lright)
3335 + size -= start + size - ar->lright;
3337 + end = start + size;
3339 + /* check we don't cross already preallocated blocks */
3341 + list_for_each_rcu(cur, &ei->i_prealloc_list) {
3342 + struct ext3_prealloc_space *pa;
3343 + unsigned long pa_end;
3345 + pa = list_entry(cur, struct ext3_prealloc_space, pa_inode_list);
3347 + if (pa->pa_deleted)
3349 + spin_lock(&pa->pa_lock);
3350 + if (pa->pa_deleted) {
3351 + spin_unlock(&pa->pa_lock);
3355 + pa_end = pa->pa_lstart + pa->pa_len;
3357 + /* PA must not overlap original request */
3358 + BUG_ON(!(ac->ac_o_ex.fe_logical >= pa_end ||
3359 + ac->ac_o_ex.fe_logical < pa->pa_lstart));
3361 +		/* skip PAs that the normalized request doesn't overlap with */
3362 + if (pa->pa_lstart >= end) {
3363 + spin_unlock(&pa->pa_lock);
3366 + if (pa_end <= start) {
3367 + spin_unlock(&pa->pa_lock);
3370 + BUG_ON(pa->pa_lstart <= start && pa_end >= end);
3372 + if (pa_end <= ac->ac_o_ex.fe_logical) {
3373 + BUG_ON(pa_end < start);
3377 + if (pa->pa_lstart > ac->ac_o_ex.fe_logical) {
3378 + BUG_ON(pa->pa_lstart > end);
3379 + end = pa->pa_lstart;
3381 + spin_unlock(&pa->pa_lock);
3383 + rcu_read_unlock();
3384 + size = end - start;
3386 + /* XXX: extra loop to check we really don't overlap preallocations */
3388 + list_for_each_rcu(cur, &ei->i_prealloc_list) {
3389 + struct ext3_prealloc_space *pa;
3390 + unsigned long pa_end;
3391 + pa = list_entry(cur, struct ext3_prealloc_space, pa_inode_list);
3392 + spin_lock(&pa->pa_lock);
3393 + if (pa->pa_deleted == 0) {
3394 + pa_end = pa->pa_lstart + pa->pa_len;
3395 + BUG_ON(!(start >= pa_end || end <= pa->pa_lstart));
3397 + spin_unlock(&pa->pa_lock);
3399 + rcu_read_unlock();
3401 + if (start + size <= ac->ac_o_ex.fe_logical &&
3402 + start > ac->ac_o_ex.fe_logical) {
3403 + printk("start %lu, size %lu, fe_logical %lu\n",
3404 + (unsigned long) start, (unsigned long) size,
3405 + (unsigned long) ac->ac_o_ex.fe_logical);
3407 + BUG_ON(start + size <= ac->ac_o_ex.fe_logical &&
3408 + start > ac->ac_o_ex.fe_logical);
3410 + /* now prepare goal request */
3411 + BUG_ON(size <= 0 || size >= EXT3_BLOCKS_PER_GROUP(ac->ac_sb));
3412 + if (size < ac->ac_o_ex.fe_len) {
3413 + /* XXX: don't normalize tails? */
3416 +	/* XXX: is it better to align blocks with respect to logical placement
3417 +	 * or to satisfy a big request as is */
3418 + ac->ac_g_ex.fe_logical = start;
3419 + ac->ac_g_ex.fe_len = size;
3421 + mb_debug("goal: %u(was %u) blocks at %u\n", (unsigned) size,
3422 + (unsigned) orig_size, (unsigned) start);
3425 +void ext3_mb_collect_stats(struct ext3_allocation_context *ac)
3427 + struct ext3_sb_info *sbi = EXT3_SB(ac->ac_sb);
3429 + if (sbi->s_mb_stats && ac->ac_g_ex.fe_len > 1) {
3430 + atomic_inc(&sbi->s_bal_reqs);
3431 + atomic_add(ac->ac_b_ex.fe_len, &sbi->s_bal_allocated);
3432 + if (ac->ac_o_ex.fe_len >= ac->ac_g_ex.fe_len)
3433 + atomic_inc(&sbi->s_bal_success);
3434 + atomic_add(ac->ac_found, &sbi->s_bal_ex_scanned);
3435 + if (ac->ac_g_ex.fe_start == ac->ac_b_ex.fe_start &&
3436 + ac->ac_g_ex.fe_group == ac->ac_b_ex.fe_group)
3437 + atomic_inc(&sbi->s_bal_goals);
3438 + if (ac->ac_found > sbi->s_mb_max_to_scan)
3439 + atomic_inc(&sbi->s_bal_breaks);
3442 + ext3_mb_store_history(ac);
3446 + * use blocks preallocated to inode
3448 +void ext3_mb_use_inode_pa(struct ext3_allocation_context *ac,
3449 + struct ext3_prealloc_space *pa)
3451 + unsigned long start, len;
3453 + /* found preallocated blocks, use them */
3454 + start = pa->pa_pstart + (ac->ac_o_ex.fe_logical - pa->pa_lstart);
3455 + len = min(pa->pa_pstart + pa->pa_len, start + ac->ac_o_ex.fe_len);
3456 + len = len - start;
3457 + ext3_get_group_no_and_offset(ac->ac_sb, start, &ac->ac_b_ex.fe_group,
3458 + &ac->ac_b_ex.fe_start);
3459 + ac->ac_b_ex.fe_len = len;
3460 + ac->ac_status = AC_STATUS_FOUND;
3463 + BUG_ON(start < pa->pa_pstart);
3464 + BUG_ON(start + len > pa->pa_pstart + pa->pa_len);
3465 + BUG_ON(pa->pa_free < len);
3466 + pa->pa_free -= len;
3468 + mb_debug("use %lu/%lu from inode pa %p\n", start, len, pa);
3472 + * use blocks preallocated to locality group
3474 +void ext3_mb_use_group_pa(struct ext3_allocation_context *ac,
3475 + struct ext3_prealloc_space *pa)
3477 + unsigned len = ac->ac_o_ex.fe_len;
3479 + ext3_get_group_no_and_offset(ac->ac_sb, pa->pa_pstart,
3480 + &ac->ac_b_ex.fe_group,
3481 + &ac->ac_b_ex.fe_start);
3482 + ac->ac_b_ex.fe_len = len;
3483 + ac->ac_status = AC_STATUS_FOUND;
3486 +	/* we don't correct pa_pstart or pa_len here to avoid a
3487 +	 * possible race while the group is being loaded concurrently;
3488 +	 * instead we correct the pa later, after blocks are marked
3489 +	 * in the on-disk bitmap -- see ext3_mb_release_context() */
3490 + mb_debug("use %lu/%lu from group pa %p\n", pa->pa_lstart-len, len, pa);
3494 + * search goal blocks in preallocated space
3496 +int ext3_mb_use_preallocated(struct ext3_allocation_context *ac)
3498 + struct ext3_inode_info *ei = EXT3_I(ac->ac_inode);
3499 + struct ext3_locality_group *lg;
3500 + struct ext3_prealloc_space *pa;
3501 + struct list_head *cur;
3503 + /* only data can be preallocated */
3504 + if (!(ac->ac_flags & EXT3_MB_HINT_DATA))
3507 + /* first, try per-file preallocation */
3509 + list_for_each_rcu(cur, &ei->i_prealloc_list) {
3510 + pa = list_entry(cur, struct ext3_prealloc_space, pa_inode_list);
3512 + /* all fields in this condition don't change,
3513 + * so we can skip locking for them */
3514 + if (ac->ac_o_ex.fe_logical < pa->pa_lstart ||
3515 + ac->ac_o_ex.fe_logical >= pa->pa_lstart + pa->pa_len)
3518 + /* found preallocated blocks, use them */
3519 + spin_lock(&pa->pa_lock);
3520 + if (pa->pa_deleted == 0 && pa->pa_free) {
3521 + atomic_inc(&pa->pa_count);
3522 + ext3_mb_use_inode_pa(ac, pa);
3523 + spin_unlock(&pa->pa_lock);
3524 + ac->ac_criteria = 10;
3525 + rcu_read_unlock();
3528 + spin_unlock(&pa->pa_lock);
3530 + rcu_read_unlock();
3532 + /* can we use group allocation? */
3533 + if (!(ac->ac_flags & EXT3_MB_HINT_GROUP_ALLOC))
3536 + /* inode may have no locality group for some reason */
3542 + list_for_each_rcu(cur, &lg->lg_prealloc_list) {
3543 + pa = list_entry(cur, struct ext3_prealloc_space, pa_inode_list);
3544 + spin_lock(&pa->pa_lock);
3545 + if (pa->pa_deleted == 0 && pa->pa_free >= ac->ac_o_ex.fe_len) {
3546 + atomic_inc(&pa->pa_count);
3547 + ext3_mb_use_group_pa(ac, pa);
3548 + spin_unlock(&pa->pa_lock);
3549 + ac->ac_criteria = 20;
3550 + rcu_read_unlock();
3553 + spin_unlock(&pa->pa_lock);
3555 + rcu_read_unlock();
3561 + * the function goes through all preallocations in this group and marks them
3562 + * used in the in-core bitmap. the buddy must be generated from this bitmap
3564 +void ext3_mb_generate_from_pa(struct super_block *sb, void *bitmap, int group)
3566 + struct ext3_group_info *grp = EXT3_GROUP_INFO(sb, group);
3567 + struct ext3_prealloc_space *pa;
3568 + struct list_head *cur;
3569 + unsigned long groupnr;
3570 + unsigned long start;
3571 + int preallocated = 0, count = 0, len;
3573 +	/* every form of preallocation discard loads the group first,
3574 +	 * so the only competing code is preallocation use.
3575 +	 * we don't need any locking here.
3576 +	 * notice we do NOT ignore preallocations with pa_deleted set;
3577 +	 * otherwise we could leave used blocks available for
3578 +	 * allocation in the buddy while a concurrent ext3_mb_put_pa()
3579 +	 * is dropping the preallocation
3581 + list_for_each_rcu(cur, &grp->bb_prealloc_list) {
3582 + pa = list_entry(cur, struct ext3_prealloc_space, pa_group_list);
3583 + spin_lock(&pa->pa_lock);
3584 + ext3_get_group_no_and_offset(sb, pa->pa_pstart, &groupnr, &start);
3586 + spin_unlock(&pa->pa_lock);
3587 + if (unlikely(len == 0))
3589 + BUG_ON(groupnr != group && len != 0);
3590 + mb_set_bits(sb_bgl_lock(EXT3_SB(sb), group), bitmap, start,len);
3591 + preallocated += len;
3594 +	mb_debug("preallocated %u for group %u\n", preallocated, group);
3597 +#if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,5)
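+/* kernels after 2.6.5 pass a struct rcu_head * to the call_rcu() callback;
+ * older kernels pass the data pointer directly, hence the two variants */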
3598 +static void ext3_mb_pa_callback(struct rcu_head *head)
3600 + struct ext3_prealloc_space *pa;
3601 + pa = container_of(head, struct ext3_prealloc_space, u.pa_rcu);
3602 + kmem_cache_free(ext3_pspace_cachep, pa);
3604 +#define mb_call_rcu(__pa) call_rcu(&(__pa)->u.pa_rcu, ext3_mb_pa_callback)
3606 +static void ext3_mb_pa_callback(void *pa)
3608 + kmem_cache_free(ext3_pspace_cachep, pa);
3610 +#define mb_call_rcu(__pa) call_rcu(&(__pa)->u.pa_rcu, ext3_mb_pa_callback, pa)
3614 + * drops a reference to preallocated space descriptor
3615 + * if this was the last reference and the space is consumed
3617 +void ext3_mb_put_pa(struct ext3_allocation_context *ac,
3618 + struct super_block *sb, struct ext3_prealloc_space *pa)
3620 + unsigned long grp;
3622 + if (!atomic_dec_and_test(&pa->pa_count) || pa->pa_free != 0)
3625 + /* in this short window concurrent discard can set pa_deleted */
3626 + spin_lock(&pa->pa_lock);
3627 +	if (pa->pa_deleted == 1) {
3628 + spin_unlock(&pa->pa_lock);
3632 + pa->pa_deleted = 1;
3633 + spin_unlock(&pa->pa_lock);
3635 + /* -1 is to protect from crossing allocation group */
3636 + ext3_get_group_no_and_offset(sb, pa->pa_pstart - 1, &grp, NULL);
3641 + * P1 (buddy init) P2 (regular allocation)
3642 + * find block B in PA
3643 + * copy on-disk bitmap to buddy
3644 + * mark B in on-disk bitmap
3645 + * drop PA from group
3646 + * mark all PAs in buddy
3648 + * thus, P1 initializes buddy with B available. to prevent this
3649 + * we make "copy" and "mark all PAs" atomic and serialize "drop PA"
3650 + * against that pair
3652 + ext3_lock_group(sb, grp);
3653 + list_del_rcu(&pa->pa_group_list);
3654 + ext3_unlock_group(sb, grp);
3656 + spin_lock(pa->pa_obj_lock);
3657 + list_del_rcu(&pa->pa_inode_list);
3658 + spin_unlock(pa->pa_obj_lock);
3664 + * creates new preallocated space for given inode
3666 +int ext3_mb_new_inode_pa(struct ext3_allocation_context *ac)
3668 + struct super_block *sb = ac->ac_sb;
3669 + struct ext3_prealloc_space *pa;
3670 + struct ext3_group_info *grp;
3671 + struct ext3_inode_info *ei;
3673 +	/* preallocate only when the found space is larger than requested */
3674 + BUG_ON(ac->ac_o_ex.fe_len >= ac->ac_b_ex.fe_len);
3675 + BUG_ON(ac->ac_status != AC_STATUS_FOUND);
3676 + BUG_ON(!S_ISREG(ac->ac_inode->i_mode));
3678 + pa = kmem_cache_alloc(ext3_pspace_cachep, GFP_NOFS);
3682 + if (ac->ac_b_ex.fe_len < ac->ac_g_ex.fe_len) {
3683 + int winl, wins, win, offs;
3685 +		/* we can't allocate as much as the normalizer wants,
3686 +		 * so the found space must get a proper lstart
3687 +		 * to cover the original request */
3688 + BUG_ON(ac->ac_g_ex.fe_logical > ac->ac_o_ex.fe_logical);
3689 + BUG_ON(ac->ac_g_ex.fe_len < ac->ac_o_ex.fe_len);
3691 +		/* we're limited by the original request in that
3692 +		 * the logical block must be covered anyway;
3693 +		 * winl is the window we can move our chunk within */
3694 + winl = ac->ac_o_ex.fe_logical - ac->ac_g_ex.fe_logical;
3696 + /* also, we should cover whole original request */
3697 + wins = ac->ac_b_ex.fe_len - ac->ac_o_ex.fe_len;
3699 + /* the smallest one defines real window */
3700 + win = min(winl, wins);
3702 + offs = ac->ac_o_ex.fe_logical % ac->ac_b_ex.fe_len;
3703 + if (offs && offs < win)
3706 + ac->ac_b_ex.fe_logical = ac->ac_o_ex.fe_logical - win;
3707 + BUG_ON(ac->ac_o_ex.fe_logical < ac->ac_b_ex.fe_logical);
3708 + BUG_ON(ac->ac_o_ex.fe_len > ac->ac_b_ex.fe_len);
3711 + /* preallocation can change ac_b_ex, thus we store actually
3712 + * allocated blocks for history */
3713 + ac->ac_f_ex = ac->ac_b_ex;
3715 + pa->pa_lstart = ac->ac_b_ex.fe_logical;
3716 + pa->pa_pstart = ext3_grp_offs_to_block(sb, &ac->ac_b_ex);
3717 + pa->pa_len = ac->ac_b_ex.fe_len;
3718 + pa->pa_free = pa->pa_len;
3719 + atomic_set(&pa->pa_count, 1);
3720 + spin_lock_init(&pa->pa_lock);
3721 + pa->pa_deleted = 0;
3722 + pa->pa_linear = 0;
3724 + mb_debug("new inode pa %p: %lu/%lu for %lu\n", pa,
3725 + pa->pa_pstart, pa->pa_len, pa->pa_lstart);
3727 + ext3_mb_use_inode_pa(ac, pa);
3728 + atomic_add(pa->pa_free, &EXT3_SB(sb)->s_mb_preallocated);
3730 + ei = EXT3_I(ac->ac_inode);
3731 + grp = EXT3_GROUP_INFO(sb, ac->ac_b_ex.fe_group);
3733 + pa->pa_obj_lock = &ei->i_prealloc_lock;
3734 + pa->pa_inode = ac->ac_inode;
3736 + ext3_lock_group(sb, ac->ac_b_ex.fe_group);
3737 + list_add_rcu(&pa->pa_group_list, &grp->bb_prealloc_list);
3738 + ext3_unlock_group(sb, ac->ac_b_ex.fe_group);
3740 + spin_lock(pa->pa_obj_lock);
3741 + list_add_rcu(&pa->pa_inode_list, &ei->i_prealloc_list);
3742 + spin_unlock(pa->pa_obj_lock);
3748 + * creates new preallocated space for the locality group the inode belongs to
3750 +int ext3_mb_new_group_pa(struct ext3_allocation_context *ac)
3752 + struct super_block *sb = ac->ac_sb;
3753 + struct ext3_locality_group *lg;
3754 + struct ext3_prealloc_space *pa;
3755 + struct ext3_group_info *grp;
3757 +	/* preallocate only when the found space is larger than requested */
3758 + BUG_ON(ac->ac_o_ex.fe_len >= ac->ac_b_ex.fe_len);
3759 + BUG_ON(ac->ac_status != AC_STATUS_FOUND);
3760 + BUG_ON(!S_ISREG(ac->ac_inode->i_mode));
3762 + BUG_ON(ext3_pspace_cachep == NULL);
3763 + pa = kmem_cache_alloc(ext3_pspace_cachep, GFP_NOFS);
3767 + /* preallocation can change ac_b_ex, thus we store actually
3768 + * allocated blocks for history */
3769 + ac->ac_f_ex = ac->ac_b_ex;
3771 + pa->pa_pstart = ext3_grp_offs_to_block(sb, &ac->ac_b_ex);
3772 + pa->pa_lstart = pa->pa_pstart;
3773 + pa->pa_len = ac->ac_b_ex.fe_len;
3774 + pa->pa_free = pa->pa_len;
3775 + atomic_set(&pa->pa_count, 1);
3776 + spin_lock_init(&pa->pa_lock);
3777 + pa->pa_deleted = 0;
3778 + pa->pa_linear = 1;
3780 + mb_debug("new group pa %p: %lu/%lu for %lu\n", pa,
3781 + pa->pa_pstart, pa->pa_len, pa->pa_lstart);
3783 + ext3_mb_use_group_pa(ac, pa);
3784 + atomic_add(pa->pa_free, &EXT3_SB(sb)->s_mb_preallocated);
3786 + grp = EXT3_GROUP_INFO(sb, ac->ac_b_ex.fe_group);
3788 + BUG_ON(lg == NULL);
3790 + pa->pa_obj_lock = &lg->lg_prealloc_lock;
3791 + pa->pa_inode = NULL;
3793 + ext3_lock_group(sb, ac->ac_b_ex.fe_group);
3794 + list_add_rcu(&pa->pa_group_list, &grp->bb_prealloc_list);
3795 + ext3_unlock_group(sb, ac->ac_b_ex.fe_group);
3797 + spin_lock(pa->pa_obj_lock);
3798 + list_add_tail_rcu(&pa->pa_inode_list, &lg->lg_prealloc_list);
3799 + spin_unlock(pa->pa_obj_lock);
3804 +int ext3_mb_new_preallocation(struct ext3_allocation_context *ac)
3808 + if (ac->ac_flags & EXT3_MB_HINT_GROUP_ALLOC)
3809 + err = ext3_mb_new_group_pa(ac);
3811 + err = ext3_mb_new_inode_pa(ac);
3816 + * finds all unused blocks in on-disk bitmap, frees them in
3817 + * in-core bitmap and buddy.
3818 + * @pa must be unlinked from inode and group lists, so that
3819 + * nobody else can find/use it.
3820 + * the caller MUST hold group/inode locks.
3821 + * TODO: optimize the case when there are no in-core structures yet
3823 +int ext3_mb_release_inode_pa(struct ext3_buddy *e3b,
3824 + struct buffer_head *bitmap_bh,
3825 + struct ext3_prealloc_space *pa)
3827 + struct ext3_allocation_context ac;
3828 + struct super_block *sb = e3b->bd_sb;
3829 + struct ext3_sb_info *sbi = EXT3_SB(sb);
3830 + unsigned long bit, end, next, group;
3832 + int err = 0, free = 0;
3834 + BUG_ON(pa->pa_deleted == 0);
3835 + ext3_get_group_no_and_offset(sb, pa->pa_pstart, &group, &bit);
3836 + BUG_ON(group != e3b->bd_group && pa->pa_len != 0);
3837 + end = bit + pa->pa_len;
3840 + ac.ac_inode = pa->pa_inode;
3841 + ac.ac_op = EXT3_MB_HISTORY_DISCARD;
3843 + while (bit < end) {
3844 + bit = mb_find_next_zero_bit(bitmap_bh->b_data, end, bit);
3847 + next = mb_find_next_bit(bitmap_bh->b_data, end, bit);
3850 + start = group * EXT3_BLOCKS_PER_GROUP(sb) + bit +
3851 + le32_to_cpu(sbi->s_es->s_first_data_block);
3852 + mb_debug(" free preallocated %u/%u in group %u\n",
3853 + (unsigned) start, (unsigned) next - bit,
3854 + (unsigned) group);
3855 + free += next - bit;
3857 + ac.ac_b_ex.fe_group = group;
3858 + ac.ac_b_ex.fe_start = bit;
3859 + ac.ac_b_ex.fe_len = next - bit;
3860 + ac.ac_b_ex.fe_logical = 0;
3861 + ext3_mb_store_history(&ac);
3863 + mb_free_blocks(pa->pa_inode, e3b, bit, next - bit);
3866 + if (free != pa->pa_free) {
3867 +		printk("pa %p: logical %lu, phys. %lu, len %lu\n",
3868 + pa, (unsigned long) pa->pa_lstart,
3869 + (unsigned long) pa->pa_pstart,
3870 + (unsigned long) pa->pa_len);
3871 + printk("free %u, pa_free %u\n", free, pa->pa_free);
3873 + BUG_ON(free != pa->pa_free);
3874 + atomic_add(free, &sbi->s_mb_discarded);
3879 +int ext3_mb_release_group_pa(struct ext3_buddy *e3b,
3880 + struct ext3_prealloc_space *pa)
3882 + struct ext3_allocation_context ac;
3883 + struct super_block *sb = e3b->bd_sb;
3884 + unsigned long bit, group;
3886 + ac.ac_op = EXT3_MB_HISTORY_DISCARD;
3888 + BUG_ON(pa->pa_deleted == 0);
3889 + ext3_get_group_no_and_offset(sb, pa->pa_pstart, &group, &bit);
3890 + BUG_ON(group != e3b->bd_group && pa->pa_len != 0);
3891 + mb_free_blocks(pa->pa_inode, e3b, bit, pa->pa_len);
3892 + atomic_add(pa->pa_len, &EXT3_SB(sb)->s_mb_discarded);
3895 + ac.ac_inode = NULL;
3896 + ac.ac_b_ex.fe_group = group;
3897 + ac.ac_b_ex.fe_start = bit;
3898 + ac.ac_b_ex.fe_len = pa->pa_len;
3899 + ac.ac_b_ex.fe_logical = 0;
3900 + ext3_mb_store_history(&ac);
3906 + * releases all preallocations in given group
3908 + * first, we need to decide discard policy:
3909 + * - when do we discard
3911 + * - how many do we discard
3912 + * 1) how many requested
3914 +int ext3_mb_discard_group_preallocations(struct super_block *sb,
3915 + int group, int needed)
3917 + struct ext3_group_info *grp = EXT3_GROUP_INFO(sb, group);
3918 + struct buffer_head *bitmap_bh = NULL;
3919 + struct ext3_prealloc_space *pa, *tmp;
3920 + struct list_head list;
3921 + struct ext3_buddy e3b;
3922 + int err, busy, free = 0;
3924 + mb_debug("discard preallocation for group %lu\n", group);
3926 + if (list_empty(&grp->bb_prealloc_list))
3929 + bitmap_bh = read_block_bitmap(sb, group);
3930 + if (bitmap_bh == NULL) {
3931 + /* error handling here */
3932 + ext3_mb_release_desc(&e3b);
3933 + BUG_ON(bitmap_bh == NULL);
3936 + err = ext3_mb_load_buddy(sb, group, &e3b);
3937 + BUG_ON(err != 0); /* error handling here */
3940 + needed = EXT3_BLOCKS_PER_GROUP(sb) + 1;
3942 + grp = EXT3_GROUP_INFO(sb, group);
3943 + INIT_LIST_HEAD(&list);
3947 + ext3_lock_group(sb, group);
3948 + list_for_each_entry_safe (pa, tmp, &grp->bb_prealloc_list, pa_group_list) {
3949 + spin_lock(&pa->pa_lock);
3950 + if (atomic_read(&pa->pa_count)) {
3951 + spin_unlock(&pa->pa_lock);
3955 + if (pa->pa_deleted) {
3956 + spin_unlock(&pa->pa_lock);
3960 + /* seems this one can be freed ... */
3961 + pa->pa_deleted = 1;
3963 + /* we can trust pa_free ... */
3964 + free += pa->pa_free;
3966 + spin_unlock(&pa->pa_lock);
3968 + list_del_rcu(&pa->pa_group_list);
3969 + list_add(&pa->u.pa_tmp_list, &list);
3972 + /* if we still need more blocks and some PAs were used, try again */
3973 + if (free < needed && busy) {
3974 + ext3_unlock_group(sb, group);
3978 + /* found anything to free? */
3979 + if (list_empty(&list)) {
3980 + BUG_ON(free != 0);
3984 + /* now free all selected PAs */
3985 + list_for_each_entry_safe(pa, tmp, &list, u.pa_tmp_list) {
3987 + /* remove from object (inode or locality group) */
3988 + spin_lock(pa->pa_obj_lock);
3989 + list_del_rcu(&pa->pa_inode_list);
3990 + spin_unlock(pa->pa_obj_lock);
3992 + if (pa->pa_linear)
3993 + ext3_mb_release_group_pa(&e3b, pa);
3995 + ext3_mb_release_inode_pa(&e3b, bitmap_bh, pa);
3997 + list_del(&pa->u.pa_tmp_list);
4002 + ext3_unlock_group(sb, group);
4003 + ext3_mb_release_desc(&e3b);
4004 + brelse(bitmap_bh);
4009 + * releases all unused preallocated blocks for the given inode
4011 +void ext3_mb_discard_inode_preallocations(struct inode *inode)
4013 + struct ext3_inode_info *ei = EXT3_I(inode);
4014 + struct super_block *sb = inode->i_sb;
4015 + struct buffer_head *bitmap_bh = NULL;
4016 + struct ext3_prealloc_space *pa, *tmp;
4017 + unsigned long group = 0;
4018 + struct list_head list;
4019 + struct ext3_buddy e3b;
4022 + if (!test_opt(sb, MBALLOC) || !S_ISREG(inode->i_mode)) {
4023 + /*BUG_ON(!list_empty(&ei->i_prealloc_list));*/
4027 + mb_debug("discard preallocation for inode %lu\n", inode->i_ino);
4029 + INIT_LIST_HEAD(&list);
4032 + /* first, collect all pa's in the inode */
4033 + spin_lock(&ei->i_prealloc_lock);
4034 + while (!list_empty(&ei->i_prealloc_list)) {
4035 + pa = list_entry(ei->i_prealloc_list.next,
4036 + struct ext3_prealloc_space, pa_inode_list);
4037 + BUG_ON(pa->pa_obj_lock != &ei->i_prealloc_lock);
4038 + spin_lock(&pa->pa_lock);
4039 + if (atomic_read(&pa->pa_count)) {
4040 + /* this shouldn't happen often - nobody should
4041 + * use preallocation while we're discarding it */
4042 + spin_unlock(&pa->pa_lock);
4043 + spin_unlock(&ei->i_prealloc_lock);
4044 + current->state = TASK_UNINTERRUPTIBLE;
4045 + schedule_timeout(HZ);
4049 + if (pa->pa_deleted == 0) {
4050 + pa->pa_deleted = 1;
4051 + spin_unlock(&pa->pa_lock);
4052 + list_del_rcu(&pa->pa_inode_list);
4053 + list_add(&pa->u.pa_tmp_list, &list);
4057 + /* someone is deleting pa right now */
4058 + spin_unlock(&pa->pa_lock);
4059 + spin_unlock(&ei->i_prealloc_lock);
4061 +			/* we have to wait here because pa_deleted
4062 +			 * doesn't mean the pa is already unlinked from
4063 +			 * the list. as we might be called from
4064 +			 * ->clear_inode() the inode will get freed
4065 +			 * and a concurrent thread which is unlinking
4066 +			 * the pa from the inode's list may access already
4067 +			 * freed memory -- bad, bad, bad */
4069 + /* XXX: if this happens too often, we can
4070 + * add a flag to force wait only in case
4071 + * of ->clear_inode(), but not in case of
4072 + * regular truncate */
4073 + current->state = TASK_UNINTERRUPTIBLE;
4074 + schedule_timeout(HZ);
4077 + spin_unlock(&ei->i_prealloc_lock);
4079 + list_for_each_entry_safe(pa, tmp, &list, u.pa_tmp_list) {
4080 + BUG_ON(pa->pa_linear != 0);
4081 + ext3_get_group_no_and_offset(sb, pa->pa_pstart, &group, NULL);
4083 + err = ext3_mb_load_buddy(sb, group, &e3b);
4084 + BUG_ON(err != 0); /* error handling here */
4086 + bitmap_bh = read_block_bitmap(sb, group);
4088 + ext3_lock_group(sb, group);
4089 + list_del_rcu(&pa->pa_group_list);
4091 +		/* bitmap_bh can be NULL due to an I/O error; at worst
4092 +		 * we leave some free blocks unavailable --
4093 +		 * no need to go read-only for that */
4094 + if (bitmap_bh != NULL)
4095 + ext3_mb_release_inode_pa(&e3b, bitmap_bh, pa);
4096 + ext3_unlock_group(sb, group);
4098 + ext3_mb_release_desc(&e3b);
4099 + brelse(bitmap_bh);
4101 + list_del(&pa->u.pa_tmp_list);
4107 + * finds all preallocated spaces and returns the freed blocks to them;
4108 + * if a preallocated space becomes full (no block is used from the space)
4109 + * then the function frees the space in the buddy
4110 + * XXX: at the moment, truncate (which is the only way to free blocks)
4111 + * discards all preallocations
4113 +void ext3_mb_return_to_preallocation(struct inode *inode, struct ext3_buddy *e3b,
4114 + sector_t block, int count)
4116 + BUG_ON(!list_empty(&EXT3_I(inode)->i_prealloc_list));
4119 +void ext3_mb_show_ac(struct ext3_allocation_context *ac)
4122 + struct super_block *sb = ac->ac_sb;
4125 + printk(KERN_ERR "EXT3-fs: can't allocate: status %d flags %d\n",
4126 + ac->ac_status, ac->ac_flags);
4127 + printk(KERN_ERR "EXT3-fs: orig %lu/%lu/%lu@%lu, goal %lu/%lu/%lu@%lu, "
4128 + "best %lu/%lu/%lu@%lu cr %d\n",
4129 + ac->ac_o_ex.fe_group, ac->ac_o_ex.fe_start,
4130 + ac->ac_o_ex.fe_len, ac->ac_o_ex.fe_logical,
4131 + ac->ac_g_ex.fe_group, ac->ac_g_ex.fe_start,
4132 + ac->ac_g_ex.fe_len, ac->ac_g_ex.fe_logical,
4133 + ac->ac_b_ex.fe_group, ac->ac_b_ex.fe_start,
4134 + ac->ac_b_ex.fe_len, ac->ac_b_ex.fe_logical,
4136 + printk(KERN_ERR "EXT3-fs: %lu scanned, %d found\n", ac->ac_ex_scanned,
4138 + printk("EXT3-fs: groups: ");
4139 + for (i = 0; i < EXT3_SB(sb)->s_groups_count; i++) {
4140 + struct ext3_group_info *grp = EXT3_GROUP_INFO(sb, i);
4141 + struct ext3_prealloc_space *pa;
4142 + unsigned long start;
4143 + struct list_head *cur;
4144 + list_for_each_rcu(cur, &grp->bb_prealloc_list) {
4145 + pa = list_entry(cur, struct ext3_prealloc_space,
4147 + spin_lock(&pa->pa_lock);
4148 + ext3_get_group_no_and_offset(sb, pa->pa_pstart, NULL, &start);
4149 + spin_unlock(&pa->pa_lock);
4150 + printk("PA:%u:%lu:%u ", i, start, pa->pa_len);
4153 + if (grp->bb_free == 0)
4155 + printk("%d: %d/%d ", i, grp->bb_free, grp->bb_fragments);
4162 +void ext3_mb_group_or_file(struct ext3_allocation_context *ac)
4164 + struct ext3_sb_info *sbi = EXT3_SB(ac->ac_sb);
4165 + int bsbits = ac->ac_sb->s_blocksize_bits;
4166 + loff_t size, isize;
4168 + if (!(ac->ac_flags & EXT3_MB_HINT_DATA))
4171 + size = ac->ac_o_ex.fe_logical + ac->ac_o_ex.fe_len;
4172 + isize = i_size_read(ac->ac_inode) >> bsbits;
4176 + /* don't use group allocation for large files */
4177 + if (size >= sbi->s_mb_stream_request)
4180 + if (unlikely(ac->ac_flags & EXT3_MB_HINT_GOAL_ONLY))
4183 + BUG_ON(ac->ac_lg != NULL);
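+	/* allocations below the stream threshold share the current CPU's
+	 * locality group so small files get placed close to each other */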
4184 + ac->ac_lg = &sbi->s_locality_groups[smp_processor_id()];
4186 + /* we're going to use group allocation */
4187 + ac->ac_flags |= EXT3_MB_HINT_GROUP_ALLOC;
4189 + /* serialize all allocations in the group */
4190 + down(&ac->ac_lg->lg_sem);
4193 +int ext3_mb_initialize_context(struct ext3_allocation_context *ac,
4194 + struct ext3_allocation_request *ar)
4196 + struct super_block *sb = ar->inode->i_sb;
4197 + struct ext3_sb_info *sbi = EXT3_SB(sb);
4198 + struct ext3_super_block *es = sbi->s_es;
4199 + unsigned long group, len, goal;
4200 + unsigned long block;
4202 + /* we can't allocate > group size */
4204 + if (len >= EXT3_BLOCKS_PER_GROUP(sb) - 10)
4205 + len = EXT3_BLOCKS_PER_GROUP(sb) - 10;
4207 + /* start searching from the goal */
4209 + if (goal < le32_to_cpu(es->s_first_data_block) ||
4210 + goal >= le32_to_cpu(es->s_blocks_count))
4211 + goal = le32_to_cpu(es->s_first_data_block);
4212 + ext3_get_group_no_and_offset(sb, goal, &group, &block);
4214 + /* set up allocation goals */
4215 + ac->ac_b_ex.fe_logical = ar->logical;
4216 + ac->ac_b_ex.fe_group = 0;
4217 + ac->ac_b_ex.fe_start = 0;
4218 + ac->ac_b_ex.fe_len = 0;
4219 + ac->ac_status = AC_STATUS_CONTINUE;
4220 + ac->ac_groups_scanned = 0;
4221 + ac->ac_ex_scanned = 0;
4224 + ac->ac_inode = ar->inode;
4225 + ac->ac_o_ex.fe_logical = ar->logical;
4226 + ac->ac_o_ex.fe_group = group;
4227 + ac->ac_o_ex.fe_start = block;
4228 + ac->ac_o_ex.fe_len = len;
4229 + ac->ac_g_ex.fe_logical = ar->logical;
4230 + ac->ac_g_ex.fe_group = group;
4231 + ac->ac_g_ex.fe_start = block;
4232 + ac->ac_g_ex.fe_len = len;
4233 + ac->ac_f_ex.fe_len = 0;
4234 + ac->ac_flags = ar->flags;
4235 + ac->ac_2order = 0;
4236 + ac->ac_criteria = 0;
4238 + ac->ac_bitmap_page = NULL;
4239 + ac->ac_buddy_page = NULL;
4242 + /* we have to define the context: will we work with a file or a
4243 + * locality group? this is a policy decision, actually */
4244 + ext3_mb_group_or_file(ac);
4246 + mb_debug("init ac: %u blocks @ %llu, goal %llu, flags %x, 2^%d, "
4247 + "left: %llu/%llu, right %llu/%llu to %swritable\n",
4248 + (unsigned) ar->len, (unsigned) ar->logical,
4249 + (unsigned) ar->goal, ac->ac_flags, ac->ac_2order,
4250 + (unsigned) ar->lleft, (unsigned) ar->pleft,
4251 + (unsigned) ar->lright, (unsigned) ar->pright,
4252 + atomic_read(&ar->inode->i_writecount) ? "" : "non-");
4258 + * release all resources used in the allocation
4260 +int ext3_mb_release_context(struct ext3_allocation_context *ac)
4263 + if (ac->ac_pa->pa_linear) {
4264 + /* see comment in ext3_mb_use_group_pa() */
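+ /* a group (linear) PA is consumed from its head: advance its start
+ * and shrink the remaining window by the amount just used */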
4265 + spin_lock(&ac->ac_pa->pa_lock);
4266 + ac->ac_pa->pa_pstart += ac->ac_b_ex.fe_len;
4267 + ac->ac_pa->pa_lstart += ac->ac_b_ex.fe_len;
4268 + ac->ac_pa->pa_free -= ac->ac_b_ex.fe_len;
4269 + ac->ac_pa->pa_len -= ac->ac_b_ex.fe_len;
4270 + spin_unlock(&ac->ac_pa->pa_lock);
4272 + ext3_mb_put_pa(ac, ac->ac_sb, ac->ac_pa);
4274 + if (ac->ac_bitmap_page)
4275 + page_cache_release(ac->ac_bitmap_page);
4276 + if (ac->ac_buddy_page)
4277 + page_cache_release(ac->ac_buddy_page);
4278 + if (ac->ac_flags & EXT3_MB_HINT_GROUP_ALLOC)
4279 + up(&ac->ac_lg->lg_sem);
4280 + ext3_mb_collect_stats(ac);
4284 +int ext3_mb_discard_preallocations(struct super_block *sb, int needed)
4286 + int i, ret, freed = 0;
4288 + for (i = 0; i < EXT3_SB(sb)->s_groups_count && needed > 0; i++) {
4289 + ret = ext3_mb_discard_group_preallocations(sb, i, needed);
4298 + * Main entry point into mballoc to allocate blocks:
4299 + * it tries to use a preallocation first, then falls back
4300 + * to the regular allocator
4302 +unsigned long ext3_mb_new_blocks(handle_t *handle,
4303 + struct ext3_allocation_request *ar, int *errp)
4305 + struct ext3_allocation_context ac;
4306 + struct ext3_sb_info *sbi;
4307 + struct super_block *sb;
4308 + unsigned long block = 0;
4309 + int freed, inquota;
4311 + sb = ar->inode->i_sb;
4312 + sbi = EXT3_SB(sb);
4314 + if (!test_opt(sb, MBALLOC)) {
4315 + static int ext3_mballoc_warning = 0;
4316 + if (ext3_mballoc_warning++ == 0)
4317 + printk(KERN_ERR "EXT3-fs: multiblock request with "
4318 + "mballoc disabled!\n");
4320 + block = ext3_new_block_old(handle, ar->inode, ar->goal, errp);
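+ /* if the quota cannot cover the whole request, shrink it until it
+ * fits and do not preallocate for what is effectively a tail */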
4324 + while (ar->len && DQUOT_ALLOC_BLOCK(ar->inode, ar->len)) {
4325 + ar->flags |= EXT3_MB_HINT_NOPREALLOC;
4328 + if (ar->len == 0) {
4332 + inquota = ar->len;
4334 + ext3_mb_poll_new_transaction(sb, handle);
4336 + if ((*errp = ext3_mb_initialize_context(&ac, ar))) {
4341 + ac.ac_op = EXT3_MB_HISTORY_PREALLOC;
4342 + if (!ext3_mb_use_preallocated(&ac)) {
4344 + ac.ac_op = EXT3_MB_HISTORY_ALLOC;
4345 + ext3_mb_normalize_request(&ac, ar);
4348 + /* allocate space in core */
4349 + ext3_mb_regular_allocator(&ac);
4351 + /* as we've just preallocated more space than the
4352 + * user originally requested, we store the allocated
4353 + * space in a special descriptor */
4354 + if (ac.ac_status == AC_STATUS_FOUND &&
4355 + ac.ac_o_ex.fe_len < ac.ac_b_ex.fe_len)
4356 + ext3_mb_new_preallocation(&ac);
4359 + if (likely(ac.ac_status == AC_STATUS_FOUND)) {
4360 + ext3_mb_mark_diskspace_used(&ac, handle);
4362 + block = ext3_grp_offs_to_block(sb, &ac.ac_b_ex);
4363 + ar->len = ac.ac_b_ex.fe_len;
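+ /* allocation failed: try to reclaim space currently held by
+ * preallocations before the request is failed */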
4365 + freed = ext3_mb_discard_preallocations(sb, ac.ac_o_ex.fe_len);
4369 + ac.ac_b_ex.fe_len = 0;
4371 + ext3_mb_show_ac(&ac);
4374 + ext3_mb_release_context(&ac);
4377 + if (ar->len < inquota)
4378 + DQUOT_FREE_BLOCK(ar->inode, inquota - ar->len);
4382 +EXPORT_SYMBOL(ext3_mb_new_blocks);
4384 +int ext3_new_block(handle_t *handle, struct inode *inode,
4385 + unsigned long goal, int *errp)
4387 + struct ext3_allocation_request ar;
4388 + unsigned long ret;
4390 + if (!test_opt(inode->i_sb, MBALLOC)) {
4391 + ret = ext3_new_block_old(handle, inode, goal, errp);
4404 + ret = ext3_mb_new_blocks(handle, &ar, errp);
4408 +void ext3_mb_poll_new_transaction(struct super_block *sb, handle_t *handle)
4410 + struct ext3_sb_info *sbi = EXT3_SB(sb);
4412 + if (sbi->s_last_transaction == handle->h_transaction->t_tid)
4415 + /* new transaction! time to close the last one and free blocks for
4416 + * the committed transaction. we know that only one transaction can be
4417 + * active, so the previous transaction may still be being logged and
4418 + * the transaction before the previous one is known to be already
4419 + * logged. this means that we may now free blocks freed in all
4420 + * transactions before the previous one. hope I'm clear enough ... */
4422 + spin_lock(&sbi->s_md_lock);
4423 + if (sbi->s_last_transaction != handle->h_transaction->t_tid) {
4424 + mb_debug("new transaction %lu, old %lu\n",
4425 + (unsigned long) handle->h_transaction->t_tid,
4426 + (unsigned long) sbi->s_last_transaction);
4427 + list_splice_init(&sbi->s_closed_transaction,
4428 + &sbi->s_committed_transaction);
4429 + list_splice_init(&sbi->s_active_transaction,
4430 + &sbi->s_closed_transaction);
4431 + sbi->s_last_transaction = handle->h_transaction->t_tid;
4433 + spin_unlock(&sbi->s_md_lock);
4435 + ext3_mb_free_committed_blocks(sb);
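+/*
+ * freed metadata blocks are recorded in a per-group container and
+ * returned to the buddy allocator only once their transaction is
+ * known to be committed, so they cannot be reused too early
+ */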
4438 +int ext3_mb_free_metadata(handle_t *handle, struct ext3_buddy *e3b,
4439 + int group, int block, int count)
4441 + struct ext3_group_info *db = e3b->bd_info;
4442 + struct super_block *sb = e3b->bd_sb;
4443 + struct ext3_sb_info *sbi = EXT3_SB(sb);
4444 + struct ext3_free_metadata *md;
4447 + BUG_ON(e3b->bd_bitmap_page == NULL);
4448 + BUG_ON(e3b->bd_buddy_page == NULL);
4450 + ext3_lock_group(sb, group);
4451 + for (i = 0; i < count; i++) {
4452 + md = db->bb_md_cur;
4453 + if (md && db->bb_tid != handle->h_transaction->t_tid) {
4454 + db->bb_md_cur = NULL;
4459 + ext3_unlock_group(sb, group);
4460 + md = kmalloc(sizeof(*md), GFP_KERNEL);
4464 + md->group = group;
4466 + ext3_lock_group(sb, group);
4467 + if (db->bb_md_cur == NULL) {
4468 + spin_lock(&sbi->s_md_lock);
4469 + list_add(&md->list, &sbi->s_active_transaction);
4470 + spin_unlock(&sbi->s_md_lock);
4471 + /* protect the buddy cache from being freed,
4472 + * otherwise we'd refresh it from the
4473 + * on-disk bitmap and lose not-yet-available blocks */
4475 + page_cache_get(e3b->bd_buddy_page);
4476 + page_cache_get(e3b->bd_bitmap_page);
4477 + db->bb_md_cur = md;
4478 + db->bb_tid = handle->h_transaction->t_tid;
4479 + mb_debug("new md 0x%p for group %u\n",
4483 + md = db->bb_md_cur;
4487 + BUG_ON(md->num >= EXT3_BB_MAX_BLOCKS);
4488 + md->blocks[md->num] = block + i;
4490 + if (md->num == EXT3_BB_MAX_BLOCKS) {
4491 + /* no more space, put the full container on the sb's list */
4492 + db->bb_md_cur = NULL;
4495 + ext3_unlock_group(sb, group);
4500 + * Main entry point into mballoc to free blocks
4502 +void ext3_mb_free_blocks(handle_t *handle, struct inode *inode,
4503 + unsigned long block, unsigned long count,
4504 + int metadata, int *freed)
4506 + struct buffer_head *bitmap_bh = NULL;
4507 + struct super_block *sb = inode->i_sb;
4508 + struct ext3_allocation_context ac;
4509 + struct ext3_group_desc *gdp;
4510 + struct ext3_super_block *es;
4511 + unsigned long bit, overflow;
4512 + struct buffer_head *gd_bh;
4513 + unsigned long block_group;
4514 + struct ext3_sb_info *sbi;
4515 + struct ext3_buddy e3b;
4520 + ext3_mb_poll_new_transaction(sb, handle);
4522 + sbi = EXT3_SB(sb);
4523 + es = EXT3_SB(sb)->s_es;
4524 + if (block < le32_to_cpu(es->s_first_data_block) ||
4525 + block + count < block ||
4526 + block + count > le32_to_cpu(es->s_blocks_count)) {
4527 + ext3_error (sb, __FUNCTION__,
4528 + "Freeing blocks not in datazone - "
4529 + "block = %lu, count = %lu", block, count);
4530 + goto error_return;
4533 + ext3_debug("freeing block %lu\n", block);
4535 + ac.ac_op = EXT3_MB_HISTORY_FREE;
4536 + ac.ac_inode = inode;
4541 + ext3_get_group_no_and_offset(sb, block, &block_group, &bit);
4544 + * Check to see if we are freeing blocks across a group boundary.
4547 + if (bit + count > EXT3_BLOCKS_PER_GROUP(sb)) {
4548 + overflow = bit + count - EXT3_BLOCKS_PER_GROUP(sb);
4549 + count -= overflow;
4551 + brelse(bitmap_bh);
4552 + bitmap_bh = read_block_bitmap(sb, block_group);
4554 + goto error_return;
4555 + gdp = ext3_get_group_desc (sb, block_group, &gd_bh);
4557 + goto error_return;
4559 + if (in_range (le32_to_cpu(gdp->bg_block_bitmap), block, count) ||
4560 + in_range (le32_to_cpu(gdp->bg_inode_bitmap), block, count) ||
4561 + in_range (block, le32_to_cpu(gdp->bg_inode_table),
4562 + EXT3_SB(sb)->s_itb_per_group) ||
4563 + in_range (block + count - 1, le32_to_cpu(gdp->bg_inode_table),
4564 + EXT3_SB(sb)->s_itb_per_group))
4565 + ext3_error(sb, __FUNCTION__,
4566 + "Freeing blocks in system zone - "
4567 + "Block = %lu, count = %lu", block, count);
4569 + BUFFER_TRACE(bitmap_bh, "getting write access");
4570 + err = ext3_journal_get_write_access(handle, bitmap_bh);
4572 + goto error_return;
4575 + * We are about to modify some metadata. Call the journal APIs
4576 + * to unshare ->b_data if a currently-committing transaction is using it.
4579 + BUFFER_TRACE(gd_bh, "get_write_access");
4580 + err = ext3_journal_get_write_access(handle, gd_bh);
4582 + goto error_return;
4584 + err = ext3_mb_load_buddy(sb, block_group, &e3b);
4586 + goto error_return;
4588 +#ifdef AGGRESSIVE_CHECK
4591 + for (i = 0; i < count; i++)
4592 + BUG_ON(!mb_test_bit(bit + i, bitmap_bh->b_data));
4595 + mb_clear_bits(sb_bgl_lock(sbi, block_group), bitmap_bh->b_data, bit,
4598 + /* We dirtied the bitmap block */
4599 + BUFFER_TRACE(bitmap_bh, "dirtied bitmap block");
4600 + err = ext3_journal_dirty_metadata(handle, bitmap_bh);
4602 + ac.ac_b_ex.fe_group = block_group;
4603 + ac.ac_b_ex.fe_start = bit;
4604 + ac.ac_b_ex.fe_len = count;
4605 + ext3_mb_store_history(&ac);
4608 + /* blocks being freed are metadata. these blocks shouldn't
4609 + * be used until this transaction is committed */
4610 + ext3_mb_free_metadata(handle, &e3b, block_group, bit, count);
4612 + ext3_lock_group(sb, block_group);
4613 + err = mb_free_blocks(inode, &e3b, bit, count);
4614 + ext3_mb_return_to_preallocation(inode, &e3b, block, count);
4615 + ext3_unlock_group(sb, block_group);
4619 + spin_lock(sb_bgl_lock(sbi, block_group));
4620 + gdp->bg_free_blocks_count =
4621 + cpu_to_le16(le16_to_cpu(gdp->bg_free_blocks_count) + count);
4622 + spin_unlock(sb_bgl_lock(sbi, block_group));
4623 + percpu_counter_mod(&sbi->s_freeblocks_counter, count);
4625 + ext3_mb_release_desc(&e3b);
4629 + /* And the group descriptor block */
4630 + BUFFER_TRACE(gd_bh, "dirtied group descriptor block");
4631 + ret = ext3_journal_dirty_metadata(handle, gd_bh);
4632 + if (!err) err = ret;
4634 + if (overflow && !err) {
4641 + brelse(bitmap_bh);
4642 + ext3_std_error(sb, err);