1 Index: linux-2.6.18-53.1.21/include/linux/ext3_fs.h
2 ===================================================================
3 --- linux-2.6.18-53.1.21.orig/include/linux/ext3_fs.h
4 +++ linux-2.6.18-53.1.21/include/linux/ext3_fs.h
6 #define ext3_debug(f, a...) do {} while (0)
9 +#define EXT3_MULTIBLOCK_ALLOCATOR 1
11 +#define EXT3_MB_HINT_MERGE 1 /* prefer goal again. length */
12 +#define EXT3_MB_HINT_RESERVED 2 /* blocks already reserved */
13 +#define EXT3_MB_HINT_METADATA 4 /* metadata is being allocated */
14 +#define EXT3_MB_HINT_FIRST 8 /* first blocks in the file */
15 +#define EXT3_MB_HINT_BEST 16 /* search for the best chunk */
16 +#define EXT3_MB_HINT_DATA 32 /* data is being allocated */
17 +#define EXT3_MB_HINT_NOPREALLOC 64 /* don't preallocate (for tails) */
18 +#define EXT3_MB_HINT_GROUP_ALLOC 128 /* allocate for locality group */
19 +#define EXT3_MB_HINT_GOAL_ONLY 256 /* allocate goal blocks or none */
20 +#define EXT3_MB_HINT_TRY_GOAL 512 /* goal is meaningful */
22 +struct ext3_allocation_request {
23 + struct inode *inode; /* target inode for block we're allocating */
24 + unsigned long logical; /* logical block in target inode */
25 + unsigned long goal; /* phys. target (a hint) */
26 + unsigned long lleft; /* the closest logical allocated block to the left */
27 + unsigned long pleft; /* phys. block for ^^^ */
28 + unsigned long lright; /* the closest logical allocated block to the right */
29 + unsigned long pright; /* phys. block for ^^^ */
30 + unsigned long len; /* how many blocks we want to allocate */
31 + unsigned long flags; /* flags. see above EXT3_MB_HINT_* */
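For orientation, a minimal sketch of how a caller might fill this request and invoke the allocator (hypothetical: handle, inode and prev_block are assumed to be in scope; ext3_mb_new_blocks() is declared further down in this header):

    /* hypothetical caller: ask for 16 data blocks at logical block 100,
     * hinting the physical block that follows the previous allocation */
    struct ext3_allocation_request ar = {
            .inode   = inode,
            .logical = 100,
            .goal    = prev_block + 1,
            .len     = 16,
            .flags   = EXT3_MB_HINT_DATA | EXT3_MB_HINT_TRY_GOAL,
    };
    int err;
    unsigned long block = ext3_mb_new_blocks(handle, &ar, &err);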
35 * Special inodes numbers
37 @@ -398,6 +423,14 @@ struct ext3_inode {
38 #define ext3_find_first_zero_bit ext2_find_first_zero_bit
39 #define ext3_find_next_zero_bit ext2_find_next_zero_bit
41 +#ifndef ext2_find_next_le_bit
42 +#ifdef __LITTLE_ENDIAN
43 +#define ext2_find_next_le_bit(addr, size, off) find_next_bit((addr), (size), (off))
45 +#error "mballoc needs a patch for big-endian systems - CFS bug 10634"
46 +#endif /* __LITTLE_ENDIAN */
47 +#endif /* !ext2_find_next_le_bit */
50 * Maximal mount counts between two filesystem checks
52 @@ -799,6 +832,20 @@ extern unsigned long ext3_count_dirs (st
53 extern void ext3_check_inodes_bitmap (struct super_block *);
54 extern unsigned long ext3_count_free (struct buffer_head *, unsigned);
57 +extern long ext3_mb_stats;
58 +extern long ext3_mb_max_to_scan;
59 +extern int ext3_mb_init(struct super_block *, int);
60 +extern int ext3_mb_release(struct super_block *);
61 +extern unsigned long ext3_mb_new_blocks(handle_t *, struct ext3_allocation_request *, int *);
62 +extern int ext3_mb_reserve_blocks(struct super_block *, int);
63 +extern void ext3_mb_release_blocks(struct super_block *, int);
65 +extern void ext3_mb_discard_inode_preallocations(struct inode *);
66 +extern int __init init_ext3_mb_proc(void);
67 +extern void exit_ext3_mb_proc(void);
68 +extern void ext3_mb_free_blocks(handle_t *, struct inode *, unsigned long, unsigned long, int, int *);
72 int ext3_forget(handle_t *handle, int is_metadata, struct inode *inode,
73 @@ -843,6 +890,10 @@ extern int ext3_group_extend(struct supe
74 ext3_fsblk_t n_blocks_count);
77 +extern struct proc_dir_entry *proc_root_ext3;
78 +extern int __init init_ext3_proc(void);
79 +extern void exit_ext3_proc(void);
81 extern void ext3_error (struct super_block *, const char *, const char *, ...)
82 __attribute__ ((format (printf, 3, 4)));
83 extern void __ext3_std_error (struct super_block *, const char *, int);
84 Index: linux-2.6.18-53.1.21/include/linux/ext3_fs_sb.h
85 ===================================================================
86 --- linux-2.6.18-53.1.21.orig/include/linux/ext3_fs_sb.h
87 +++ linux-2.6.18-53.1.21/include/linux/ext3_fs_sb.h
88 @@ -88,6 +88,68 @@ struct ext3_sb_info {
89 unsigned long s_ext_blocks;
90 unsigned long s_ext_extents;
93 + /* for buddy allocator */
94 + struct ext3_group_info ***s_group_info;
95 + struct inode *s_buddy_cache;
96 + long s_blocks_reserved;
97 + spinlock_t s_reserve_lock;
98 + struct list_head s_active_transaction;
99 + struct list_head s_closed_transaction;
100 + struct list_head s_committed_transaction;
101 + spinlock_t s_md_lock;
102 + tid_t s_last_transaction;
103 + unsigned short *s_mb_offsets, *s_mb_maxs;
106 + unsigned long s_mb_factor;
107 + unsigned long s_stripe;
108 + unsigned long s_mb_small_req;
109 + unsigned long s_mb_large_req;
110 + unsigned long s_mb_max_to_scan;
111 + unsigned long s_mb_min_to_scan;
112 + unsigned long s_mb_max_groups_to_scan;
113 + unsigned long s_mb_stats;
114 + unsigned long s_mb_order2_reqs;
115 + unsigned long *s_mb_prealloc_table;
116 + unsigned long s_mb_prealloc_table_size;
117 + unsigned long s_mb_group_prealloc;
118 + /* where last allocation was done - for stream allocation */
119 + unsigned long s_mb_last_group;
120 + unsigned long s_mb_last_start;
122 + /* history to debug policy */
123 + struct ext3_mb_history *s_mb_history;
124 + int s_mb_history_cur;
125 + int s_mb_history_max;
126 + int s_mb_history_num;
127 + struct proc_dir_entry *s_dev_proc;
128 + spinlock_t s_mb_history_lock;
129 + int s_mb_history_filter;
131 + /* stats for buddy allocator */
132 + spinlock_t s_mb_pa_lock;
133 + atomic_t s_bal_reqs; /* number of reqs with len > 1 */
134 + atomic_t s_bal_success; /* we found long enough chunks */
135 + atomic_t s_bal_allocated; /* in blocks */
136 + atomic_t s_bal_ex_scanned; /* total extents scanned */
137 + atomic_t s_bal_goals; /* goal hits */
138 + atomic_t s_bal_breaks; /* too long searches */
139 + atomic_t s_bal_2orders; /* 2^order hits */
140 + spinlock_t s_bal_lock;
141 + unsigned long s_mb_buddies_generated;
142 + unsigned long long s_mb_generation_time;
143 + atomic_t s_mb_lost_chunks;
144 + atomic_t s_mb_preallocated;
145 + atomic_t s_mb_discarded;
147 + /* locality groups */
148 + struct ext3_locality_group *s_locality_groups;
152 +#define EXT3_GROUP_INFO(sb, group) \
153 + EXT3_SB(sb)->s_group_info[(group) >> EXT3_DESC_PER_BLOCK_BITS(sb)] \
154 + [(group) & (EXT3_DESC_PER_BLOCK(sb) - 1)]
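The two-level s_group_info array splits the per-group metadata pointers into chunks of EXT3_DESC_PER_BLOCK entries; the macro above indexes it. A plain-C sketch of the same lookup, assuming 4KB blocks (EXT3_DESC_PER_BLOCK(sb) == 128, so the shift is 7):

    /* assumes 4KB blocks: EXT3_DESC_PER_BLOCK(sb) == 128 */
    struct ext3_group_info *gi = sbi->s_group_info[group >> 7][group & 127];
    /* e.g. group 300 -> s_group_info[2][44] */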
156 #endif /* _LINUX_EXT3_FS_SB */
157 Index: linux-2.6.18-53.1.21/fs/ext3/super.c
158 ===================================================================
159 --- linux-2.6.18-53.1.21.orig/fs/ext3/super.c
160 +++ linux-2.6.18-53.1.21/fs/ext3/super.c
161 @@ -391,6 +391,7 @@ static void ext3_put_super (struct super
162 struct ext3_super_block *es = sbi->s_es;
165 + ext3_mb_release(sb);
166 ext3_ext_release(sb);
167 ext3_xattr_put_super(sb);
168 journal_destroy(sbi->s_journal);
169 @@ -433,6 +434,8 @@ static void ext3_put_super (struct super
170 invalidate_bdev(sbi->journal_bdev, 0);
171 ext3_blkdev_remove(sbi);
173 + remove_proc_entry(sb->s_id, proc_root_ext3);
174 + sbi->s_dev_proc = NULL;
175 sb->s_fs_info = NULL;
178 @@ -458,6 +461,8 @@ static struct inode *ext3_alloc_inode(st
179 ei->vfs_inode.i_version = 1;
181 memset(&ei->i_cached_extent, 0, sizeof(ei->i_cached_extent));
182 + INIT_LIST_HEAD(&ei->i_prealloc_list);
183 + spin_lock_init(&ei->i_prealloc_lock);
184 return &ei->vfs_inode;
187 @@ -1454,6 +1459,13 @@ static int ext3_fill_super (struct super
188 sbi->s_mount_opt = 0;
189 sbi->s_resuid = EXT3_DEF_RESUID;
190 sbi->s_resgid = EXT3_DEF_RESGID;
191 + sbi->s_dev_proc = proc_mkdir(sb->s_id, proc_root_ext3);
192 + if (sbi->s_dev_proc == NULL) {
193 + printk(KERN_ERR "EXT3-fs: Unable to create %s\n", sb->s_id);
194 + sb->s_fs_info = NULL;
201 @@ -1857,6 +1869,8 @@ failed_mount:
202 ext3_blkdev_remove(sbi);
205 + remove_proc_entry(sb->s_id, proc_root_ext3);
206 + sbi->s_dev_proc = NULL;
207 sb->s_fs_info = NULL;
210 @@ -2782,9 +2796,46 @@ static struct file_system_type ext3_fs_t
211 .fs_flags = FS_REQUIRES_DEV,
214 +#define EXT3_ROOT "ext3"
215 +struct proc_dir_entry *proc_root_ext3;
217 +int __init init_ext3_proc(void)
221 + if ((ret = init_ext3_mb_proc()))
224 + proc_root_ext3 = proc_mkdir(EXT3_ROOT, proc_root_fs);
225 + if (proc_root_ext3 == NULL) {
226 + printk(KERN_ERR "EXT3-fs: Unable to create %s\n", EXT3_ROOT);
234 + exit_ext3_mb_proc();
239 +void exit_ext3_proc(void)
241 + exit_ext3_mb_proc();
242 + remove_proc_entry(EXT3_ROOT, proc_root_fs);
245 static int __init init_ext3_fs(void)
247 - int err = init_ext3_xattr();
250 + err = init_ext3_proc();
254 + err = init_ext3_xattr();
257 err = init_inodecache();
258 @@ -2806,6 +2857,7 @@ static void __exit exit_ext3_fs(void)
259 unregister_filesystem(&ext3_fs_type);
260 destroy_inodecache();
265 int ext3_map_inode_page(struct inode *inode, struct page *page,
266 Index: linux-2.6.18-53.1.21/fs/ext3/mballoc.c
267 ===================================================================
269 +++ linux-2.6.18-53.1.21/fs/ext3/mballoc.c
272 + * Copyright 2008 Sun Microsystems, Inc.
273 + * Written by Alex Tomas <alex@clusterfs.com>
275 + * This program is free software; you can redistribute it and/or modify
276 + * it under the terms of the GNU General Public License version 2 as
277 + * published by the Free Software Foundation.
279 + * This program is distributed in the hope that it will be useful,
280 + * but WITHOUT ANY WARRANTY; without even the implied warranty of
281 + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
282 + * GNU General Public License for more details.
284 + * You should have received a copy of the GNU General Public License
285 + * along with this program; if not, write to the Free Software
286 + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
291 + * mballoc.c contains the multiblocks allocation routines
294 +#include <linux/time.h>
295 +#include <linux/fs.h>
296 +#include <linux/namei.h>
297 +#include <linux/ext3_jbd.h>
298 +#include <linux/jbd.h>
299 +#include <linux/ext3_fs.h>
300 +#include <linux/quotaops.h>
301 +#include <linux/buffer_head.h>
302 +#include <linux/module.h>
303 +#include <linux/swap.h>
304 +#include <linux/proc_fs.h>
305 +#include <linux/pagemap.h>
306 +#include <linux/seq_file.h>
307 +#include <linux/version.h>
311 + * - test ext3_ext_search_left() and ext3_ext_search_right()
312 + * - search for metadata in a few groups
315 + * - normalization should take into account whether file is still open
316 + * - discard preallocations if no free space left (policy?)
317 + * - don't normalize tails
319 + * - reservation for superuser
322 + * - bitmap read-ahead (proposed by Oleg Drokin aka green)
323 + * - track min/max extents in each group for better group selection
324 + * - mb_mark_used() may allocate chunk right after splitting buddy
325 + * - tree of groups sorted by number of free blocks
330 + * mballoc operates on the following data:
332 + * - in-core buddy (actually includes buddy and bitmap)
333 + * - preallocation descriptors (PAs)
335 + * there are two types of preallocations:
337 + * assigned to a specific inode and can be used for this inode only.
338 + * it describes part of the inode's space preallocated to specific
339 + * physical blocks. any block from that preallocation can be used
340 + * independently. the descriptor just tracks the number of blocks
341 + * left unused. so, before taking a block from the descriptor, one
342 + * must make sure the corresponding logical block isn't allocated
343 + * yet. this also means that freeing any block within the
344 + * descriptor's range must discard all its preallocated blocks.
346 + * assigned to a specific locality group, which does not translate to
347 + * a permanent set of inodes: an inode can join and leave a group. space
348 + * from this type of preallocation can be used for any inode. thus
349 + * it's consumed from the beginning to the end.
351 + * relation between them can be expressed as:
352 + * in-core buddy = on-disk bitmap + preallocation descriptors
354 + * this means the blocks mballoc considers used are:
355 + * - allocated blocks (persistent)
356 + * - preallocated blocks (non-persistent)
358 + * consistency in mballoc world means that at any time a block is either
359 + * free or used in ALL structures. notice: "any time" should not be read
360 + * literally -- time is discrete and delimited by locks.
362 + * to keep it simple, we don't use block numbers, instead we count number of
363 + * blocks: how many blocks marked used/free in on-disk bitmap, buddy and PA.
365 + * all operations can be expressed as:
366 + * - init buddy: buddy = on-disk + PAs
367 + * - new PA: buddy += N; PA = N
368 + * - use inode PA: on-disk += N; PA -= N
369 + * - discard inode PA: buddy -= on-disk - PA; PA = 0
370 + * - use locality group PA: on-disk += N; PA -= N
371 + * - discard locality group PA: buddy -= PA; PA = 0
372 + * note: 'buddy -= on-disk - PA' is used to show that on-disk bitmap
373 + * is used in real operation because we can't know actual used
374 + * bits from PA, only from on-disk bitmap
376 + * if we follow this strict logic, then all operations above should be atomic.
377 + * given some of them can block, we'd have to use something like semaphores
378 + * killing performance on high-end SMP hardware. let's try to relax it using
379 + * the following knowledge:
380 + * 1) if buddy is referenced, it's already initialized
381 + * 2) while block is used in buddy and the buddy is referenced,
382 + * nobody can re-allocate that block
383 + * 3) we work on bitmaps and '+' actually means 'set bits'. if on-disk has
384 + * bit set and PA claims same block, it's OK. IOW, one can set bit in
385 + * on-disk bitmap if buddy has same bit set and/or PA covers the corresponding
388 + * so, now we're building a concurrency table:
391 + * blocks for PA are allocated in the buddy, buddy must be referenced
392 + * until PA is linked to allocation group to avoid concurrent buddy init
394 + * we need to make sure that either on-disk bitmap or PA has uptodate data
395 + * given (3) we care that PA-=N operation doesn't interfere with init
396 + * - discard inode PA
397 + * the simplest way would be to have buddy initialized by the discard
398 + * - use locality group PA
399 + * again PA-=N must be serialized with init
400 + * - discard locality group PA
401 + * the simplest way would be to have buddy initialized by the discard
404 + * i_truncate_mutex serializes them
405 + * - discard inode PA
406 + * discard process must wait until PA isn't used by another process
407 + * - use locality group PA
408 + * some mutex should serialize them
409 + * - discard locality group PA
410 + * discard process must wait until PA isn't used by another process
413 + * i_truncate_mutex or another mutex should serialize them
414 + * - discard inode PA
415 + * discard process must wait until PA isn't used by another process
416 + * - use locality group PA
417 + * nothing wrong here -- they're different PAs covering different blocks
418 + * - discard locality group PA
419 + * discard process must wait until PA isn't used by another process
421 + * now we're ready to draw a few conclusions:
422 + * - PA is referenced, and while it is, no discard is possible
423 + * - PA is referenced until its blocks are marked in the on-disk bitmap
424 + * - PA changes only after on-disk bitmap
425 + * - discard must not compete with init. either init is done before
426 + * any discard or they're serialized somehow
427 + * - buddy init as sum of on-disk bitmap and PAs is done atomically
429 + * a special case is when a PA has been consumed to emptiness. no need to
430 + * modify the buddy in this case, but we should care about concurrent init
435 + * Logic in a few words:
440 + * mark bits in on-disk bitmap
443 + * - use preallocation:
444 + * find proper PA (per-inode or group)
446 + * mark bits in on-disk bitmap
452 + * mark bits in on-disk bitmap
455 + * - discard preallocations in group:
457 + * move them onto local list
458 + * load on-disk bitmap
460 + * remove PA from object (inode or locality group)
461 + * mark free blocks in-core
463 + * - discard inode's preallocations:
470 + * - bitlock on a group (group)
471 + * - object (inode/locality) (object)
472 + * - per-pa lock (pa)
479 + * - find and use pa:
482 + * - release consumed pa:
487 + * - generate in-core bitmap:
491 + * - discard all for given object (inode, locality group):
496 + * - discard all for given group:
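To make the accounting rules above concrete, a toy trace in used-block counts (assumed numbers; the locality-group form of discard is used for simplicity):

    int ondisk = 100, pa = 0, buddy = 100; /* init: buddy = on-disk + PA */
    buddy += 8;  pa  = 8;                  /* new PA of 8 blocks         */
    ondisk += 3; pa -= 3;                  /* use 3 blocks from the PA   */
    buddy -= pa; pa  = 0;                  /* discard: 5 unused returned */
    /* the invariant buddy == ondisk + pa holds after every step */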
505 + * with AGGRESSIVE_CHECK the allocator runs consistency checks over
506 + * its structures. these checks slow things down a lot
508 +#define AGGRESSIVE_CHECK__
511 + * with DOUBLE_CHECK defined mballoc creates persistent in-core
512 + * bitmaps, maintains and uses them to check for double allocations
514 +#define DOUBLE_CHECK__
520 +#define mb_debug(fmt,a...) printk(fmt, ##a)
522 +#define mb_debug(fmt,a...)
526 + * with EXT3_MB_HISTORY mballoc stores last N allocations in memory
527 + * and you can monitor it in /proc/fs/ext3/<dev>/mb_history
529 +#define EXT3_MB_HISTORY
530 +#define EXT3_MB_HISTORY_ALLOC 1 /* allocation */
531 +#define EXT3_MB_HISTORY_PREALLOC 2 /* preallocated blocks used */
532 +#define EXT3_MB_HISTORY_DISCARD 4 /* preallocation discarded */
533 +#define EXT3_MB_HISTORY_FREE 8 /* free */
535 +#define EXT3_MB_HISTORY_DEFAULT (EXT3_MB_HISTORY_ALLOC | \
536 + EXT3_MB_HISTORY_PREALLOC | \
537 + EXT3_MB_HISTORY_DISCARD | \
538 + EXT3_MB_HISTORY_FREE)
541 + * How long mballoc can look for a best extent (in found extents)
543 +#define MB_DEFAULT_MAX_TO_SCAN 200
546 + * How long mballoc must look for a best extent
548 +#define MB_DEFAULT_MIN_TO_SCAN 10
551 + * How many groups mballoc will scan looking for the best chunk
553 +#define MB_DEFAULT_MAX_GROUPS_TO_SCAN 5
556 + * with 'ext3_mb_stats' the allocator will collect stats that will be
557 + * shown at umount. collecting them has a cost, though!
559 +#define MB_DEFAULT_STATS 1
562 + * files smaller than MB_DEFAULT_STREAM_THRESHOLD are served
563 + * by the stream allocator, whose purpose is to pack requests
564 + * as close to each other as possible to produce smooth I/O traffic
566 +#define MB_DEFAULT_STREAM_THRESHOLD 16 /* 64K */
569 + * for which requests use 2^N search using buddies
571 +#define MB_DEFAULT_ORDER2_REQS 8
574 + * default stripe size = 1MB (256 blocks of 4KB)
576 +#define MB_DEFAULT_STRIPE 256
578 +static kmem_cache_t *ext3_pspace_cachep = NULL;
580 +#ifdef EXT3_BB_MAX_BLOCKS
581 +#undef EXT3_BB_MAX_BLOCKS
583 +#define EXT3_BB_MAX_BLOCKS 30
585 +struct ext3_free_metadata {
587 + unsigned short num;
588 + unsigned short blocks[EXT3_BB_MAX_BLOCKS];
589 + struct list_head list;
592 +struct ext3_group_info {
593 + unsigned long bb_state;
594 + unsigned long bb_tid;
595 + struct ext3_free_metadata *bb_md_cur;
596 + unsigned short bb_first_free;
597 + unsigned short bb_free;
598 + unsigned short bb_fragments;
599 + struct list_head bb_prealloc_list;
600 + unsigned long bb_prealloc_nr;
604 + unsigned short bb_counters[];
607 +#define EXT3_GROUP_INFO_NEED_INIT_BIT 0
608 +#define EXT3_GROUP_INFO_LOCKED_BIT 1
610 +#define EXT3_MB_GRP_NEED_INIT(grp) \
611 + (test_bit(EXT3_GROUP_INFO_NEED_INIT_BIT, &(grp)->bb_state))
614 +struct ext3_prealloc_space {
615 + struct list_head pa_inode_list;
616 + struct list_head pa_group_list;
618 + struct list_head pa_tmp_list;
619 + struct rcu_head pa_rcu;
621 + spinlock_t pa_lock;
623 + unsigned pa_deleted;
624 + unsigned long pa_pstart; /* phys. block */
625 + unsigned long pa_lstart; /* log. block */
626 + unsigned short pa_len; /* len of preallocated chunk */
627 + unsigned short pa_free; /* how many blocks are free */
628 + unsigned short pa_linear; /* consumed in one direction
629 + * strictly, for group prealloc */
630 + spinlock_t *pa_obj_lock;
631 + struct inode *pa_inode; /* hack, for history only */
635 +struct ext3_free_extent {
636 + unsigned long fe_logical;
637 + unsigned long fe_start;
638 + unsigned long fe_group;
639 + unsigned long fe_len;
644 + * we try to group all related changes together
645 + * so that writeback can flush/allocate them together as well
647 +struct ext3_locality_group {
648 + /* for allocator */
649 + struct semaphore lg_sem; /* to serialize allocates */
650 + struct list_head lg_prealloc_list;/* list of preallocations */
651 + spinlock_t lg_prealloc_lock;
654 +struct ext3_allocation_context {
655 + struct inode *ac_inode;
656 + struct super_block *ac_sb;
658 + /* original request */
659 + struct ext3_free_extent ac_o_ex;
661 + /* goal request (after normalization) */
662 + struct ext3_free_extent ac_g_ex;
664 + /* the best found extent */
665 + struct ext3_free_extent ac_b_ex;
667 + /* copy of the best found extent taken before preallocation efforts */
668 + struct ext3_free_extent ac_f_ex;
670 + /* number of iterations done. we have to track to limit searching */
671 + unsigned long ac_ex_scanned;
672 + __u16 ac_groups_scanned;
676 + __u16 ac_flags; /* allocation hints */
680 + __u8 ac_2order; /* if request is to allocate 2^N blocks and
681 + * N > 0, the field stores N, otherwise 0 */
682 + __u8 ac_op; /* operation, for history only */
683 + struct page *ac_bitmap_page;
684 + struct page *ac_buddy_page;
685 + struct ext3_prealloc_space *ac_pa;
686 + struct ext3_locality_group *ac_lg;
689 +#define AC_STATUS_CONTINUE 1
690 +#define AC_STATUS_FOUND 2
691 +#define AC_STATUS_BREAK 3
693 +struct ext3_mb_history {
694 + struct ext3_free_extent orig; /* orig allocation */
695 + struct ext3_free_extent goal; /* goal allocation */
696 + struct ext3_free_extent result; /* result allocation */
699 + __u16 found; /* how many extents have been found */
700 + __u16 groups; /* how many groups have been scanned */
701 + __u16 tail; /* what tail broke some buddy */
702 + __u16 buddy; /* buddy the tail ^^^ broke */
704 + __u8 cr:8; /* which phase the result extent was found at */
710 + struct page *bd_buddy_page;
712 + struct page *bd_bitmap_page;
714 + struct ext3_group_info *bd_info;
715 + struct super_block *bd_sb;
717 + unsigned bd_blkbits;
719 +#define EXT3_MB_BITMAP(e3b) ((e3b)->bd_bitmap)
720 +#define EXT3_MB_BUDDY(e3b) ((e3b)->bd_buddy)
722 +#ifndef EXT3_MB_HISTORY
723 +#define ext3_mb_store_history(ac)
725 +static void ext3_mb_store_history(struct ext3_allocation_context *ac);
728 +#define in_range(b, first, len) ((b) >= (first) && (b) <= (first) + (len) - 1)
730 +int ext3_create (struct inode *, struct dentry *, int, struct nameidata *);
731 +struct buffer_head * read_block_bitmap(struct super_block *, unsigned int);
732 +unsigned long ext3_new_blocks_old(handle_t *handle, struct inode *inode,
733 + unsigned long goal, unsigned long *count, int *errp);
734 +void ext3_mb_release_blocks(struct super_block *, int);
735 +void ext3_mb_poll_new_transaction(struct super_block *, handle_t *);
736 +void ext3_mb_free_committed_blocks(struct super_block *);
737 +int ext3_mb_generate_from_pa(struct super_block *sb, void *bitmap, int group);
738 +void ext3_mb_free_consumed_preallocations(struct ext3_allocation_context *ac);
739 +void ext3_mb_return_to_preallocation(struct inode *inode, struct ext3_buddy *e3b,
740 + sector_t block, int count);
741 +void ext3_mb_show_ac(struct ext3_allocation_context *ac);
742 +void ext3_mb_check_with_pa(struct ext3_buddy *e3b, int first, int count);
743 +void ext3_mb_put_pa(struct ext3_allocation_context *, struct super_block *, struct ext3_prealloc_space *pa);
744 +int ext3_mb_init_per_dev_proc(struct super_block *sb);
745 +int ext3_mb_destroy_per_dev_proc(struct super_block *sb);
748 + * Calculate the block group number and offset, given a block number
750 +static void ext3_get_group_no_and_offset(struct super_block *sb,
751 + unsigned long blocknr,
752 + unsigned long *blockgrpp,
753 + unsigned long *offsetp)
755 + struct ext3_super_block *es = EXT3_SB(sb)->s_es;
756 + unsigned long offset;
758 + blocknr = blocknr - le32_to_cpu(es->s_first_data_block);
759 + offset = blocknr % EXT3_BLOCKS_PER_GROUP(sb);
760 + blocknr = blocknr / EXT3_BLOCKS_PER_GROUP(sb);
764 + *blockgrpp = blocknr;
769 +ext3_lock_group(struct super_block *sb, int group)
771 + bit_spin_lock(EXT3_GROUP_INFO_LOCKED_BIT,
772 + &EXT3_GROUP_INFO(sb, group)->bb_state);
776 +ext3_unlock_group(struct super_block *sb, int group)
778 + bit_spin_unlock(EXT3_GROUP_INFO_LOCKED_BIT,
779 + &EXT3_GROUP_INFO(sb, group)->bb_state);
783 +ext3_is_group_locked(struct super_block *sb, int group)
785 + return bit_spin_is_locked(EXT3_GROUP_INFO_LOCKED_BIT,
786 + &EXT3_GROUP_INFO(sb, group)->bb_state);
789 +unsigned long ext3_grp_offs_to_block(struct super_block *sb,
790 + struct ext3_free_extent *fex)
792 + unsigned long block;
794 + block = (unsigned long) fex->fe_group * EXT3_BLOCKS_PER_GROUP(sb)
796 + + le32_to_cpu(EXT3_SB(sb)->s_es->s_first_data_block);
800 +#if BITS_PER_LONG == 64
801 +#define mb_correct_addr_and_bit(bit,addr) \
803 + bit += ((unsigned long) addr & 7UL) << 3; \
804 + addr = (void *) ((unsigned long) addr & ~7UL); \
806 +#elif BITS_PER_LONG == 32
807 +#define mb_correct_addr_and_bit(bit,addr) \
809 + bit += ((unsigned long) addr & 3UL) << 3; \
810 + addr = (void *) ((unsigned long) addr & ~3UL); \
813 +#error "how many bits you are?!"
816 +static inline int mb_test_bit(int bit, void *addr)
818 + mb_correct_addr_and_bit(bit,addr);
819 + return ext2_test_bit(bit, addr);
822 +static inline void mb_set_bit(int bit, void *addr)
824 + mb_correct_addr_and_bit(bit,addr);
825 + ext2_set_bit(bit, addr);
828 +static inline void mb_set_bit_atomic(spinlock_t *lock, int bit, void *addr)
830 + mb_correct_addr_and_bit(bit,addr);
831 + ext2_set_bit_atomic(lock, bit, addr);
834 +static inline void mb_clear_bit(int bit, void *addr)
836 + mb_correct_addr_and_bit(bit,addr);
837 + ext2_clear_bit(bit, addr);
840 +static inline void mb_clear_bit_atomic(spinlock_t *lock, int bit, void *addr)
842 + mb_correct_addr_and_bit(bit,addr);
843 + ext2_clear_bit_atomic(lock, bit, addr);
846 +static inline int mb_find_next_zero_bit(void *addr, int max, int start)
849 +#if BITS_PER_LONG == 64
850 + fix = ((unsigned long) addr & 7UL) << 3;
851 + addr = (void *) ((unsigned long) addr & ~7UL);
852 +#elif BITS_PER_LONG == 32
853 + fix = ((unsigned long) addr & 3UL) << 3;
854 + addr = (void *) ((unsigned long) addr & ~3UL);
856 +#error "how many bits you are?!"
860 + return ext2_find_next_zero_bit(addr, max, start) - fix;
863 +static inline int mb_find_next_bit(void *addr, int max, int start)
866 +#if BITS_PER_LONG == 64
867 + fix = ((unsigned long) addr & 7UL) << 3;
868 + addr = (void *) ((unsigned long) addr & ~7UL);
869 +#elif BITS_PER_LONG == 32
870 + fix = ((unsigned long) addr & 3UL) << 3;
871 + addr = (void *) ((unsigned long) addr & ~3UL);
873 +#error "how many bits you are?!"
880 + return find_next_bit(addr, max, start) - fix;
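The fix arithmetic converts a byte-misaligned bitmap pointer into a long-aligned base plus a bit offset, so the generic bitops always see an aligned address. A standalone check of that identity (userspace sketch, 64-bit longs assumed):

    #include <assert.h>
    #include <stdint.h>

    int main(void)
    {
            unsigned long word[2] = { 0, 0 };
            unsigned char *base = (unsigned char *)word;
            void *addr = base + 3;                /* misaligned by 3 bytes */
            int fix = ((uintptr_t)addr & 7UL) << 3;

            addr = (void *)((uintptr_t)addr & ~7UL);
            assert(addr == (void *)base && fix == 24);
            /* bit n of (base + 3) is bit n + 24 of the aligned base */
            return 0;
    }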
884 +static inline void *mb_find_buddy(struct ext3_buddy *e3b, int order, int *max)
888 + BUG_ON(EXT3_MB_BITMAP(e3b) == EXT3_MB_BUDDY(e3b));
889 + BUG_ON(max == NULL);
891 + if (order > e3b->bd_blkbits + 1) {
896 + /* at order 0 we see each particular block */
897 + *max = 1 << (e3b->bd_blkbits + 3);
899 + return EXT3_MB_BITMAP(e3b);
901 + bb = EXT3_MB_BUDDY(e3b) + EXT3_SB(e3b->bd_sb)->s_mb_offsets[order];
902 + *max = EXT3_SB(e3b->bd_sb)->s_mb_maxs[order];
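s_mb_offsets/s_mb_maxs are filled at mount time (in ext3_mb_init(), not shown in this excerpt); their layout can be reconstructed from what mb_find_buddy() assumes. A sketch for 4KB blocks (blkbits == 12, so 32768 bits at order 0):

    unsigned short offsets[14], maxs[14];
    int i, off = 0, bits = 12 + 3;

    maxs[0] = 1 << bits;            /* order 0 lives in the block bitmap */
    for (i = 1; i <= 12 + 1; i++) {
            offsets[i] = off;       /* byte offset into the buddy block  */
            maxs[i] = 1 << (bits - i);
            off += maxs[i] >> 3;    /* each order is half the previous   */
    }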
908 +void mb_free_blocks_double(struct inode *inode, struct ext3_buddy *e3b,
909 + int first, int count)
912 + struct super_block *sb = e3b->bd_sb;
914 + if (unlikely(e3b->bd_info->bb_bitmap == NULL))
916 + BUG_ON(!ext3_is_group_locked(sb, e3b->bd_group));
917 + for (i = 0; i < count; i++) {
918 + if (!mb_test_bit(first + i, e3b->bd_info->bb_bitmap)) {
919 + unsigned long blocknr;
920 + blocknr = e3b->bd_group * EXT3_BLOCKS_PER_GROUP(sb);
921 + blocknr += first + i;
923 + le32_to_cpu(EXT3_SB(sb)->s_es->s_first_data_block);
925 + ext3_error(sb, __FUNCTION__, "double-free of inode"
926 + " %lu's block %lu(bit %u in group %u)\n",
927 + inode ? inode->i_ino : 0, blocknr,
928 + first + i, e3b->bd_group);
930 + mb_clear_bit(first + i, e3b->bd_info->bb_bitmap);
934 +void mb_mark_used_double(struct ext3_buddy *e3b, int first, int count)
937 + if (unlikely(e3b->bd_info->bb_bitmap == NULL))
939 + BUG_ON(!ext3_is_group_locked(e3b->bd_sb, e3b->bd_group));
940 + for (i = 0; i < count; i++) {
941 + BUG_ON(mb_test_bit(first + i, e3b->bd_info->bb_bitmap));
942 + mb_set_bit(first + i, e3b->bd_info->bb_bitmap);
946 +void mb_cmp_bitmaps(struct ext3_buddy *e3b, void *bitmap)
948 + if (memcmp(e3b->bd_info->bb_bitmap, bitmap, e3b->bd_sb->s_blocksize)) {
949 + unsigned char *b1, *b2;
951 + b1 = (unsigned char *) e3b->bd_info->bb_bitmap;
952 + b2 = (unsigned char *) bitmap;
953 + for (i = 0; i < e3b->bd_sb->s_blocksize; i++) {
954 + if (b1[i] != b2[i]) {
955 + printk("corruption in group %u at byte %u(%u): "
956 + "%x in copy != %x on disk/prealloc\n",
957 + e3b->bd_group, i, i * 8, b1[i], b2[i]);
965 +#define mb_free_blocks_double(a,b,c,d)
966 +#define mb_mark_used_double(a,b,c)
967 +#define mb_cmp_bitmaps(a,b)
970 +#ifdef AGGRESSIVE_CHECK
972 +#define MB_CHECK_ASSERT(assert) \
975 + printk (KERN_EMERG \
976 + "Assertion failure in %s() at %s:%d: \"%s\"\n", \
977 + function, file, line, # assert); \
982 +static int __mb_check_buddy(struct ext3_buddy *e3b, char *file,
983 + const char *function, int line)
985 + struct super_block *sb = e3b->bd_sb;
986 + int order = e3b->bd_blkbits + 1;
987 + int max, max2, i, j, k, count;
988 + struct ext3_group_info *grp;
989 + int fragments = 0, fstart;
990 + struct list_head *cur;
991 + void *buddy, *buddy2;
993 + if (!test_opt(sb, MBALLOC))
997 + static int mb_check_counter = 0;
998 + if (mb_check_counter++ % 100 != 0)
1002 + while (order > 1) {
1003 + buddy = mb_find_buddy(e3b, order, &max);
1004 + MB_CHECK_ASSERT(buddy);
1005 + buddy2 = mb_find_buddy(e3b, order - 1, &max2);
1006 + MB_CHECK_ASSERT(buddy2);
1007 + MB_CHECK_ASSERT(buddy != buddy2);
1008 + MB_CHECK_ASSERT(max * 2 == max2);
1011 + for (i = 0; i < max; i++) {
1013 + if (mb_test_bit(i, buddy)) {
1014 + /* only single bit in buddy2 may be 1 */
1015 + if (!mb_test_bit(i << 1, buddy2))
1016 + MB_CHECK_ASSERT(mb_test_bit((i<<1)+1, buddy2));
1017 + else if (!mb_test_bit((i << 1) + 1, buddy2))
1018 + MB_CHECK_ASSERT(mb_test_bit(i << 1, buddy2));
1022 + /* both bits in buddy2 must be 0 */
1023 + MB_CHECK_ASSERT(mb_test_bit(i << 1, buddy2));
1024 + MB_CHECK_ASSERT(mb_test_bit((i << 1) + 1, buddy2));
1026 + for (j = 0; j < (1 << order); j++) {
1027 + k = (i * (1 << order)) + j;
1028 + MB_CHECK_ASSERT(!mb_test_bit(k, EXT3_MB_BITMAP(e3b)));
1032 + MB_CHECK_ASSERT(e3b->bd_info->bb_counters[order] == count);
1037 + buddy = mb_find_buddy(e3b, 0, &max);
1038 + for (i = 0; i < max; i++) {
1039 + if (!mb_test_bit(i, buddy)) {
1040 + MB_CHECK_ASSERT(i >= e3b->bd_info->bb_first_free);
1041 + if (fstart == -1) {
1048 + /* check used bits only */
1049 + for (j = 0; j < e3b->bd_blkbits + 1; j++) {
1050 + buddy2 = mb_find_buddy(e3b, j, &max2);
1052 + MB_CHECK_ASSERT(k < max2);
1053 + MB_CHECK_ASSERT(mb_test_bit(k, buddy2));
1056 + MB_CHECK_ASSERT(!EXT3_MB_GRP_NEED_INIT(e3b->bd_info));
1057 + MB_CHECK_ASSERT(e3b->bd_info->bb_fragments == fragments);
1059 + grp = EXT3_GROUP_INFO(sb, e3b->bd_group);
1060 + buddy = mb_find_buddy(e3b, 0, &max);
1061 + list_for_each(cur, &grp->bb_prealloc_list) {
1062 + unsigned long groupnr;
1063 + struct ext3_prealloc_space *pa;
1064 + pa = list_entry(cur, struct ext3_prealloc_space, pa_group_list);
1065 + ext3_get_group_no_and_offset(sb, pa->pa_pstart, &groupnr, &k);
1066 + MB_CHECK_ASSERT(groupnr == e3b->bd_group);
1067 + for (i = 0; i < pa->pa_len; i++)
1068 + MB_CHECK_ASSERT(mb_test_bit(k + i, buddy));
1072 +#undef MB_CHECK_ASSERT
1073 +#define mb_check_buddy(e3b) __mb_check_buddy(e3b,__FILE__,__FUNCTION__,__LINE__)
1075 +#define mb_check_buddy(e3b)
1078 +/* find most significant bit */
1079 +static inline int fmsb(unsigned short word)
1093 + } while (word != 0);
1099 +ext3_mb_mark_free_simple(struct super_block *sb, void *buddy, unsigned first,
1100 + int len, struct ext3_group_info *grp)
1102 + struct ext3_sb_info *sbi = EXT3_SB(sb);
1103 + unsigned short min, max, chunk, border;
1105 + BUG_ON(len >= EXT3_BLOCKS_PER_GROUP(sb));
1107 + border = 2 << sb->s_blocksize_bits;
1110 + /* find how many blocks can be covered since this position */
1111 + max = ffs(first | border) - 1;
1113 + /* find how many blocks of power 2 we need to mark */
1120 + /* mark multiblock chunks only */
1121 + grp->bb_counters[min]++;
1123 + mb_clear_bit(first >> min, buddy + sbi->s_mb_offsets[min]);
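The loop decomposes a free run into maximal power-of-two chunks aligned to their own size, bounded by ffs(first | border) on one side and the most significant bit of len on the other. A userspace rendering of the same decomposition (GCC builtins stand in for the kernel's ffs()/fls(), and 1 << 16 stands in for border; first = 5, len = 7 yields 1@5, 2@6, 4@8):

    #include <stdio.h>

    static void decompose(unsigned first, int len)
    {
            while (len > 0) {
                    int max = __builtin_ffs(first | (1 << 16)) - 1;
                    int min = 31 - __builtin_clz(len);
                    int order = max < min ? max : min;

                    printf("%d blocks at %u (order %d)\n",
                           1 << order, first, order);
                    first += 1 << order;
                    len   -= 1 << order;
            }
    }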
1131 +ext3_mb_generate_buddy(struct super_block *sb, void *buddy, void *bitmap,
1134 + struct ext3_group_info *grp = EXT3_GROUP_INFO(sb, group);
1135 + unsigned short max = EXT3_BLOCKS_PER_GROUP(sb);
1136 + unsigned short i = 0, first, len;
1137 + unsigned free = 0, fragments = 0;
1138 + unsigned long long period = get_cycles();
1140 + /* initialize buddy from bitmap which is an aggregation
1141 + * of the on-disk bitmap and preallocations */
1142 + i = mb_find_next_zero_bit(bitmap, max, 0);
1143 + grp->bb_first_free = i;
1147 + i = ext2_find_next_le_bit(bitmap, max, i);
1153 + ext3_mb_mark_free_simple(sb, buddy, first, len, grp);
1155 + grp->bb_counters[0]++;
1157 + i = mb_find_next_zero_bit(bitmap, max, i);
1159 + grp->bb_fragments = fragments;
1161 + if (free != grp->bb_free) {
1162 + struct ext3_group_desc *gdp;
1163 + gdp = ext3_get_group_desc (sb, group, NULL);
1164 + ext3_error(sb, __FUNCTION__,
1165 + "group %u: %u blocks in bitmap, %u in bb, "
1166 + "%u in gd, %lu pa's\n", group, free, grp->bb_free,
1167 + le16_to_cpu(gdp->bg_free_blocks_count),
1168 + grp->bb_prealloc_nr);
1172 + clear_bit(EXT3_GROUP_INFO_NEED_INIT_BIT, &grp->bb_state);
1174 + period = get_cycles() - period;
1175 + spin_lock(&EXT3_SB(sb)->s_bal_lock);
1176 + EXT3_SB(sb)->s_mb_buddies_generated++;
1177 + EXT3_SB(sb)->s_mb_generation_time += period;
1178 + spin_unlock(&EXT3_SB(sb)->s_bal_lock);
1183 +static int ext3_mb_init_cache(struct page *page, char *incore)
1185 + int blocksize, blocks_per_page, groups_per_page;
1186 + int err = 0, i, first_group, first_block;
1187 + struct super_block *sb;
1188 + struct buffer_head *bhs;
1189 + struct buffer_head **bh;
1190 + struct inode *inode;
1191 + char *data, *bitmap;
1193 + mb_debug("init page %lu\n", page->index);
1195 + inode = page->mapping->host;
1197 + blocksize = 1 << inode->i_blkbits;
1198 + blocks_per_page = PAGE_CACHE_SIZE / blocksize;
1200 + groups_per_page = blocks_per_page >> 1;
1201 + if (groups_per_page == 0)
1202 + groups_per_page = 1;
1204 + /* allocate buffer_heads to read bitmaps */
1205 + if (groups_per_page > 1) {
1207 + i = sizeof(struct buffer_head *) * groups_per_page;
1208 + bh = kmalloc(i, GFP_NOFS);
1215 + first_group = page->index * blocks_per_page / 2;
1217 + /* read all groups the page covers into the cache */
1218 + for (i = 0; i < groups_per_page; i++) {
1219 + struct ext3_group_desc * desc;
1221 + if (first_group + i >= EXT3_SB(sb)->s_groups_count)
1225 + desc = ext3_get_group_desc(sb, first_group + i, NULL);
1230 + bh[i] = sb_getblk(sb, le32_to_cpu(desc->bg_block_bitmap));
1231 + if (bh[i] == NULL)
1234 + if (buffer_uptodate(bh[i]))
1237 + lock_buffer(bh[i]);
1238 + if (buffer_uptodate(bh[i])) {
1239 + unlock_buffer(bh[i]);
1244 + bh[i]->b_end_io = end_buffer_read_sync;
1245 + submit_bh(READ, bh[i]);
1246 + mb_debug("read bitmap for group %u\n", first_group + i);
1249 + /* wait for I/O completion */
1250 + for (i = 0; i < groups_per_page && bh[i]; i++)
1251 + wait_on_buffer(bh[i]);
1254 + for (i = 0; i < groups_per_page && bh[i]; i++)
1255 + if (!buffer_uptodate(bh[i]))
1259 + first_block = page->index * blocks_per_page;
1260 + for (i = 0; i < blocks_per_page && err == 0; i++) {
1263 + group = (first_block + i) >> 1;
1264 + if (group >= EXT3_SB(sb)->s_groups_count)
1267 + data = page_address(page) + (i * blocksize);
1268 + bitmap = bh[group - first_group]->b_data;
1270 + if ((first_block + i) & 1) {
1271 + /* this is block of buddy */
1272 + BUG_ON(incore == NULL);
1273 + mb_debug("put buddy for group %u in page %lu/%x\n",
1274 + group, page->index, i * blocksize);
1275 + memset(data, 0xff, blocksize);
1276 + EXT3_GROUP_INFO(sb, group)->bb_fragments = 0;
1277 + memset(EXT3_GROUP_INFO(sb, group)->bb_counters, 0,
1278 + sizeof(unsigned short)*(sb->s_blocksize_bits+2));
1279 + err = ext3_mb_generate_buddy(sb, data, incore, group);
1282 + /* this is block of bitmap */
1283 + BUG_ON(incore != NULL);
1284 + mb_debug("put bitmap for group %u in page %lu/%x\n",
1285 + group, page->index, i * blocksize);
1287 + /* see comments in ext3_mb_put_pa() */
1288 + ext3_lock_group(sb, group);
1289 + memcpy(data, bitmap, blocksize);
1291 + /* mark all preallocated blocks used in in-core bitmap */
1292 + err = ext3_mb_generate_from_pa(sb, data, group);
1293 + ext3_unlock_group(sb, group);
1298 + if (likely(err == 0))
1299 + SetPageUptodate(page);
1303 + for (i = 0; i < groups_per_page && bh[i]; i++)
1311 +static int ext3_mb_load_buddy(struct super_block *sb, int group,
1312 + struct ext3_buddy *e3b)
1314 + struct ext3_sb_info *sbi = EXT3_SB(sb);
1315 + struct inode *inode = sbi->s_buddy_cache;
1316 + int blocks_per_page, block, pnum, poff;
1317 + struct page *page;
1319 + mb_debug("load group %u\n", group);
1321 + blocks_per_page = PAGE_CACHE_SIZE / sb->s_blocksize;
1323 + e3b->bd_blkbits = sb->s_blocksize_bits;
1324 + e3b->bd_info = EXT3_GROUP_INFO(sb, group);
1326 + e3b->bd_group = group;
1327 + e3b->bd_buddy_page = NULL;
1328 + e3b->bd_bitmap_page = NULL;
1330 + block = group * 2;
1331 + pnum = block / blocks_per_page;
1332 + poff = block % blocks_per_page;
1334 + /* we could use find_or_create_page(), but it locks the page,
1335 + * which we'd like to avoid in the fast path ... */
1336 + page = find_get_page(inode->i_mapping, pnum);
1337 + if (page == NULL || !PageUptodate(page)) {
1339 + page_cache_release(page);
1340 + page = find_or_create_page(inode->i_mapping, pnum, GFP_NOFS);
1342 + BUG_ON(page->mapping != inode->i_mapping);
1343 + if (!PageUptodate(page)) {
1344 + ext3_mb_init_cache(page, NULL);
1345 + mb_cmp_bitmaps(e3b, page_address(page) +
1346 + (poff * sb->s_blocksize));
1348 + unlock_page(page);
1351 + if (page == NULL || !PageUptodate(page))
1353 + e3b->bd_bitmap_page = page;
1354 + e3b->bd_bitmap = page_address(page) + (poff * sb->s_blocksize);
1355 + mark_page_accessed(page);
1358 + pnum = block / blocks_per_page;
1359 + poff = block % blocks_per_page;
1361 + page = find_get_page(inode->i_mapping, pnum);
1362 + if (page == NULL || !PageUptodate(page)) {
1364 + page_cache_release(page);
1365 + page = find_or_create_page(inode->i_mapping, pnum, GFP_NOFS);
1367 + BUG_ON(page->mapping != inode->i_mapping);
1368 + if (!PageUptodate(page))
1369 + ext3_mb_init_cache(page, e3b->bd_bitmap);
1371 + unlock_page(page);
1374 + if (page == NULL || !PageUptodate(page))
1376 + e3b->bd_buddy_page = page;
1377 + e3b->bd_buddy = page_address(page) + (poff * sb->s_blocksize);
1378 + mark_page_accessed(page);
1380 + BUG_ON(e3b->bd_bitmap_page == NULL);
1381 + BUG_ON(e3b->bd_buddy_page == NULL);
1386 + if (e3b->bd_bitmap_page)
1387 + page_cache_release(e3b->bd_bitmap_page);
1388 + if (e3b->bd_buddy_page)
1389 + page_cache_release(e3b->bd_buddy_page);
1390 + e3b->bd_buddy = NULL;
1391 + e3b->bd_bitmap = NULL;
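The block/pnum/poff arithmetic above maps group g's bitmap to logical block 2g of the buddy-cache inode and its buddy to block 2g + 1. Worked numbers for illustration, assuming a 4KB page with 1KB blocks:

    /* blocks_per_page == 4 (4KB page, 1KB blocks) */
    int group = 5;
    int block = group * 2;      /* bitmap at block 10, buddy at 11 */
    int pnum  = block / 4;      /* both land in page 2 ...         */
    int poff  = block % 4;      /* ... at slots 2 and 3            */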
1395 +static void ext3_mb_release_desc(struct ext3_buddy *e3b)
1397 + if (e3b->bd_bitmap_page)
1398 + page_cache_release(e3b->bd_bitmap_page);
1399 + if (e3b->bd_buddy_page)
1400 + page_cache_release(e3b->bd_buddy_page);
1404 +static int mb_find_order_for_block(struct ext3_buddy *e3b, int block)
1409 + BUG_ON(EXT3_MB_BITMAP(e3b) == EXT3_MB_BUDDY(e3b));
1410 + BUG_ON(block >= (1 << (e3b->bd_blkbits + 3)));
1412 + bb = EXT3_MB_BUDDY(e3b);
1413 + while (order <= e3b->bd_blkbits + 1) {
1414 + block = block >> 1;
1415 + if (!mb_test_bit(block, bb)) {
1416 + /* this block is part of buddy of order 'order' */
1419 + bb += 1 << (e3b->bd_blkbits - order);
1425 +static inline void mb_clear_bits(spinlock_t *lock, void *bm, int cur, int len)
1430 + while (cur < len) {
1431 + if ((cur & 31) == 0 && (len - cur) >= 32) {
1432 + /* fast path: clear whole word at once */
1433 + addr = bm + (cur >> 3);
1438 + mb_clear_bit_atomic(lock, cur, bm);
1443 +static inline void mb_set_bits(spinlock_t *lock, void *bm, int cur, int len)
1448 + while (cur < len) {
1449 + if ((cur & 31) == 0 && (len - cur) >= 32) {
1450 + /* fast path: set whole word at once */
1451 + addr = bm + (cur >> 3);
1452 + *addr = 0xffffffff;
1456 + mb_set_bit_atomic(lock, cur, bm);
1461 +static int mb_free_blocks(struct inode *inode, struct ext3_buddy *e3b,
1462 + int first, int count)
1464 + int block = 0, max = 0, order;
1465 + void *buddy, *buddy2;
1466 + struct super_block *sb = e3b->bd_sb;
1468 + BUG_ON(first + count > (sb->s_blocksize << 3));
1469 + BUG_ON(!ext3_is_group_locked(sb, e3b->bd_group));
1470 + mb_check_buddy(e3b);
1471 + mb_free_blocks_double(inode, e3b, first, count);
1473 + e3b->bd_info->bb_free += count;
1474 + if (first < e3b->bd_info->bb_first_free)
1475 + e3b->bd_info->bb_first_free = first;
1477 + /* let's maintain fragments counter */
1479 + block = !mb_test_bit(first - 1, EXT3_MB_BITMAP(e3b));
1480 + if (first + count < EXT3_SB(sb)->s_mb_maxs[0])
1481 + max = !mb_test_bit(first + count, EXT3_MB_BITMAP(e3b));
1483 + e3b->bd_info->bb_fragments--;
1484 + else if (!block && !max)
1485 + e3b->bd_info->bb_fragments++;
1487 + /* let's maintain buddy itself */
1488 + while (count-- > 0) {
1492 + if (!mb_test_bit(block, EXT3_MB_BITMAP(e3b))) {
1493 + unsigned long blocknr;
1494 + blocknr = e3b->bd_group * EXT3_BLOCKS_PER_GROUP(sb);
1497 + le32_to_cpu(EXT3_SB(sb)->s_es->s_first_data_block);
1499 + ext3_error(sb, __FUNCTION__, "double-free of inode"
1500 + " %lu's block %lu(bit %u in group %u)\n",
1501 + inode ? inode->i_ino : 0, blocknr, block,
1504 + mb_clear_bit(block, EXT3_MB_BITMAP(e3b));
1505 + e3b->bd_info->bb_counters[order]++;
1507 + /* start of the buddy */
1508 + buddy = mb_find_buddy(e3b, order, &max);
1512 + if (mb_test_bit(block, buddy) ||
1513 + mb_test_bit(block + 1, buddy))
1516 + /* both the buddies are free, try to coalesce them */
1517 + buddy2 = mb_find_buddy(e3b, order + 1, &max);
1523 + /* for special purposes, we don't set
1524 + * free bits in bitmap */
1525 + mb_set_bit(block, buddy);
1526 + mb_set_bit(block + 1, buddy);
1528 + e3b->bd_info->bb_counters[order]--;
1529 + e3b->bd_info->bb_counters[order]--;
1531 + block = block >> 1;
1533 + e3b->bd_info->bb_counters[order]++;
1535 + mb_clear_bit(block, buddy2);
1539 + mb_check_buddy(e3b);
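The while loop above is the classic buddy ascent; a comment trace of one free (a clear bit means free at that order):

    /* freeing block 5 while its buddy, block 4, is already free:
     *   order 0: clear bit 5, bb_counters[0]++
     *   bits 4 and 5 both clear -> coalesce:
     *     set bits 4 and 5 again, bb_counters[0] -= 2
     *     order 1: clear bit 2, bb_counters[1]++  (one chunk of 2 at 4)
     *   the ascent repeats while the next-order sibling is free as well */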
1544 +static int mb_find_extent(struct ext3_buddy *e3b, int order, int block,
1545 + int needed, struct ext3_free_extent *ex)
1547 + int next = block, max, ord;
1550 + BUG_ON(!ext3_is_group_locked(e3b->bd_sb, e3b->bd_group));
1551 + BUG_ON(ex == NULL);
1553 + buddy = mb_find_buddy(e3b, order, &max);
1554 + BUG_ON(buddy == NULL);
1555 + BUG_ON(block >= max);
1556 + if (mb_test_bit(block, buddy)) {
1563 + if (likely(order == 0)) {
1564 + /* find actual order */
1565 + order = mb_find_order_for_block(e3b, block);
1566 + block = block >> order;
1569 + ex->fe_len = 1 << order;
1570 + ex->fe_start = block << order;
1571 + ex->fe_group = e3b->bd_group;
1573 + /* calc difference from given start */
1574 + next = next - ex->fe_start;
1575 + ex->fe_len -= next;
1576 + ex->fe_start += next;
1578 + while (needed > ex->fe_len && (buddy = mb_find_buddy(e3b, order, &max))) {
1580 + if (block + 1 >= max)
1583 + next = (block + 1) * (1 << order);
1584 + if (mb_test_bit(next, EXT3_MB_BITMAP(e3b)))
1587 + ord = mb_find_order_for_block(e3b, next);
1590 + block = next >> order;
1591 + ex->fe_len += 1 << order;
1594 + BUG_ON(ex->fe_start + ex->fe_len > (1 << (e3b->bd_blkbits + 3)));
1595 + return ex->fe_len;
1598 +static int mb_mark_used(struct ext3_buddy *e3b, struct ext3_free_extent *ex)
1600 + int ord, mlen = 0, max = 0, cur;
1601 + int start = ex->fe_start;
1602 + int len = ex->fe_len;
1607 + BUG_ON(start + len > (e3b->bd_sb->s_blocksize << 3));
1608 + BUG_ON(e3b->bd_group != ex->fe_group);
1609 + BUG_ON(!ext3_is_group_locked(e3b->bd_sb, e3b->bd_group));
1610 + mb_check_buddy(e3b);
1611 + mb_mark_used_double(e3b, start, len);
1613 + e3b->bd_info->bb_free -= len;
1614 + if (e3b->bd_info->bb_first_free == start)
1615 + e3b->bd_info->bb_first_free += len;
1617 + /* let's maintain fragments counter */
1619 + mlen = !mb_test_bit(start - 1, EXT3_MB_BITMAP(e3b));
1620 + if (start + len < EXT3_SB(e3b->bd_sb)->s_mb_maxs[0])
1621 + max = !mb_test_bit(start + len, EXT3_MB_BITMAP(e3b));
1623 + e3b->bd_info->bb_fragments++;
1624 + else if (!mlen && !max)
1625 + e3b->bd_info->bb_fragments--;
1627 + /* let's maintain buddy itself */
1629 + ord = mb_find_order_for_block(e3b, start);
1631 + if (((start >> ord) << ord) == start && len >= (1 << ord)) {
1632 + /* the whole chunk may be allocated at once! */
1634 + buddy = mb_find_buddy(e3b, ord, &max);
1635 + BUG_ON((start >> ord) >= max);
1636 + mb_set_bit(start >> ord, buddy);
1637 + e3b->bd_info->bb_counters[ord]--;
1644 + /* store for history */
1646 + ret = len | (ord << 16);
1648 + /* we have to split large buddy */
1650 + buddy = mb_find_buddy(e3b, ord, &max);
1651 + mb_set_bit(start >> ord, buddy);
1652 + e3b->bd_info->bb_counters[ord]--;
1655 + cur = (start >> ord) & ~1U;
1656 + buddy = mb_find_buddy(e3b, ord, &max);
1657 + mb_clear_bit(cur, buddy);
1658 + mb_clear_bit(cur + 1, buddy);
1659 + e3b->bd_info->bb_counters[ord]++;
1660 + e3b->bd_info->bb_counters[ord]++;
1663 + mb_set_bits(sb_bgl_lock(EXT3_SB(e3b->bd_sb), ex->fe_group),
1664 + EXT3_MB_BITMAP(e3b), ex->fe_start, len0);
1665 + mb_check_buddy(e3b);
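mb_mark_used() is the reverse walk: oversized chunks are split until the extent is covered. A comment trace for a hypothetical 3-block allocation at block 4 out of a free order-2 chunk:

    /* allocate 3 blocks at 4 from the free order-2 chunk [4..7]:
     *   chunk too big -> split into order-1 [4,5] and [6,7]
     *   take [4,5] whole (start aligned, 3 >= 2), 1 block left
     *   split [6,7] into order-0 blocks, take 6; block 7 stays free */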
1671 + * Must be called under group lock!
1673 +static void ext3_mb_use_best_found(struct ext3_allocation_context *ac,
1674 + struct ext3_buddy *e3b)
1676 + struct ext3_sb_info *sbi = EXT3_SB(ac->ac_sb);
1677 + unsigned long ret;
1679 + BUG_ON(ac->ac_b_ex.fe_group != e3b->bd_group);
1680 + BUG_ON(ac->ac_status == AC_STATUS_FOUND);
1682 + ac->ac_b_ex.fe_len = min(ac->ac_b_ex.fe_len, ac->ac_g_ex.fe_len);
1683 + ac->ac_b_ex.fe_logical = ac->ac_g_ex.fe_logical;
1684 + ret = mb_mark_used(e3b, &ac->ac_b_ex);
1686 + /* preallocation can change ac_b_ex, thus we store actually
1687 + * allocated blocks for history */
1688 + ac->ac_f_ex = ac->ac_b_ex;
1690 + ac->ac_status = AC_STATUS_FOUND;
1691 + ac->ac_tail = ret & 0xffff;
1692 + ac->ac_buddy = ret >> 16;
1694 + /* XXXXXXX: SUCH A HORRIBLE **CK */
1695 + ac->ac_bitmap_page = e3b->bd_bitmap_page;
1696 + get_page(ac->ac_bitmap_page);
1697 + ac->ac_buddy_page = e3b->bd_buddy_page;
1698 + get_page(ac->ac_buddy_page);
1700 + /* store last allocated for subsequent stream allocation */
1701 + if ((ac->ac_flags & EXT3_MB_HINT_DATA)) {
1702 + spin_lock(&sbi->s_md_lock);
1703 + sbi->s_mb_last_group = ac->ac_f_ex.fe_group;
1704 + sbi->s_mb_last_start = ac->ac_f_ex.fe_start;
1705 + spin_unlock(&sbi->s_md_lock);
1710 + * regular allocator, for general purposes allocation
1713 +void ext3_mb_check_limits(struct ext3_allocation_context *ac,
1714 + struct ext3_buddy *e3b,
1717 + struct ext3_sb_info *sbi = EXT3_SB(ac->ac_sb);
1718 + struct ext3_free_extent *bex = &ac->ac_b_ex;
1719 + struct ext3_free_extent *gex = &ac->ac_g_ex;
1720 + struct ext3_free_extent ex;
1724 + * We don't want to scan for a whole year
1726 + if (ac->ac_found > sbi->s_mb_max_to_scan &&
1727 + !(ac->ac_flags & EXT3_MB_HINT_FIRST)) {
1728 + ac->ac_status = AC_STATUS_BREAK;
1733 + * Haven't found good chunk so far, let's continue
1735 + if (bex->fe_len < gex->fe_len)
1738 + if ((finish_group || ac->ac_found > sbi->s_mb_min_to_scan)
1739 + && bex->fe_group == e3b->bd_group) {
1740 + /* recheck chunk's availability - we don't know
1741 + * when it was found (within this lock-unlock
1742 + * period or not) */
1743 + max = mb_find_extent(e3b, 0, bex->fe_start, gex->fe_len, &ex);
1744 + if (max >= gex->fe_len) {
1745 + ext3_mb_use_best_found(ac, e3b);
1752 + * The routine checks whether the found extent is good enough. If it is,
1753 + * the extent gets marked used and a flag is set in the context to
1754 + * stop scanning. Otherwise, the extent is compared with the
1755 + * previously found extent and, if the new one is better, it is stored
1756 + * in the context. Later, the best found extent will be used if
1757 + * mballoc can't find a good enough extent.
1759 + * FIXME: real allocation policy is to be designed yet!
1761 +static void ext3_mb_measure_extent(struct ext3_allocation_context *ac,
1762 + struct ext3_free_extent *ex,
1763 + struct ext3_buddy *e3b)
1765 + struct ext3_free_extent *bex = &ac->ac_b_ex;
1766 + struct ext3_free_extent *gex = &ac->ac_g_ex;
1768 + BUG_ON(ex->fe_len <= 0);
1769 + BUG_ON(ex->fe_len >= EXT3_BLOCKS_PER_GROUP(ac->ac_sb));
1770 + BUG_ON(ex->fe_start >= EXT3_BLOCKS_PER_GROUP(ac->ac_sb));
1771 + BUG_ON(ac->ac_status != AC_STATUS_CONTINUE);
1776 + * The special case - take what you catch first
1778 + if (unlikely(ac->ac_flags & EXT3_MB_HINT_FIRST)) {
1780 + ext3_mb_use_best_found(ac, e3b);
1785 + * Let's check whether the chunk is good enough
1787 + if (ex->fe_len == gex->fe_len) {
1789 + ext3_mb_use_best_found(ac, e3b);
1794 + * If this is first found extent, just store it in the context
1796 + if (bex->fe_len == 0) {
1802 + * If new found extent is better, store it in the context
1804 + if (bex->fe_len < gex->fe_len) {
1805 + /* if the request isn't satisfied, any found extent
1806 + * larger than previous best one is better */
1807 + if (ex->fe_len > bex->fe_len)
1809 + } else if (ex->fe_len > gex->fe_len) {
1810 + /* if the request is satisfied, then we try to find
1811 + * an extent that still satisfies the request, but is
1812 + * smaller than previous one */
1813 + if (ex->fe_len < bex->fe_len)
1817 + ext3_mb_check_limits(ac, e3b, 0);
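A comment trace of the policy above for a hypothetical 8-block request:

    /* goal len 8: found 5  -> best so far (nothing satisfies yet)
     *             found 12 -> larger than best, becomes best
     *             found 10 -> satisfies and smaller than 12, preferred
     *             found 8  -> exact fit, used immediately */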
1820 +static int ext3_mb_try_best_found(struct ext3_allocation_context *ac,
1821 + struct ext3_buddy *e3b)
1823 + struct ext3_free_extent ex = ac->ac_b_ex;
1824 + int group = ex.fe_group, max, err;
1826 + BUG_ON(ex.fe_len <= 0);
1827 + err = ext3_mb_load_buddy(ac->ac_sb, group, e3b);
1831 + ext3_lock_group(ac->ac_sb, group);
1832 + max = mb_find_extent(e3b, 0, ex.fe_start, ex.fe_len, &ex);
1836 + ext3_mb_use_best_found(ac, e3b);
1839 + ext3_unlock_group(ac->ac_sb, group);
1840 + ext3_mb_release_desc(e3b);
1845 +static int ext3_mb_find_by_goal(struct ext3_allocation_context *ac,
1846 + struct ext3_buddy *e3b)
1848 + int group = ac->ac_g_ex.fe_group, max, err;
1849 + struct ext3_sb_info *sbi = EXT3_SB(ac->ac_sb);
1850 + struct ext3_super_block *es = sbi->s_es;
1851 + struct ext3_free_extent ex;
1853 + if (!(ac->ac_flags & EXT3_MB_HINT_TRY_GOAL))
1856 + err = ext3_mb_load_buddy(ac->ac_sb, group, e3b);
1860 + ext3_lock_group(ac->ac_sb, group);
1861 + max = mb_find_extent(e3b, 0, ac->ac_g_ex.fe_start,
1862 + ac->ac_g_ex.fe_len, &ex);
1864 + if (max >= ac->ac_g_ex.fe_len && ac->ac_g_ex.fe_len == sbi->s_stripe) {
1865 + unsigned long start;
1866 + start = (e3b->bd_group * EXT3_BLOCKS_PER_GROUP(ac->ac_sb) +
1867 + ex.fe_start + le32_to_cpu(es->s_first_data_block));
1868 + if (start % sbi->s_stripe == 0) {
1871 + ext3_mb_use_best_found(ac, e3b);
1873 + } else if (max >= ac->ac_g_ex.fe_len) {
1874 + BUG_ON(ex.fe_len <= 0);
1875 + BUG_ON(ex.fe_group != ac->ac_g_ex.fe_group);
1876 + BUG_ON(ex.fe_start != ac->ac_g_ex.fe_start);
1879 + ext3_mb_use_best_found(ac, e3b);
1880 + } else if (max > 0 && (ac->ac_flags & EXT3_MB_HINT_MERGE)) {
1881 + /* Sometimes, the caller may want to merge even a small
1882 + * number of blocks into an existing extent */
1883 + BUG_ON(ex.fe_len <= 0);
1884 + BUG_ON(ex.fe_group != ac->ac_g_ex.fe_group);
1885 + BUG_ON(ex.fe_start != ac->ac_g_ex.fe_start);
1888 + ext3_mb_use_best_found(ac, e3b);
1890 + ext3_unlock_group(ac->ac_sb, group);
1891 + ext3_mb_release_desc(e3b);
1897 + * The routine scans buddy structures (not bitmap!) from given order
1898 + * to max order and tries to find big enough chunk to satisfy the req
1900 +static void ext3_mb_simple_scan_group(struct ext3_allocation_context *ac,
1901 + struct ext3_buddy *e3b)
1903 + struct super_block *sb = ac->ac_sb;
1904 + struct ext3_group_info *grp = e3b->bd_info;
1908 + BUG_ON(ac->ac_2order <= 0);
1909 + for (i = ac->ac_2order; i <= sb->s_blocksize_bits + 1; i++) {
1910 + if (grp->bb_counters[i] == 0)
1913 + buddy = mb_find_buddy(e3b, i, &max);
1914 + BUG_ON(buddy == NULL);
1916 + k = mb_find_next_zero_bit(buddy, max, 0);
1921 + ac->ac_b_ex.fe_len = 1 << i;
1922 + ac->ac_b_ex.fe_start = k << i;
1923 + ac->ac_b_ex.fe_group = e3b->bd_group;
1925 + ext3_mb_use_best_found(ac, e3b);
1927 + BUG_ON(ac->ac_b_ex.fe_len != ac->ac_g_ex.fe_len);
1929 + if (EXT3_SB(sb)->s_mb_stats)
1930 + atomic_inc(&EXT3_SB(sb)->s_bal_2orders);
1937 + * The routine scans the group and measures all found extents.
1938 + * In order to optimize scanning, the caller must pass the number of
1939 + * free blocks in the group, so the routine can know the upper limit.
1941 +static void ext3_mb_complex_scan_group(struct ext3_allocation_context *ac,
1942 + struct ext3_buddy *e3b)
1944 + struct super_block *sb = ac->ac_sb;
1945 + void *bitmap = EXT3_MB_BITMAP(e3b);
1946 + struct ext3_free_extent ex;
1949 + free = e3b->bd_info->bb_free;
1950 + BUG_ON(free <= 0);
1952 + i = e3b->bd_info->bb_first_free;
1954 + while (free && ac->ac_status == AC_STATUS_CONTINUE) {
1955 + i = mb_find_next_zero_bit(bitmap, EXT3_BLOCKS_PER_GROUP(sb), i);
1956 + if (i >= EXT3_BLOCKS_PER_GROUP(sb)) {
1957 + BUG_ON(free != 0);
1961 + mb_find_extent(e3b, 0, i, ac->ac_g_ex.fe_len, &ex);
1962 + BUG_ON(ex.fe_len <= 0);
1963 + BUG_ON(free < ex.fe_len);
1965 + ext3_mb_measure_extent(ac, &ex, e3b);
1968 + free -= ex.fe_len;
1971 + ext3_mb_check_limits(ac, e3b, 1);
1975 + * This is a special case for storage like RAID5:
1976 + * we try to find stripe-aligned chunks for stripe-size requests
1978 +static void ext3_mb_scan_aligned(struct ext3_allocation_context *ac,
1979 + struct ext3_buddy *e3b)
1981 + struct super_block *sb = ac->ac_sb;
1982 + struct ext3_sb_info *sbi = EXT3_SB(sb);
1983 + void *bitmap = EXT3_MB_BITMAP(e3b);
1984 + struct ext3_free_extent ex;
1985 + unsigned long i, max;
1987 + BUG_ON(sbi->s_stripe == 0);
1989 + /* find first stripe-aligned block */
1990 + i = e3b->bd_group * EXT3_BLOCKS_PER_GROUP(sb)
1991 + + le32_to_cpu(sbi->s_es->s_first_data_block);
1992 + i = ((i + sbi->s_stripe - 1) / sbi->s_stripe) * sbi->s_stripe;
1993 + i = (i - le32_to_cpu(sbi->s_es->s_first_data_block))
1994 + % EXT3_BLOCKS_PER_GROUP(sb);
1996 + while (i < EXT3_BLOCKS_PER_GROUP(sb)) {
1997 + if (!mb_test_bit(i, bitmap)) {
1998 + max = mb_find_extent(e3b, 0, i, sbi->s_stripe, &ex);
1999 + if (max >= sbi->s_stripe) {
2002 + ext3_mb_use_best_found(ac, e3b);
2006 + i += sbi->s_stripe;
2010 +static int ext3_mb_good_group(struct ext3_allocation_context *ac,
2011 + int group, int cr)
2013 + struct ext3_group_info *grp = EXT3_GROUP_INFO(ac->ac_sb, group);
2014 + unsigned free, fragments, i, bits;
2016 + BUG_ON(cr < 0 || cr >= 4);
2017 + BUG_ON(EXT3_MB_GRP_NEED_INIT(grp));
2019 + free = grp->bb_free;
2020 + fragments = grp->bb_fragments;
2023 + if (fragments == 0)
2028 + BUG_ON(ac->ac_2order == 0);
2029 + bits = ac->ac_sb->s_blocksize_bits + 1;
2030 + for (i = ac->ac_2order; i <= bits; i++)
2031 + if (grp->bb_counters[i] > 0)
2035 + if ((free / fragments) >= ac->ac_g_ex.fe_len)
2039 + if (free >= ac->ac_g_ex.fe_len)
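Read together with the caller below, cr encodes progressively looser criteria (the cr == 3 fallthrough is elided from this excerpt; this is a reconstructed reading):

    /* cr 0: a free 2^ac_2order chunk exists (buddy counters)
     * cr 1: the average free extent, free / fragments, covers the request
     * cr 2: total free space covers the request
     * cr 3: any group with free blocks, as a last resort */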
2051 +int ext3_mb_regular_allocator(struct ext3_allocation_context *ac)
2053 + int group, i, cr, err = 0;
2054 + struct ext3_sb_info *sbi;
2055 + struct super_block *sb;
2056 + struct ext3_buddy e3b;
2059 + sbi = EXT3_SB(sb);
2060 + BUG_ON(ac->ac_status == AC_STATUS_FOUND);
2062 + /* first, try the goal */
2063 + err = ext3_mb_find_by_goal(ac, &e3b);
2064 + if (err || ac->ac_status == AC_STATUS_FOUND)
2067 + if (unlikely(ac->ac_flags & EXT3_MB_HINT_GOAL_ONLY))
2070 + i = ffs(ac->ac_g_ex.fe_len);
2071 + ac->ac_2order = 0;
2072 + if (i >= sbi->s_mb_order2_reqs) {
2074 + if ((ac->ac_g_ex.fe_len & (~(1 << (i - 1)))) == 0)
2075 + ac->ac_2order = i - 1;
2078 + /* if stream allocation is enabled, use global goal */
2079 + if ((ac->ac_g_ex.fe_len < sbi->s_mb_large_req) &&
2080 + (ac->ac_flags & EXT3_MB_HINT_DATA)) {
2081 + /* TBD: may be hot point */
2082 + spin_lock(&sbi->s_md_lock);
2083 + ac->ac_g_ex.fe_group = sbi->s_mb_last_group;
2084 + ac->ac_g_ex.fe_start = sbi->s_mb_last_start;
2085 + spin_unlock(&sbi->s_md_lock);
2088 + group = ac->ac_g_ex.fe_group;
2090 + /* Let's just scan groups to find a more or less suitable block */
2091 + cr = ac->ac_2order ? 0 : 1;
2093 + for (; cr < 4 && ac->ac_status == AC_STATUS_CONTINUE; cr++) {
2094 + ac->ac_criteria = cr;
2095 + for (i = 0; i < EXT3_SB(sb)->s_groups_count; group++, i++) {
2096 + struct ext3_group_info *grp;
2098 + if (group == EXT3_SB(sb)->s_groups_count)
2101 + /* quick check to skip empty groups */
2102 + grp = EXT3_GROUP_INFO(ac->ac_sb, group);
2103 + if (grp->bb_free == 0)
2106 + if (EXT3_MB_GRP_NEED_INIT(EXT3_GROUP_INFO(sb, group))) {
2107 + /* we need full data about the group
2108 + * to make a good selection */
2109 + err = ext3_mb_load_buddy(sb, group, &e3b);
2112 + ext3_mb_release_desc(&e3b);
2115 + /* check if the group is good for our criteria */
2116 + if (!ext3_mb_good_group(ac, group, cr))
2119 + err = ext3_mb_load_buddy(sb, group, &e3b);
2123 + ext3_lock_group(sb, group);
2124 + if (!ext3_mb_good_group(ac, group, cr)) {
2125 + /* somebody else has allocated from this group meanwhile */
2126 + ext3_unlock_group(sb, group);
2127 + ext3_mb_release_desc(&e3b);
2131 + ac->ac_groups_scanned++;
2133 + ext3_mb_simple_scan_group(ac, &e3b);
2134 + else if (cr == 1 && ac->ac_g_ex.fe_len == sbi->s_stripe)
2135 + ext3_mb_scan_aligned(ac, &e3b);
2137 + ext3_mb_complex_scan_group(ac, &e3b);
2139 + ext3_unlock_group(sb, group);
2140 + ext3_mb_release_desc(&e3b);
2142 + if (ac->ac_status != AC_STATUS_CONTINUE)
2147 + if (ac->ac_b_ex.fe_len > 0 && ac->ac_status != AC_STATUS_FOUND &&
2148 + !(ac->ac_flags & EXT3_MB_HINT_FIRST)) {
2150 + * We've been searching too long. Let's try to allocate
2151 + * the best chunk we've found so far
2154 + ext3_mb_try_best_found(ac, &e3b);
2155 + if (ac->ac_status != AC_STATUS_FOUND) {
2157 + * Someone luckier has already allocated it.
2158 + * The only thing we can do is just take the first found block(s).
2160 + printk(KERN_DEBUG "EXT3-fs: someone won our chunk\n");
2162 + ac->ac_b_ex.fe_group = 0;
2163 + ac->ac_b_ex.fe_start = 0;
2164 + ac->ac_b_ex.fe_len = 0;
2165 + ac->ac_status = AC_STATUS_CONTINUE;
2166 + ac->ac_flags |= EXT3_MB_HINT_FIRST;
2168 + atomic_inc(&sbi->s_mb_lost_chunks);
2176 +#ifdef EXT3_MB_HISTORY
2177 +struct ext3_mb_proc_session {
2178 + struct ext3_mb_history *history;
2179 + struct super_block *sb;
2184 +static void *ext3_mb_history_skip_empty(struct ext3_mb_proc_session *s,
2185 + struct ext3_mb_history *hs,
2188 + if (hs == s->history + s->max)
2190 + if (!first && hs == s->history + s->start)
2192 + while (hs->orig.fe_len == 0) {
2194 + if (hs == s->history + s->max)
2196 + if (hs == s->history + s->start)
2202 +static void *ext3_mb_seq_history_start(struct seq_file *seq, loff_t *pos)
2204 + struct ext3_mb_proc_session *s = seq->private;
2205 + struct ext3_mb_history *hs;
2209 + return SEQ_START_TOKEN;
2210 + hs = ext3_mb_history_skip_empty(s, s->history + s->start, 1);
2213 + while (--l && (hs = ext3_mb_history_skip_empty(s, ++hs, 0)) != NULL);
2217 +static void *ext3_mb_seq_history_next(struct seq_file *seq, void *v, loff_t *pos)
2219 + struct ext3_mb_proc_session *s = seq->private;
2220 + struct ext3_mb_history *hs = v;
2223 + if (v == SEQ_START_TOKEN)
2224 + return ext3_mb_history_skip_empty(s, s->history + s->start, 1);
2226 + return ext3_mb_history_skip_empty(s, ++hs, 0);
2229 +static int ext3_mb_seq_history_show(struct seq_file *seq, void *v)
2231 + char buf[25], buf2[25], buf3[25], *fmt;
2232 + struct ext3_mb_history *hs = v;
2234 + if (v == SEQ_START_TOKEN) {
2235 + seq_printf(seq, "%-5s %-8s %-23s %-23s %-23s %-5s "
2236 + "%-5s %-2s %-5s %-5s %-5s %-6s\n",
2237 + "pid", "inode", "original", "goal", "result","found",
2238 + "grps", "cr", "flags", "merge", "tail", "broken");
2242 + if (hs->op == EXT3_MB_HISTORY_ALLOC) {
2243 + fmt = "%-5u %-8u %-23s %-23s %-23s %-5u %-5u %-2u "
2244 + "%-5u %-5s %-5u %-6u\n";
2245 + sprintf(buf2, "%lu/%lu/%lu@%lu", hs->result.fe_group,
2246 + hs->result.fe_start, hs->result.fe_len,
2247 + hs->result.fe_logical);
2248 + sprintf(buf, "%lu/%lu/%lu@%lu", hs->orig.fe_group,
2249 + hs->orig.fe_start, hs->orig.fe_len,
2250 + hs->orig.fe_logical);
2251 + sprintf(buf3, "%lu/%lu/%lu@%lu", hs->goal.fe_group,
2252 + hs->goal.fe_start, hs->goal.fe_len,
2253 + hs->goal.fe_logical);
2254 + seq_printf(seq, fmt, hs->pid, hs->ino, buf, buf3, buf2,
2255 + hs->found, hs->groups, hs->cr, hs->flags,
2256 + hs->merged ? "M" : "", hs->tail,
2257 + hs->buddy ? 1 << hs->buddy : 0);
2258 + } else if (hs->op == EXT3_MB_HISTORY_PREALLOC) {
2259 + fmt = "%-5u %-8u %-23s %-23s %-23s\n";
2260 + sprintf(buf2, "%lu/%lu/%lu@%lu", hs->result.fe_group,
2261 + hs->result.fe_start, hs->result.fe_len,
2262 + hs->result.fe_logical);
2263 + sprintf(buf, "%lu/%lu/%lu@%lu", hs->orig.fe_group,
2264 + hs->orig.fe_start, hs->orig.fe_len,
2265 + hs->orig.fe_logical);
2266 + seq_printf(seq, fmt, hs->pid, hs->ino, buf, "", buf2);
2267 + } else if (hs->op == EXT3_MB_HISTORY_DISCARD) {
2268 + sprintf(buf2, "%lu/%lu/%lu", hs->result.fe_group,
2269 + hs->result.fe_start, hs->result.fe_len);
2270 + seq_printf(seq, "%-5u %-8u %-23s discard\n",
2271 + hs->pid, hs->ino, buf2);
2272 + } else if (hs->op == EXT3_MB_HISTORY_FREE) {
2273 + sprintf(buf2, "%lu/%lu/%lu", hs->result.fe_group,
2274 + hs->result.fe_start, hs->result.fe_len);
2275 + seq_printf(seq, "%-5u %-8u %-23s free\n",
2276 + hs->pid, hs->ino, buf2);
2278 + seq_printf(seq, "unknown op %d\n", hs->op);
2283 +static void ext3_mb_seq_history_stop(struct seq_file *seq, void *v)
2287 +static struct seq_operations ext3_mb_seq_history_ops = {
2288 + .start = ext3_mb_seq_history_start,
2289 + .next = ext3_mb_seq_history_next,
2290 + .stop = ext3_mb_seq_history_stop,
2291 + .show = ext3_mb_seq_history_show,
2294 +static int ext3_mb_seq_history_open(struct inode *inode, struct file *file)
2296 + struct super_block *sb = PDE(inode)->data;
2297 + struct ext3_sb_info *sbi = EXT3_SB(sb);
2298 + struct ext3_mb_proc_session *s;
2301 + s = kmalloc(sizeof(*s), GFP_KERNEL);
2305 + size = sizeof(struct ext3_mb_history) * sbi->s_mb_history_max;
2306 + s->history = kmalloc(size, GFP_KERNEL);
2307 + if (s->history == NULL) {
2312 + spin_lock(&sbi->s_mb_history_lock);
2313 + memcpy(s->history, sbi->s_mb_history, size);
2314 + s->max = sbi->s_mb_history_max;
2315 + s->start = sbi->s_mb_history_cur % s->max;
2316 + spin_unlock(&sbi->s_mb_history_lock);
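+ /* the copy above is made under s_mb_history_lock, so the seq_file
+ * walk operates on a stable snapshot while allocations keep logging
+ * to the live ring */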
2318 + rc = seq_open(file, &ext3_mb_seq_history_ops);
2320 + struct seq_file *m = (struct seq_file *)file->private_data;
2323 + kfree(s->history);
2330 +static int ext3_mb_seq_history_release(struct inode *inode, struct file *file)
2332 + struct seq_file *seq = (struct seq_file *)file->private_data;
2333 + struct ext3_mb_proc_session *s = seq->private;
2334 + kfree(s->history);
2336 + return seq_release(inode, file);
2339 +static ssize_t ext3_mb_seq_history_write(struct file *file,
2340 + const char __user *buffer,
2341 + size_t count, loff_t *ppos)
2343 + struct seq_file *seq = (struct seq_file *)file->private_data;
2344 + struct ext3_mb_proc_session *s = seq->private;
2345 + struct super_block *sb = s->sb;
2349 + if (count >= sizeof(str)) {
2350 + printk(KERN_ERR "EXT3-fs: %s string too long, max %u bytes\n",
2351 + "mb_history", (int)sizeof(str));
2352 + return -EOVERFLOW;
2355 + if (copy_from_user(str, buffer, count))
2358 + value = simple_strtol(str, NULL, 0);
2361 + EXT3_SB(sb)->s_mb_history_filter = value;
2366 +static struct file_operations ext3_mb_seq_history_fops = {
2367 + .owner = THIS_MODULE,
2368 + .open = ext3_mb_seq_history_open,
2370 + .write = ext3_mb_seq_history_write,
2371 + .llseek = seq_lseek,
2372 + .release = ext3_mb_seq_history_release,
2375 +static void *ext3_mb_seq_groups_start(struct seq_file *seq, loff_t *pos)
2377 + struct super_block *sb = seq->private;
2378 + struct ext3_sb_info *sbi = EXT3_SB(sb);
2381 + if (*pos < 0 || *pos >= sbi->s_groups_count)
2385 + return (void *) group;
2388 +static void *ext3_mb_seq_groups_next(struct seq_file *seq, void *v, loff_t *pos)
2390 + struct super_block *sb = seq->private;
2391 + struct ext3_sb_info *sbi = EXT3_SB(sb);
2395 + if (*pos < 0 || *pos >= sbi->s_groups_count)
2398 + return (void *) group;
2401 +static int ext3_mb_seq_groups_show(struct seq_file *seq, void *v)
2403 + struct super_block *sb = seq->private;
2404 + struct ext3_group_desc *gdp;
2405 + long group = (long) v;
2406 + int i, err, free = 0;
2407 + struct ext3_buddy e3b;
2409 + struct ext3_group_info info;
2410 + unsigned short counters[16];
2415 + seq_printf(seq, "#%-5s: %-5s %-5s %-5s %-5s %-5s "
2416 + "[ %-5s %-5s %-5s %-5s %-5s %-5s %-5s "
2417 + "%-5s %-5s %-5s %-5s %-5s %-5s %-5s ]\n",
2418 + "group", "free", "ingd", "frags", "first", "pa",
2419 + "2^0", "2^1", "2^2", "2^3", "2^4", "2^5","2^6",
2420 + "2^7", "2^8", "2^9", "2^10", "2^11", "2^12", "2^13");
2422 + i = (sb->s_blocksize_bits + 2) * sizeof(sg.info.bb_counters[0]) +
2423 + sizeof(struct ext3_group_info);
2424 + err = ext3_mb_load_buddy(sb, group, &e3b);
2426 + seq_printf(seq, "#%-5lu: I/O error\n", group);
2430 + gdp = ext3_get_group_desc(sb, group, NULL);
2432 + free = le16_to_cpu(gdp->bg_free_blocks_count);
2434 + ext3_lock_group(sb, group);
2435 + memcpy(&sg, EXT3_GROUP_INFO(sb, group), i);
2436 + ext3_unlock_group(sb, group);
2437 + ext3_mb_release_desc(&e3b);
2439 + seq_printf(seq, "#%-5lu: %-5u %-5u %-5u %-5u %-5lu [", group,
2440 + sg.info.bb_free, free,
2441 + sg.info.bb_fragments, sg.info.bb_first_free,
2442 + sg.info.bb_prealloc_nr);
2443 + for (i = 0; i <= 13; i++)
2444 + seq_printf(seq, " %-5u", i <= sb->s_blocksize_bits + 1 ?
2445 + sg.info.bb_counters[i] : 0);
2446 + seq_printf(seq, " ]\n");
2451 +static void ext3_mb_seq_groups_stop(struct seq_file *seq, void *v)
2455 +static struct seq_operations ext3_mb_seq_groups_ops = {
2456 + .start = ext3_mb_seq_groups_start,
2457 + .next = ext3_mb_seq_groups_next,
2458 + .stop = ext3_mb_seq_groups_stop,
2459 + .show = ext3_mb_seq_groups_show,
2462 +static int ext3_mb_seq_groups_open(struct inode *inode, struct file *file)
2464 + struct super_block *sb = PDE(inode)->data;
2467 + rc = seq_open(file, &ext3_mb_seq_groups_ops);
2469 + struct seq_file *m = (struct seq_file *)file->private_data;
2476 +static struct file_operations ext3_mb_seq_groups_fops = {
2477 + .owner = THIS_MODULE,
2478 + .open = ext3_mb_seq_groups_open,
2480 + .llseek = seq_lseek,
2481 + .release = seq_release,
2484 +static void ext3_mb_history_release(struct super_block *sb)
2486 + struct ext3_sb_info *sbi = EXT3_SB(sb);
2488 + remove_proc_entry("mb_groups", sbi->s_dev_proc);
2489 + remove_proc_entry("mb_history", sbi->s_dev_proc);
2491 + if (sbi->s_mb_history)
2492 + kfree(sbi->s_mb_history);
2495 +static void ext3_mb_history_init(struct super_block *sb)
2497 + struct ext3_sb_info *sbi = EXT3_SB(sb);
2500 + if (sbi->s_dev_proc != NULL) {
2501 + struct proc_dir_entry *p;
2502 + p = create_proc_entry("mb_history", S_IRUGO, sbi->s_dev_proc);
2504 + p->proc_fops = &ext3_mb_seq_history_fops;
2507 + p = create_proc_entry("mb_groups", S_IRUGO, sbi->s_dev_proc);
2509 + p->proc_fops = &ext3_mb_seq_groups_fops;
2514 + sbi->s_mb_history_max = 1000;
2515 + sbi->s_mb_history_cur = 0;
2516 + spin_lock_init(&sbi->s_mb_history_lock);
2517 + i = sbi->s_mb_history_max * sizeof(struct ext3_mb_history);
2518 + sbi->s_mb_history = kmalloc(i, GFP_KERNEL);
2519 + if (likely(sbi->s_mb_history != NULL))
2520 + memset(sbi->s_mb_history, 0, i);
2521 + /* if we can't allocate the history, then we simply won't use it */
2525 +ext3_mb_store_history(struct ext3_allocation_context *ac)
2527 + struct ext3_sb_info *sbi = EXT3_SB(ac->ac_sb);
2528 + struct ext3_mb_history h;
2530 + if (unlikely(sbi->s_mb_history == NULL))
2533 + if (!(ac->ac_op & sbi->s_mb_history_filter))
2537 + h.pid = current->pid;
2538 + h.ino = ac->ac_inode ? ac->ac_inode->i_ino : 0;
2539 + h.orig = ac->ac_o_ex;
2540 + h.result = ac->ac_b_ex;
2541 + h.flags = ac->ac_flags;
2542 + h.found = ac->ac_found;
2543 + h.groups = ac->ac_groups_scanned;
2544 + h.cr = ac->ac_criteria;
2545 + h.tail = ac->ac_tail;
2546 + h.buddy = ac->ac_buddy;
2549 + if (ac->ac_op == EXT3_MB_HISTORY_ALLOC) {
2550 + if (ac->ac_g_ex.fe_start == ac->ac_b_ex.fe_start &&
2551 + ac->ac_g_ex.fe_group == ac->ac_b_ex.fe_group)
2553 + h.goal = ac->ac_g_ex;
2554 + h.result = ac->ac_f_ex;
2557 + spin_lock(&sbi->s_mb_history_lock);
2558 + memcpy(sbi->s_mb_history + sbi->s_mb_history_cur, &h, sizeof(h));
2559 + if (++sbi->s_mb_history_cur >= sbi->s_mb_history_max)
2560 + sbi->s_mb_history_cur = 0;
2561 + spin_unlock(&sbi->s_mb_history_lock);
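+ /* s_mb_history is a fixed-size ring buffer: once s_mb_history_cur
+ * wraps, the oldest records are overwritten */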
2565 +#define ext3_mb_history_release(sb)
2566 +#define ext3_mb_history_init(sb)
2569 +int ext3_mb_init_backend(struct super_block *sb)
2571 + struct ext3_sb_info *sbi = EXT3_SB(sb);
2572 + int i, j, len, metalen;
2573 + int num_meta_group_infos =
2574 + (sbi->s_groups_count + EXT3_DESC_PER_BLOCK(sb) - 1) >>
2575 + EXT3_DESC_PER_BLOCK_BITS(sb);
2576 + struct ext3_group_info **meta_group_info;
2578 + /* An 8TB filesystem with 64-bit pointers requires a 4096-byte
2579 + * kmalloc. A 128KB kmalloc should suffice for a 256TB filesystem.
2580 + * So a two-level scheme suffices for now. */
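+ /* example for 4KB blocks: an 8TB filesystem has 2^16 groups; at
+ * 128 descriptors per block that is 512 first-level pointers, i.e.
+ * the 4096-byte kmalloc mentioned above */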
2581 + sbi->s_group_info = kmalloc(sizeof(*sbi->s_group_info) *
2582 + num_meta_group_infos, GFP_KERNEL);
2583 + if (sbi->s_group_info == NULL) {
2584 + printk(KERN_ERR "EXT3-fs: can't allocate buddy meta group\n");
2587 + sbi->s_buddy_cache = new_inode(sb);
2588 + if (sbi->s_buddy_cache == NULL) {
2589 + printk(KERN_ERR "EXT3-fs: can't get new inode\n");
2592 + EXT3_I(sbi->s_buddy_cache)->i_disksize = 0;
2594 + metalen = sizeof(*meta_group_info) << EXT3_DESC_PER_BLOCK_BITS(sb);
2595 + for (i = 0; i < num_meta_group_infos; i++) {
2596 + if ((i + 1) == num_meta_group_infos)
2597 + metalen = sizeof(*meta_group_info) *
2598 + (sbi->s_groups_count -
2599 + (i << EXT3_DESC_PER_BLOCK_BITS(sb)));
2600 + meta_group_info = kmalloc(metalen, GFP_KERNEL);
2601 + if (meta_group_info == NULL) {
2602 + printk(KERN_ERR "EXT3-fs: can't allocate mem for a "
2604 + goto err_freemeta;
2606 + sbi->s_group_info[i] = meta_group_info;
2610 + * calculate the needed size. if you change the bb_counters size,
2611 + * don't forget to update ext3_mb_generate_buddy()
2613 + len = sizeof(struct ext3_group_info);
2614 + len += sizeof(unsigned short) * (sb->s_blocksize_bits + 2);
2615 + for (i = 0; i < sbi->s_groups_count; i++) {
2616 + struct ext3_group_desc * desc;
2619 + sbi->s_group_info[i >> EXT3_DESC_PER_BLOCK_BITS(sb)];
2620 + j = i & (EXT3_DESC_PER_BLOCK(sb) - 1);
2622 + meta_group_info[j] = kmalloc(len, GFP_KERNEL);
2623 + if (meta_group_info[j] == NULL) {
2624 + printk(KERN_ERR "EXT3-fs: can't allocate buddy mem\n");
2626 + goto err_freebuddy;
2628 + desc = ext3_get_group_desc(sb, i, NULL);
2629 + if (desc == NULL) {
2630 + printk(KERN_ERR"EXT3-fs: can't read descriptor %u\n",i);
2631 + goto err_freebuddy;
2633 + memset(meta_group_info[j], 0, len);
2634 + set_bit(EXT3_GROUP_INFO_NEED_INIT_BIT,
2635 + &meta_group_info[j]->bb_state);
2637 + /* initialize bb_free to be able to skip
2638 + * empty groups without initialization */
2639 + meta_group_info[j]->bb_free =
2640 + le16_to_cpu(desc->bg_free_blocks_count);
2642 + INIT_LIST_HEAD(&meta_group_info[j]->bb_prealloc_list);
2644 +#ifdef DOUBLE_CHECK
2646 + struct buffer_head *bh;
2647 + meta_group_info[j]->bb_bitmap =
2648 + kmalloc(sb->s_blocksize, GFP_KERNEL);
2649 + BUG_ON(meta_group_info[j]->bb_bitmap == NULL);
2650 + bh = read_block_bitmap(sb, i);
2651 + BUG_ON(bh == NULL);
2652 + memcpy(meta_group_info[j]->bb_bitmap, bh->b_data,
2664 + kfree(EXT3_GROUP_INFO(sb, i));
2667 + i = num_meta_group_infos;
2670 + kfree(sbi->s_group_info[i]);
2671 + iput(sbi->s_buddy_cache);
2673 + kfree(sbi->s_group_info);
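+/* add one preallocation size to the per-superblock table; sizes larger
+ * than the usable space of a group (less, presumably, the block bitmap,
+ * inode bitmap and inode table) are rejected, and values must arrive in
+ * ascending order */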
2677 +static void ext3_mb_prealloc_table_add(struct ext3_sb_info *sbi, int value)
2681 + if (value > (sbi->s_blocks_per_group - 1 - 1 - sbi->s_itb_per_group))
2684 + for (i = 0; i < sbi->s_mb_prealloc_table_size; i++) {
2685 + if (sbi->s_mb_prealloc_table[i] == 0) {
2686 + sbi->s_mb_prealloc_table[i] = value;
2690 + /* callers must add values in ascending order */
2691 + if (value <= sbi->s_mb_prealloc_table[i])
2696 +int ext3_mb_init(struct super_block *sb, int needs_recovery)
2698 + struct ext3_sb_info *sbi = EXT3_SB(sb);
2699 + unsigned i, offset, max;
2701 + if (!test_opt(sb, MBALLOC))
2704 + i = (sb->s_blocksize_bits + 2) * sizeof(unsigned short);
2706 + sbi->s_mb_offsets = kmalloc(i, GFP_KERNEL);
2707 + if (sbi->s_mb_offsets == NULL) {
2708 + clear_opt(sbi->s_mount_opt, MBALLOC);
2711 + sbi->s_mb_maxs = kmalloc(i, GFP_KERNEL);
2712 + if (sbi->s_mb_maxs == NULL) {
2713 + clear_opt(sbi->s_mount_opt, MBALLOC);
2714 + kfree(sbi->s_mb_offsets);
2718 + /* order 0 is regular bitmap */
2719 + sbi->s_mb_maxs[0] = sb->s_blocksize << 3;
2720 + sbi->s_mb_offsets[0] = 0;
2724 + max = sb->s_blocksize << 2;
2726 + sbi->s_mb_offsets[i] = offset;
2727 + sbi->s_mb_maxs[i] = max;
2728 + offset += 1 << (sb->s_blocksize_bits - i);
2731 + } while (i <= sb->s_blocksize_bits + 1);
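+ /* layout sketch, assuming 4KB blocks and the usual i = 1, offset = 0
+ * initialization elided above: the order-1 bitmap (16384 bits) sits
+ * at byte 0 of the buddy block, order-2 (8192 bits) at byte 2048,
+ * and so on, so orders 1..13 all pack into a single block */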
2733 + /* init file for buddy data */
2734 + if ((i = ext3_mb_init_backend(sb))) {
2735 + clear_opt(sbi->s_mount_opt, MBALLOC);
2736 + kfree(sbi->s_mb_offsets);
2737 + kfree(sbi->s_mb_maxs);
2741 + spin_lock_init(&sbi->s_md_lock);
2742 + INIT_LIST_HEAD(&sbi->s_active_transaction);
2743 + INIT_LIST_HEAD(&sbi->s_closed_transaction);
2744 + INIT_LIST_HEAD(&sbi->s_committed_transaction);
2745 + spin_lock_init(&sbi->s_bal_lock);
2747 + sbi->s_mb_max_to_scan = MB_DEFAULT_MAX_TO_SCAN;
2748 + sbi->s_mb_min_to_scan = MB_DEFAULT_MIN_TO_SCAN;
2749 + sbi->s_mb_max_groups_to_scan = MB_DEFAULT_MAX_GROUPS_TO_SCAN;
2750 + sbi->s_mb_stats = MB_DEFAULT_STATS;
2751 + sbi->s_mb_order2_reqs = MB_DEFAULT_ORDER2_REQS;
2752 + sbi->s_mb_history_filter = EXT3_MB_HISTORY_DEFAULT;
2754 + if (sbi->s_stripe == 0) {
2755 + sbi->s_mb_prealloc_table_size = 8;
2756 + i = sbi->s_mb_prealloc_table_size * sizeof(unsigned long);
2757 + sbi->s_mb_prealloc_table = kmalloc(i, GFP_NOFS);
2758 + if (sbi->s_mb_prealloc_table == NULL) {
2759 + clear_opt(sbi->s_mount_opt, MBALLOC);
2760 + kfree(sbi->s_mb_offsets);
2761 + kfree(sbi->s_mb_maxs);
2764 + memset(sbi->s_mb_prealloc_table, 0, i);
2766 + ext3_mb_prealloc_table_add(sbi, 4);
2767 + ext3_mb_prealloc_table_add(sbi, 8);
2768 + ext3_mb_prealloc_table_add(sbi, 16);
2769 + ext3_mb_prealloc_table_add(sbi, 32);
2770 + ext3_mb_prealloc_table_add(sbi, 64);
2771 + ext3_mb_prealloc_table_add(sbi, 128);
2772 + ext3_mb_prealloc_table_add(sbi, 256);
2773 + ext3_mb_prealloc_table_add(sbi, 512);
2775 + sbi->s_mb_small_req = 256;
2776 + sbi->s_mb_large_req = 1024;
2777 + sbi->s_mb_group_prealloc = 512;
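+ /* all three are in blocks: with 4KB blocks, 256 = 1MB, 1024 = 4MB
+ * and 512 = 2MB */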
2779 + sbi->s_mb_prealloc_table_size = 3;
2780 + i = sbi->s_mb_prealloc_table_size * sizeof(unsigned long);
2781 + sbi->s_mb_prealloc_table = kmalloc(i, GFP_NOFS);
2782 + if (sbi->s_mb_prealloc_table == NULL) {
2783 + clear_opt(sbi->s_mount_opt, MBALLOC);
2784 + kfree(sbi->s_mb_offsets);
2785 + kfree(sbi->s_mb_maxs);
2788 + memset(sbi->s_mb_prealloc_table, 0, i);
2790 + ext3_mb_prealloc_table_add(sbi, sbi->s_stripe);
2791 + ext3_mb_prealloc_table_add(sbi, sbi->s_stripe * 2);
2792 + ext3_mb_prealloc_table_add(sbi, sbi->s_stripe * 4);
2794 + sbi->s_mb_small_req = sbi->s_stripe;
2795 + sbi->s_mb_large_req = sbi->s_stripe * 8;
2796 + sbi->s_mb_group_prealloc = sbi->s_stripe * 4;
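+ /* keeping the request thresholds and the group PA at multiples of
+ * s_stripe keeps preallocated chunks aligned to RAID stripe
+ * boundaries */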
2799 + i = sizeof(struct ext3_locality_group) * num_possible_cpus();
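+ /* one locality group per possible CPU: small requests issued from
+ * the same CPU share a group preallocation instead of scattering
+ * across the disk */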
2800 + sbi->s_locality_groups = kmalloc(i, GFP_NOFS);
2801 + if (sbi->s_locality_groups == NULL) {
2802 + clear_opt(sbi->s_mount_opt, MBALLOC);
2803 + kfree(sbi->s_mb_prealloc_table);
2804 + kfree(sbi->s_mb_offsets);
2805 + kfree(sbi->s_mb_maxs);
2808 + for (i = 0; i < num_possible_cpus(); i++) {
2809 + struct ext3_locality_group *lg;
2810 + lg = &sbi->s_locality_groups[i];
2811 + sema_init(&lg->lg_sem, 1);
2812 + INIT_LIST_HEAD(&lg->lg_prealloc_list);
2813 + spin_lock_init(&lg->lg_prealloc_lock);
2816 + ext3_mb_init_per_dev_proc(sb);
2817 + ext3_mb_history_init(sb);
2819 + printk("EXT3-fs: mballoc enabled\n");
2823 +void ext3_mb_cleanup_pa(struct ext3_group_info *grp)
2825 + struct ext3_prealloc_space *pa;
2826 + struct list_head *cur, *tmp;
2829 + list_for_each_safe(cur, tmp, &grp->bb_prealloc_list) {
2830 + pa = list_entry(cur, struct ext3_prealloc_space, pa_group_list);
2831 + list_del_rcu(&pa->pa_group_list);
2836 + mb_debug("mballoc: %u PAs left\n", count);
2840 +int ext3_mb_release(struct super_block *sb)
2842 + struct ext3_sb_info *sbi = EXT3_SB(sb);
2843 + int i, num_meta_group_infos;
2845 + if (!test_opt(sb, MBALLOC))
2848 + /* release freed, non-committed blocks */
2849 + spin_lock(&sbi->s_md_lock);
2850 + list_splice_init(&sbi->s_closed_transaction,
2851 + &sbi->s_committed_transaction);
2852 + list_splice_init(&sbi->s_active_transaction,
2853 + &sbi->s_committed_transaction);
2854 + spin_unlock(&sbi->s_md_lock);
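+ /* every pending free record now sits on the committed list, so the
+ * call below returns them all to the buddy before teardown */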
2855 + ext3_mb_free_committed_blocks(sb);
2857 + if (sbi->s_group_info) {
2858 + for (i = 0; i < sbi->s_groups_count; i++) {
2859 +#ifdef DOUBLE_CHECK
2860 + if (EXT3_GROUP_INFO(sb, i)->bb_bitmap)
2861 + kfree(EXT3_GROUP_INFO(sb, i)->bb_bitmap);
2863 + ext3_mb_cleanup_pa(EXT3_GROUP_INFO(sb, i));
2864 + kfree(EXT3_GROUP_INFO(sb, i));
2866 + num_meta_group_infos = (sbi->s_groups_count +
2867 + EXT3_DESC_PER_BLOCK(sb) - 1) >>
2868 + EXT3_DESC_PER_BLOCK_BITS(sb);
2869 + for (i = 0; i < num_meta_group_infos; i++)
2870 + kfree(sbi->s_group_info[i]);
2871 + kfree(sbi->s_group_info);
2873 + if (sbi->s_mb_offsets)
2874 + kfree(sbi->s_mb_offsets);
2875 + if (sbi->s_mb_maxs)
2876 + kfree(sbi->s_mb_maxs);
2877 + if (sbi->s_buddy_cache)
2878 + iput(sbi->s_buddy_cache);
2879 + if (sbi->s_mb_stats) {
2880 + printk("EXT3-fs: mballoc: %u blocks %u reqs (%u success)\n",
2881 + atomic_read(&sbi->s_bal_allocated),
2882 + atomic_read(&sbi->s_bal_reqs),
2883 + atomic_read(&sbi->s_bal_success));
2884 + printk("EXT3-fs: mballoc: %u extents scanned, %u goal hits, "
2885 + "%u 2^N hits, %u breaks, %u lost\n",
2886 + atomic_read(&sbi->s_bal_ex_scanned),
2887 + atomic_read(&sbi->s_bal_goals),
2888 + atomic_read(&sbi->s_bal_2orders),
2889 + atomic_read(&sbi->s_bal_breaks),
2890 + atomic_read(&sbi->s_mb_lost_chunks));
2891 + printk("EXT3-fs: mballoc: %lu generated and it took %Lu\n",
2892 + sbi->s_mb_buddies_generated++,
2893 + sbi->s_mb_generation_time);
2894 + printk("EXT3-fs: mballoc: %u preallocated, %u discarded\n",
2895 + atomic_read(&sbi->s_mb_preallocated),
2896 + atomic_read(&sbi->s_mb_discarded));
2899 + if (sbi->s_locality_groups)
2900 + kfree(sbi->s_locality_groups);
2902 + ext3_mb_history_release(sb);
2903 + ext3_mb_destroy_per_dev_proc(sb);
2908 +void ext3_mb_free_committed_blocks(struct super_block *sb)
2910 + struct ext3_sb_info *sbi = EXT3_SB(sb);
2911 + int err, i, count = 0, count2 = 0;
2912 + struct ext3_free_metadata *md;
2913 + struct ext3_buddy e3b;
2915 + if (list_empty(&sbi->s_committed_transaction))
2918 + /* there are committed blocks still to be freed */
2920 + /* get next array of blocks */
2922 + spin_lock(&sbi->s_md_lock);
2923 + if (!list_empty(&sbi->s_committed_transaction)) {
2924 + md = list_entry(sbi->s_committed_transaction.next,
2925 + struct ext3_free_metadata, list);
2926 + list_del(&md->list);
2928 + spin_unlock(&sbi->s_md_lock);
2933 + mb_debug("gonna free %u blocks in group %u (0x%p):",
2934 + md->num, md->group, md);
2936 + err = ext3_mb_load_buddy(sb, md->group, &e3b);
2937 + /* we expect to find an existing buddy because it's pinned */
2940 + /* there are blocks to put back into the buddy to make them really free */
2943 + ext3_lock_group(sb, md->group);
2944 + for (i = 0; i < md->num; i++) {
2945 + mb_debug(" %u", md->blocks[i]);
2946 + err = mb_free_blocks(NULL, &e3b, md->blocks[i], 1);
2950 + ext3_unlock_group(sb, md->group);
2952 + /* balance refcounts from ext3_mb_free_metadata() */
2953 + page_cache_release(e3b.bd_buddy_page);
2954 + page_cache_release(e3b.bd_bitmap_page);
2957 + ext3_mb_release_desc(&e3b);
2961 + mb_debug("freed %u blocks in %u structures\n", count, count2);
2964 +#define EXT3_MB_STATS_NAME "stats"
2965 +#define EXT3_MB_MAX_TO_SCAN_NAME "max_to_scan"
2966 +#define EXT3_MB_MIN_TO_SCAN_NAME "min_to_scan"
2967 +#define EXT3_MB_ORDER2_REQ "order2_req"
2968 +#define EXT3_MB_SMALL_REQ "small_req"
2969 +#define EXT3_MB_LARGE_REQ "large_req"
2970 +#define EXT3_MB_PREALLOC_TABLE "prealloc_table"
2971 +#define EXT3_MB_GROUP_PREALLOC "group_prealloc"
2973 +static int ext3_mb_read_prealloc_table(char *page, char **start, off_t off,
2974 + int count, int *eof, void *data)
2976 + struct ext3_sb_info *sbi = data;
2984 + for (i = 0; i < sbi->s_mb_prealloc_table_size; i++)
2985 + len += sprintf(page + len, "%lu ",
2986 + sbi->s_mb_prealloc_table[i]);
2987 + len += sprintf(page + len, "\n");
2993 +static int ext3_mb_write_prealloc_table(struct file *file,
2994 + const char __user *buf,
2995 + unsigned long cnt, void *data)
2997 + struct ext3_sb_info *sbi = data;
2998 + unsigned long value;
2999 + unsigned long prev = 0;
3003 + unsigned long *new_table;
3007 + if (cnt >= sizeof(str))
3009 + if (copy_from_user(str, buf, cnt))
3015 + while (cur < end) {
3016 + while ((cur < end) && (*cur == ' ')) cur++;
3017 + value = simple_strtol(cur, &cur, 0);
3020 + if (value <= prev)