1 Index: linux-2.6.9-full/include/linux/ext3_fs.h
2 ===================================================================
3 --- linux-2.6.9-full.orig/include/linux/ext3_fs.h 2007-06-08 23:44:08.000000000 +0400
4 +++ linux-2.6.9-full/include/linux/ext3_fs.h 2007-10-17 22:25:01.000000000 +0400
5 @@ -57,6 +57,30 @@ struct statfs;
6 #define ext3_debug(f, a...) do {} while (0)
9 +#define EXT3_MULTIBLOCK_ALLOCATOR 1
11 +#define EXT3_MB_HINT_MERGE 1 /* prefer goal again. length */
12 +#define EXT3_MB_HINT_RESERVED 2 /* blocks already reserved */
13 +#define EXT3_MB_HINT_METADATA 4 /* metadata is being allocated */
14 +#define EXT3_MB_HINT_FIRST 8 /* first blocks in the file */
15 +#define EXT3_MB_HINT_BEST 16 /* search for the best chunk */
16 +#define EXT3_MB_HINT_DATA 32 /* data is being allocated */
17 +#define EXT3_MB_HINT_NOPREALLOC 64 /* don't preallocate (for tails) */
18 +#define EXT3_MB_HINT_GROUP_ALLOC 128 /* allocate for locality group */
19 +#define EXT3_MB_HINT_GOAL_ONLY 256 /* allocate goal blocks or none */
21 +struct ext3_allocation_request {
22 + struct inode *inode; /* target inode for block we're allocating */
23 + unsigned long logical; /* logical block in target inode */
24 + unsigned long goal; /* phys. target (a hint) */
25 + unsigned long lleft; /* the closest logical allocated block to the left */
26 + unsigned long pleft; /* phys. block for ^^^ */
27 + unsigned long lright; /* the closest logical allocated block to the right */
28 + unsigned long pright; /* phys. block for ^^^ */
29 + unsigned long len; /* how many blocks we want to allocate */
30 + unsigned long flags; /* flags. see above EXT3_MB_HINT_* */
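A minimal usage sketch for this request structure, assuming only the declarations added by this patch; the handle, inode and block numbers below are hypothetical and error handling is reduced to the errp out-parameter:

	struct ext3_allocation_request ar;
	unsigned long block;
	int err;

	memset(&ar, 0, sizeof(ar));
	ar.inode = inode;		/* target inode, assumed in scope */
	ar.logical = 100;		/* logical block to be mapped */
	ar.goal = 5000;			/* physical hint, e.g. near the previous extent */
	ar.len = 8;			/* ask for 8 blocks */
	ar.flags = EXT3_MB_HINT_DATA;	/* plain data allocation */

	/* ext3_mb_new_blocks() is declared later in this patch; it returns
	 * the first allocated block and stores the error code in err */
	block = ext3_mb_new_blocks(handle, &ar, &err);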
34 * Special inodes numbers
36 @@ -387,6 +411,14 @@ struct ext3_inode {
37 #define ext3_find_first_zero_bit ext2_find_first_zero_bit
38 #define ext3_find_next_zero_bit ext2_find_next_zero_bit
40 +#ifndef ext2_find_next_le_bit
41 +#ifdef __LITTLE_ENDIAN
42 +#define ext2_find_next_le_bit(addr, size, off) find_next_bit((addr), (size), (off))
44 +#error "mballoc needs a patch for big-endian systems - CFS bug 10634"
45 +#endif /* __LITTLE_ENDIAN */
46 +#endif /* !ext2_find_next_le_bit */
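For context: ext2-style bitmaps use little-endian bit order on disk, so on a little-endian CPU the generic find_next_bit() already walks bits in on-disk order and the #define above is safe; on a big-endian CPU the native bit order differs, hence the #error pointing at CFS bug 10634 rather than silently scanning the wrong bits.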
49 * Maximal mount counts between two filesystem checks
51 @@ -763,6 +795,20 @@ extern unsigned long ext3_count_dirs (st
52 extern void ext3_check_inodes_bitmap (struct super_block *);
53 extern unsigned long ext3_count_free (struct buffer_head *, unsigned);
56 +extern long ext3_mb_stats;
57 +extern long ext3_mb_max_to_scan;
58 +extern int ext3_mb_init(struct super_block *, int);
59 +extern int ext3_mb_release(struct super_block *);
60 +extern unsigned long ext3_mb_new_blocks(handle_t *, struct ext3_allocation_request *, int *);
61 +extern int ext3_mb_reserve_blocks(struct super_block *, int);
62 +extern void ext3_mb_release_blocks(struct super_block *, int);
64 +extern void ext3_mb_discard_inode_preallocations(struct inode *);
65 +extern int __init init_ext3_mb_proc(void);
66 +extern void exit_ext3_mb_proc(void);
67 +extern void ext3_mb_free_blocks(handle_t *, struct inode *, unsigned long, unsigned long, int, int *);
71 extern int ext3_block_truncate_page(handle_t *, struct page *,
72 @@ -804,6 +850,10 @@ extern int ext3_group_extend(struct supe
73 unsigned long n_blocks_count);
76 +extern struct proc_dir_entry *proc_root_ext3;
77 +extern int __init init_ext3_proc(void);
78 +extern void exit_ext3_proc(void);
80 extern void ext3_error (struct super_block *, const char *, const char *, ...)
81 __attribute__ ((format (printf, 3, 4)));
82 extern void __ext3_std_error (struct super_block *, const char *, int);
83 Index: linux-2.6.9-full/include/linux/ext3_fs_sb.h
84 ===================================================================
85 --- linux-2.6.9-full.orig/include/linux/ext3_fs_sb.h 2007-06-08 23:44:07.000000000 +0400
86 +++ linux-2.6.9-full/include/linux/ext3_fs_sb.h 2007-10-17 22:25:01.000000000 +0400
87 @@ -81,6 +81,61 @@ struct ext3_sb_info {
88 char *s_qf_names[MAXQUOTAS]; /* Names of quota files with journalled quota */
89 int s_jquota_fmt; /* Format of quota to use */
92 + /* for buddy allocator */
93 + struct ext3_group_info ***s_group_info;
94 + struct inode *s_buddy_cache;
95 + long s_blocks_reserved;
96 + spinlock_t s_reserve_lock;
97 + struct list_head s_active_transaction;
98 + struct list_head s_closed_transaction;
99 + struct list_head s_committed_transaction;
100 + spinlock_t s_md_lock;
101 + tid_t s_last_transaction;
102 + unsigned short *s_mb_offsets, *s_mb_maxs;
105 + unsigned long s_mb_factor;
106 + unsigned long s_stripe;
107 + unsigned long s_mb_stream_request;
108 + unsigned long s_mb_max_to_scan;
109 + unsigned long s_mb_min_to_scan;
110 + unsigned long s_mb_max_groups_to_scan;
111 + unsigned long s_mb_stats;
112 + unsigned long s_mb_order2_reqs;
114 + /* history to debug policy */
115 + struct ext3_mb_history *s_mb_history;
116 + int s_mb_history_cur;
117 + int s_mb_history_max;
118 + int s_mb_history_num;
119 + struct proc_dir_entry *s_dev_proc;
120 + spinlock_t s_mb_history_lock;
121 + int s_mb_history_filter;
123 + /* stats for buddy allocator */
124 + spinlock_t s_mb_pa_lock;
125 + atomic_t s_bal_reqs; /* number of reqs with len > 1 */
126 + atomic_t s_bal_success; /* we found long enough chunks */
127 + atomic_t s_bal_allocated; /* in blocks */
128 + atomic_t s_bal_ex_scanned; /* total extents scanned */
129 + atomic_t s_bal_goals; /* goal hits */
130 + atomic_t s_bal_breaks; /* too long searches */
131 + atomic_t s_bal_2orders; /* 2^order hits */
132 + spinlock_t s_bal_lock;
133 + unsigned long s_mb_buddies_generated;
134 + unsigned long long s_mb_generation_time;
135 + atomic_t s_mb_lost_chunks;
136 + atomic_t s_mb_preallocated;
137 + atomic_t s_mb_discarded;
139 + /* locality groups */
140 + struct ext3_locality_group *s_locality_groups;
144 +#define EXT3_GROUP_INFO(sb, group) \
145 + EXT3_SB(sb)->s_group_info[(group) >> EXT3_DESC_PER_BLOCK_BITS(sb)] \
146 + [(group) & (EXT3_DESC_PER_BLOCK(sb) - 1)]
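The macro above is a two-level lookup: group descriptors are packed EXT3_DESC_PER_BLOCK(sb) per second-level array. A worked example, assuming 4KB blocks where EXT3_DESC_PER_BLOCK(sb) is 128 and EXT3_DESC_PER_BLOCK_BITS(sb) is 7: group 300 resolves to s_group_info[300 >> 7][300 & 127], i.e. s_group_info[2][44].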
148 #endif /* _LINUX_EXT3_FS_SB */
149 Index: linux-2.6.9-full/fs/ext3/super.c
150 ===================================================================
151 --- linux-2.6.9-full.orig/fs/ext3/super.c 2007-06-08 23:44:08.000000000 +0400
152 +++ linux-2.6.9-full/fs/ext3/super.c 2007-10-17 22:26:27.000000000 +0400
153 @@ -394,6 +394,7 @@ void ext3_put_super (struct super_block
154 struct ext3_super_block *es = sbi->s_es;
157 + ext3_mb_release(sb);
158 ext3_ext_release(sb);
159 ext3_xattr_put_super(sb);
160 journal_destroy(sbi->s_journal);
161 @@ -438,6 +439,8 @@ void ext3_put_super (struct super_block
162 invalidate_bdev(sbi->journal_bdev, 0);
163 ext3_blkdev_remove(sbi);
165 + remove_proc_entry(sb->s_id, proc_root_ext3);
166 + sbi->s_dev_proc = NULL;
167 sb->s_fs_info = NULL;
170 @@ -463,6 +466,8 @@ static struct inode *ext3_alloc_inode(st
171 ei->vfs_inode.i_version = 1;
173 memset(&ei->i_cached_extent, 0, sizeof(ei->i_cached_extent));
174 + INIT_LIST_HEAD(&ei->i_prealloc_list);
175 + spin_lock_init(&ei->i_prealloc_lock);
176 return &ei->vfs_inode;
179 @@ -1353,6 +1358,13 @@ static int ext3_fill_super (struct super
180 sbi->s_mount_opt = 0;
181 sbi->s_resuid = EXT3_DEF_RESUID;
182 sbi->s_resgid = EXT3_DEF_RESGID;
183 + sbi->s_dev_proc = proc_mkdir(sb->s_id, proc_root_ext3);
184 + if (sbi->s_dev_proc == NULL) {
185 + printk(KERN_ERR "EXT3-fs: Unable to create %s\n", sb->s_id);
186 + sb->s_fs_info = NULL;
191 blocksize = sb_min_blocksize(sb, EXT3_MIN_BLOCK_SIZE);
193 @@ -1729,6 +1741,8 @@ failed_mount:
194 ext3_blkdev_remove(sbi);
197 + remove_proc_entry(sb->s_id, proc_root_ext3);
198 + sbi->s_dev_proc = NULL;
199 sb->s_fs_info = NULL;
202 @@ -2593,9 +2607,47 @@ static struct file_system_type ext3_fs_t
203 .fs_flags = FS_REQUIRES_DEV,
206 +#define EXT3_ROOT "ext3"
207 +struct proc_dir_entry *proc_root_ext3;
209 +int __init init_ext3_proc(void)
211 + struct proc_dir_entry *proc;
214 + if ((ret = init_ext3_mb_proc()))
217 + proc_root_ext3 = proc_mkdir(EXT3_ROOT, proc_root_fs);
218 + if (proc_root_ext3 == NULL) {
219 + printk(KERN_ERR "EXT3-fs: Unable to create %s\n", EXT3_ROOT);
227 + exit_ext3_mb_proc();
232 +void exit_ext3_proc(void)
234 + exit_ext3_mb_proc();
235 + remove_proc_entry(EXT3_ROOT, proc_root_fs);
238 static int __init init_ext3_fs(void)
240 - int err = init_ext3_xattr();
243 + err = init_ext3_proc();
247 + err = init_ext3_xattr();
250 err = init_inodecache();
251 @@ -2617,6 +2669,7 @@ static void __exit exit_ext3_fs(void)
252 unregister_filesystem(&ext3_fs_type);
253 destroy_inodecache();
258 int ext3_map_inode_page(struct inode *inode, struct page *page,
259 Index: linux-2.6.9-full/fs/ext3/mballoc.c
260 ===================================================================
261 --- linux-2.6.9-full.orig/fs/ext3/mballoc.c 2007-10-17 21:59:51.072534980 +0400
262 +++ linux-2.6.9-full/fs/ext3/mballoc.c 2007-10-17 23:09:22.000000000 +0400
265 + * Copyright (c) 2003-2006, Cluster File Systems, Inc, info@clusterfs.com
266 + * Written by Alex Tomas <alex@clusterfs.com>
268 + * This program is free software; you can redistribute it and/or modify
269 + * it under the terms of the GNU General Public License version 2 as
270 + * published by the Free Software Foundation.
272 + * This program is distributed in the hope that it will be useful,
273 + * but WITHOUT ANY WARRANTY; without even the implied warranty of
274 + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
275 + * GNU General Public License for more details.
277 + * You should have received a copy of the GNU General Public License
278 + * along with this program; if not, write to the Free Software
279 + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA
284 + * mballoc.c contains the multiblock allocation routines
287 +#include <linux/time.h>
288 +#include <linux/fs.h>
289 +#include <linux/namei.h>
290 +#include <linux/ext3_jbd.h>
291 +#include <linux/jbd.h>
292 +#include <linux/ext3_fs.h>
293 +#include <linux/quotaops.h>
294 +#include <linux/buffer_head.h>
295 +#include <linux/module.h>
296 +#include <linux/swap.h>
297 +#include <linux/proc_fs.h>
298 +#include <linux/pagemap.h>
299 +#include <linux/seq_file.h>
300 +#include <linux/version.h>
304 + * - test ext3_ext_search_left() and ext3_ext_search_right()
305 + * - search for metadata in few groups
308 + * - normalization should take into account whether file is still open
309 + * - discard preallocations if no free space left (policy?)
310 + * - don't normalize tails
312 + * - reservation for superuser
315 + * - bitmap read-ahead (proposed by Oleg Drokin aka green)
316 + * - track min/max extents in each group for better group selection
317 + * - mb_mark_used() may allocate chunk right after splitting buddy
318 + * - tree of groups sorted by number of free blocks
323 + * mballoc operates on the following data:
325 + * - in-core buddy (actually includes buddy and bitmap)
326 + * - preallocation descriptors (PAs)
328 + * there are two types of preallocations:
330 + * assigned to a specific inode and can be used for this inode only.
331 + * it describes the part of the inode's space preallocated to specific
332 + * physical blocks. any block from that preallocation can be used
333 + * independently. the descriptor just tracks the number of blocks left
334 + * unused. so, before taking some block from the descriptor, one must
335 + * make sure the corresponding logical block isn't allocated yet. this
336 + * also means that freeing any block within descriptor's range
337 + * must discard all preallocated blocks.
339 + * assigned to a specific locality group, which does not translate to
340 + * a permanent set of inodes: an inode can join and leave the group. space
341 + * from this type of preallocation can be used for any inode. thus
342 + * it's consumed from the beginning to the end.
344 + * relation between them can be expressed as:
345 + * in-core buddy = on-disk bitmap + preallocation descriptors
347 + * this means the blocks mballoc considers used are:
348 + * - allocated blocks (persistent)
349 + * - preallocated blocks (non-persistent)
351 + * consistency in mballoc world means that at any time a block is either
352 + * free or used in ALL structures. notice: "any time" should not be read
353 + * literally -- time is discrete and delimited by locks.
355 + * to keep it simple, we don't use block numbers, instead we count number of
356 + * blocks: how many blocks marked used/free in on-disk bitmap, buddy and PA.
358 + * all operations can be expressed as:
359 + * - init buddy: buddy = on-disk + PAs
360 + * - new PA: buddy += N; PA = N
361 + * - use inode PA: on-disk += N; PA -= N
362 + * - discard inode PA: buddy -= on-disk - PA; PA = 0
363 + * - use locality group PA: on-disk += N; PA -= N
364 + * - discard locality group PA: buddy -= PA; PA = 0
365 + * note: 'buddy -= on-disk - PA' is used to show that on-disk bitmap
366 + * is used in real operation because we can't know actual used
367 + * bits from PA, only from on-disk bitmap
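A worked instance of this accounting, with made-up counts: the on-disk bitmap shows 100 blocks used and a new PA of N = 16 is created, so buddy init yields 100 + 16 = 116 used blocks. After the inode consumes 10 of them, on-disk = 110 and PA = 6 while the buddy still shows 116. Discarding the PA then frees the 6 never-consumed blocks in the buddy (found by scanning the on-disk bitmap, per the note above), bringing the buddy back to the 110 blocks that are truly allocated.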
369 + * if we follow this strict logic, then all operations above should be atomic.
370 + * given some of them can block, we'd have to use something like semaphores
371 + * killing performance on high-end SMP hardware. let's try to relax it using
372 + * the following knowledge:
373 + * 1) if buddy is referenced, it's already initialized
374 + * 2) while block is used in buddy and the buddy is referenced,
375 + * nobody can re-allocate that block
376 + * 3) we work on bitmaps and '+' actually means 'set bits'. if on-disk has
377 + * bit set and PA claims same block, it's OK. IOW, one can set bit in
378 + * on-disk bitmap if buddy has the same bit set and/or PA covers the corresponding
381 + * so, now we're building a concurrency table:
384 + * blocks for PA are allocated in the buddy, buddy must be referenced
385 + * until PA is linked to allocation group to avoid concurrent buddy init
387 + * we need to make sure that either on-disk bitmap or PA has uptodate data
388 + * given (3) we care that PA-=N operation doesn't interfere with init
389 + * - discard inode PA
390 + * the simplest way would be to have buddy initialized by the discard
391 + * - use locality group PA
392 + * again PA-=N must be serialized with init
393 + * - discard locality group PA
394 + * the simplest way would be to have buddy initialized by the discard
397 + * i_truncate_mutex serializes them
398 + * - discard inode PA
399 + * discard process must wait until PA isn't used by another process
400 + * - use locality group PA
401 + * some mutex should serialize them
402 + * - discard locality group PA
403 + * discard process must wait until PA isn't used by another process
406 + * i_truncate_mutex or another mutex should serialize them
407 + * - discard inode PA
408 + * discard process must wait until PA isn't used by another process
409 + * - use locality group PA
410 + * nothing wrong here -- they're different PAs covering different blocks
411 + * - discard locality group PA
412 + * discard process must wait until PA isn't used by another process
414 + * now we're ready to draw a few conclusions:
415 + * - PA is referenced, and while it is, no discard is possible
416 + * - PA is referenced until its blocks are marked in the on-disk bitmap
417 + * - PA changes only after on-disk bitmap
418 + * - discard must not compete with init. either init is done before
419 + * any discard or they're serialized somehow
420 + * - buddy init as sum of on-disk bitmap and PAs is done atomically
422 + * a special case is when we've consumed a PA to emptiness. no need to modify
423 + * the buddy in this case, but we should take care of concurrent init
428 + * Logic in a few words:
433 + * mark bits in on-disk bitmap
436 + * - use preallocation:
437 + * find proper PA (per-inode or group)
439 + * mark bits in on-disk bitmap
445 + * mark bits in on-disk bitmap
448 + * - discard preallocations in group:
450 + * move them onto local list
451 + * load on-disk bitmap
453 + * remove PA from object (inode or locality group)
454 + * mark free blocks in-core
456 + * - discard inode's preallocations:
463 + * - bitlock on a group (group)
464 + * - object (inode/locality) (object)
465 + * - per-pa lock (pa)
472 + * - find and use pa:
475 + * - release consumed pa:
480 + * - generate in-core bitmap:
484 + * - discard all for given object (inode, locality group):
489 + * - discard all for given group:
498 + * with AGGRESSIVE_CHECK allocator runs consistency checks over
499 + * structures. these checks slow things down a lot
501 +#define AGGRESSIVE_CHECK__
504 + * with DOUBLE_CHECK defined mballoc creates persistent in-core
505 + * bitmaps, maintains and uses them to check for double allocations
507 +#define DOUBLE_CHECK__
513 +#define mb_debug(fmt,a...) printk(fmt, ##a)
515 +#define mb_debug(fmt,a...)
519 + * with EXT3_MB_HISTORY mballoc stores last N allocations in memory
520 + * and you can monitor it in /proc/fs/ext3/<dev>/mb_history
522 +#define EXT3_MB_HISTORY
523 +#define EXT3_MB_HISTORY_ALLOC 1 /* allocation */
524 +#define EXT3_MB_HISTORY_PREALLOC 2 /* preallocated blocks used */
525 +#define EXT3_MB_HISTORY_DISCARD 4 /* preallocation discarded */
526 +#define EXT3_MB_HISTORY_FREE 8 /* free */
528 +#define EXT3_MB_HISTORY_DEFAULT (EXT3_MB_HISTORY_ALLOC | \
529 + EXT3_MB_HISTORY_PREALLOC | \
530 + EXT3_MB_HISTORY_DISCARD | \
531 + EXT3_MB_HISTORY_FREE)
534 + * How long mballoc can look for a best extent (in found extents)
536 +#define MB_DEFAULT_MAX_TO_SCAN 200
539 + * How long mballoc must look for a best extent
541 +#define MB_DEFAULT_MIN_TO_SCAN 10
544 + * How many groups mballoc will scan looking for the best chunk
546 +#define MB_DEFAULT_MAX_GROUPS_TO_SCAN 5
549 + * with 'ext3_mb_stats' the allocator will collect stats that will be
550 + * shown at umount. Collecting them has a cost, though!
552 +#define MB_DEFAULT_STATS 1
555 + * files smaller than MB_DEFAULT_STREAM_THRESHOLD are served
556 + * by the stream allocator, whose purpose is to pack requests
557 + * as close to each other as possible to produce smooth I/O traffic
559 +#define MB_DEFAULT_STREAM_THRESHOLD 16 /* 64K */
562 + * for which requests use 2^N search using buddies
564 +#define MB_DEFAULT_ORDER2_REQS 8
567 + * default stripe size = 1MB
569 +#define MB_DEFAULT_STRIPE 256
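Both defaults assume 4KB filesystem blocks: MB_DEFAULT_STREAM_THRESHOLD of 16 blocks is 16 * 4KB = 64KB, and MB_DEFAULT_STRIPE of 256 blocks is 256 * 4KB = 1MB, matching the comments above.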
571 +static kmem_cache_t *ext3_pspace_cachep = NULL;
573 +#ifdef EXT3_BB_MAX_BLOCKS
574 +#undef EXT3_BB_MAX_BLOCKS
576 +#define EXT3_BB_MAX_BLOCKS 30
578 +struct ext3_free_metadata {
579 + unsigned short group;
580 + unsigned short num;
581 + unsigned short blocks[EXT3_BB_MAX_BLOCKS];
582 + struct list_head list;
585 +struct ext3_group_info {
586 + unsigned long bb_state;
587 + unsigned long bb_tid;
588 + struct ext3_free_metadata *bb_md_cur;
589 + unsigned short bb_first_free;
590 + unsigned short bb_free;
591 + unsigned short bb_fragments;
592 + struct list_head bb_prealloc_list;
596 + unsigned short bb_counters[];
599 +#define EXT3_GROUP_INFO_NEED_INIT_BIT 0
600 +#define EXT3_GROUP_INFO_LOCKED_BIT 1
602 +#define EXT3_MB_GRP_NEED_INIT(grp) \
603 + (test_bit(EXT3_GROUP_INFO_NEED_INIT_BIT, &(grp)->bb_state))
606 +struct ext3_prealloc_space {
607 + struct list_head pa_inode_list;
608 + struct list_head pa_group_list;
610 + struct list_head pa_tmp_list;
611 + struct rcu_head pa_rcu;
613 + spinlock_t pa_lock;
615 + unsigned pa_deleted;
616 + unsigned long pa_pstart; /* phys. block */
617 + unsigned long pa_lstart; /* log. block */
618 + unsigned short pa_len; /* len of preallocated chunk */
619 + unsigned short pa_free; /* how many blocks are free */
620 + unsigned short pa_linear; /* consumed in one direction
621 + * strictly, for group prealloc */
622 + spinlock_t *pa_obj_lock;
623 + struct inode *pa_inode; /* hack, for history only */
627 +struct ext3_free_extent {
628 + unsigned long fe_logical;
629 + unsigned long fe_start;
630 + unsigned long fe_group;
631 + unsigned long fe_len;
636 + * we try to group all related changes together
637 + * so that writeback can flush/allocate them together as well
639 +struct ext3_locality_group {
640 + /* for allocator */
641 + struct semaphore lg_sem; /* to serialize allocations */
642 + struct list_head lg_prealloc_list;/* list of preallocations */
643 + spinlock_t lg_prealloc_lock;
646 +struct ext3_allocation_context {
647 + struct inode *ac_inode;
648 + struct super_block *ac_sb;
650 + /* original request */
651 + struct ext3_free_extent ac_o_ex;
653 + /* goal request (after normalization) */
654 + struct ext3_free_extent ac_g_ex;
656 + /* the best found extent */
657 + struct ext3_free_extent ac_b_ex;
659 + /* copy of the best found extent taken before preallocation efforts */
660 + struct ext3_free_extent ac_f_ex;
662 + /* number of iterations done. we have to track it to limit searching */
663 + unsigned long ac_ex_scanned;
664 + __u16 ac_groups_scanned;
668 + __u16 ac_flags; /* allocation hints */
672 + __u8 ac_2order; /* if request is to allocate 2^N blocks and
673 + * N > 0, the field stores N, otherwise 0 */
674 + __u8 ac_op; /* operation, for history only */
675 + struct page *ac_bitmap_page;
676 + struct page *ac_buddy_page;
677 + struct ext3_prealloc_space *ac_pa;
678 + struct ext3_locality_group *ac_lg;
681 +#define AC_STATUS_CONTINUE 1
682 +#define AC_STATUS_FOUND 2
683 +#define AC_STATUS_BREAK 3
685 +struct ext3_mb_history {
686 + struct ext3_free_extent orig; /* orig allocation */
687 + struct ext3_free_extent goal; /* goal allocation */
688 + struct ext3_free_extent result; /* result allocation */
691 + __u16 found; /* how many extents have been found */
692 + __u16 groups; /* how many groups have been scanned */
693 + __u16 tail; /* what tail broke some buddy */
694 + __u16 buddy; /* buddy the tail ^^^ broke */
696 + __u8 cr:3; /* which phase the result extent was found at */
702 + struct page *bd_buddy_page;
704 + struct page *bd_bitmap_page;
706 + struct ext3_group_info *bd_info;
707 + struct super_block *bd_sb;
711 +#define EXT3_MB_BITMAP(e3b) ((e3b)->bd_bitmap)
712 +#define EXT3_MB_BUDDY(e3b) ((e3b)->bd_buddy)
714 +#ifndef EXT3_MB_HISTORY
715 +#define ext3_mb_store_history(ac)
717 +static void ext3_mb_store_history(struct ext3_allocation_context *ac);
720 +#define in_range(b, first, len) ((b) >= (first) && (b) <= (first) + (len) - 1)
722 +int ext3_create (struct inode *, struct dentry *, int, struct nameidata *);
723 +struct buffer_head * read_block_bitmap(struct super_block *, unsigned int);
724 +unsigned long ext3_new_blocks_old(handle_t *handle, struct inode *inode,
725 + unsigned long goal, unsigned long *count, int *errp);
726 +void ext3_mb_release_blocks(struct super_block *, int);
727 +void ext3_mb_poll_new_transaction(struct super_block *, handle_t *);
728 +void ext3_mb_free_committed_blocks(struct super_block *);
729 +void ext3_mb_generate_from_pa(struct super_block *sb, void *bitmap, int group);
730 +void ext3_mb_free_consumed_preallocations(struct ext3_allocation_context *ac);
731 +void ext3_mb_return_to_preallocation(struct inode *inode, struct ext3_buddy *e3b,
732 + sector_t block, int count);
733 +void ext3_mb_show_ac(struct ext3_allocation_context *ac);
734 +void ext3_mb_check_with_pa(struct ext3_buddy *e3b, int first, int count);
735 +void ext3_mb_put_pa(struct ext3_allocation_context *, struct super_block *, struct ext3_prealloc_space *pa);
736 +int ext3_mb_init_per_dev_proc(struct super_block *sb);
737 +int ext3_mb_destroy_per_dev_proc(struct super_block *sb);
740 + * Calculate the block group number and offset, given a block number
742 +static void ext3_get_group_no_and_offset(struct super_block *sb,
743 + unsigned long blocknr,
744 + unsigned long *blockgrpp,
745 + unsigned long *offsetp)
747 + struct ext3_super_block *es = EXT3_SB(sb)->s_es;
748 + unsigned long offset;
750 + blocknr = blocknr - le32_to_cpu(es->s_first_data_block);
751 + offset = blocknr % EXT3_BLOCKS_PER_GROUP(sb);
752 + blocknr = blocknr / EXT3_BLOCKS_PER_GROUP(sb);
756 + *blockgrpp = blocknr;
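A worked example of the arithmetic above, with hypothetical numbers: for blocknr = 100000 on a filesystem with s_first_data_block = 1 and EXT3_BLOCKS_PER_GROUP(sb) = 32768, the relative block is 99999, giving *offsetp = 99999 % 32768 = 1695 and *blockgrpp = 99999 / 32768 = 3.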
761 +ext3_lock_group(struct super_block *sb, int group)
763 + bit_spin_lock(EXT3_GROUP_INFO_LOCKED_BIT,
764 + &EXT3_GROUP_INFO(sb, group)->bb_state);
768 +ext3_unlock_group(struct super_block *sb, int group)
770 + bit_spin_unlock(EXT3_GROUP_INFO_LOCKED_BIT,
771 + &EXT3_GROUP_INFO(sb, group)->bb_state);
775 +ext3_is_group_locked(struct super_block *sb, int group)
777 + return bit_spin_is_locked(EXT3_GROUP_INFO_LOCKED_BIT,
778 + &EXT3_GROUP_INFO(sb, group)->bb_state);
781 +unsigned long ext3_grp_offs_to_block(struct super_block *sb,
782 + struct ext3_free_extent *fex)
784 + unsigned long block;
786 + block = (unsigned long) fex->fe_group * EXT3_BLOCKS_PER_GROUP(sb)
788 + + le32_to_cpu(EXT3_SB(sb)->s_es->s_first_data_block);
792 +#if BITS_PER_LONG == 64
793 +#define mb_correct_addr_and_bit(bit,addr) \
795 + bit += ((unsigned long) addr & 7UL) << 3; \
796 + addr = (void *) ((unsigned long) addr & ~7UL); \
798 +#elif BITS_PER_LONG == 32
799 +#define mb_correct_addr_and_bit(bit,addr) \
801 + bit += ((unsigned long) addr & 3UL) << 3; \
802 + addr = (void *) ((unsigned long) addr & ~3UL); \
805 +#error "unsupported BITS_PER_LONG"
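To see what the 64-bit variant does, take a hypothetical addr ending in ...1003 (3 bytes past an 8-byte boundary) and bit = 5: the macro yields bit = 5 + (3 << 3) = 29 and addr = ...1000, so the ext2 bit helpers below receive an aligned base while addressing the same absolute bit.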
808 +static inline int mb_test_bit(int bit, void *addr)
810 + mb_correct_addr_and_bit(bit,addr);
811 + return ext2_test_bit(bit, addr);
814 +static inline void mb_set_bit(int bit, void *addr)
816 + mb_correct_addr_and_bit(bit,addr);
817 + ext2_set_bit(bit, addr);
820 +static inline void mb_set_bit_atomic(spinlock_t *lock, int bit, void *addr)
822 + mb_correct_addr_and_bit(bit,addr);
823 + ext2_set_bit_atomic(lock, bit, addr);
826 +static inline void mb_clear_bit(int bit, void *addr)
828 + mb_correct_addr_and_bit(bit,addr);
829 + ext2_clear_bit(bit, addr);
832 +static inline void mb_clear_bit_atomic(spinlock_t *lock, int bit, void *addr)
834 + mb_correct_addr_and_bit(bit,addr);
835 + ext2_clear_bit_atomic(lock, bit, addr);
838 +static inline int mb_find_next_zero_bit(void *addr, int max, int start)
841 +#if BITS_PER_LONG == 64
842 + fix = ((unsigned long) addr & 7UL) << 3;
843 + addr = (void *) ((unsigned long) addr & ~7UL);
844 +#elif BITS_PER_LONG == 32
845 + fix = ((unsigned long) addr & 3UL) << 3;
846 + addr = (void *) ((unsigned long) addr & ~3UL);
848 +#error "unsupported BITS_PER_LONG"
852 + return ext2_find_next_zero_bit(addr, max, start) - fix;
855 +static inline int mb_find_next_bit(void *addr, int max, int start)
858 +#if BITS_PER_LONG == 64
859 + fix = ((unsigned long) addr & 7UL) << 3;
860 + addr = (void *) ((unsigned long) addr & ~7UL);
861 +#elif BITS_PER_LONG == 32
862 + fix = ((unsigned long) addr & 3UL) << 3;
863 + addr = (void *) ((unsigned long) addr & ~3UL);
865 +#error "unsupported BITS_PER_LONG"
872 + return find_next_bit(addr, max, start) - fix;
876 +static inline void *mb_find_buddy(struct ext3_buddy *e3b, int order, int *max)
880 + BUG_ON(EXT3_MB_BITMAP(e3b) == EXT3_MB_BUDDY(e3b));
881 + BUG_ON(max == NULL);
883 + if (order > e3b->bd_blkbits + 1) {
888 + /* at order 0 we see each particular block */
889 + *max = 1 << (e3b->bd_blkbits + 3);
891 + return EXT3_MB_BITMAP(e3b);
893 + bb = EXT3_MB_BUDDY(e3b) + EXT3_SB(e3b->bd_sb)->s_mb_offsets[order];
894 + *max = EXT3_SB(e3b->bd_sb)->s_mb_maxs[order];
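Assuming the usual mballoc layout (the s_mb_offsets/s_mb_maxs initialization is not shown in this hunk) and 4KB blocks, i.e. bd_blkbits = 12: order 0 is served by the bitmap block itself, 1 << (12 + 3) = 32768 bits, while the buddy block packs orders 1, 2, ... back to back, s_mb_offsets[order] being the byte offset of each order's sub-bitmap and s_mb_maxs[order] its size in bits (16384 at order 1, 8192 at order 2, and so on).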
900 +void mb_free_blocks_double(struct inode *inode, struct ext3_buddy *e3b,
901 + int first, int count)
904 + struct super_block *sb = e3b->bd_sb;
906 + if (unlikely(e3b->bd_info->bb_bitmap == NULL))
908 + BUG_ON(!ext3_is_group_locked(sb, e3b->bd_group));
909 + for (i = 0; i < count; i++) {
910 + if (!mb_test_bit(first + i, e3b->bd_info->bb_bitmap)) {
911 + unsigned long blocknr;
912 + blocknr = e3b->bd_group * EXT3_BLOCKS_PER_GROUP(sb);
913 + blocknr += first + i;
915 + le32_to_cpu(EXT3_SB(sb)->s_es->s_first_data_block);
917 + ext3_error(sb, __FUNCTION__, "double-free of inode"
918 + " %lu's block %lu(bit %u in group %u)\n",
919 + inode ? inode->i_ino : 0, blocknr,
920 + first + i, e3b->bd_group);
922 + mb_clear_bit(first + i, e3b->bd_info->bb_bitmap);
926 +void mb_mark_used_double(struct ext3_buddy *e3b, int first, int count)
929 + if (unlikely(e3b->bd_info->bb_bitmap == NULL))
931 + BUG_ON(!ext3_is_group_locked(e3b->bd_sb, e3b->bd_group));
932 + for (i = 0; i < count; i++) {
933 + BUG_ON(mb_test_bit(first + i, e3b->bd_info->bb_bitmap));
934 + mb_set_bit(first + i, e3b->bd_info->bb_bitmap);
938 +void mb_cmp_bitmaps(struct ext3_buddy *e3b, void *bitmap)
940 + if (memcmp(e3b->bd_info->bb_bitmap, bitmap, e3b->bd_sb->s_blocksize)) {
941 + unsigned char *b1, *b2;
943 + b1 = (unsigned char *) e3b->bd_info->bb_bitmap;
944 + b2 = (unsigned char *) bitmap;
945 + for (i = 0; i < e3b->bd_sb->s_blocksize; i++) {
946 + if (b1[i] != b2[i]) {
947 + printk("corruption in group %u at byte %u(%u): "
948 + "%x in copy != %x on disk/prealloc\n",
949 + e3b->bd_group, i, i * 8, b1[i], b2[i]);
957 +#define mb_free_blocks_double(a,b,c,d)
958 +#define mb_mark_used_double(a,b,c)
959 +#define mb_cmp_bitmaps(a,b)
962 +#ifdef AGGRESSIVE_CHECK
964 +#define MB_CHECK_ASSERT(assert) \
967 + printk (KERN_EMERG \
968 + "Assertion failure in %s() at %s:%d: \"%s\"\n", \
969 + function, file, line, # assert); \
974 +static int __mb_check_buddy(struct ext3_buddy *e3b, char *file,
975 + const char *function, int line)
977 + struct super_block *sb = e3b->bd_sb;
978 + int order = e3b->bd_blkbits + 1;
979 + int max, max2, i, j, k, count;
980 + struct ext3_group_info *grp;
981 + int fragments = 0, fstart;
982 + struct list_head *cur;
983 + void *buddy, *buddy2;
985 + if (!test_opt(sb, MBALLOC))
989 + static int mb_check_counter = 0;
990 + if (mb_check_counter++ % 100 != 0)
994 + while (order > 1) {
995 + buddy = mb_find_buddy(e3b, order, &max);
996 + MB_CHECK_ASSERT(buddy);
997 + buddy2 = mb_find_buddy(e3b, order - 1, &max2);
998 + MB_CHECK_ASSERT(buddy2);
999 + MB_CHECK_ASSERT(buddy != buddy2);
1000 + MB_CHECK_ASSERT(max * 2 == max2);
1003 + for (i = 0; i < max; i++) {
1005 + if (mb_test_bit(i, buddy)) {
1006 + /* only single bit in buddy2 may be 1 */
1007 + if (!mb_test_bit(i << 1, buddy2))
1008 + MB_CHECK_ASSERT(mb_test_bit((i<<1)+1, buddy2));
1009 + else if (!mb_test_bit((i << 1) + 1, buddy2))
1010 + MB_CHECK_ASSERT(mb_test_bit(i << 1, buddy2));
1014 + /* both bits in buddy2 must be 1 */
1015 + MB_CHECK_ASSERT(mb_test_bit(i << 1, buddy2));
1016 + MB_CHECK_ASSERT(mb_test_bit((i << 1) + 1, buddy2));
1018 + for (j = 0; j < (1 << order); j++) {
1019 + k = (i * (1 << order)) + j;
1020 + MB_CHECK_ASSERT(!mb_test_bit(k, EXT3_MB_BITMAP(e3b)));
1024 + MB_CHECK_ASSERT(e3b->bd_info->bb_counters[order] == count);
1029 + buddy = mb_find_buddy(e3b, 0, &max);
1030 + for (i = 0; i < max; i++) {
1031 + if (!mb_test_bit(i, buddy)) {
1032 + MB_CHECK_ASSERT(i >= e3b->bd_info->bb_first_free);
1033 + if (fstart == -1) {
1040 + /* check used bits only */
1041 + for (j = 0; j < e3b->bd_blkbits + 1; j++) {
1042 + buddy2 = mb_find_buddy(e3b, j, &max2);
1044 + MB_CHECK_ASSERT(k < max2);
1045 + MB_CHECK_ASSERT(mb_test_bit(k, buddy2));
1048 + MB_CHECK_ASSERT(!EXT3_MB_GRP_NEED_INIT(e3b->bd_info));
1049 + MB_CHECK_ASSERT(e3b->bd_info->bb_fragments == fragments);
1051 + grp = EXT3_GROUP_INFO(sb, e3b->bd_group);
1052 + buddy = mb_find_buddy(e3b, 0, &max);
1053 + list_for_each(cur, &grp->bb_prealloc_list) {
1054 + unsigned long groupnr, off;
1055 + struct ext3_prealloc_space *pa;
1056 + pa = list_entry(cur, struct ext3_prealloc_space, pa_group_list);
1057 + ext3_get_group_no_and_offset(sb, pa->pa_pstart, &groupnr, &off);
1058 + MB_CHECK_ASSERT(groupnr == e3b->bd_group);
1059 + for (i = 0; i < pa->pa_len; i++)
1060 + MB_CHECK_ASSERT(mb_test_bit(off + i, buddy));
1064 +#undef MB_CHECK_ASSERT
1065 +#define mb_check_buddy(e3b) __mb_check_buddy(e3b,__FILE__,__FUNCTION__,__LINE__)
1067 +#define mb_check_buddy(e3b)
1070 +/* find most significant bit */
1071 +static inline int fmsb(unsigned short word)
1085 + } while (word != 0);
1091 +ext3_mb_mark_free_simple(struct super_block *sb, void *buddy, unsigned first,
1092 + int len, struct ext3_group_info *grp)
1094 + struct ext3_sb_info *sbi = EXT3_SB(sb);
1095 + unsigned short min, max, chunk, border;
1097 + BUG_ON(len >= EXT3_BLOCKS_PER_GROUP(sb));
1099 + border = 2 << sb->s_blocksize_bits;
1102 + /* find how many blocks can be covered from this position */
1103 + max = ffs(first | border) - 1;
1105 + /* find how many blocks of power 2 we need to mark */
1112 + /* mark multiblock chunks only */
1113 + grp->bb_counters[min]++;
1115 + mb_clear_bit(first >> min, buddy + sbi->s_mb_offsets[min]);
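A worked pass of the loop above, with hypothetical inputs first = 12 and len = 7: ffs(12 | border) - 1 = 2 caps the chunk order by alignment, and the length cap derived from fmsb() (its body is elided here) picks the largest power of two not exceeding 7, also order 2; so min = 2, bit 12 >> 2 = 3 is cleared in the order-2 buddy, bb_counters[2] is bumped, and the loop continues with first = 16, len = 3.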
1123 +ext3_mb_generate_buddy(struct super_block *sb, void *buddy, void *bitmap,
1126 + struct ext3_group_info *grp = EXT3_GROUP_INFO(sb, group);
1127 + unsigned short max = EXT3_BLOCKS_PER_GROUP(sb);
1128 + unsigned short i = 0, first, len;
1129 + unsigned free = 0, fragments = 0;
1130 + unsigned long long period = get_cycles();
1132 + /* initialize buddy from the bitmap, which is an aggregation
1133 + * of the on-disk bitmap and preallocations */
1134 + i = mb_find_next_zero_bit(bitmap, max, 0);
1135 + grp->bb_first_free = i;
1139 + i = ext2_find_next_le_bit(bitmap, max, i);
1143 + ext3_mb_mark_free_simple(sb, buddy, first, len, grp);
1145 + grp->bb_counters[0]++;
1147 + i = mb_find_next_zero_bit(bitmap, max, i);
1149 + grp->bb_fragments = fragments;
1151 + if (free != grp->bb_free) {
1152 + printk("EXT3-fs: group %u: %u blocks in bitmap, %u in gd\n",
1153 + group, free, grp->bb_free);
1154 + grp->bb_free = free;
1157 + clear_bit(EXT3_GROUP_INFO_NEED_INIT_BIT, &grp->bb_state);
1159 + period = get_cycles() - period;
1160 + spin_lock(&EXT3_SB(sb)->s_bal_lock);
1161 + EXT3_SB(sb)->s_mb_buddies_generated++;
1162 + EXT3_SB(sb)->s_mb_generation_time += period;
1163 + spin_unlock(&EXT3_SB(sb)->s_bal_lock);
1166 +static int ext3_mb_init_cache(struct page *page, char *incore)
1168 + int blocksize, blocks_per_page, groups_per_page;
1169 + int err = 0, i, first_group, first_block;
1170 + struct super_block *sb;
1171 + struct buffer_head *bhs;
1172 + struct buffer_head **bh;
1173 + struct inode *inode;
1174 + char *data, *bitmap;
1176 + mb_debug("init page %lu\n", page->index);
1178 + inode = page->mapping->host;
1180 + blocksize = 1 << inode->i_blkbits;
1181 + blocks_per_page = PAGE_CACHE_SIZE / blocksize;
1183 + groups_per_page = blocks_per_page >> 1;
1184 + if (groups_per_page == 0)
1185 + groups_per_page = 1;
1187 + /* allocate buffer_heads to read bitmaps */
1188 + if (groups_per_page > 1) {
1190 + i = sizeof(struct buffer_head *) * groups_per_page;
1191 + bh = kmalloc(i, GFP_NOFS);
1198 + first_group = page->index * blocks_per_page / 2;
1200 + /* read all groups the page covers into the cache */
1201 + for (i = 0; i < groups_per_page; i++) {
1202 + struct ext3_group_desc * desc;
1204 + if (first_group + i >= EXT3_SB(sb)->s_groups_count)
1208 + desc = ext3_get_group_desc(sb, first_group + i, NULL);
1213 + bh[i] = sb_getblk(sb, le32_to_cpu(desc->bg_block_bitmap));
1214 + if (bh[i] == NULL)
1217 + if (buffer_uptodate(bh[i]))
1220 + lock_buffer(bh[i]);
1221 + if (buffer_uptodate(bh[i])) {
1222 + unlock_buffer(bh[i]);
1227 + bh[i]->b_end_io = end_buffer_read_sync;
1228 + submit_bh(READ, bh[i]);
1229 + mb_debug("read bitmap for group %u\n", first_group + i);
1232 + /* wait for I/O completion */
1233 + for (i = 0; i < groups_per_page && bh[i]; i++)
1234 + wait_on_buffer(bh[i]);
1237 + for (i = 0; i < groups_per_page && bh[i]; i++)
1238 + if (!buffer_uptodate(bh[i]))
1241 + first_block = page->index * blocks_per_page;
1242 + for (i = 0; i < blocks_per_page; i++) {
1245 + group = (first_block + i) >> 1;
1246 + if (group >= EXT3_SB(sb)->s_groups_count)
1249 + data = page_address(page) + (i * blocksize);
1250 + bitmap = bh[group - first_group]->b_data;
1252 + if ((first_block + i) & 1) {
1253 + /* this is block of buddy */
1254 + BUG_ON(incore == NULL);
1255 + mb_debug("put buddy for group %u in page %lu/%x\n",
1256 + group, page->index, i * blocksize);
1257 + memset(data, 0xff, blocksize);
1258 + EXT3_GROUP_INFO(sb, group)->bb_fragments = 0;
1259 + memset(EXT3_GROUP_INFO(sb, group)->bb_counters, 0,
1260 + sizeof(unsigned short)*(sb->s_blocksize_bits+2));
1261 + ext3_mb_generate_buddy(sb, data, incore, group);
1264 + /* this is block of bitmap */
1265 + BUG_ON(incore != NULL);
1266 + mb_debug("put bitmap for group %u in page %lu/%x\n",
1267 + group, page->index, i * blocksize);
1269 + /* see comments in ext3_mb_put_pa() */
1270 + ext3_lock_group(sb, group);
1271 + memcpy(data, bitmap, blocksize);
1273 + /* mark all preallocated blocks used in in-core bitmap */
1274 + ext3_mb_generate_from_pa(sb, data, group);
1275 + ext3_unlock_group(sb, group);
1280 + SetPageUptodate(page);
1284 + for (i = 0; i < groups_per_page && bh[i]; i++)
1292 +static int ext3_mb_load_buddy(struct super_block *sb, int group,
1293 + struct ext3_buddy *e3b)
1295 + struct ext3_sb_info *sbi = EXT3_SB(sb);
1296 + struct inode *inode = sbi->s_buddy_cache;
1297 + int blocks_per_page, block, pnum, poff;
1298 + struct page *page;
1300 + mb_debug("load group %u\n", group);
1302 + blocks_per_page = PAGE_CACHE_SIZE / sb->s_blocksize;
1304 + e3b->bd_blkbits = sb->s_blocksize_bits;
1305 + e3b->bd_info = EXT3_GROUP_INFO(sb, group);
1307 + e3b->bd_group = group;
1308 + e3b->bd_buddy_page = NULL;
1309 + e3b->bd_bitmap_page = NULL;
1311 + block = group * 2;
1312 + pnum = block / blocks_per_page;
1313 + poff = block % blocks_per_page;
1315 + /* we could use find_or_create_page(), but it locks the page,
1316 + * which we'd like to avoid in the fast path ... */
1317 + page = find_get_page(inode->i_mapping, pnum);
1318 + if (page == NULL || !PageUptodate(page)) {
1320 + page_cache_release(page);
1321 + page = find_or_create_page(inode->i_mapping, pnum, GFP_NOFS);
1323 + BUG_ON(page->mapping != inode->i_mapping);
1324 + if (!PageUptodate(page)) {
1325 + ext3_mb_init_cache(page, NULL);
1326 + mb_cmp_bitmaps(e3b, page_address(page) +
1327 + (poff * sb->s_blocksize));
1329 + unlock_page(page);
1332 + if (page == NULL || !PageUptodate(page))
1334 + e3b->bd_bitmap_page = page;
1335 + e3b->bd_bitmap = page_address(page) + (poff * sb->s_blocksize);
1336 + mark_page_accessed(page);
1339 + pnum = block / blocks_per_page;
1340 + poff = block % blocks_per_page;
1342 + page = find_get_page(inode->i_mapping, pnum);
1343 + if (page == NULL || !PageUptodate(page)) {
1345 + page_cache_release(page);
1346 + page = find_or_create_page(inode->i_mapping, pnum, GFP_NOFS);
1348 + BUG_ON(page->mapping != inode->i_mapping);
1349 + if (!PageUptodate(page))
1350 + ext3_mb_init_cache(page, e3b->bd_bitmap);
1352 + unlock_page(page);
1355 + if (page == NULL || !PageUptodate(page))
1357 + e3b->bd_buddy_page = page;
1358 + e3b->bd_buddy = page_address(page) + (poff * sb->s_blocksize);
1359 + mark_page_accessed(page);
1361 + BUG_ON(e3b->bd_bitmap_page == NULL);
1362 + BUG_ON(e3b->bd_buddy_page == NULL);
1367 + if (e3b->bd_bitmap_page)
1368 + page_cache_release(e3b->bd_bitmap_page);
1369 + if (e3b->bd_buddy_page)
1370 + page_cache_release(e3b->bd_buddy_page);
1371 + e3b->bd_buddy = NULL;
1372 + e3b->bd_bitmap = NULL;
1376 +static void ext3_mb_release_desc(struct ext3_buddy *e3b)
1378 + if (e3b->bd_bitmap_page)
1379 + page_cache_release(e3b->bd_bitmap_page);
1380 + if (e3b->bd_buddy_page)
1381 + page_cache_release(e3b->bd_buddy_page);
1385 +static int mb_find_order_for_block(struct ext3_buddy *e3b, int block)
1390 + BUG_ON(EXT3_MB_BITMAP(e3b) == EXT3_MB_BUDDY(e3b));
1391 + BUG_ON(block >= (1 << (e3b->bd_blkbits + 3)));
1393 + bb = EXT3_MB_BUDDY(e3b);
1394 + while (order <= e3b->bd_blkbits + 1) {
1395 + block = block >> 1;
1396 + if (!mb_test_bit(block, bb)) {
1397 + /* this block is part of buddy of order 'order' */
1400 + bb += 1 << (e3b->bd_blkbits - order);
1406 +static inline void mb_clear_bits(spinlock_t *lock, void *bm, int cur, int len)
1411 + while (cur < len) {
1412 + if ((cur & 31) == 0 && (len - cur) >= 32) {
1413 + /* fast path: clear whole word at once */
1414 + addr = bm + (cur >> 3);
1419 + mb_clear_bit_atomic(lock, cur, bm);
1424 +static inline void mb_set_bits(spinlock_t *lock, void *bm, int cur, int len)
1429 + while (cur < len) {
1430 + if ((cur & 31) == 0 && (len - cur) >= 32) {
1431 + /* fast path: set whole word at once */
1432 + addr = bm + (cur >> 3);
1433 + *addr = 0xffffffff;
1437 + mb_set_bit_atomic(lock, cur, bm);
1442 +static int mb_free_blocks(struct inode *inode, struct ext3_buddy *e3b,
1443 + int first, int count)
1445 + int block = 0, max = 0, order;
1446 + void *buddy, *buddy2;
1447 + struct super_block *sb = e3b->bd_sb;
1449 + BUG_ON(first + count > (sb->s_blocksize << 3));
1450 + BUG_ON(!ext3_is_group_locked(sb, e3b->bd_group));
1451 + mb_check_buddy(e3b);
1452 + mb_free_blocks_double(inode, e3b, first, count);
1454 + e3b->bd_info->bb_free += count;
1455 + if (first < e3b->bd_info->bb_first_free)
1456 + e3b->bd_info->bb_first_free = first;
1458 + /* let's maintain fragments counter */
1460 + block = !mb_test_bit(first - 1, EXT3_MB_BITMAP(e3b));
1461 + if (first + count < EXT3_SB(sb)->s_mb_maxs[0])
1462 + max = !mb_test_bit(first + count, EXT3_MB_BITMAP(e3b));
1464 + e3b->bd_info->bb_fragments--;
1465 + else if (!block && !max)
1466 + e3b->bd_info->bb_fragments++;
1468 + /* let's maintain buddy itself */
1469 + while (count-- > 0) {
1473 + if (!mb_test_bit(block, EXT3_MB_BITMAP(e3b))) {
1474 + unsigned long blocknr;
1475 + blocknr = e3b->bd_group * EXT3_BLOCKS_PER_GROUP(sb);
1478 + le32_to_cpu(EXT3_SB(sb)->s_es->s_first_data_block);
1480 + ext3_error(sb, __FUNCTION__, "double-free of inode"
1481 + " %lu's block %lu(bit %u in group %u)\n",
1482 + inode ? inode->i_ino : 0, blocknr, block,
1485 + mb_clear_bit(block, EXT3_MB_BITMAP(e3b));
1486 + e3b->bd_info->bb_counters[order]++;
1488 + /* start of the buddy */
1489 + buddy = mb_find_buddy(e3b, order, &max);
1493 + if (mb_test_bit(block, buddy) ||
1494 + mb_test_bit(block + 1, buddy))
1497 + /* both the buddies are free, try to coalesce them */
1498 + buddy2 = mb_find_buddy(e3b, order + 1, &max);
1504 + /* for special purposes, we don't set
1505 + * free bits in bitmap */
1506 + mb_set_bit(block, buddy);
1507 + mb_set_bit(block + 1, buddy);
1509 + e3b->bd_info->bb_counters[order]--;
1510 + e3b->bd_info->bb_counters[order]--;
1512 + block = block >> 1;
1514 + e3b->bd_info->bb_counters[order]++;
1516 + mb_clear_bit(block, buddy2);
1520 + mb_check_buddy(e3b);
1525 +static int mb_find_extent(struct ext3_buddy *e3b, int order, int block,
1526 + int needed, struct ext3_free_extent *ex)
1528 + int next = block, max, ord;
1531 + BUG_ON(!ext3_is_group_locked(e3b->bd_sb, e3b->bd_group));
1532 + BUG_ON(ex == NULL);
1534 + buddy = mb_find_buddy(e3b, order, &max);
1535 + BUG_ON(buddy == NULL);
1536 + BUG_ON(block >= max);
1537 + if (mb_test_bit(block, buddy)) {
1544 + if (likely(order == 0)) {
1545 + /* find actual order */
1546 + order = mb_find_order_for_block(e3b, block);
1547 + block = block >> order;
1550 + ex->fe_len = 1 << order;
1551 + ex->fe_start = block << order;
1552 + ex->fe_group = e3b->bd_group;
1554 + /* calc difference from given start */
1555 + next = next - ex->fe_start;
1556 + ex->fe_len -= next;
1557 + ex->fe_start += next;
1559 + while (needed > ex->fe_len && (buddy = mb_find_buddy(e3b, order, &max))) {
1561 + if (block + 1 >= max)
1564 + next = (block + 1) * (1 << order);
1565 + if (mb_test_bit(next, EXT3_MB_BITMAP(e3b)))
1568 + ord = mb_find_order_for_block(e3b, next);
1571 + block = next >> order;
1572 + ex->fe_len += 1 << order;
1575 + BUG_ON(ex->fe_start + ex->fe_len > (1 << (e3b->bd_blkbits + 3)));
1576 + return ex->fe_len;
1579 +static int mb_mark_used(struct ext3_buddy *e3b, struct ext3_free_extent *ex)
1581 + int ord, mlen = 0, max = 0, cur;
1582 + int start = ex->fe_start;
1583 + int len = ex->fe_len;
1588 + BUG_ON(start + len > (e3b->bd_sb->s_blocksize << 3));
1589 + BUG_ON(e3b->bd_group != ex->fe_group);
1590 + BUG_ON(!ext3_is_group_locked(e3b->bd_sb, e3b->bd_group));
1591 + mb_check_buddy(e3b);
1592 + mb_mark_used_double(e3b, start, len);
1594 + e3b->bd_info->bb_free -= len;
1595 + if (e3b->bd_info->bb_first_free == start)
1596 + e3b->bd_info->bb_first_free += len;
1598 + /* let's maintain fragments counter */
1600 + mlen = !mb_test_bit(start - 1, EXT3_MB_BITMAP(e3b));
1601 + if (start + len < EXT3_SB(e3b->bd_sb)->s_mb_maxs[0])
1602 + max = !mb_test_bit(start + len, EXT3_MB_BITMAP(e3b));
1604 + e3b->bd_info->bb_fragments++;
1605 + else if (!mlen && !max)
1606 + e3b->bd_info->bb_fragments--;
1608 + /* let's maintain buddy itself */
1610 + ord = mb_find_order_for_block(e3b, start);
1612 + if (((start >> ord) << ord) == start && len >= (1 << ord)) {
1613 + /* the whole chunk may be allocated at once! */
1615 + buddy = mb_find_buddy(e3b, ord, &max);
1616 + BUG_ON((start >> ord) >= max);
1617 + mb_set_bit(start >> ord, buddy);
1618 + e3b->bd_info->bb_counters[ord]--;
1625 + /* store for history */
1627 + ret = len | (ord << 16);
1629 + /* we have to split large buddy */
1631 + buddy = mb_find_buddy(e3b, ord, &max);
1632 + mb_set_bit(start >> ord, buddy);
1633 + e3b->bd_info->bb_counters[ord]--;
1636 + cur = (start >> ord) & ~1U;
1637 + buddy = mb_find_buddy(e3b, ord, &max);
1638 + mb_clear_bit(cur, buddy);
1639 + mb_clear_bit(cur + 1, buddy);
1640 + e3b->bd_info->bb_counters[ord]++;
1641 + e3b->bd_info->bb_counters[ord]++;
1644 + mb_set_bits(sb_bgl_lock(EXT3_SB(e3b->bd_sb), ex->fe_group),
1645 + EXT3_MB_BITMAP(e3b), ex->fe_start, len0);
1646 + mb_check_buddy(e3b);
1652 + * Must be called under group lock!
1654 +static void ext3_mb_use_best_found(struct ext3_allocation_context *ac,
1655 + struct ext3_buddy *e3b)
1657 + unsigned long ret;
1659 + BUG_ON(ac->ac_b_ex.fe_group != e3b->bd_group);
1660 + BUG_ON(ac->ac_status == AC_STATUS_FOUND);
1662 + ac->ac_b_ex.fe_len = min(ac->ac_b_ex.fe_len, ac->ac_g_ex.fe_len);
1663 + ac->ac_b_ex.fe_logical = ac->ac_g_ex.fe_logical;
1664 + ret = mb_mark_used(e3b, &ac->ac_b_ex);
1666 + /* preallocation can change ac_b_ex, thus we store actually
1667 + * allocated blocks for history */
1668 + ac->ac_f_ex = ac->ac_b_ex;
1670 + ac->ac_status = AC_STATUS_FOUND;
1671 + ac->ac_tail = ret & 0xffff;
1672 + ac->ac_buddy = ret >> 16;
1674 + /* XXXXXXX: SUCH A HORRIBLE **CK */
1675 + ac->ac_bitmap_page = e3b->bd_bitmap_page;
1676 + get_page(ac->ac_bitmap_page);
1677 + ac->ac_buddy_page = e3b->bd_buddy_page;
1678 + get_page(ac->ac_buddy_page);
1682 + * regular allocator, for general-purpose allocation
1685 +void ext3_mb_check_limits(struct ext3_allocation_context *ac,
1686 + struct ext3_buddy *e3b,
1689 + struct ext3_sb_info *sbi = EXT3_SB(ac->ac_sb);
1690 + struct ext3_free_extent *bex = &ac->ac_b_ex;
1691 + struct ext3_free_extent *gex = &ac->ac_g_ex;
1692 + struct ext3_free_extent ex;
1696 + * We don't want to scan for a whole year
1698 + if (ac->ac_found > sbi->s_mb_max_to_scan &&
1699 + !(ac->ac_flags & EXT3_MB_HINT_FIRST)) {
1700 + ac->ac_status = AC_STATUS_BREAK;
1705 + * Haven't found good chunk so far, let's continue
1707 + if (bex->fe_len < gex->fe_len)
1710 + if ((finish_group || ac->ac_found > sbi->s_mb_min_to_scan)
1711 + && bex->fe_group == e3b->bd_group) {
1712 + /* recheck chunk's availability - we don't know
1713 + * when it was found (within this lock-unlock
1714 + * period or not) */
1715 + max = mb_find_extent(e3b, 0, bex->fe_start, gex->fe_len, &ex);
1716 + if (max >= gex->fe_len) {
1717 + ext3_mb_use_best_found(ac, e3b);
1724 + * The routine checks whether the found extent is good enough. If it is,
1725 + * then the extent gets marked used and a flag is set in the context
1726 + * to stop scanning. Otherwise, the extent is compared with the
1727 + * previously found extent and, if the new one is better, it's stored
1728 + * in the context. Later, the best found extent will be used if
1729 + * mballoc can't find a good enough extent.
1731 + * FIXME: the real allocation policy is yet to be designed!
1733 +static void ext3_mb_measure_extent(struct ext3_allocation_context *ac,
1734 + struct ext3_free_extent *ex,
1735 + struct ext3_buddy *e3b)
1737 + struct ext3_free_extent *bex = &ac->ac_b_ex;
1738 + struct ext3_free_extent *gex = &ac->ac_g_ex;
1740 + BUG_ON(ex->fe_len <= 0);
1741 + BUG_ON(ex->fe_len >= EXT3_BLOCKS_PER_GROUP(ac->ac_sb));
1742 + BUG_ON(ex->fe_start >= EXT3_BLOCKS_PER_GROUP(ac->ac_sb));
1743 + BUG_ON(ac->ac_status != AC_STATUS_CONTINUE);
1748 + * The special case - take what you catch first
1750 + if (unlikely(ac->ac_flags & EXT3_MB_HINT_FIRST)) {
1752 + ext3_mb_use_best_found(ac, e3b);
1757 + * Let's check whether the chunk is good enough
1759 + if (ex->fe_len == gex->fe_len) {
1761 + ext3_mb_use_best_found(ac, e3b);
1766 + * If this is the first found extent, just store it in the context
1768 + if (bex->fe_len == 0) {
1774 + * If the new found extent is better, store it in the context
1776 + if (bex->fe_len < gex->fe_len) {
1777 + /* if the request isn't satisfied, any found extent
1778 + * larger than previous best one is better */
1779 + if (ex->fe_len > bex->fe_len)
1781 + } else if (ex->fe_len > gex->fe_len) {
1782 + /* if the request is satisfied, then we try to find
1783 + * an extent that still satisfies the request, but is
1784 + * smaller than the previous one */
1788 + ext3_mb_check_limits(ac, e3b, 0);
1791 +static int ext3_mb_try_best_found(struct ext3_allocation_context *ac,
1792 + struct ext3_buddy *e3b)
1794 + struct ext3_free_extent ex = ac->ac_b_ex;
1795 + int group = ex.fe_group, max, err;
1797 + BUG_ON(ex.fe_len <= 0);
1798 + err = ext3_mb_load_buddy(ac->ac_sb, group, e3b);
1802 + ext3_lock_group(ac->ac_sb, group);
1803 + max = mb_find_extent(e3b, 0, ex.fe_start, ex.fe_len, &ex);
1807 + ext3_mb_use_best_found(ac, e3b);
1810 + ext3_unlock_group(ac->ac_sb, group);
1811 + ext3_mb_release_desc(e3b);
1816 +static int ext3_mb_find_by_goal(struct ext3_allocation_context *ac,
1817 + struct ext3_buddy *e3b)
1819 + int group = ac->ac_g_ex.fe_group, max, err;
1820 + struct ext3_sb_info *sbi = EXT3_SB(ac->ac_sb);
1821 + struct ext3_super_block *es = sbi->s_es;
1822 + struct ext3_free_extent ex;
1824 + err = ext3_mb_load_buddy(ac->ac_sb, group, e3b);
1828 + ext3_lock_group(ac->ac_sb, group);
1829 + max = mb_find_extent(e3b, 0, ac->ac_g_ex.fe_start,
1830 + ac->ac_g_ex.fe_len, &ex);
1832 + if (max >= ac->ac_g_ex.fe_len && ac->ac_g_ex.fe_len == sbi->s_stripe) {
1833 + unsigned long start;
1834 + start = (e3b->bd_group * EXT3_BLOCKS_PER_GROUP(ac->ac_sb) +
1835 + ex.fe_start + le32_to_cpu(es->s_first_data_block));
1836 + if (start % sbi->s_stripe == 0) {
1839 + ext3_mb_use_best_found(ac, e3b);
1841 + } else if (max >= ac->ac_g_ex.fe_len) {
1842 + BUG_ON(ex.fe_len <= 0);
1843 + BUG_ON(ex.fe_group != ac->ac_g_ex.fe_group);
1844 + BUG_ON(ex.fe_start != ac->ac_g_ex.fe_start);
1847 + ext3_mb_use_best_found(ac, e3b);
1848 + } else if (max > 0 && (ac->ac_flags & EXT3_MB_HINT_MERGE)) {
1849 + /* Sometimes, the caller may want to merge even a small
1850 + * number of blocks into an existing extent */
1851 + BUG_ON(ex.fe_len <= 0);
1852 + BUG_ON(ex.fe_group != ac->ac_g_ex.fe_group);
1853 + BUG_ON(ex.fe_start != ac->ac_g_ex.fe_start);
1856 + ext3_mb_use_best_found(ac, e3b);
1858 + ext3_unlock_group(ac->ac_sb, group);
1859 + ext3_mb_release_desc(e3b);
1865 + * The routine scans buddy structures (not bitmap!) from the given order
1866 + * up to the max order and tries to find a big enough chunk to satisfy the request
1868 +static void ext3_mb_simple_scan_group(struct ext3_allocation_context *ac,
1869 + struct ext3_buddy *e3b)
1871 + struct super_block *sb = ac->ac_sb;
1872 + struct ext3_group_info *grp = e3b->bd_info;
1876 + BUG_ON(ac->ac_2order <= 0);
1877 + for (i = ac->ac_2order; i <= sb->s_blocksize_bits + 1; i++) {
1878 + if (grp->bb_counters[i] == 0)
1881 + buddy = mb_find_buddy(e3b, i, &max);
1882 + BUG_ON(buddy == NULL);
1884 + k = mb_find_next_zero_bit(buddy, max, 0);
1889 + ac->ac_b_ex.fe_len = 1 << i;
1890 + ac->ac_b_ex.fe_start = k << i;
1891 + ac->ac_b_ex.fe_group = e3b->bd_group;
1893 + ext3_mb_use_best_found(ac, e3b);
1895 + BUG_ON(ac->ac_b_ex.fe_len != ac->ac_g_ex.fe_len);
1897 + if (EXT3_SB(sb)->s_mb_stats)
1898 + atomic_inc(&EXT3_SB(sb)->s_bal_2orders);
1905 + * The routine scans the group and measures all found extents.
1906 + * In order to optimize scanning, the caller must pass the number of
1907 + * free blocks in the group, so the routine can know the upper limit.
1909 +static void ext3_mb_complex_scan_group(struct ext3_allocation_context *ac,
1910 + struct ext3_buddy *e3b)
1912 + struct super_block *sb = ac->ac_sb;
1913 + void *bitmap = EXT3_MB_BITMAP(e3b);
1914 + struct ext3_free_extent ex;
1917 + free = e3b->bd_info->bb_free;
1918 + BUG_ON(free <= 0);
1920 + i = e3b->bd_info->bb_first_free;
1922 + while (free && ac->ac_status == AC_STATUS_CONTINUE) {
1923 + i = mb_find_next_zero_bit(bitmap, EXT3_BLOCKS_PER_GROUP(sb), i);
1924 + if (i >= EXT3_BLOCKS_PER_GROUP(sb)) {
1925 + BUG_ON(free != 0);
1929 + mb_find_extent(e3b, 0, i, ac->ac_g_ex.fe_len, &ex);
1930 + BUG_ON(ex.fe_len <= 0);
1931 + BUG_ON(free < ex.fe_len);
1933 + ext3_mb_measure_extent(ac, &ex, e3b);
1936 + free -= ex.fe_len;
1939 + ext3_mb_check_limits(ac, e3b, 1);
1943 + * This is a special case for storage like RAID5:
1944 + * we try to find stripe-aligned chunks for stripe-sized requests
1946 +static void ext3_mb_scan_aligned(struct ext3_allocation_context *ac,
1947 + struct ext3_buddy *e3b)
1949 + struct super_block *sb = ac->ac_sb;
1950 + struct ext3_sb_info *sbi = EXT3_SB(sb);
1951 + void *bitmap = EXT3_MB_BITMAP(e3b);
1952 + struct ext3_free_extent ex;
1953 + unsigned long i, max;
1955 + BUG_ON(sbi->s_stripe == 0);
1957 + /* find first stripe-aligned block */
1958 + i = e3b->bd_group * EXT3_BLOCKS_PER_GROUP(sb)
1959 + + le32_to_cpu(sbi->s_es->s_first_data_block);
1960 + i = ((i + sbi->s_stripe - 1) / sbi->s_stripe) * sbi->s_stripe;
1961 + i = (i - le32_to_cpu(sbi->s_es->s_first_data_block))
1962 + % EXT3_BLOCKS_PER_GROUP(sb);
1964 + while (i < EXT3_BLOCKS_PER_GROUP(sb)) {
1965 + if (!mb_test_bit(i, bitmap)) {
1966 + max = mb_find_extent(e3b, 0, i, sbi->s_stripe, &ex);
1967 + if (max >= sbi->s_stripe) {
1970 + ext3_mb_use_best_found(ac, e3b);
1974 + i += sbi->s_stripe;
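The round-up above is plain integer arithmetic; with hypothetical numbers, group 5, EXT3_BLOCKS_PER_GROUP = 32768, s_first_data_block = 1 and s_stripe = 256: the group starts at absolute block 5 * 32768 + 1 = 163841, rounding up gives ((163841 + 255) / 256) * 256 = 164096, and mapping back into the group yields i = (164096 - 1) % 32768 = 255, the first stripe-aligned offset in the group.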
1978 +static int ext3_mb_good_group(struct ext3_allocation_context *ac,
1979 + int group, int cr)
1981 + struct ext3_group_info *grp = EXT3_GROUP_INFO(ac->ac_sb, group);
1982 + unsigned free, fragments, i, bits;
1984 + BUG_ON(cr < 0 || cr >= 4);
1985 + BUG_ON(EXT3_MB_GRP_NEED_INIT(grp));
1987 + free = grp->bb_free;
1988 + fragments = grp->bb_fragments;
1991 + if (fragments == 0)
1996 + BUG_ON(ac->ac_2order == 0);
1997 + bits = ac->ac_sb->s_blocksize_bits + 1;
1998 + for (i = ac->ac_2order; i <= bits; i++)
1999 + if (grp->bb_counters[i] > 0)
2003 + if ((free / fragments) >= ac->ac_g_ex.fe_len)
2007 + if (free >= ac->ac_g_ex.fe_len)
2019 +int ext3_mb_regular_allocator(struct ext3_allocation_context *ac)
2021 + int group, i, cr, err = 0;
2022 + struct ext3_sb_info *sbi;
2023 + struct super_block *sb;
2024 + struct ext3_buddy e3b;
2027 + sbi = EXT3_SB(sb);
2028 + BUG_ON(ac->ac_status == AC_STATUS_FOUND);
2030 + /* first, try the goal */
2031 + err = ext3_mb_find_by_goal(ac, &e3b);
2032 + if (err || ac->ac_status == AC_STATUS_FOUND)
2035 + if (unlikely(ac->ac_flags & EXT3_MB_HINT_GOAL_ONLY))
2038 + i = ffs(ac->ac_g_ex.fe_len);
2039 + ac->ac_2order = 0;
2040 + if (i >= sbi->s_mb_order2_reqs) {
2042 + if ((ac->ac_g_ex.fe_len & (~(1 << i))) == 0)
2043 + ac->ac_2order = i;
2046 + group = ac->ac_g_ex.fe_group;
2048 + /* Let's just scan groups to find more or less suitable blocks */
2049 + cr = ac->ac_2order ? 0 : 1;
2051 + for (; cr < 4 && ac->ac_status == AC_STATUS_CONTINUE; cr++) {
2052 + ac->ac_criteria = cr;
2053 + for (i = 0; i < EXT3_SB(sb)->s_groups_count; group++, i++) {
2054 + struct ext3_group_info *grp;
2056 + if (group == EXT3_SB(sb)->s_groups_count)
2059 + /* quick check to skip empty groups */
2060 + grp = EXT3_GROUP_INFO(ac->ac_sb, group);
2061 + if (grp->bb_free == 0)
2064 + if (EXT3_MB_GRP_NEED_INIT(EXT3_GROUP_INFO(sb, group))) {
2065 + /* we need full data about the group
2066 + * to make a good selection */
2067 + err = ext3_mb_load_buddy(sb, group, &e3b);
2070 + ext3_mb_release_desc(&e3b);
2073 + /* check whether the group is good for our criteria */
2074 + if (!ext3_mb_good_group(ac, group, cr))
2077 + err = ext3_mb_load_buddy(sb, group, &e3b);
2081 + ext3_lock_group(sb, group);
2082 + if (!ext3_mb_good_group(ac, group, cr)) {
2083 + /* someone did allocation from this group */
2084 + ext3_unlock_group(sb, group);
2085 + ext3_mb_release_desc(&e3b);
2089 + ac->ac_groups_scanned++;
2091 + ext3_mb_simple_scan_group(ac, &e3b);
2092 + else if (cr == 1 && ac->ac_g_ex.fe_len == sbi->s_stripe)
2093 + ext3_mb_scan_aligned(ac, &e3b);
2095 + ext3_mb_complex_scan_group(ac, &e3b);
2097 + ext3_unlock_group(sb, group);
2098 + ext3_mb_release_desc(&e3b);
2100 + if (ac->ac_status != AC_STATUS_CONTINUE)
2105 + if (ac->ac_b_ex.fe_len > 0 && ac->ac_status != AC_STATUS_FOUND &&
2106 + !(ac->ac_flags & EXT3_MB_HINT_FIRST)) {
2108 + * We've been searching too long. Let's try to allocate
2109 + * the best chunk we've found so far
2112 + ext3_mb_try_best_found(ac, &e3b);
2113 + if (ac->ac_status != AC_STATUS_FOUND) {
2115 + * Someone luckier has already allocated it.
2116 + * The only thing we can do is just take first
2118 + printk(KERN_DEBUG "EXT3-fs: someone won our chunk\n");
2120 + ac->ac_b_ex.fe_group = 0;
2121 + ac->ac_b_ex.fe_start = 0;
2122 + ac->ac_b_ex.fe_len = 0;
2123 + ac->ac_status = AC_STATUS_CONTINUE;
2124 + ac->ac_flags |= EXT3_MB_HINT_FIRST;
2126 + atomic_inc(&sbi->s_mb_lost_chunks);
2134 +#ifdef EXT3_MB_HISTORY
2135 +struct ext3_mb_proc_session {
2136 + struct ext3_mb_history *history;
2137 + struct super_block *sb;
2142 +static void *ext3_mb_history_skip_empty(struct ext3_mb_proc_session *s,
2143 + struct ext3_mb_history *hs,
2146 + if (hs == s->history + s->max)
2148 + if (!first && hs == s->history + s->start)
2150 + while (hs->orig.fe_len == 0) {
2152 + if (hs == s->history + s->max)
2154 + if (hs == s->history + s->start)
2160 +static void *ext3_mb_seq_history_start(struct seq_file *seq, loff_t *pos)
2162 + struct ext3_mb_proc_session *s = seq->private;
2163 + struct ext3_mb_history *hs;
2167 + return SEQ_START_TOKEN;
2168 + hs = ext3_mb_history_skip_empty(s, s->history + s->start, 1);
2171 + while (--l && (hs = ext3_mb_history_skip_empty(s, ++hs, 0)) != NULL);
2175 +static void *ext3_mb_seq_history_next(struct seq_file *seq, void *v, loff_t *pos)
2177 + struct ext3_mb_proc_session *s = seq->private;
2178 + struct ext3_mb_history *hs = v;
2181 + if (v == SEQ_START_TOKEN)
2182 + return ext3_mb_history_skip_empty(s, s->history + s->start, 1);
2184 + return ext3_mb_history_skip_empty(s, ++hs, 0);
2187 +static int ext3_mb_seq_history_show(struct seq_file *seq, void *v)
2189 + char buf[25], buf2[25], buf3[25], *fmt;
2190 + struct ext3_mb_history *hs = v;
2192 + if (v == SEQ_START_TOKEN) {
2193 + seq_printf(seq, "%-5s %-8s %-23s %-23s %-23s %-5s "
2194 + "%-5s %-2s %-5s %-5s %-5s %-6s\n",
2195 + "pid", "inode", "original", "goal", "result","found",
2196 + "grps", "cr", "flags", "merge", "tail", "broken");
2200 + if (hs->op == EXT3_MB_HISTORY_ALLOC) {
2201 + fmt = "%-5u %-8u %-23s %-23s %-23s %-5u %-5u %-2u "
2202 + "%-5u %-5s %-5u %-6u\n";
2203 + sprintf(buf2, "%lu/%lu/%lu@%lu", hs->result.fe_group,
2204 + hs->result.fe_start, hs->result.fe_len,
2205 + hs->result.fe_logical);
2206 + sprintf(buf, "%lu/%lu/%lu@%lu", hs->orig.fe_group,
2207 + hs->orig.fe_start, hs->orig.fe_len,
2208 + hs->orig.fe_logical);
2209 + sprintf(buf3, "%lu/%lu/%lu@%lu", hs->goal.fe_group,
2210 + hs->goal.fe_start, hs->goal.fe_len,
2211 + hs->goal.fe_logical);
2212 + seq_printf(seq, fmt, hs->pid, hs->ino, buf, buf3, buf2,
2213 + hs->found, hs->groups, hs->cr, hs->flags,
2214 + hs->merged ? "M" : "", hs->tail,
2215 + hs->buddy ? 1 << hs->buddy : 0);
2216 + } else if (hs->op == EXT3_MB_HISTORY_PREALLOC) {
2217 + fmt = "%-5u %-8u %-23s %-23s %-23s\n";
2218 + sprintf(buf2, "%lu/%lu/%lu@%lu", hs->result.fe_group,
2219 + hs->result.fe_start, hs->result.fe_len,
2220 + hs->result.fe_logical);
2221 + sprintf(buf, "%lu/%lu/%lu@%lu", hs->orig.fe_group,
2222 + hs->orig.fe_start, hs->orig.fe_len,
2223 + hs->orig.fe_logical);
2224 + seq_printf(seq, fmt, hs->pid, hs->ino, buf, "", buf2);
2225 + } else if (hs->op == EXT3_MB_HISTORY_DISCARD) {
2226 + sprintf(buf2, "%lu/%lu/%lu", hs->result.fe_group,
2227 + hs->result.fe_start, hs->result.fe_len);
2228 + seq_printf(seq, "%-5u %-8u %-23s discard\n",
2229 + hs->pid, hs->ino, buf2);
2230 + } else if (hs->op == EXT3_MB_HISTORY_FREE) {
2231 + sprintf(buf2, "%lu/%lu/%lu", hs->result.fe_group,
2232 + hs->result.fe_start, hs->result.fe_len);
2233 + seq_printf(seq, "%-5u %-8u %-23s free\n",
2234 + hs->pid, hs->ino, buf2);
2239 +static void ext3_mb_seq_history_stop(struct seq_file *seq, void *v)
2243 +static struct seq_operations ext3_mb_seq_history_ops = {
2244 + .start = ext3_mb_seq_history_start,
2245 + .next = ext3_mb_seq_history_next,
2246 + .stop = ext3_mb_seq_history_stop,
2247 + .show = ext3_mb_seq_history_show,
2250 +static int ext3_mb_seq_history_open(struct inode *inode, struct file *file)
2252 + struct super_block *sb = PDE(inode)->data;
2253 + struct ext3_sb_info *sbi = EXT3_SB(sb);
2254 + struct ext3_mb_proc_session *s;
2257 + s = kmalloc(sizeof(*s), GFP_KERNEL);
2261 + size = sizeof(struct ext3_mb_history) * sbi->s_mb_history_max;
2262 + s->history = kmalloc(size, GFP_KERNEL);
2263 + if (s->history == NULL) {
2268 + spin_lock(&sbi->s_mb_history_lock);
2269 + memcpy(s->history, sbi->s_mb_history, size);
2270 + s->max = sbi->s_mb_history_max;
2271 + s->start = sbi->s_mb_history_cur % s->max;
2272 + spin_unlock(&sbi->s_mb_history_lock);
2274 + rc = seq_open(file, &ext3_mb_seq_history_ops);
2276 + struct seq_file *m = (struct seq_file *)file->private_data;
2279 + kfree(s->history);
2286 +static int ext3_mb_seq_history_release(struct inode *inode, struct file *file)
2288 + struct seq_file *seq = (struct seq_file *)file->private_data;
2289 + struct ext3_mb_proc_session *s = seq->private;
2290 + kfree(s->history);
2292 + return seq_release(inode, file);
2295 +static ssize_t ext3_mb_seq_history_write(struct file *file,
2296 + const char __user *buffer,
2297 + size_t count, loff_t *ppos)
2299 + struct seq_file *seq = (struct seq_file *)file->private_data;
2300 + struct ext3_mb_proc_session *s = seq->private;
2301 + struct super_block *sb = s->sb;
2305 + if (count >= sizeof(str)) {
2306 + printk(KERN_ERR "EXT3-fs: %s string too long, max %u bytes\n",
2307 + "mb_history", (int)sizeof(str));
2308 + return -EOVERFLOW;
2311 + if (copy_from_user(str, buffer, count))
2314 + value = simple_strtol(str, NULL, 0);
2317 + EXT3_SB(sb)->s_mb_history_filter = value;
2322 +static struct file_operations ext3_mb_seq_history_fops = {
2323 + .owner = THIS_MODULE,
2324 + .open = ext3_mb_seq_history_open,
2326 + .write = ext3_mb_seq_history_write,
2327 + .llseek = seq_lseek,
2328 + .release = ext3_mb_seq_history_release,
2331 +static void *ext3_mb_seq_groups_start(struct seq_file *seq, loff_t *pos)
2333 + struct super_block *sb = seq->private;
2334 + struct ext3_sb_info *sbi = EXT3_SB(sb);
2337 + if (*pos < 0 || *pos >= sbi->s_groups_count)
2341 + return (void *) group;
2344 +static void *ext3_mb_seq_groups_next(struct seq_file *seq, void *v, loff_t *pos)
2346 + struct super_block *sb = seq->private;
2347 + struct ext3_sb_info *sbi = EXT3_SB(sb);
2351 + if (*pos < 0 || *pos >= sbi->s_groups_count)
2354 + return (void *) group;
2357 +static int ext3_mb_seq_groups_show(struct seq_file *seq, void *v)
2359 + struct super_block *sb = seq->private;
2360 + long group = (long) v;
2362 + struct ext3_buddy e3b;
2364 + struct ext3_group_info info;
2365 + unsigned short counters[16];
2370 + seq_printf(seq, "#%-5s: %-5s %-5s %-5s "
2371 + "[ %-5s %-5s %-5s %-5s %-5s %-5s %-5s "
2372 + "%-5s %-5s %-5s %-5s %-5s %-5s %-5s ]\n",
2373 + "group", "free", "frags", "first",
2374 + "2^0", "2^1", "2^2", "2^3", "2^4", "2^5","2^6",
2375 + "2^7", "2^8", "2^9", "2^10", "2^11", "2^12", "2^13");
2377 + i = (sb->s_blocksize_bits + 2) * sizeof(sg.info.bb_counters[0]) +
2378 + sizeof(struct ext3_group_info);
2379 + err = ext3_mb_load_buddy(sb, group, &e3b);
2381 + seq_printf(seq, "#%-5lu: I/O error\n", group);
2384 + ext3_lock_group(sb, group);
2385 + memcpy(&sg, EXT3_GROUP_INFO(sb, group), i);
2386 + ext3_unlock_group(sb, group);
2387 + ext3_mb_release_desc(&e3b);
2389 + seq_printf(seq, "#%-5lu: %-5u %-5u %-5u [", group, sg.info.bb_free,
2390 + sg.info.bb_fragments, sg.info.bb_first_free);
2391 + for (i = 0; i <= 13; i++)
2392 + seq_printf(seq, " %-5u", i <= sb->s_blocksize_bits + 1 ?
2393 + sg.info.bb_counters[i] : 0);
2394 + seq_printf(seq, " ]\n");
2399 +static void ext3_mb_seq_groups_stop(struct seq_file *seq, void *v)
2403 +static struct seq_operations ext3_mb_seq_groups_ops = {
2404 + .start = ext3_mb_seq_groups_start,
2405 + .next = ext3_mb_seq_groups_next,
2406 + .stop = ext3_mb_seq_groups_stop,
2407 + .show = ext3_mb_seq_groups_show,
2410 +static int ext3_mb_seq_groups_open(struct inode *inode, struct file *file)
2412 + struct super_block *sb = PDE(inode)->data;
2415 + rc = seq_open(file, &ext3_mb_seq_groups_ops);
2417 + struct seq_file *m = (struct seq_file *)file->private_data;
2424 +static struct file_operations ext3_mb_seq_groups_fops = {
2425 + .owner = THIS_MODULE,
2426 + .open = ext3_mb_seq_groups_open,
2428 + .llseek = seq_lseek,
2429 + .release = seq_release,
2432 +static void ext3_mb_history_release(struct super_block *sb)
2434 + struct ext3_sb_info *sbi = EXT3_SB(sb);
2436 + remove_proc_entry("mb_groups", sbi->s_dev_proc);
2437 + remove_proc_entry("mb_history", sbi->s_dev_proc);
2439 + if (sbi->s_mb_history)
2440 + kfree(sbi->s_mb_history);
2443 +static void ext3_mb_history_init(struct super_block *sb)
2445 + struct ext3_sb_info *sbi = EXT3_SB(sb);
2448 + if (sbi->s_dev_proc != NULL) {
2449 + struct proc_dir_entry *p;
2450 + p = create_proc_entry("mb_history", S_IRUGO, sbi->s_dev_proc);
2452 + p->proc_fops = &ext3_mb_seq_history_fops;
2455 + p = create_proc_entry("mb_groups", S_IRUGO, sbi->s_dev_proc);
2457 + p->proc_fops = &ext3_mb_seq_groups_fops;
2462 + sbi->s_mb_history_max = 1000;
2463 + sbi->s_mb_history_cur = 0;
2464 + spin_lock_init(&sbi->s_mb_history_lock);
2465 + i = sbi->s_mb_history_max * sizeof(struct ext3_mb_history);
2466 + sbi->s_mb_history = kmalloc(i, GFP_KERNEL);
2467 + if (likely(sbi->s_mb_history != NULL))
2468 + memset(sbi->s_mb_history, 0, i);
2469 + /* if we can't allocate history, then we simply won't use it */
2473 +ext3_mb_store_history(struct ext3_allocation_context *ac)
2475 + struct ext3_sb_info *sbi = EXT3_SB(ac->ac_sb);
2476 + struct ext3_mb_history h;
2478 + if (unlikely(sbi->s_mb_history == NULL))
2481 + if (!(ac->ac_op & sbi->s_mb_history_filter))
2485 + h.pid = current->pid;
2486 + h.ino = ac->ac_inode ? ac->ac_inode->i_ino : 0;
2487 + h.orig = ac->ac_o_ex;
2488 + h.result = ac->ac_b_ex;
2489 + h.flags = ac->ac_flags;
2491 + if (ac->ac_op == EXT3_MB_HISTORY_ALLOC) {
2492 + if (ac->ac_g_ex.fe_start == ac->ac_b_ex.fe_start &&
2493 + ac->ac_g_ex.fe_group == ac->ac_b_ex.fe_group)
2495 + h.goal = ac->ac_g_ex;
2496 + h.result = ac->ac_f_ex;
2499 + spin_lock(&sbi->s_mb_history_lock);
2500 + memcpy(sbi->s_mb_history + sbi->s_mb_history_cur, &h, sizeof(h));
2501 + if (++sbi->s_mb_history_cur >= sbi->s_mb_history_max)
2502 + sbi->s_mb_history_cur = 0;
2503 + spin_unlock(&sbi->s_mb_history_lock);
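
/*
 * Illustrative userspace sketch (not part of the patch): the store
 * path above is a fixed-size ring buffer -- write at s_mb_history_cur,
 * then wrap to 0 at s_mb_history_max.  The reader snapshots the array
 * under the same lock and starts at cur % max, the oldest entry.
 */
struct ring {
	int buf[4];
	int cur;	/* next slot to overwrite */
	int max;	/* capacity */
};

static void ring_store(struct ring *r, int v)
{
	r->buf[r->cur] = v;
	if (++r->cur >= r->max)
		r->cur = 0;	/* wrap, dropping the oldest entry */
}

int main(void)
{
	struct ring r = { .cur = 0, .max = 4 };

	for (int v = 1; v <= 6; v++)
		ring_store(&r, v);
	/* buf is now {5, 6, 3, 4}; the oldest entry sits at cur == 2 */
	return 0;
}
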
2507 +#define ext3_mb_history_release(sb)
2508 +#define ext3_mb_history_init(sb)
2511 +int ext3_mb_init_backend(struct super_block *sb)
2513 + struct ext3_sb_info *sbi = EXT3_SB(sb);
2514 + int i, j, len, metalen;
2515 + int num_meta_group_infos =
2516 + (sbi->s_groups_count + EXT3_DESC_PER_BLOCK(sb) - 1) >>
2517 + EXT3_DESC_PER_BLOCK_BITS(sb);
2518 + struct ext3_group_info **meta_group_info;
2520 + /* An 8TB filesystem with 64-bit pointers requires a 4096 byte
2521 + * kmalloc. A 128KB malloc should suffice for a 256TB filesystem.
2522 + * So a two-level scheme suffices for now. */
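
/*
 * Illustrative userspace sketch (not part of the patch): the sizing
 * behind the comment above.  With 4KB blocks a descriptor block covers
 * 4096 / 32 = 128 groups, so the top-level array needs one pointer per
 * 128 groups; the figures below match the 8TB example.
 */
#include <stdio.h>

int main(void)
{
	unsigned long groups = 65536;		/* 8TB fs, 128MB block groups */
	unsigned long desc_per_block = 128;	/* 4KB block / 32-byte desc */
	unsigned long nmeta = (groups + desc_per_block - 1) / desc_per_block;

	/* 512 pointers * 8 bytes = one 4096-byte kmalloc */
	printf("meta groups: %lu, top-level bytes: %lu\n",
	       nmeta, (unsigned long)(nmeta * sizeof(void *)));
	return 0;
}
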
2523 + sbi->s_group_info = kmalloc(sizeof(*sbi->s_group_info) *
2524 + num_meta_group_infos, GFP_KERNEL);
2525 + if (sbi->s_group_info == NULL) {
2526 + printk(KERN_ERR "EXT3-fs: can't allocate buddy meta group\n");
2529 + sbi->s_buddy_cache = new_inode(sb);
2530 + if (sbi->s_buddy_cache == NULL) {
2531 + printk(KERN_ERR "EXT3-fs: can't get new inode\n");
2534 + EXT3_I(sbi->s_buddy_cache)->i_disksize = 0;
2536 + metalen = sizeof(*meta_group_info) << EXT3_DESC_PER_BLOCK_BITS(sb);
2537 + for (i = 0; i < num_meta_group_infos; i++) {
2538 + if ((i + 1) == num_meta_group_infos)
2539 + metalen = sizeof(*meta_group_info) *
2540 + (sbi->s_groups_count -
2541 + (i << EXT3_DESC_PER_BLOCK_BITS(sb)));
2542 + meta_group_info = kmalloc(metalen, GFP_KERNEL);
2543 + if (meta_group_info == NULL) {
2544 + printk(KERN_ERR "EXT3-fs: can't allocate mem for a "
2546 + goto err_freemeta;
2548 + sbi->s_group_info[i] = meta_group_info;
2552 + * calculate the needed size. if you change the bb_counters size,
2553 + * don't forget to update ext3_mb_generate_buddy()
2555 + len = sizeof(struct ext3_group_info);
2556 + len += sizeof(unsigned short) * (sb->s_blocksize_bits + 2);
2557 + for (i = 0; i < sbi->s_groups_count; i++) {
2558 + struct ext3_group_desc * desc;
2561 + sbi->s_group_info[i >> EXT3_DESC_PER_BLOCK_BITS(sb)];
2562 + j = i & (EXT3_DESC_PER_BLOCK(sb) - 1);
2564 + meta_group_info[j] = kmalloc(len, GFP_KERNEL);
2565 + if (meta_group_info[j] == NULL) {
2566 + printk(KERN_ERR "EXT3-fs: can't allocate buddy mem\n");
2568 + goto err_freebuddy;
2570 + desc = ext3_get_group_desc(sb, i, NULL);
2571 + if (desc == NULL) {
2572 + printk(KERN_ERR"EXT3-fs: can't read descriptor %u\n",i);
2573 + goto err_freebuddy;
2575 + memset(meta_group_info[j], 0, len);
2576 + set_bit(EXT3_GROUP_INFO_NEED_INIT_BIT,
2577 + &meta_group_info[j]->bb_state);
2579 + /* initialize bb_free to be able to skip
2580 + * empty groups without initialization */
2581 + meta_group_info[j]->bb_free =
2582 + le16_to_cpu(desc->bg_free_blocks_count);
2584 + INIT_LIST_HEAD(&meta_group_info[j]->bb_prealloc_list);
2586 +#ifdef DOUBLE_CHECK
2588 + struct buffer_head *bh;
2589 + meta_group_info[j]->bb_bitmap =
2590 + kmalloc(sb->s_blocksize, GFP_KERNEL);
2591 + BUG_ON(meta_group_info[j]->bb_bitmap == NULL);
2592 + bh = read_block_bitmap(sb, i);
2593 + BUG_ON(bh == NULL);
2594 + memcpy(meta_group_info[j]->bb_bitmap, bh->b_data,
2606 + kfree(EXT3_GROUP_INFO(sb, i));
2609 + i = num_meta_group_infos;
2612 + kfree(sbi->s_group_info[i]);
2613 + iput(sbi->s_buddy_cache);
2615 + kfree(sbi->s_group_info);
2619 +int ext3_mb_init(struct super_block *sb, int needs_recovery)
2621 + struct ext3_sb_info *sbi = EXT3_SB(sb);
2622 + unsigned i, offset, max;
2624 + if (!test_opt(sb, MBALLOC))
2627 + i = (sb->s_blocksize_bits + 2) * sizeof(unsigned short);
2629 + sbi->s_mb_offsets = kmalloc(i, GFP_KERNEL);
2630 + if (sbi->s_mb_offsets == NULL) {
2631 + clear_opt(sbi->s_mount_opt, MBALLOC);
2634 + sbi->s_mb_maxs = kmalloc(i, GFP_KERNEL);
2635 + if (sbi->s_mb_maxs == NULL) {
2636 + clear_opt(sbi->s_mount_opt, MBALLOC);
2637 + kfree(sbi->s_mb_offsets);
2641 + /* order 0 is regular bitmap */
2642 + sbi->s_mb_maxs[0] = sb->s_blocksize << 3;
2643 + sbi->s_mb_offsets[0] = 0;
2647 + max = sb->s_blocksize << 2;
2649 + sbi->s_mb_offsets[i] = offset;
2650 + sbi->s_mb_maxs[i] = max;
2651 + offset += 1 << (sb->s_blocksize_bits - i);
2654 + } while (i <= sb->s_blocksize_bits + 1);
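
/*
 * Illustrative userspace sketch (not part of the patch): where each
 * order's bitmap lives inside the buddy block, mirroring the loop
 * above for 4KB blocks (blocksize_bits = 12).  The guard on the offset
 * update avoids the negative shift the final iteration would otherwise
 * perform; offset is never read again at that point.
 */
#include <stdio.h>

int main(void)
{
	unsigned bits = 12, offset = 0, max = (1u << bits) << 2;

	printf("order 0: offset 0, max %u\n", (1u << bits) << 3);
	for (unsigned i = 1; i <= bits + 1; i++) {
		printf("order %u: offset %u, max %u\n", i, offset, max);
		if (i <= bits)
			offset += 1u << (bits - i);
		max >>= 1;
	}
	return 0;
}
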
2656 + /* init file for buddy data */
2657 + if ((i = ext3_mb_init_backend(sb))) {
2658 + clear_opt(sbi->s_mount_opt, MBALLOC);
2659 + kfree(sbi->s_mb_offsets);
2660 + kfree(sbi->s_mb_maxs);
2664 + spin_lock_init(&sbi->s_md_lock);
2665 + INIT_LIST_HEAD(&sbi->s_active_transaction);
2666 + INIT_LIST_HEAD(&sbi->s_closed_transaction);
2667 + INIT_LIST_HEAD(&sbi->s_committed_transaction);
2668 + spin_lock_init(&sbi->s_bal_lock);
2670 + sbi->s_mb_max_to_scan = MB_DEFAULT_MAX_TO_SCAN;
2671 + sbi->s_mb_min_to_scan = MB_DEFAULT_MIN_TO_SCAN;
2672 + sbi->s_mb_max_groups_to_scan = MB_DEFAULT_MAX_GROUPS_TO_SCAN;
2673 + sbi->s_mb_stats = MB_DEFAULT_STATS;
2674 + sbi->s_mb_stream_request = MB_DEFAULT_STREAM_THRESHOLD;
2675 + sbi->s_mb_order2_reqs = MB_DEFAULT_ORDER2_REQS;
2676 + sbi->s_mb_history_filter = EXT3_MB_HISTORY_DEFAULT;
2678 + i = sizeof(struct ext3_locality_group) * num_possible_cpus();
2679 + sbi->s_locality_groups = kmalloc(i, GFP_NOFS);
2680 + if (sbi->s_locality_groups == NULL) {
2681 + clear_opt(sbi->s_mount_opt, MBALLOC);
2682 + kfree(sbi->s_mb_offsets);
2683 + kfree(sbi->s_mb_maxs);
2686 + for (i = 0; i < num_possible_cpus(); i++) {
2687 + struct ext3_locality_group *lg;
2688 + lg = &sbi->s_locality_groups[i];
2689 + sema_init(&lg->lg_sem, 1);
2690 + INIT_LIST_HEAD(&lg->lg_prealloc_list);
2691 + spin_lock_init(&lg->lg_prealloc_lock);
2694 + ext3_mb_init_per_dev_proc(sb);
2695 + ext3_mb_history_init(sb);
2697 + printk("EXT3-fs: mballoc enabled\n");
2701 +void ext3_mb_cleanup_pa(struct ext3_group_info *grp)
2703 + struct ext3_prealloc_space *pa;
2704 + struct list_head *cur, *tmp;
2707 + list_for_each_safe(cur, tmp, &grp->bb_prealloc_list) {
2708 + pa = list_entry(cur, struct ext3_prealloc_space, pa_group_list);
2709 + list_del_rcu(&pa->pa_group_list);
2714 + mb_debug("mballoc: %u PAs left\n", count);
2718 +int ext3_mb_release(struct super_block *sb)
2720 + struct ext3_sb_info *sbi = EXT3_SB(sb);
2721 + int i, num_meta_group_infos;
2723 + if (!test_opt(sb, MBALLOC))
2726 + /* release freed, non-committed blocks */
2727 + spin_lock(&sbi->s_md_lock);
2728 + list_splice_init(&sbi->s_closed_transaction,
2729 + &sbi->s_committed_transaction);
2730 + list_splice_init(&sbi->s_active_transaction,
2731 + &sbi->s_committed_transaction);
2732 + spin_unlock(&sbi->s_md_lock);
2733 + ext3_mb_free_committed_blocks(sb);
2735 + if (sbi->s_group_info) {
2736 + for (i = 0; i < sbi->s_groups_count; i++) {
2737 +#ifdef DOUBLE_CHECK
2738 + if (EXT3_GROUP_INFO(sb, i)->bb_bitmap)
2739 + kfree(EXT3_GROUP_INFO(sb, i)->bb_bitmap);
2741 + ext3_mb_cleanup_pa(EXT3_GROUP_INFO(sb, i));
2742 + kfree(EXT3_GROUP_INFO(sb, i));
2744 + num_meta_group_infos = (sbi->s_groups_count +
2745 + EXT3_DESC_PER_BLOCK(sb) - 1) >>
2746 + EXT3_DESC_PER_BLOCK_BITS(sb);
2747 + for (i = 0; i < num_meta_group_infos; i++)
2748 + kfree(sbi->s_group_info[i]);
2749 + kfree(sbi->s_group_info);
2751 + if (sbi->s_mb_offsets)
2752 + kfree(sbi->s_mb_offsets);
2753 + if (sbi->s_mb_maxs)
2754 + kfree(sbi->s_mb_maxs);
2755 + if (sbi->s_buddy_cache)
2756 + iput(sbi->s_buddy_cache);
2757 + if (sbi->s_mb_stats) {
2758 + printk("EXT3-fs: mballoc: %u blocks %u reqs (%u success)\n",
2759 + atomic_read(&sbi->s_bal_allocated),
2760 + atomic_read(&sbi->s_bal_reqs),
2761 + atomic_read(&sbi->s_bal_success));
2762 + printk("EXT3-fs: mballoc: %u extents scanned, %u goal hits, "
2763 + "%u 2^N hits, %u breaks, %u lost\n",
2764 + atomic_read(&sbi->s_bal_ex_scanned),
2765 + atomic_read(&sbi->s_bal_goals),
2766 + atomic_read(&sbi->s_bal_2orders),
2767 + atomic_read(&sbi->s_bal_breaks),
2768 + atomic_read(&sbi->s_mb_lost_chunks));
2769 + printk("EXT3-fs: mballoc: %lu generated and it took %Lu\n",
2770 + sbi->s_mb_buddies_generated++,
2771 + sbi->s_mb_generation_time);
2772 + printk("EXT3-fs: mballoc: %u preallocated, %u discarded\n",
2773 + atomic_read(&sbi->s_mb_preallocated),
2774 + atomic_read(&sbi->s_mb_discarded));
2777 + if (sbi->s_locality_groups)
2778 + kfree(sbi->s_locality_groups);
2780 + ext3_mb_history_release(sb);
2781 + ext3_mb_destroy_per_dev_proc(sb);
2786 +void ext3_mb_free_committed_blocks(struct super_block *sb)
2788 + struct ext3_sb_info *sbi = EXT3_SB(sb);
2789 + int err, i, count = 0, count2 = 0;
2790 + struct ext3_free_metadata *md;
2791 + struct ext3_buddy e3b;
2793 + if (list_empty(&sbi->s_committed_transaction))
2796 + /* there are committed blocks still to be freed */
2798 + /* get next array of blocks */
2800 + spin_lock(&sbi->s_md_lock);
2801 + if (!list_empty(&sbi->s_committed_transaction)) {
2802 + md = list_entry(sbi->s_committed_transaction.next,
2803 + struct ext3_free_metadata, list);
2804 + list_del(&md->list);
2806 + spin_unlock(&sbi->s_md_lock);
2811 + mb_debug("gonna free %u blocks in group %u (0x%p):",
2812 + md->num, md->group, md);
2814 + err = ext3_mb_load_buddy(sb, md->group, &e3b);
2815 + /* we expect to find existing buddy because it's pinned */
2818 + /* there are blocks to put in buddy to make them really free */
2821 + ext3_lock_group(sb, md->group);
2822 + for (i = 0; i < md->num; i++) {
2823 + mb_debug(" %u", md->blocks[i]);
2824 + err = mb_free_blocks(NULL, &e3b, md->blocks[i], 1);
2828 + ext3_unlock_group(sb, md->group);
2830 + /* balance refcounts from ext3_mb_free_metadata() */
2831 + page_cache_release(e3b.bd_buddy_page);
2832 + page_cache_release(e3b.bd_bitmap_page);
2835 + ext3_mb_release_desc(&e3b);
2839 + mb_debug("freed %u blocks in %u structures\n", count, count2);
2842 +#define EXT3_MB_STATS_NAME "stats"
2843 +#define EXT3_MB_MAX_TO_SCAN_NAME "max_to_scan"
2844 +#define EXT3_MB_MIN_TO_SCAN_NAME "min_to_scan"
2845 +#define EXT3_MB_ORDER2_REQ "order2_req"
2846 +#define EXT3_MB_STREAM_REQ "stream_req"
2848 +static int ext3_mb_stats_read(char *page, char **start, off_t off,
2849 + int count, int *eof, void *data)
2851 + struct ext3_sb_info *sbi = data;
2858 + len = sprintf(page, "%ld\n", sbi->s_mb_stats);
2863 +static int ext3_mb_stats_write(struct file *file, const char *buffer,
2864 + unsigned long count, void *data)
2866 + struct ext3_sb_info *sbi = data;
2869 + if (count >= sizeof(str)) {
2870 + printk(KERN_ERR "EXT3-fs: %s string too long, max %u bytes\n",
2871 + EXT3_MB_STATS_NAME, (int)sizeof(str));
2872 + return -EOVERFLOW;
2875 + if (copy_from_user(str, buffer, count))
2878 + /* Only set to 0 or 1 respectively; zero->0; non-zero->1 */
2879 + sbi->s_mb_stats = (simple_strtol(str, NULL, 0) != 0);
2883 +static int ext3_mb_max_to_scan_read(char *page, char **start, off_t off,
2884 + int count, int *eof, void *data)
2886 + struct ext3_sb_info *sbi = data;
2893 + len = sprintf(page, "%ld\n", sbi->s_mb_max_to_scan);
2898 +static int ext3_mb_max_to_scan_write(struct file *file, const char *buffer,
2899 + unsigned long count, void *data)
2901 + struct ext3_sb_info *sbi = data;
2905 + if (count >= sizeof(str)) {
2906 + printk(KERN_ERR "EXT3-fs: %s string too long, max %u bytes\n",
2907 + EXT3_MB_MAX_TO_SCAN_NAME, (int)sizeof(str));
2908 + return -EOVERFLOW;
2911 + if (copy_from_user(str, buffer, count))
2914 + /* parse and store the new value */
2915 + value = simple_strtol(str, NULL, 0);
2919 + sbi->s_mb_max_to_scan = value;
2924 +static int ext3_mb_min_to_scan_read(char *page, char **start, off_t off,
2925 + int count, int *eof, void *data)
2927 + struct ext3_sb_info *sbi = data;
2934 + len = sprintf(page, "%ld\n", sbi->s_mb_min_to_scan);
2939 +static int ext3_mb_order2_req_write(struct file *file, const char *buffer,
2940 + unsigned long count, void *data)
2942 + struct ext3_sb_info *sbi = data;
2946 + if (count >= sizeof(str)) {
2947 + printk(KERN_ERR "EXT3-fs: %s string too long, max %u bytes\n",
2948 + EXT3_MB_ORDER2_REQ, (int)sizeof(str));
2949 + return -EOVERFLOW;
2952 + if (copy_from_user(str, buffer, count))
2955 + /* parse and store the new value */
2956 + value = simple_strtol(str, NULL, 0);
2960 + sbi->s_mb_order2_reqs = value;
2965 +static int ext3_mb_order2_req_read(char *page, char **start, off_t off,
2966 + int count, int *eof, void *data)
2968 + struct ext3_sb_info *sbi = data;
2975 + len = sprintf(page, "%ld\n", sbi->s_mb_order2_reqs);
2980 +static int ext3_mb_min_to_scan_write(struct file *file, const char *buffer,
2981 + unsigned long count, void *data)
2983 + struct ext3_sb_info *sbi = data;
2987 + if (count >= sizeof(str)) {
2988 + printk(KERN_ERR "EXT3-fs: %s string too long, max %u bytes\n",
2989 + EXT3_MB_MIN_TO_SCAN_NAME, (int)sizeof(str));
2990 + return -EOVERFLOW;
2993 + if (copy_from_user(str, buffer, count))
2996 + /* parse and store the new value */
2997 + value = simple_strtol(str, NULL, 0);
3001 + sbi->s_mb_min_to_scan = value;
3006 +static int ext3_mb_stream_req_read(char *page, char **start, off_t off,
3007 + int count, int *eof, void *data)
3009 + struct ext3_sb_info *sbi = data;
3016 + len = sprintf(page, "%ld\n", sbi->s_mb_stream_request);
3021 +static int ext3_mb_stream_req_write(struct file *file, const char *buffer,
3022 + unsigned long count, void *data)
3024 + struct ext3_sb_info *sbi = data;
3028 + if (count >= sizeof(str)) {
3029 + printk(KERN_ERR "EXT3-fs: %s string too long, max %u bytes\n",
3030 + EXT3_MB_STREAM_REQ, (int)sizeof(str));
3031 + return -EOVERFLOW;
3034 + if (copy_from_user(str, buffer, count))
3038 + /* parse and store the new value */
3038 + value = simple_strtol(str, NULL, 0);
3042 + sbi->s_mb_stream_request = value;
3047 +int ext3_mb_init_per_dev_proc(struct super_block *sb)
3049 + struct ext3_sb_info *sbi = EXT3_SB(sb);
3050 + mode_t mode = S_IFREG | S_IRUGO | S_IWUSR;
3051 + struct proc_dir_entry *proc;
3054 + name = EXT3_MB_STATS_NAME;
3055 + proc = create_proc_entry(name, mode, sbi->s_dev_proc);
3059 + proc->read_proc = ext3_mb_stats_read;
3060 + proc->write_proc = ext3_mb_stats_write;
3062 + name = EXT3_MB_MAX_TO_SCAN_NAME;
3063 + proc = create_proc_entry(name, mode, sbi->s_dev_proc);
3067 + proc->read_proc = ext3_mb_max_to_scan_read;
3068 + proc->write_proc = ext3_mb_max_to_scan_write;
3070 + name = EXT3_MB_MIN_TO_SCAN_NAME;
3071 + proc = create_proc_entry(name, mode, sbi->s_dev_proc);
3075 + proc->read_proc = ext3_mb_min_to_scan_read;
3076 + proc->write_proc = ext3_mb_min_to_scan_write;
3078 + name = EXT3_MB_ORDER2_REQ;
3079 + proc = create_proc_entry(name, mode, sbi->s_dev_proc);
3083 + proc->read_proc = ext3_mb_order2_req_read;
3084 + proc->write_proc = ext3_mb_order2_req_write;
3086 + name = EXT3_MB_STREAM_REQ;
3087 + proc = create_proc_entry(name, mode, sbi->s_dev_proc);
3091 + proc->read_proc = ext3_mb_stream_req_read;
3092 + proc->write_proc = ext3_mb_stream_req_write;
3097 + printk(KERN_ERR "EXT3-fs: Unable to create %s\n", name);
3098 + remove_proc_entry(EXT3_MB_STREAM_REQ, sbi->s_dev_proc);
3099 + remove_proc_entry(EXT3_MB_ORDER2_REQ, sbi->s_dev_proc);
3100 + remove_proc_entry(EXT3_MB_MIN_TO_SCAN_NAME, sbi->s_dev_proc);
3101 + remove_proc_entry(EXT3_MB_MAX_TO_SCAN_NAME, sbi->s_dev_proc);
3102 + remove_proc_entry(EXT3_MB_STATS_NAME, sbi->s_dev_proc);
3107 +int ext3_mb_destroy_per_dev_proc(struct super_block *sb)
3109 + struct ext3_sb_info *sbi = EXT3_SB(sb);
3111 + if (sbi->s_dev_proc == NULL)
3114 + remove_proc_entry(EXT3_MB_STREAM_REQ, sbi->s_dev_proc);
3115 + remove_proc_entry(EXT3_MB_ORDER2_REQ, sbi->s_dev_proc);
3116 + remove_proc_entry(EXT3_MB_MIN_TO_SCAN_NAME, sbi->s_dev_proc);
3117 + remove_proc_entry(EXT3_MB_MAX_TO_SCAN_NAME, sbi->s_dev_proc);
3118 + remove_proc_entry(EXT3_MB_STATS_NAME, sbi->s_dev_proc);
3123 +int __init init_ext3_mb_proc(void)
3125 + ext3_pspace_cachep =
3126 + kmem_cache_create("ext3_prealloc_space",
3127 + sizeof(struct ext3_prealloc_space),
3128 + 0, SLAB_RECLAIM_ACCOUNT, NULL, NULL);
3129 + if (ext3_pspace_cachep == NULL)
3135 +void exit_ext3_mb_proc(void)
3137 + /* XXX: synchronize_rcu(); */
3138 + kmem_cache_destroy(ext3_pspace_cachep);
3143 + * Check quota and mark the chosen space (ac->ac_b_ex) non-free in bitmaps
3144 + * Returns 0 on success, or an error code
3146 +int ext3_mb_mark_diskspace_used(struct ext3_allocation_context *ac, handle_t *handle)
3148 + struct buffer_head *bitmap_bh = NULL;
3149 + struct ext3_super_block *es;
3150 + struct ext3_group_desc *gdp;
3151 + struct buffer_head *gdp_bh;
3152 + struct ext3_sb_info *sbi;
3153 + struct super_block *sb;
3157 + BUG_ON(ac->ac_status != AC_STATUS_FOUND);
3158 + BUG_ON(ac->ac_b_ex.fe_len <= 0);
3161 + sbi = EXT3_SB(sb);
3164 + ext3_debug("using block group %lu(%d)\n", ac->ac_b_ex.fe_group,
3165 + gdp->bg_free_blocks_count);
3168 + bitmap_bh = read_block_bitmap(sb, ac->ac_b_ex.fe_group);
3172 + err = ext3_journal_get_write_access(handle, bitmap_bh);
3177 + gdp = ext3_get_group_desc(sb, ac->ac_b_ex.fe_group, &gdp_bh);
3181 + err = ext3_journal_get_write_access(handle, gdp_bh);
3185 + block = ac->ac_b_ex.fe_group * EXT3_BLOCKS_PER_GROUP(sb)
3186 + + ac->ac_b_ex.fe_start
3187 + + le32_to_cpu(es->s_first_data_block);
3189 + if (block == le32_to_cpu(gdp->bg_block_bitmap) ||
3190 + block == le32_to_cpu(gdp->bg_inode_bitmap) ||
3191 + in_range(block, le32_to_cpu(gdp->bg_inode_table),
3192 + EXT3_SB(sb)->s_itb_per_group))
3193 + ext3_error(sb, __FUNCTION__,
3194 + "Allocating block in system zone - block = %lu",
3195 + (unsigned long) block);
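
/*
 * Illustrative userspace sketch (not part of the patch): the absolute
 * block number computed above from an (fe_group, fe_start) pair, e.g.
 * with 32768 blocks per group and s_first_data_block = 1:
 */
#include <stdio.h>

int main(void)
{
	unsigned long group = 2, start = 100, per_group = 32768, first = 1;

	/* prints "block = 65637" */
	printf("block = %lu\n", group * per_group + start + first);
	return 0;
}
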
3196 +#ifdef AGGRESSIVE_CHECK
3199 + for (i = 0; i < ac->ac_b_ex.fe_len; i++) {
3200 + BUG_ON(mb_test_bit(ac->ac_b_ex.fe_start + i,
3201 + bitmap_bh->b_data));
3205 + mb_set_bits(sb_bgl_lock(sbi, ac->ac_b_ex.fe_group), bitmap_bh->b_data,
3206 + ac->ac_b_ex.fe_start, ac->ac_b_ex.fe_len);
3208 + spin_lock(sb_bgl_lock(sbi, ac->ac_b_ex.fe_group));
3209 + gdp->bg_free_blocks_count =
3210 + cpu_to_le16(le16_to_cpu(gdp->bg_free_blocks_count)
3211 + - ac->ac_b_ex.fe_len);
3212 + spin_unlock(sb_bgl_lock(sbi, ac->ac_b_ex.fe_group));
3213 + percpu_counter_mod(&sbi->s_freeblocks_counter, -ac->ac_b_ex.fe_len);
3215 + err = ext3_journal_dirty_metadata(handle, bitmap_bh);
3218 + err = ext3_journal_dirty_metadata(handle, gdp_bh);
3222 + brelse(bitmap_bh);
3227 + * here we normalize request for locality group
3228 + * XXX: should we try to preallocate more than the group has now?
3230 +void ext3_mb_normalize_group_request(struct ext3_allocation_context *ac)
3232 + struct super_block *sb = ac->ac_sb;
3233 + struct ext3_locality_group *lg = ac->ac_lg;
3235 + BUG_ON(lg == NULL);
3236 + if (EXT3_SB(sb)->s_stripe)
3237 + ac->ac_g_ex.fe_len = EXT3_SB(sb)->s_stripe;
3239 + ac->ac_g_ex.fe_len = (1024 * 1024) >> sb->s_blocksize_bits;
3241 + mb_debug("#%u: goal %u blocks for locality group\n",
3242 + current->pid, ac->ac_g_ex.fe_len);
3246 + * Normalization means making request better in terms of
3247 + * size and alignment
3249 +void ext3_mb_normalize_request(struct ext3_allocation_context *ac,
3250 + struct ext3_allocation_request *ar)
3252 + struct ext3_inode_info *ei = EXT3_I(ac->ac_inode);
3253 + loff_t start, end, size, orig_size, orig_start;
3254 + struct list_head *cur;
3257 + /* only normalize data requests; metadata requests
3258 + do not need preallocation */
3259 + if (!(ac->ac_flags & EXT3_MB_HINT_DATA))
3262 + /* sometimes the caller may want exact blocks */
3263 + if (unlikely(ac->ac_flags & EXT3_MB_HINT_GOAL_ONLY))
3266 + /* caller may indicate that preallocation isn't
3267 + * required (it's a tail, for example) */
3268 + if (ac->ac_flags & EXT3_MB_HINT_NOPREALLOC)
3271 + if (ac->ac_flags & EXT3_MB_HINT_GROUP_ALLOC)
3272 + return ext3_mb_normalize_group_request(ac);
3274 + bsbits = ac->ac_sb->s_blocksize_bits;
3276 + /* first, determine the actual file size
3277 + * as if the current request were allocated */
3278 + size = ac->ac_o_ex.fe_logical + ac->ac_o_ex.fe_len;
3279 + size = size << bsbits;
3280 + if (size < i_size_read(ac->ac_inode))
3281 + size = i_size_read(ac->ac_inode);
3283 + /* max number of available blocks in an empty group */
3284 + max = EXT3_BLOCKS_PER_GROUP(ac->ac_sb) - 1 - 1
3285 + - EXT3_SB(ac->ac_sb)->s_itb_per_group;
3287 +#define NRL_CHECK_SIZE(req,size,max,bits) \
3288 + (req <= (size) || max <= ((size) >> bits))
3290 + /* first, try to predict filesize */
3291 + /* XXX: should this table be tunable? */
3293 + if (size <= 16 * 1024) {
3295 + } else if (size <= 32 * 1024) {
3297 + } else if (size <= 64 * 1024) {
3299 + } else if (size <= 128 * 1024) {
3300 + size = 128 * 1024;
3301 + } else if (size <= 256 * 1024) {
3302 + size = 256 * 1024;
3303 + } else if (size <= 512 * 1024) {
3304 + size = 512 * 1024;
3305 + } else if (size <= 1024 * 1024) {
3306 + size = 1024 * 1024;
3307 + } else if (NRL_CHECK_SIZE(size, 4 * 1024 * 1024, max, bsbits)) {
3308 + start = ac->ac_o_ex.fe_logical << bsbits;
3309 + start = (start / (1024 * 1024)) * (1024 * 1024);
3310 + size = 1024 * 1024;
3311 + } else if (NRL_CHECK_SIZE(size, 8 * 1024 * 1024, max, bsbits)) {
3312 + start = ac->ac_o_ex.fe_logical << bsbits;
3313 + start = (start / (4 * (1024 * 1024))) * 4 * (1024 * 1024);
3314 + size = 4 * 1024 * 1024;
3315 + } else if (NRL_CHECK_SIZE(ac->ac_o_ex.fe_len, (8<<20) >> bsbits, max, bsbits)) {
3316 + start = ac->ac_o_ex.fe_logical;
3317 + start = start << bsbits;
3318 + start = (start / (8 * (1024 * 1024))) * 8 * (1024 * 1024);
3319 + size = 8 * 1024 * 1024;
3321 + start = ac->ac_o_ex.fe_logical;
3322 + start = start << bsbits;
3323 + size = ac->ac_o_ex.fe_len << bsbits;
3325 + orig_size = size = size >> bsbits;
3326 + orig_start = start = start >> bsbits;
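
/*
 * Illustrative userspace sketch (not part of the patch): the ladder
 * above rounds a predicted file size up to a preallocation-friendly
 * chunk.  The small-file branches are elided in the hunk, so the
 * rounding for them is an assumption inferred from the visible pattern.
 */
#include <stdio.h>

static long round_prealloc(long size)		/* size in bytes */
{
	if (size <= 16 * 1024)  return 16 * 1024;
	if (size <= 32 * 1024)  return 32 * 1024;
	if (size <= 64 * 1024)  return 64 * 1024;
	if (size <= 128 * 1024) return 128 * 1024;
	if (size <= 256 * 1024) return 256 * 1024;
	if (size <= 512 * 1024) return 512 * 1024;
	if (size <= 1024 * 1024) return 1024 * 1024;
	return -1;	/* larger sizes take the MB-aligned branches */
}

int main(void)
{
	/* prints "16384 131072 1048576" */
	printf("%ld %ld %ld\n", round_prealloc(9000),
	       round_prealloc(100 * 1024), round_prealloc(600 * 1024));
	return 0;
}
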
3328 + /* don't cover already allocated blocks in selected range */
3329 + if (ar->pleft && start <= ar->lleft) {
3330 + size -= ar->lleft + 1 - start;
3331 + start = ar->lleft + 1;
3333 + if (ar->pright && start + size - 1 >= ar->lright)
3334 + size -= start + size - ar->lright;
3336 + end = start + size;
3338 + /* check we don't cross already preallocated blocks */
3340 + list_for_each_rcu(cur, &ei->i_prealloc_list) {
3341 + struct ext3_prealloc_space *pa;
3342 + unsigned long pa_end;
3344 + pa = list_entry(cur, struct ext3_prealloc_space, pa_inode_list);
3346 + if (pa->pa_deleted)
3348 + spin_lock(&pa->pa_lock);
3349 + if (pa->pa_deleted) {
3350 + spin_unlock(&pa->pa_lock);
3354 + pa_end = pa->pa_lstart + pa->pa_len;
3356 + /* PA must not overlap original request */
3357 + BUG_ON(!(ac->ac_o_ex.fe_logical >= pa_end ||
3358 + ac->ac_o_ex.fe_logical < pa->pa_lstart));
3360 + /* skip any PA the normalized request doesn't overlap with */
3361 + if (pa->pa_lstart >= end) {
3362 + spin_unlock(&pa->pa_lock);
3365 + if (pa_end <= start) {
3366 + spin_unlock(&pa->pa_lock);
3369 + BUG_ON(pa->pa_lstart <= start && pa_end >= end);
3371 + if (pa_end <= ac->ac_o_ex.fe_logical) {
3372 + BUG_ON(pa_end < start);
3376 + if (pa->pa_lstart > ac->ac_o_ex.fe_logical) {
3377 + BUG_ON(pa->pa_lstart > end);
3378 + end = pa->pa_lstart;
3380 + spin_unlock(&pa->pa_lock);
3382 + rcu_read_unlock();
3383 + size = end - start;
3385 + /* XXX: extra loop to check we really don't overlap preallocations */
3387 + list_for_each_rcu(cur, &ei->i_prealloc_list) {
3388 + struct ext3_prealloc_space *pa;
3389 + unsigned long pa_end;
3390 + pa = list_entry(cur, struct ext3_prealloc_space, pa_inode_list);
3391 + spin_lock(&pa->pa_lock);
3392 + if (pa->pa_deleted == 0) {
3393 + pa_end = pa->pa_lstart + pa->pa_len;
3394 + BUG_ON(!(start >= pa_end || end <= pa->pa_lstart));
3396 + spin_unlock(&pa->pa_lock);
3398 + rcu_read_unlock();
3400 + if (start + size <= ac->ac_o_ex.fe_logical &&
3401 + start > ac->ac_o_ex.fe_logical) {
3402 + printk("start %lu, size %lu, fe_logical %lu\n",
3403 + (unsigned long) start, (unsigned long) size,
3404 + (unsigned long) ac->ac_o_ex.fe_logical);
3406 + BUG_ON(start + size <= ac->ac_o_ex.fe_logical &&
3407 + start > ac->ac_o_ex.fe_logical);
3409 + /* now prepare goal request */
3410 + BUG_ON(size <= 0 || size >= EXT3_BLOCKS_PER_GROUP(ac->ac_sb));
3411 + if (size < ac->ac_o_ex.fe_len) {
3412 + /* XXX: don't normalize tails? */
3415 + /* XXX: is it better to align blocks with respect to logical
3416 + * placement, or to satisfy a big request as-is */
3417 + ac->ac_g_ex.fe_logical = start;
3418 + ac->ac_g_ex.fe_len = size;
3420 + mb_debug("goal: %u(was %u) blocks at %u\n", (unsigned) size,
3421 + (unsigned) orig_size, (unsigned) start);
3424 +void ext3_mb_collect_stats(struct ext3_allocation_context *ac)
3426 + struct ext3_sb_info *sbi = EXT3_SB(ac->ac_sb);
3428 + if (sbi->s_mb_stats && ac->ac_g_ex.fe_len > 1) {
3429 + atomic_inc(&sbi->s_bal_reqs);
3430 + atomic_add(ac->ac_b_ex.fe_len, &sbi->s_bal_allocated);
3431 + if (ac->ac_o_ex.fe_len >= ac->ac_g_ex.fe_len)
3432 + atomic_inc(&sbi->s_bal_success);
3433 + atomic_add(ac->ac_found, &sbi->s_bal_ex_scanned);
3434 + if (ac->ac_g_ex.fe_start == ac->ac_b_ex.fe_start &&
3435 + ac->ac_g_ex.fe_group == ac->ac_b_ex.fe_group)
3436 + atomic_inc(&sbi->s_bal_goals);
3437 + if (ac->ac_found > sbi->s_mb_max_to_scan)
3438 + atomic_inc(&sbi->s_bal_breaks);
3441 + ext3_mb_store_history(ac);
3445 + * use blocks preallocated to inode
3447 +void ext3_mb_use_inode_pa(struct ext3_allocation_context *ac,
3448 + struct ext3_prealloc_space *pa)
3450 + unsigned long start, len;
3452 + /* found preallocated blocks, use them */
3453 + start = pa->pa_pstart + (ac->ac_o_ex.fe_logical - pa->pa_lstart);
3454 + len = min(pa->pa_pstart + pa->pa_len, start + ac->ac_o_ex.fe_len);
3455 + len = len - start;
3456 + ext3_get_group_no_and_offset(ac->ac_sb, start, &ac->ac_b_ex.fe_group,
3457 + &ac->ac_b_ex.fe_start);
3458 + ac->ac_b_ex.fe_len = len;
3459 + ac->ac_status = AC_STATUS_FOUND;
3462 + BUG_ON(start < pa->pa_pstart);
3463 + BUG_ON(start + len > pa->pa_pstart + pa->pa_len);
3464 + BUG_ON(pa->pa_free < len);
3465 + pa->pa_free -= len;
3467 + mb_debug("use %lu/%lu from inode pa %p\n", start, len, pa);
3471 + * use blocks preallocated to locality group
3473 +void ext3_mb_use_group_pa(struct ext3_allocation_context *ac,
3474 + struct ext3_prealloc_space *pa)
3476 + unsigned len = ac->ac_o_ex.fe_len;
3478 + ext3_get_group_no_and_offset(ac->ac_sb, pa->pa_pstart,
3479 + &ac->ac_b_ex.fe_group,
3480 + &ac->ac_b_ex.fe_start);
3481 + ac->ac_b_ex.fe_len = len;
3482 + ac->ac_status = AC_STATUS_FOUND;
3485 + /* we don't correct pa_pstart or pa_len here to avoid a
3486 + * possible race when the group is being loaded concurrently;
3487 + * instead we correct the pa later, after blocks are marked
3488 + * in the on-disk bitmap -- see ext3_mb_release_context() */
3489 + mb_debug("use %lu/%lu from group pa %p\n", pa->pa_lstart-len, len, pa);
3493 + * search goal blocks in preallocated space
3495 +int ext3_mb_use_preallocated(struct ext3_allocation_context *ac)
3497 + struct ext3_inode_info *ei = EXT3_I(ac->ac_inode);
3498 + struct ext3_locality_group *lg;
3499 + struct ext3_prealloc_space *pa;
3500 + struct list_head *cur;
3502 + /* only data can be preallocated */
3503 + if (!(ac->ac_flags & EXT3_MB_HINT_DATA))
3506 + /* first, try per-file preallocation */
3508 + list_for_each_rcu(cur, &ei->i_prealloc_list) {
3509 + pa = list_entry(cur, struct ext3_prealloc_space, pa_inode_list);
3511 + /* none of the fields in this condition change,
3512 + * so we can skip locking for them */
3513 + if (ac->ac_o_ex.fe_logical < pa->pa_lstart ||
3514 + ac->ac_o_ex.fe_logical >= pa->pa_lstart + pa->pa_len)
3517 + /* found preallocated blocks, use them */
3518 + spin_lock(&pa->pa_lock);
3519 + if (pa->pa_deleted == 0 && pa->pa_free) {
3520 + atomic_inc(&pa->pa_count);
3521 + ext3_mb_use_inode_pa(ac, pa);
3522 + spin_unlock(&pa->pa_lock);
3523 + ac->ac_criteria = 10;
3524 + rcu_read_unlock();
3527 + spin_unlock(&pa->pa_lock);
3529 + rcu_read_unlock();
3531 + /* can we use group allocation? */
3532 + if (!(ac->ac_flags & EXT3_MB_HINT_GROUP_ALLOC))
3535 + /* inode may have no locality group for some reason */
3541 + list_for_each_rcu(cur, &lg->lg_prealloc_list) {
3542 + pa = list_entry(cur, struct ext3_prealloc_space, pa_inode_list);
3543 + spin_lock(&pa->pa_lock);
3544 + if (pa->pa_deleted == 0 && pa->pa_free >= ac->ac_o_ex.fe_len) {
3545 + atomic_inc(&pa->pa_count);
3546 + ext3_mb_use_group_pa(ac, pa);
3547 + spin_unlock(&pa->pa_lock);
3548 + ac->ac_criteria = 20;
3549 + rcu_read_unlock();
3552 + spin_unlock(&pa->pa_lock);
3554 + rcu_read_unlock();
3560 + * the function goes through all preallocations in this group and marks them
3561 + * used in the in-core bitmap. the buddy must be generated from this bitmap
3563 +void ext3_mb_generate_from_pa(struct super_block *sb, void *bitmap, int group)
3565 + struct ext3_group_info *grp = EXT3_GROUP_INFO(sb, group);
3566 + struct ext3_prealloc_space *pa;
3567 + struct list_head *cur;
3568 + unsigned long groupnr;
3569 + unsigned long start;
3570 + int preallocated = 0, count = 0, len;
3572 + /* every form of preallocation discard first loads the group,
3573 + * so the only competing code is preallocation use.
3574 + * we don't need any locking here.
3575 + * note that we do NOT ignore preallocations with pa_deleted set;
3576 + * otherwise we could leave used blocks available for
3577 + * allocation in the buddy when a concurrent ext3_mb_put_pa()
3578 + * is dropping the preallocation
3580 + list_for_each_rcu(cur, &grp->bb_prealloc_list) {
3581 + pa = list_entry(cur, struct ext3_prealloc_space, pa_group_list);
3582 + spin_lock(&pa->pa_lock);
3583 + ext3_get_group_no_and_offset(sb, pa->pa_pstart, &groupnr, &start);
3585 + spin_unlock(&pa->pa_lock);
3586 + if (unlikely(len == 0))
3588 + BUG_ON(groupnr != group && len != 0);
3589 + mb_set_bits(sb_bgl_lock(EXT3_SB(sb), group), bitmap, start, len);
3590 + preallocated += len;
3593 + mb_debug("prellocated %u for group %u\n", preallocated, group);
3596 +#if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,5)
3597 +static void ext3_mb_pa_callback(struct rcu_head *head)
3599 + struct ext3_prealloc_space *pa;
3600 + pa = container_of(head, struct ext3_prealloc_space, u.pa_rcu);
3601 + kmem_cache_free(ext3_pspace_cachep, pa);
3603 +#define mb_call_rcu(__pa) call_rcu(&(__pa)->u.pa_rcu, ext3_mb_pa_callback)
3605 +static void ext3_mb_pa_callback(void *pa)
3607 + kmem_cache_free(ext3_pspace_cachep, pa);
3609 +#define mb_call_rcu(__pa) call_rcu(&(__pa)->u.pa_rcu, ext3_mb_pa_callback, pa)
3613 + * drops a reference to preallocated space descriptor
3614 + * if this was the last reference and the space is consumed
3616 +void ext3_mb_put_pa(struct ext3_allocation_context *ac,
3617 + struct super_block *sb, struct ext3_prealloc_space *pa)
3619 + unsigned long grp;
3621 + if (!atomic_dec_and_test(&pa->pa_count) || pa->pa_free != 0)
3624 + /* in this short window concurrent discard can set pa_deleted */
3625 + spin_lock(&pa->pa_lock);
3626 + if (pa->pa_deleted == 0) {
3627 + spin_unlock(&pa->pa_lock);
3631 + pa->pa_deleted = 1;
3632 + spin_unlock(&pa->pa_lock);
3634 + /* the -1 protects against crossing an allocation group boundary */
3635 + ext3_get_group_no_and_offset(sb, pa->pa_pstart - 1, &grp, NULL);
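
/*
 * Illustrative userspace sketch (not part of the patch): why
 * pa_pstart - 1 is used above.  Group number is (block -
 * first_data_block) / blocks_per_group, so a fully consumed PA whose
 * pstart has advanced onto a group boundary would otherwise resolve to
 * the next group.
 */
#include <stdio.h>

int main(void)
{
	unsigned long first = 1, per_group = 32768;
	unsigned long pstart = 1 + 2 * 32768;	/* first block of group 2 */

	printf("group of pstart:     %lu\n", (pstart - first) / per_group);
	printf("group of pstart - 1: %lu\n", (pstart - 1 - first) / per_group);
	return 0;	/* prints 2, then 1 */
}
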
3640 + * P1 (buddy init) P2 (regular allocation)
3641 + * find block B in PA
3642 + * copy on-disk bitmap to buddy
3643 + * mark B in on-disk bitmap
3644 + * drop PA from group
3645 + * mark all PAs in buddy
3647 + * thus, P1 initializes buddy with B available. to prevent this
3648 + * we make "copy" and "mark all PAs" atomic and serialize "drop PA"
3649 + * against that pair
3651 + ext3_lock_group(sb, grp);
3652 + list_del_rcu(&pa->pa_group_list);
3653 + ext3_unlock_group(sb, grp);
3655 + spin_lock(pa->pa_obj_lock);
3656 + list_del_rcu(&pa->pa_inode_list);
3657 + spin_unlock(pa->pa_obj_lock);
3663 + * creates new preallocated space for given inode
3665 +int ext3_mb_new_inode_pa(struct ext3_allocation_context *ac)
3667 + struct super_block *sb = ac->ac_sb;
3668 + struct ext3_prealloc_space *pa;
3669 + struct ext3_group_info *grp;
3670 + struct ext3_inode_info *ei;
3672 + /* preallocate only when the found space is larger than requested */
3673 + BUG_ON(ac->ac_o_ex.fe_len >= ac->ac_b_ex.fe_len);
3674 + BUG_ON(ac->ac_status != AC_STATUS_FOUND);
3675 + BUG_ON(!S_ISREG(ac->ac_inode->i_mode));
3677 + pa = kmem_cache_alloc(ext3_pspace_cachep, GFP_NOFS);
3681 + if (ac->ac_b_ex.fe_len < ac->ac_g_ex.fe_len) {
3682 + int winl, wins, win, offs;
3684 + /* we can't allocate as much as the normalizer wants,
3685 + * so the found space must get a proper lstart
3686 + * to cover the original request */
3687 + BUG_ON(ac->ac_g_ex.fe_logical > ac->ac_o_ex.fe_logical);
3688 + BUG_ON(ac->ac_g_ex.fe_len < ac->ac_o_ex.fe_len);
3690 + /* we're limited by the original request in that the
3691 + * logical block must be covered anyway;
3692 + * winl is the window we can move our chunk within */
3693 + winl = ac->ac_o_ex.fe_logical - ac->ac_g_ex.fe_logical;
3695 + /* also, we should cover the whole original request */
3696 + wins = ac->ac_b_ex.fe_len - ac->ac_o_ex.fe_len;
3698 + /* the smaller one defines the real window */
3699 + win = min(winl, wins);
3701 + offs = ac->ac_o_ex.fe_logical % ac->ac_b_ex.fe_len;
3702 + if (offs && offs < win)
3705 + ac->ac_b_ex.fe_logical = ac->ac_o_ex.fe_logical - win;
3706 + BUG_ON(ac->ac_o_ex.fe_logical < ac->ac_b_ex.fe_logical);
3707 + BUG_ON(ac->ac_o_ex.fe_len > ac->ac_b_ex.fe_len);
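
/*
 * Illustrative userspace sketch (not part of the patch): the
 * winl/wins/win logic above.  The found chunk is smaller than the
 * normalized goal, so its logical start is pulled back just far enough
 * to still cover the original request, preferring chunk-size alignment.
 */
#include <stdio.h>

#define MIN(a, b) ((a) < (b) ? (a) : (b))

int main(void)
{
	unsigned long g_logical = 32, o_logical = 50;	/* goal vs. original */
	unsigned long o_len = 8, b_len = 16;		/* original vs. found */

	unsigned long winl = o_logical - g_logical;	/* room to the left */
	unsigned long wins = b_len - o_len;	/* keeps the request covered */
	unsigned long win = MIN(winl, wins);
	unsigned long offs = o_logical % b_len;

	if (offs && offs < win)
		win = offs;			/* align to the chunk size */

	/* prints "fe_logical = 48": [48,64) covers the request [50,58) */
	printf("fe_logical = %lu\n", o_logical - win);
	return 0;
}
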
3710 + /* preallocation can change ac_b_ex, thus we store actually
3711 + * allocated blocks for history */
3712 + ac->ac_f_ex = ac->ac_b_ex;
3714 + pa->pa_lstart = ac->ac_b_ex.fe_logical;
3715 + pa->pa_pstart = ext3_grp_offs_to_block(sb, &ac->ac_b_ex);
3716 + pa->pa_len = ac->ac_b_ex.fe_len;
3717 + pa->pa_free = pa->pa_len;
3718 + atomic_set(&pa->pa_count, 1);
3719 + spin_lock_init(&pa->pa_lock);
3720 + pa->pa_deleted = 0;
3721 + pa->pa_linear = 0;
3723 + mb_debug("new inode pa %p: %lu/%lu for %lu\n", pa,
3724 + pa->pa_pstart, pa->pa_len, pa->pa_lstart);
3726 + ext3_mb_use_inode_pa(ac, pa);
3727 + atomic_add(pa->pa_free, &EXT3_SB(sb)->s_mb_preallocated);
3729 + ei = EXT3_I(ac->ac_inode);
3730 + grp = EXT3_GROUP_INFO(sb, ac->ac_b_ex.fe_group);
3732 + pa->pa_obj_lock = &ei->i_prealloc_lock;
3733 + pa->pa_inode = ac->ac_inode;
3735 + ext3_lock_group(sb, ac->ac_b_ex.fe_group);
3736 + list_add_rcu(&pa->pa_group_list, &grp->bb_prealloc_list);
3737 + ext3_unlock_group(sb, ac->ac_b_ex.fe_group);
3739 + spin_lock(pa->pa_obj_lock);
3740 + list_add_rcu(&pa->pa_inode_list, &ei->i_prealloc_list);
3741 + spin_unlock(pa->pa_obj_lock);
3747 + * creates new preallocated space for the locality group the inode belongs to
3749 +int ext3_mb_new_group_pa(struct ext3_allocation_context *ac)
3751 + struct super_block *sb = ac->ac_sb;
3752 + struct ext3_locality_group *lg;
3753 + struct ext3_prealloc_space *pa;
3754 + struct ext3_group_info *grp;
3756 + /* preallocate only when the found space is larger than requested */
3757 + BUG_ON(ac->ac_o_ex.fe_len >= ac->ac_b_ex.fe_len);
3758 + BUG_ON(ac->ac_status != AC_STATUS_FOUND);
3759 + BUG_ON(!S_ISREG(ac->ac_inode->i_mode));
3761 + BUG_ON(ext3_pspace_cachep == NULL);
3762 + pa = kmem_cache_alloc(ext3_pspace_cachep, GFP_NOFS);
3766 + /* preallocation can change ac_b_ex, thus we store actually
3767 + * allocated blocks for history */
3768 + ac->ac_f_ex = ac->ac_b_ex;
3770 + pa->pa_pstart = ext3_grp_offs_to_block(sb, &ac->ac_b_ex);
3771 + pa->pa_lstart = pa->pa_pstart;
3772 + pa->pa_len = ac->ac_b_ex.fe_len;
3773 + pa->pa_free = pa->pa_len;
3774 + atomic_set(&pa->pa_count, 1);
3775 + spin_lock_init(&pa->pa_lock);
3776 + pa->pa_deleted = 0;
3777 + pa->pa_linear = 1;
3779 + mb_debug("new group pa %p: %lu/%lu for %lu\n", pa,
3780 + pa->pa_pstart, pa->pa_len, pa->pa_lstart);
3782 + ext3_mb_use_group_pa(ac, pa);
3783 + atomic_add(pa->pa_free, &EXT3_SB(sb)->s_mb_preallocated);
3785 + grp = EXT3_GROUP_INFO(sb, ac->ac_b_ex.fe_group);
3787 + BUG_ON(lg == NULL);
3789 + pa->pa_obj_lock = &lg->lg_prealloc_lock;
3790 + pa->pa_inode = NULL;
3792 + ext3_lock_group(sb, ac->ac_b_ex.fe_group);
3793 + list_add_rcu(&pa->pa_group_list, &grp->bb_prealloc_list);
3794 + ext3_unlock_group(sb, ac->ac_b_ex.fe_group);
3796 + spin_lock(pa->pa_obj_lock);
3797 + list_add_tail_rcu(&pa->pa_inode_list, &lg->lg_prealloc_list);
3798 + spin_unlock(pa->pa_obj_lock);
3803 +int ext3_mb_new_preallocation(struct ext3_allocation_context *ac)
3807 + if (ac->ac_flags & EXT3_MB_HINT_GROUP_ALLOC)
3808 + err = ext3_mb_new_group_pa(ac);
3810 + err = ext3_mb_new_inode_pa(ac);
3815 + * finds all unused blocks in the on-disk bitmap and frees them in the
3816 + * in-core bitmap and buddy.
3817 + * @pa must be unlinked from inode and group lists, so that
3818 + * nobody else can find/use it.
3819 + * the caller MUST hold group/inode locks.
3820 + * TODO: optimize the case when there are no in-core structures yet
3822 +int ext3_mb_release_inode_pa(struct ext3_buddy *e3b,
3823 + struct buffer_head *bitmap_bh,
3824 + struct ext3_prealloc_space *pa)
3826 + struct ext3_allocation_context ac;
3827 + struct super_block *sb = e3b->bd_sb;
3828 + struct ext3_sb_info *sbi = EXT3_SB(sb);
3829 + unsigned long bit, end, next, group;
3831 + int err = 0, free = 0;
3833 + BUG_ON(pa->pa_deleted == 0);
3834 + ext3_get_group_no_and_offset(sb, pa->pa_pstart, &group, &bit);
3835 + BUG_ON(group != e3b->bd_group && pa->pa_len != 0);
3836 + end = bit + pa->pa_len;
3839 + ac.ac_inode = pa->pa_inode;
3840 + ac.ac_op = EXT3_MB_HISTORY_DISCARD;
3842 + while (bit < end) {
3843 + bit = mb_find_next_zero_bit(bitmap_bh->b_data, end, bit);
3846 + next = mb_find_next_bit(bitmap_bh->b_data, end, bit);
3849 + start = group * EXT3_BLOCKS_PER_GROUP(sb) + bit +
3850 + le32_to_cpu(sbi->s_es->s_first_data_block);
3851 + mb_debug(" free preallocated %u/%u in group %u\n",
3852 + (unsigned) start, (unsigned) next - bit,
3853 + (unsigned) group);
3854 + free += next - bit;
3856 + ac.ac_b_ex.fe_group = group;
3857 + ac.ac_b_ex.fe_start = bit;
3858 + ac.ac_b_ex.fe_len = next - bit;
3859 + ac.ac_b_ex.fe_logical = 0;
3860 + ext3_mb_store_history(&ac);
3862 + mb_free_blocks(pa->pa_inode, e3b, bit, next - bit);
3865 + if (free != pa->pa_free) {
3866 + printk("pa %p: logic %lu, phys. %lu, len %lu\n",
3867 + pa, (unsigned long) pa->pa_lstart,
3868 + (unsigned long) pa->pa_pstart,
3869 + (unsigned long) pa->pa_len);
3870 + printk("free %u, pa_free %u\n", free, pa->pa_free);
3872 + BUG_ON(free != pa->pa_free);
3873 + atomic_add(free, &sbi->s_mb_discarded);
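
/*
 * Illustrative userspace sketch (not part of the patch): the release
 * loop above walks the on-disk bitmap and frees each run of still-zero
 * (unused) bits inside the PA.  A byte array stands in for the kernel
 * bit helpers mb_find_next_zero_bit()/mb_find_next_bit().
 */
#include <stdio.h>

static int next_val(const char *bm, int end, int bit, int val)
{
	while (bit < end && bm[bit] != val)
		bit++;
	return bit;
}

int main(void)
{
	/* 1 = allocated, 0 = unused preallocated block */
	const char bm[16] = { 1, 0, 0, 1, 1, 0, 0, 0, 1, 1, 1, 0, 1, 1, 1, 1 };
	int bit = 0, end = 16, freed = 0;

	while (bit < end) {
		bit = next_val(bm, end, bit, 0);	/* next zero bit */
		if (bit >= end)
			break;
		int next = next_val(bm, end, bit, 1);	/* end of the run */
		printf("free run [%d, %d)\n", bit, next);
		freed += next - bit;
		bit = next;
	}
	printf("freed %d blocks\n", freed);	/* runs [1,3) [5,8) [11,12): 6 */
	return 0;
}
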
3878 +int ext3_mb_release_group_pa(struct ext3_buddy *e3b,
3879 + struct ext3_prealloc_space *pa)
3881 + struct ext3_allocation_context ac;
3882 + struct super_block *sb = e3b->bd_sb;
3883 + unsigned long bit, group;
3885 + ac.ac_op = EXT3_MB_HISTORY_DISCARD;
3887 + BUG_ON(pa->pa_deleted == 0);
3888 + ext3_get_group_no_and_offset(sb, pa->pa_pstart, &group, &bit);
3889 + BUG_ON(group != e3b->bd_group && pa->pa_len != 0);
3890 + mb_free_blocks(pa->pa_inode, e3b, bit, pa->pa_len);
3891 + atomic_add(pa->pa_len, &EXT3_SB(sb)->s_mb_discarded);
3894 + ac.ac_inode = NULL;
3895 + ac.ac_b_ex.fe_group = group;
3896 + ac.ac_b_ex.fe_start = bit;
3897 + ac.ac_b_ex.fe_len = pa->pa_len;
3898 + ac.ac_b_ex.fe_logical = 0;
3899 + ext3_mb_store_history(&ac);
3905 + * releases all preallocations in given group
3907 + * first, we need to decide discard policy:
3908 + * - when do we discard
3910 + * - how many do we discard
3911 + * 1) how many requested
3913 +int ext3_mb_discard_group_preallocations(struct super_block *sb,
3914 + int group, int needed)
3916 + struct ext3_group_info *grp = EXT3_GROUP_INFO(sb, group);
3917 + struct buffer_head *bitmap_bh = NULL;
3918 + struct ext3_prealloc_space *pa, *tmp;
3919 + struct list_head list;
3920 + struct ext3_buddy e3b;
3921 + int err, busy, free = 0;
3923 + mb_debug("discard preallocation for group %lu\n", group);
3925 + if (list_empty(&grp->bb_prealloc_list))
3928 + bitmap_bh = read_block_bitmap(sb, group);
3929 + if (bitmap_bh == NULL) {
3930 + /* error handling here */
3931 + ext3_mb_release_desc(&e3b);
3932 + BUG_ON(bitmap_bh == NULL);
3935 + err = ext3_mb_load_buddy(sb, group, &e3b);
3936 + BUG_ON(err != 0); /* error handling here */
3939 + needed = EXT3_BLOCKS_PER_GROUP(sb) + 1;
3941 + grp = EXT3_GROUP_INFO(sb, group);
3942 + INIT_LIST_HEAD(&list);
3946 + ext3_lock_group(sb, group);
3947 + list_for_each_entry_safe(pa, tmp, &grp->bb_prealloc_list, pa_group_list) {
3948 + spin_lock(&pa->pa_lock);
3949 + if (atomic_read(&pa->pa_count)) {
3950 + spin_unlock(&pa->pa_lock);
3954 + if (pa->pa_deleted) {
3955 + spin_unlock(&pa->pa_lock);
3959 + /* seems this one can be freed ... */
3960 + pa->pa_deleted = 1;
3962 + /* we can trust pa_free ... */
3963 + free += pa->pa_free;
3965 + spin_unlock(&pa->pa_lock);
3967 + list_del_rcu(&pa->pa_group_list);
3968 + list_add(&pa->u.pa_tmp_list, &list);
3971 + /* if we still need more blocks and some PAs were used, try again */
3972 + if (free < needed && busy) {
3973 + ext3_unlock_group(sb, group);
3977 + /* found anything to free? */
3978 + if (list_empty(&list)) {
3979 + BUG_ON(free != 0);
3983 + /* now free all selected PAs */
3984 + list_for_each_entry_safe(pa, tmp, &list, u.pa_tmp_list) {
3986 + /* remove from object (inode or locality group) */
3987 + spin_lock(pa->pa_obj_lock);
3988 + list_del_rcu(&pa->pa_inode_list);
3989 + spin_unlock(pa->pa_obj_lock);
3991 + if (pa->pa_linear)
3992 + ext3_mb_release_group_pa(&e3b, pa);
3994 + ext3_mb_release_inode_pa(&e3b, bitmap_bh, pa);
3996 + list_del(&pa->u.pa_tmp_list);
4001 + ext3_unlock_group(sb, group);
4002 + ext3_mb_release_desc(&e3b);
4003 + brelse(bitmap_bh);
4008 + * releases all unused preallocated blocks for the given inode
4010 +void ext3_mb_discard_inode_preallocations(struct inode *inode)
4012 + struct ext3_inode_info *ei = EXT3_I(inode);
4013 + struct super_block *sb = inode->i_sb;
4014 + struct buffer_head *bitmap_bh = NULL;
4015 + struct ext3_prealloc_space *pa, *tmp;
4016 + unsigned long group = 0;
4017 + struct list_head list;
4018 + struct ext3_buddy e3b;
4021 + if (!test_opt(sb, MBALLOC) || !S_ISREG(inode->i_mode)) {
4022 + /*BUG_ON(!list_empty(&ei->i_prealloc_list));*/
4026 + mb_debug("discard preallocation for inode %lu\n", inode->i_ino);
4028 + INIT_LIST_HEAD(&list);
4031 + /* first, collect all pa's in the inode */
4032 + spin_lock(&ei->i_prealloc_lock);
4033 + while (!list_empty(&ei->i_prealloc_list)) {
4034 + pa = list_entry(ei->i_prealloc_list.next,
4035 + struct ext3_prealloc_space, pa_inode_list);
4036 + BUG_ON(pa->pa_obj_lock != &ei->i_prealloc_lock);
4037 + spin_lock(&pa->pa_lock);
4038 + if (atomic_read(&pa->pa_count)) {
4039 + /* this shouldn't happen often - nobody should
4040 + * use preallocation while we're discarding it */
4041 + spin_unlock(&pa->pa_lock);
4042 + spin_unlock(&ei->i_prealloc_lock);
4043 + current->state = TASK_UNINTERRUPTIBLE;
4044 + schedule_timeout(HZ);
4048 + if (pa->pa_deleted == 0) {
4049 + pa->pa_deleted = 1;
4050 + spin_unlock(&pa->pa_lock);
4051 + list_del_rcu(&pa->pa_inode_list);
4052 + list_add(&pa->u.pa_tmp_list, &list);
4056 + /* someone is deleting pa right now */
4057 + spin_unlock(&pa->pa_lock);
4058 + spin_unlock(&ei->i_prealloc_lock);
4060 + /* we have to wait here because pa_deleted
4061 + * doesn't mean the pa is already unlinked from
4062 + * the list. as we might be called from
4063 + * ->clear_inode(), the inode will get freed
4064 + * and a concurrent thread which is unlinking
4065 + * the pa from the inode's list may access already
4066 + * freed memory -- bad-bad-bad */
4068 + /* XXX: if this happens too often, we can
4069 + * add a flag to force wait only in case
4070 + * of ->clear_inode(), but not in case of
4071 + * regular truncate */
4072 + current->state = TASK_UNINTERRUPTIBLE;
4073 + schedule_timeout(HZ);
4076 + spin_unlock(&ei->i_prealloc_lock);
4078 + list_for_each_entry_safe(pa, tmp, &list, u.pa_tmp_list) {
4079 + BUG_ON(pa->pa_linear != 0);
4080 + ext3_get_group_no_and_offset(sb, pa->pa_pstart, &group, NULL);
4082 + err = ext3_mb_load_buddy(sb, group, &e3b);
4083 + BUG_ON(err != 0); /* error handling here */
4085 + bitmap_bh = read_block_bitmap(sb, group);
4087 + ext3_lock_group(sb, group);
4088 + list_del_rcu(&pa->pa_group_list);
4090 + /* bitmap_bh can be NULL due to an I/O error; at worst
4091 + * we leave some free blocks unavailable --
4092 + * no need to go read-only for that */
4093 + if (bitmap_bh != NULL)
4094 + ext3_mb_release_inode_pa(&e3b, bitmap_bh, pa);
4095 + ext3_unlock_group(sb, group);
4097 + ext3_mb_release_desc(&e3b);
4098 + brelse(bitmap_bh);
4100 + list_del(&pa->u.pa_tmp_list);
4106 + * finds all preallocated spaces and returns blocks being freed to them;
4107 + * if a preallocated space becomes full (no block from the space is used)
4108 + * then the function frees the space in the buddy
4109 + * XXX: at the moment, truncate (which is the only way to free blocks)
4110 + * discards all preallocations
4112 +void ext3_mb_return_to_preallocation(struct inode *inode, struct ext3_buddy *e3b,
4113 + sector_t block, int count)
4115 + BUG_ON(!list_empty(&EXT3_I(inode)->i_prealloc_list));
4118 +void ext3_mb_show_ac(struct ext3_allocation_context *ac)
4121 + struct super_block *sb = ac->ac_sb;
4124 + printk(KERN_ERR "EXT3-fs: can't allocate: status %d flags %d\n",
4125 + ac->ac_status, ac->ac_flags);
4126 + printk(KERN_ERR "EXT3-fs: orig %lu/%lu/%lu@%lu, goal %lu/%lu/%lu@%lu, "
4127 + "best %lu/%lu/%lu@%lu cr %d\n",
4128 + ac->ac_o_ex.fe_group, ac->ac_o_ex.fe_start,
4129 + ac->ac_o_ex.fe_len, ac->ac_o_ex.fe_logical,
4130 + ac->ac_g_ex.fe_group, ac->ac_g_ex.fe_start,
4131 + ac->ac_g_ex.fe_len, ac->ac_g_ex.fe_logical,
4132 + ac->ac_b_ex.fe_group, ac->ac_b_ex.fe_start,
4133 + ac->ac_b_ex.fe_len, ac->ac_b_ex.fe_logical,
4135 + printk(KERN_ERR "EXT3-fs: %lu scanned, %d found\n", ac->ac_ex_scanned,
4137 + printk("EXT3-fs: groups: ");
4138 + for (i = 0; i < EXT3_SB(sb)->s_groups_count; i++) {
4139 + struct ext3_group_info *grp = EXT3_GROUP_INFO(sb, i);
4140 + struct ext3_prealloc_space *pa;
4141 + unsigned long start;
4142 + struct list_head *cur;
4143 + list_for_each_rcu(cur, &grp->bb_prealloc_list) {
4144 + pa = list_entry(cur, struct ext3_prealloc_space,
4146 + spin_lock(&pa->pa_lock);
4147 + ext3_get_group_no_and_offset(sb, pa->pa_pstart, NULL, &start);
4148 + spin_unlock(&pa->pa_lock);
4149 + printk("PA:%u:%lu:%u ", i, start, pa->pa_len);
4152 + if (grp->bb_free == 0)
4154 + printk("%d: %d/%d ", i, grp->bb_free, grp->bb_fragments);
4161 +void ext3_mb_group_or_file(struct ext3_allocation_context *ac)
4163 + struct ext3_sb_info *sbi = EXT3_SB(ac->ac_sb);
4164 + int bsbits = ac->ac_sb->s_blocksize_bits;
4165 + loff_t size, isize;
4167 + if (!(ac->ac_flags & EXT3_MB_HINT_DATA))
4170 + size = ac->ac_o_ex.fe_logical + ac->ac_o_ex.fe_len;
4171 + isize = i_size_read(ac->ac_inode) >> bsbits;
4175 + /* don't use group allocation for large files */
4176 + if (size >= sbi->s_mb_stream_request)
4179 + if (unlikely(ac->ac_flags & EXT3_MB_HINT_GOAL_ONLY))
4182 + BUG_ON(ac->ac_lg != NULL);
4183 + ac->ac_lg = &sbi->s_locality_groups[smp_processor_id()];
4185 + /* we're going to use group allocation */
4186 + ac->ac_flags |= EXT3_MB_HINT_GROUP_ALLOC;
4188 + /* serialize all allocations in the group */
4189 + down(&ac->ac_lg->lg_sem);
+int ext3_mb_initialize_context(struct ext3_allocation_context *ac,
+			       struct ext3_allocation_request *ar)
+{
+	struct super_block *sb = ar->inode->i_sb;
+	struct ext3_sb_info *sbi = EXT3_SB(sb);
+	struct ext3_super_block *es = sbi->s_es;
+	unsigned long group, len, goal;
+	unsigned long block;
+
+	/* we can't allocate > group size */
+	len = ar->len;
+	if (len >= EXT3_BLOCKS_PER_GROUP(sb) - 10)
+		len = EXT3_BLOCKS_PER_GROUP(sb) - 10;
+
+	/* start searching from the goal */
+	goal = ar->goal;
+	if (goal < le32_to_cpu(es->s_first_data_block) ||
+	    goal >= le32_to_cpu(es->s_blocks_count))
+		goal = le32_to_cpu(es->s_first_data_block);
+	ext3_get_group_no_and_offset(sb, goal, &group, &block);
+
+	/* set up allocation goals */
+	ac->ac_b_ex.fe_logical = ar->logical;
+	ac->ac_b_ex.fe_group = 0;
+	ac->ac_b_ex.fe_start = 0;
+	ac->ac_b_ex.fe_len = 0;
+	ac->ac_status = AC_STATUS_CONTINUE;
+	ac->ac_groups_scanned = 0;
+	ac->ac_ex_scanned = 0;
+	ac->ac_found = 0;
+	ac->ac_sb = sb;
+	ac->ac_inode = ar->inode;
+	ac->ac_o_ex.fe_logical = ar->logical;
+	ac->ac_o_ex.fe_group = group;
+	ac->ac_o_ex.fe_start = block;
+	ac->ac_o_ex.fe_len = len;
+	ac->ac_g_ex.fe_logical = ar->logical;
+	ac->ac_g_ex.fe_group = group;
+	ac->ac_g_ex.fe_start = block;
+	ac->ac_g_ex.fe_len = len;
+	ac->ac_f_ex.fe_len = 0;
+	ac->ac_flags = ar->flags;
+	ac->ac_2order = 0;
+	ac->ac_criteria = 0;
+	ac->ac_pa = NULL;
+	ac->ac_bitmap_page = NULL;
+	ac->ac_buddy_page = NULL;
+	ac->ac_lg = NULL;
+
+	/* we have to define the context: whether we'll work with a file
+	 * or a locality group. this is a policy decision, actually */
+	ext3_mb_group_or_file(ac);
+
+	mb_debug("init ac: %u blocks @ %llu, goal %llu, flags %x, 2^%d, "
+		 "left: %llu/%llu, right %llu/%llu to %swritable\n",
+		 (unsigned) ar->len, (unsigned) ar->logical,
+		 (unsigned) ar->goal, ac->ac_flags, ac->ac_2order,
+		 (unsigned) ar->lleft, (unsigned) ar->pleft,
+		 (unsigned) ar->lright, (unsigned) ar->pright,
+		 atomic_read(&ar->inode->i_writecount) ? "" : "non-");
+	return 0;
+}
+/*
+ * release all resources used in the allocation
+ */
+int ext3_mb_release_context(struct ext3_allocation_context *ac)
+{
+	if (ac->ac_pa) {
+		if (ac->ac_pa->pa_linear) {
+			/* see comment in ext3_mb_use_group_pa() */
+			spin_lock(&ac->ac_pa->pa_lock);
+			ac->ac_pa->pa_pstart += ac->ac_b_ex.fe_len;
+			ac->ac_pa->pa_lstart += ac->ac_b_ex.fe_len;
+			ac->ac_pa->pa_free -= ac->ac_b_ex.fe_len;
+			ac->ac_pa->pa_len -= ac->ac_b_ex.fe_len;
+			spin_unlock(&ac->ac_pa->pa_lock);
+		}
+		ext3_mb_put_pa(ac, ac->ac_sb, ac->ac_pa);
+	}
+	if (ac->ac_bitmap_page)
+		page_cache_release(ac->ac_bitmap_page);
+	if (ac->ac_buddy_page)
+		page_cache_release(ac->ac_buddy_page);
+	if (ac->ac_flags & EXT3_MB_HINT_GROUP_ALLOC)
+		up(&ac->ac_lg->lg_sem);
+	ext3_mb_collect_stats(ac);
+	return 0;
+}
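+/*
+ * The pa_pstart/pa_lstart adjustment above consumes the just-allocated
+ * extent from the front of a linear (locality-group) PA, sliding the
+ * window forward so the next allocation from the same PA continues
+ * right behind it; inode PAs have a fixed logical-to-physical mapping
+ * and need no such adjustment, only the pa_free accounting.
+ */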
+int ext3_mb_discard_preallocations(struct super_block *sb, int needed)
+{
+	int i, ret, freed = 0;
+
+	for (i = 0; i < EXT3_SB(sb)->s_groups_count && needed > 0; i++) {
+		ret = ext3_mb_discard_group_preallocations(sb, i, needed);
+		freed += ret;
+		needed -= ret;
+	}
+
+	return freed;
+}
+/*
+ * Main entry point into mballoc to allocate blocks:
+ * it tries to use preallocation first, then falls back
+ * to the regular allocator
+ */
+unsigned long ext3_mb_new_blocks(handle_t *handle,
+				 struct ext3_allocation_request *ar, int *errp)
+{
+	struct ext3_allocation_context ac;
+	struct ext3_sb_info *sbi;
+	struct super_block *sb;
+	unsigned long block = 0;
+	int freed, inquota;
+
+	sb = ar->inode->i_sb;
+	sbi = EXT3_SB(sb);
+
+	if (!test_opt(sb, MBALLOC)) {
+		static int ext3_mballoc_warning = 0;
+		if (ext3_mballoc_warning++ == 0)
+			printk(KERN_ERR "EXT3-fs: multiblock request with "
+				"mballoc disabled!\n");
+		ar->len = 1;
+		block = ext3_new_block_old(handle, ar->inode, ar->goal, errp);
+		return block;
+	}
+
+	while (ar->len && DQUOT_ALLOC_BLOCK(ar->inode, ar->len)) {
+		ar->flags |= EXT3_MB_HINT_NOPREALLOC;
+		ar->len--;
+	}
+	if (ar->len == 0) {
+		*errp = -EDQUOT;
+		return 0;
+	}
+	inquota = ar->len;
+
+	ext3_mb_poll_new_transaction(sb, handle);
+
+	if ((*errp = ext3_mb_initialize_context(&ac, ar))) {
+		ar->len = 0;
+		goto out;
+	}
+
+	ac.ac_op = EXT3_MB_HISTORY_PREALLOC;
+	if (!ext3_mb_use_preallocated(&ac)) {
+
+		ac.ac_op = EXT3_MB_HISTORY_ALLOC;
+		ext3_mb_normalize_request(&ac, ar);
+
+repeat:
+		/* allocate space in core */
+		ext3_mb_regular_allocator(&ac);
+
+		/* as we've just preallocated more space than the
+		 * user requested originally, we store the allocated
+		 * space in a special descriptor */
+		if (ac.ac_status == AC_STATUS_FOUND &&
+		    ac.ac_o_ex.fe_len < ac.ac_b_ex.fe_len)
+			ext3_mb_new_preallocation(&ac);
+	}
+
+	if (likely(ac.ac_status == AC_STATUS_FOUND)) {
+		ext3_mb_mark_diskspace_used(&ac, handle);
+		*errp = 0;
+		block = ext3_grp_offs_to_block(sb, &ac.ac_b_ex);
+		ar->len = ac.ac_b_ex.fe_len;
+	} else {
+		freed = ext3_mb_discard_preallocations(sb, ac.ac_o_ex.fe_len);
+		if (freed)
+			goto repeat;
+		*errp = -ENOSPC;
+		ac.ac_b_ex.fe_len = 0;
+		ar->len = 0;
+		ext3_mb_show_ac(&ac);
+	}
+
+	ext3_mb_release_context(&ac);
+
+out:
+	if (ar->len < inquota)
+		DQUOT_FREE_BLOCK(ar->inode, inquota - ar->len);
+
+	return block;
+}
+EXPORT_SYMBOL(ext3_mb_new_blocks);
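+/*
+ * A minimal sketch of how a caller is expected to drive this entry
+ * point (field names are from struct ext3_allocation_request; the
+ * particular values are made up for illustration):
+ *
+ *	struct ext3_allocation_request ar;
+ *	int err;
+ *
+ *	memset(&ar, 0, sizeof(ar));
+ *	ar.inode = inode;		// target file
+ *	ar.logical = lblock;		// logical block in that file
+ *	ar.goal = goal;			// preferred physical block
+ *	ar.len = 8;			// want an 8-block extent
+ *	ar.flags = EXT3_MB_HINT_DATA;	// file data, not metadata
+ *	block = ext3_mb_new_blocks(handle, &ar, &err);
+ *
+ * On success, block is the first allocated physical block and ar.len
+ * is updated to the number of blocks actually granted; on failure,
+ * err holds the error (e.g. -ENOSPC or -EDQUOT).
+ */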
+int ext3_new_block(handle_t *handle, struct inode *inode,
+		   unsigned long goal, int *errp)
+{
+	struct ext3_allocation_request ar;
+	unsigned long ret;
+
+	if (!test_opt(inode->i_sb, MBALLOC)) {
+		ret = ext3_new_block_old(handle, inode, goal, errp);
+		return ret;
+	}
+
+	memset(&ar, 0, sizeof(ar));
+	ar.inode = inode;
+	ar.goal = goal;
+	ar.len = 1;
+	ret = ext3_mb_new_blocks(handle, &ar, errp);
+	return ret;
+}
+void ext3_mb_poll_new_transaction(struct super_block *sb, handle_t *handle)
+{
+	struct ext3_sb_info *sbi = EXT3_SB(sb);
+
+	if (sbi->s_last_transaction == handle->h_transaction->t_tid)
+		return;
+
+	/* a new transaction! time to close the last one and free blocks
+	 * from the committed transaction. we know that only one transaction
+	 * can be active at a time, so the previous transaction may still be
+	 * being logged, while the transaction before the previous one is
+	 * known to be logged already. this means that now we may free
+	 * blocks freed in all transactions before the previous one */
+
+	spin_lock(&sbi->s_md_lock);
+	if (sbi->s_last_transaction != handle->h_transaction->t_tid) {
+		mb_debug("new transaction %lu, old %lu\n",
+			 (unsigned long) handle->h_transaction->t_tid,
+			 (unsigned long) sbi->s_last_transaction);
+		list_splice_init(&sbi->s_closed_transaction,
+				 &sbi->s_committed_transaction);
+		list_splice_init(&sbi->s_active_transaction,
+				 &sbi->s_closed_transaction);
+		sbi->s_last_transaction = handle->h_transaction->t_tid;
+	}
+	spin_unlock(&sbi->s_md_lock);
+
+	ext3_mb_free_committed_blocks(sb);
+}
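+/*
+ * Concretely: metadata blocks freed while transaction N is active sit
+ * on s_active_transaction. When the first allocation under transaction
+ * N+1 passes through here they move to s_closed_transaction, and when
+ * N+2 starts they move on to s_committed_transaction, where
+ * ext3_mb_free_committed_blocks() finally returns them to the buddy.
+ * Thus nothing freed in N can be reused before N has safely committed
+ * to the journal.
+ */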
+int ext3_mb_free_metadata(handle_t *handle, struct ext3_buddy *e3b,
+			  int group, int block, int count)
+{
+	struct ext3_group_info *db = e3b->bd_info;
+	struct super_block *sb = e3b->bd_sb;
+	struct ext3_sb_info *sbi = EXT3_SB(sb);
+	struct ext3_free_metadata *md;
+	int i;
+
+	BUG_ON(e3b->bd_bitmap_page == NULL);
+	BUG_ON(e3b->bd_buddy_page == NULL);
+
+	ext3_lock_group(sb, group);
+	for (i = 0; i < count; i++) {
+		md = db->bb_md_cur;
+		if (md && db->bb_tid != handle->h_transaction->t_tid) {
+			db->bb_md_cur = NULL;
+			md = NULL;
+		}
+
+		if (md == NULL) {
+			ext3_unlock_group(sb, group);
+			md = kmalloc(sizeof(*md), GFP_KERNEL);
+			if (md == NULL)
+				return -ENOMEM;
+			md->num = 0;
+			md->group = group;
+
+			ext3_lock_group(sb, group);
+			if (db->bb_md_cur == NULL) {
+				spin_lock(&sbi->s_md_lock);
+				list_add(&md->list, &sbi->s_active_transaction);
+				spin_unlock(&sbi->s_md_lock);
+				/* protect buddy cache from being freed,
+				 * otherwise we'll refresh it from
+				 * on-disk bitmap and lose not-yet-available
+				 * blocks */
+				page_cache_get(e3b->bd_buddy_page);
+				page_cache_get(e3b->bd_bitmap_page);
+				db->bb_md_cur = md;
+				db->bb_tid = handle->h_transaction->t_tid;
+				mb_debug("new md 0x%p for group %u\n",
+					 md, md->group);
+			} else {
+				kfree(md);
+				md = db->bb_md_cur;
+			}
+		}
+
+		BUG_ON(md->num >= EXT3_BB_MAX_BLOCKS);
+		md->blocks[md->num] = block + i;
+		md->num++;
+		if (md->num == EXT3_BB_MAX_BLOCKS) {
+			/* no more space, put full container on a sb's list */
+			db->bb_md_cur = NULL;
+		}
+	}
+	ext3_unlock_group(sb, group);
+	return 0;
+}
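+/*
+ * Each ext3_free_metadata container batches up to EXT3_BB_MAX_BLOCKS
+ * block numbers for one group within one transaction; when it fills,
+ * bb_md_cur is reset so the next freed block starts a fresh container
+ * on the superblock's active-transaction list. The extra page
+ * references taken above pin the buddy cache pages until the deferred
+ * free completes, so the buddy bitmap cannot be rebuilt from the
+ * on-disk bitmap (which already shows these blocks as free) while the
+ * blocks are still unavailable.
+ */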
+ * Main entry point into mballoc to free blocks
+ */
+void ext3_mb_free_blocks(handle_t *handle, struct inode *inode,
+			 unsigned long block, unsigned long count,
+			 int metadata, int *freed)
+{
+	struct buffer_head *bitmap_bh = NULL;
+	struct super_block *sb = inode->i_sb;
+	struct ext3_allocation_context ac;
+	struct ext3_group_desc *gdp;
+	struct ext3_super_block *es;
+	unsigned long bit, overflow;
+	struct buffer_head *gd_bh;
+	unsigned long block_group;
+	struct ext3_sb_info *sbi;
+	struct ext3_buddy e3b;
+	int err = 0;
+	int ret;
+
+	*freed = 0;
+
+	ext3_mb_poll_new_transaction(sb, handle);
+
+	sbi = EXT3_SB(sb);
+	es = EXT3_SB(sb)->s_es;
+	if (block < le32_to_cpu(es->s_first_data_block) ||
+	    block + count < block ||
+	    block + count > le32_to_cpu(es->s_blocks_count)) {
+		ext3_error(sb, __FUNCTION__,
+			   "Freeing blocks not in datazone - "
+			   "block = %lu, count = %lu", block, count);
+		goto error_return;
+	}
+
+	ext3_debug("freeing block %lu\n", block);
+
+	ac.ac_op = EXT3_MB_HISTORY_FREE;
+	ac.ac_inode = inode;
+	ac.ac_sb = sb;
+
+do_more:
+	overflow = 0;
+	ext3_get_group_no_and_offset(sb, block, &block_group, &bit);
+
+	/*
+	 * Check to see if we are freeing blocks across a group
+	 * boundary.
+	 */
+	if (bit + count > EXT3_BLOCKS_PER_GROUP(sb)) {
+		overflow = bit + count - EXT3_BLOCKS_PER_GROUP(sb);
+		count -= overflow;
+	}
+	brelse(bitmap_bh);
+	bitmap_bh = read_block_bitmap(sb, block_group);
+	if (!bitmap_bh)
+		goto error_return;
+	gdp = ext3_get_group_desc(sb, block_group, &gd_bh);
+	if (!gdp)
+		goto error_return;
+
+	if (in_range(le32_to_cpu(gdp->bg_block_bitmap), block, count) ||
+	    in_range(le32_to_cpu(gdp->bg_inode_bitmap), block, count) ||
+	    in_range(block, le32_to_cpu(gdp->bg_inode_table),
+		     EXT3_SB(sb)->s_itb_per_group) ||
+	    in_range(block + count - 1, le32_to_cpu(gdp->bg_inode_table),
+		     EXT3_SB(sb)->s_itb_per_group))
+		ext3_error(sb, __FUNCTION__,
+			   "Freeing blocks in system zone - "
+			   "Block = %lu, count = %lu", block, count);
+
+	BUFFER_TRACE(bitmap_bh, "getting write access");
+	err = ext3_journal_get_write_access(handle, bitmap_bh);
+	if (err)
+		goto error_return;
+
+	/*
+	 * We are about to modify some metadata. Call the journal APIs
+	 * to unshare ->b_data if a currently-committing transaction is
+	 * using it
+	 */
+	BUFFER_TRACE(gd_bh, "get_write_access");
+	err = ext3_journal_get_write_access(handle, gd_bh);
+	if (err)
+		goto error_return;
+
+	err = ext3_mb_load_buddy(sb, block_group, &e3b);
+	if (err)
+		goto error_return;
+
+#ifdef AGGRESSIVE_CHECK
+	{
+		int i;
+		for (i = 0; i < count; i++)
+			BUG_ON(!mb_test_bit(bit + i, bitmap_bh->b_data));
+	}
+#endif
+	mb_clear_bits(sb_bgl_lock(sbi, block_group), bitmap_bh->b_data, bit,
+		      count);
+
+	/* We dirtied the bitmap block */
+	BUFFER_TRACE(bitmap_bh, "dirtied bitmap block");
+	err = ext3_journal_dirty_metadata(handle, bitmap_bh);
+
+	ac.ac_b_ex.fe_group = block_group;
+	ac.ac_b_ex.fe_start = bit;
+	ac.ac_b_ex.fe_len = count;
+	ext3_mb_store_history(&ac);
+
+	if (metadata) {
+		/* blocks being freed are metadata. these blocks shouldn't
+		 * be used until this transaction is committed */
+		ext3_mb_free_metadata(handle, &e3b, block_group, bit, count);
+	} else {
+		ext3_lock_group(sb, block_group);
+		err = mb_free_blocks(inode, &e3b, bit, count);
+		ext3_mb_return_to_preallocation(inode, &e3b, block, count);
+		ext3_unlock_group(sb, block_group);
+		BUG_ON(err != 0);
+	}
+
+	spin_lock(sb_bgl_lock(sbi, block_group));
+	gdp->bg_free_blocks_count =
+		cpu_to_le16(le16_to_cpu(gdp->bg_free_blocks_count) + count);
+	spin_unlock(sb_bgl_lock(sbi, block_group));
+	percpu_counter_mod(&sbi->s_freeblocks_counter, count);
+
+	ext3_mb_release_desc(&e3b);
+
+	*freed += count;
+
+	/* And the group descriptor block */
+	BUFFER_TRACE(gd_bh, "dirtied group descriptor block");
+	ret = ext3_journal_dirty_metadata(handle, gd_bh);
+	if (!err)
+		err = ret;
+
+	if (overflow && !err) {
+		block += count;
+		count = overflow;
+		goto do_more;
+	}
+	sb->s_dirt = 1;
+error_return:
+	brelse(bitmap_bh);
+	ext3_std_error(sb, err);