1 Index: linux-2.6.27.21-0.1/fs/ext4/ext4_sb.h
2 ===================================================================
3 --- linux-2.6.27.21-0.1.orig/fs/ext4/ext4_sb.h
4 +++ linux-2.6.27.21-0.1/fs/ext4/ext4_sb.h
5 @@ -111,11 +111,14 @@ struct ext4_sb_info {
8 unsigned long s_stripe;
9 - unsigned int s_mb_stream_request;
10 + unsigned long s_mb_small_req;
11 + unsigned long s_mb_large_req;
12 unsigned int s_mb_max_to_scan;
13 unsigned int s_mb_min_to_scan;
14 unsigned int s_mb_stats;
15 unsigned int s_mb_order2_reqs;
16 + unsigned long *s_mb_prealloc_table;
17 + unsigned long s_mb_prealloc_table_size;
18 unsigned int s_mb_group_prealloc;
19 /* where last allocation was done - for stream allocation */
20 unsigned long s_mb_last_group;
21 Index: linux-2.6.27.21-0.1/fs/ext4/mballoc.c
22 ===================================================================
23 --- linux-2.6.27.21-0.1.orig/fs/ext4/mballoc.c
24 +++ linux-2.6.27.21-0.1/fs/ext4/mballoc.c
25 @@ -1996,7 +1996,7 @@ ext4_mb_regular_allocator(struct ext4_al
29 - if (size < sbi->s_mb_stream_request &&
30 + if ((ac->ac_g_ex.fe_len < sbi->s_mb_large_req) &&
31 (ac->ac_flags & EXT4_MB_HINT_DATA)) {
32 /* TBD: may be hot point */
33 spin_lock(&sbi->s_md_lock);
34 @@ -2686,6 +2686,26 @@ err_freesgi:
38 +static void ext4_mb_prealloc_table_add(struct ext4_sb_info *sbi, int value)
42 + if (value > (sbi->s_blocks_per_group - 1 - 1 - sbi->s_itb_per_group))
45 + for (i = 0; i < sbi->s_mb_prealloc_table_size; i++) {
46 + if (sbi->s_mb_prealloc_table[i] == 0) {
47 + sbi->s_mb_prealloc_table[i] = value;
51 + /* they should add values in order */
52 + if (value <= sbi->s_mb_prealloc_table[i])
58 int ext4_mb_init(struct super_block *sb, int needs_recovery)
60 struct ext4_sb_info *sbi = EXT4_SB(sb);
61 @@ -2738,13 +2758,55 @@ int ext4_mb_init(struct super_block *sb,
62 sbi->s_mb_max_to_scan = MB_DEFAULT_MAX_TO_SCAN;
63 sbi->s_mb_min_to_scan = MB_DEFAULT_MIN_TO_SCAN;
64 sbi->s_mb_stats = MB_DEFAULT_STATS;
65 - sbi->s_mb_stream_request = MB_DEFAULT_STREAM_THRESHOLD;
66 sbi->s_mb_order2_reqs = MB_DEFAULT_ORDER2_REQS;
67 sbi->s_mb_history_filter = EXT4_MB_HISTORY_DEFAULT;
68 - sbi->s_mb_group_prealloc = MB_DEFAULT_GROUP_PREALLOC;
70 + if (sbi->s_stripe == 0) {
71 + sbi->s_mb_prealloc_table_size = 8;
72 + i = sbi->s_mb_prealloc_table_size * sizeof(unsigned long);
73 + sbi->s_mb_prealloc_table = kmalloc(i, GFP_NOFS);
74 + if (sbi->s_mb_prealloc_table == NULL) {
75 + kfree(sbi->s_mb_offsets);
76 + kfree(sbi->s_mb_maxs);
79 + memset(sbi->s_mb_prealloc_table, 0, i);
81 + ext4_mb_prealloc_table_add(sbi, 4);
82 + ext4_mb_prealloc_table_add(sbi, 8);
83 + ext4_mb_prealloc_table_add(sbi, 16);
84 + ext4_mb_prealloc_table_add(sbi, 32);
85 + ext4_mb_prealloc_table_add(sbi, 64);
86 + ext4_mb_prealloc_table_add(sbi, 128);
87 + ext4_mb_prealloc_table_add(sbi, 256);
88 + ext4_mb_prealloc_table_add(sbi, 512);
90 + sbi->s_mb_small_req = 256;
91 + sbi->s_mb_large_req = 1024;
92 + sbi->s_mb_group_prealloc = 512;
94 + sbi->s_mb_prealloc_table_size = 3;
95 + i = sbi->s_mb_prealloc_table_size * sizeof(unsigned long);
96 + sbi->s_mb_prealloc_table = kmalloc(i, GFP_NOFS);
97 + if (sbi->s_mb_prealloc_table == NULL) {
98 + kfree(sbi->s_mb_offsets);
99 + kfree(sbi->s_mb_maxs);
102 + memset(sbi->s_mb_prealloc_table, 0, i);
104 + ext4_mb_prealloc_table_add(sbi, sbi->s_stripe);
105 + ext4_mb_prealloc_table_add(sbi, sbi->s_stripe * 2);
106 + ext4_mb_prealloc_table_add(sbi, sbi->s_stripe * 4);
108 + sbi->s_mb_small_req = sbi->s_stripe;
109 + sbi->s_mb_large_req = sbi->s_stripe * 8;
110 + sbi->s_mb_group_prealloc = sbi->s_stripe * 4;
113 sbi->s_locality_groups = alloc_percpu(struct ext4_locality_group);
114 if (sbi->s_locality_groups == NULL) {
115 + kfree(sbi->s_mb_prealloc_table);
116 kfree(sbi->s_mb_offsets);
117 kfree(sbi->s_mb_maxs);
119 @@ -2915,9 +2977,89 @@ ext4_mb_free_committed_blocks(struct sup
120 #define EXT4_MB_MAX_TO_SCAN_NAME "max_to_scan"
121 #define EXT4_MB_MIN_TO_SCAN_NAME "min_to_scan"
122 #define EXT4_MB_ORDER2_REQ "order2_req"
123 -#define EXT4_MB_STREAM_REQ "stream_req"
124 +#define EXT4_MB_SMALL_REQ "small_req"
125 +#define EXT4_MB_LARGE_REQ "large_req"
126 +#define EXT4_MB_PREALLOC_TABLE "prealloc_table"
127 #define EXT4_MB_GROUP_PREALLOC "group_prealloc"
129 +static int ext4_mb_prealloc_table_proc_read(char *page, char **start, off_t off,
130 + int count, int *eof, void *data)
132 + struct ext4_sb_info *sbi = data;
140 + for (i = 0; i < sbi->s_mb_prealloc_table_size; i++)
141 + len += sprintf(page + len, "%ld ",
142 + sbi->s_mb_prealloc_table[i]);
143 + len += sprintf(page + len, "\n");
149 +static int ext4_mb_prealloc_table_proc_write(struct file *file,
150 + const char __user *buf,
151 + unsigned long cnt, void *data)
153 + struct ext4_sb_info *sbi = data;
154 + unsigned long value;
155 + unsigned long prev = 0;
159 + unsigned long *new_table;
163 + if (cnt >= sizeof(str))
165 + if (copy_from_user(str, buf, cnt))
171 + while (cur < end) {
172 + while ((cur < end) && (*cur == ' ')) cur++;
173 + value = simple_strtol(cur, &cur, 0);
182 + new_table = kmalloc(num * sizeof(*new_table), GFP_KERNEL);
183 + if (new_table == NULL)
185 + kfree(sbi->s_mb_prealloc_table);
186 + memset(new_table, 0, num * sizeof(*new_table));
187 + sbi->s_mb_prealloc_table = new_table;
188 + sbi->s_mb_prealloc_table_size = num;
191 + while (cur < end && i < num) {
192 + while ((cur < end) && (*cur == ' ')) cur++;
193 + value = simple_strtol(cur, &cur, 0);
194 + ext4_mb_prealloc_table_add(sbi, value);
201 +static const struct file_operations ext4_mb_prealloc_table_proc_fops = {
202 + .owner = THIS_MODULE,
203 + .read = ext4_mb_prealloc_table_proc_read,
204 + .write = ext4_mb_prealloc_table_proc_write,
207 static int ext4_mb_init_per_dev_proc(struct super_block *sb)
209 #ifdef CONFIG_PROC_FS
210 @@ -2932,13 +3074,17 @@ static int ext4_mb_init_per_dev_proc(str
211 EXT4_PROC_HANDLER(EXT4_MB_MAX_TO_SCAN_NAME, mb_max_to_scan);
212 EXT4_PROC_HANDLER(EXT4_MB_MIN_TO_SCAN_NAME, mb_min_to_scan);
213 EXT4_PROC_HANDLER(EXT4_MB_ORDER2_REQ, mb_order2_reqs);
214 - EXT4_PROC_HANDLER(EXT4_MB_STREAM_REQ, mb_stream_request);
215 + EXT4_PROC_HANDLER(EXT4_MB_SMALL_REQ, mb_small_req);
216 + EXT4_PROC_HANDLER(EXT4_MB_LARGE_REQ, mb_large_req);
217 + EXT4_PROC_HANDLER(EXT4_MB_PREALLOC_TABLE, mb_prealloc_table);
218 EXT4_PROC_HANDLER(EXT4_MB_GROUP_PREALLOC, mb_group_prealloc);
222 remove_proc_entry(EXT4_MB_GROUP_PREALLOC, sbi->s_proc);
223 - remove_proc_entry(EXT4_MB_STREAM_REQ, sbi->s_proc);
224 + remove_proc_entry(EXT4_MB_PREALLOC_TABLE, sbi->s_proc);
225 + remove_proc_entry(EXT4_MB_LARGE_REQ, sbi->s_proc);
226 + remove_proc_entry(EXT4_MB_SMALL_REQ, sbi->s_proc);
227 remove_proc_entry(EXT4_MB_ORDER2_REQ, sbi->s_proc);
228 remove_proc_entry(EXT4_MB_MIN_TO_SCAN_NAME, sbi->s_proc);
229 remove_proc_entry(EXT4_MB_MAX_TO_SCAN_NAME, sbi->s_proc);
230 @@ -2959,7 +3105,9 @@ static int ext4_mb_destroy_per_dev_proc(
233 remove_proc_entry(EXT4_MB_GROUP_PREALLOC, sbi->s_proc);
234 - remove_proc_entry(EXT4_MB_STREAM_REQ, sbi->s_proc);
235 + remove_proc_entry(EXT4_MB_PREALLOC_TABLE, sbi->s_proc);
236 + remove_proc_entry(EXT4_MB_LARGE_REQ, sbi->s_proc);
237 + remove_proc_entry(EXT4_MB_SMALL_REQ, sbi->s_proc);
238 remove_proc_entry(EXT4_MB_ORDER2_REQ, sbi->s_proc);
239 remove_proc_entry(EXT4_MB_MIN_TO_SCAN_NAME, sbi->s_proc);
240 remove_proc_entry(EXT4_MB_MAX_TO_SCAN_NAME, sbi->s_proc);
241 @@ -3162,11 +3310,12 @@ static noinline_for_stack void
242 ext4_mb_normalize_request(struct ext4_allocation_context *ac,
243 struct ext4_allocation_request *ar)
246 + int bsbits, i, wind;
248 - loff_t size, orig_size, start_off;
249 + loff_t size, orig_size;
250 ext4_lblk_t start, orig_start;
251 struct ext4_inode_info *ei = EXT4_I(ac->ac_inode);
252 + struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb);
253 struct ext4_prealloc_space *pa;
255 /* do normalize only data requests, metadata requests
256 @@ -3196,49 +3345,35 @@ ext4_mb_normalize_request(struct ext4_al
257 size = size << bsbits;
258 if (size < i_size_read(ac->ac_inode))
259 size = i_size_read(ac->ac_inode);
260 + size = (size + ac->ac_sb->s_blocksize - 1) >> bsbits;
262 - /* max size of free chunks */
266 -#define NRL_CHECK_SIZE(req, size, max, chunk_size) \
267 - (req <= (size) || max <= (chunk_size))
268 + /* let's choose preallocation window depending on file size */
269 + for (i = 0; i < sbi->s_mb_prealloc_table_size; i++) {
270 + if (size <= sbi->s_mb_prealloc_table[i]) {
271 + wind = sbi->s_mb_prealloc_table[i];
277 - /* first, try to predict filesize */
278 - /* XXX: should this table be tunable? */
280 - if (size <= 16 * 1024) {
282 - } else if (size <= 32 * 1024) {
284 - } else if (size <= 64 * 1024) {
286 - } else if (size <= 128 * 1024) {
288 - } else if (size <= 256 * 1024) {
290 - } else if (size <= 512 * 1024) {
292 - } else if (size <= 1024 * 1024) {
293 - size = 1024 * 1024;
294 - } else if (NRL_CHECK_SIZE(size, 4 * 1024 * 1024, max, 2 * 1024)) {
295 - start_off = ((loff_t)ac->ac_o_ex.fe_logical >>
296 - (21 - bsbits)) << 21;
297 - size = 2 * 1024 * 1024;
298 - } else if (NRL_CHECK_SIZE(size, 8 * 1024 * 1024, max, 4 * 1024)) {
299 - start_off = ((loff_t)ac->ac_o_ex.fe_logical >>
300 - (22 - bsbits)) << 22;
301 - size = 4 * 1024 * 1024;
302 - } else if (NRL_CHECK_SIZE(ac->ac_o_ex.fe_len,
303 - (8<<20)>>bsbits, max, 8 * 1024)) {
304 - start_off = ((loff_t)ac->ac_o_ex.fe_logical >>
305 - (23 - bsbits)) << 23;
306 - size = 8 * 1024 * 1024;
308 - start_off = (loff_t)ac->ac_o_ex.fe_logical << bsbits;
309 - size = ac->ac_o_ex.fe_len << bsbits;
311 + __u64 tstart, tend;
312 + /* file is quite large, we now preallocate with
313 + * the biggest configured window with regard to
314 + * logical offset */
315 + wind = sbi->s_mb_prealloc_table[i - 1];
316 + tstart = ac->ac_o_ex.fe_logical;
317 + do_div(tstart, wind);
318 + start = tstart * wind;
319 + tend = ac->ac_o_ex.fe_logical + ac->ac_o_ex.fe_len - 1;
320 + do_div(tend, wind);
321 + tend = tend * wind + wind;
322 + size = tend - start;
324 - orig_size = size = size >> bsbits;
325 - orig_start = start = start_off >> bsbits;
327 + orig_start = start;
329 /* don't cover already allocated blocks in selected range */
330 if (ar->pleft && start <= ar->lleft) {
331 @@ -3315,7 +3450,6 @@ ext4_mb_normalize_request(struct ext4_al
333 BUG_ON(start + size <= ac->ac_o_ex.fe_logical &&
334 start > ac->ac_o_ex.fe_logical);
335 - BUG_ON(size <= 0 || size >= EXT4_BLOCKS_PER_GROUP(ac->ac_sb));
337 /* now prepare goal request */
339 @@ -4236,22 +4370,32 @@ static void ext4_mb_group_or_file(struct
341 struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb);
342 int bsbits = ac->ac_sb->s_blocksize_bits;
343 - loff_t size, isize;
346 if (!(ac->ac_flags & EXT4_MB_HINT_DATA))
349 - size = ac->ac_o_ex.fe_logical + ac->ac_o_ex.fe_len;
350 - isize = i_size_read(ac->ac_inode) >> bsbits;
351 - size = max(size, isize);
353 - /* don't use group allocation for large files */
354 - if (size >= sbi->s_mb_stream_request)
355 + if (ac->ac_o_ex.fe_len >= sbi->s_mb_small_req)
358 if (unlikely(ac->ac_flags & EXT4_MB_HINT_GOAL_ONLY))
361 + /* request is so large that we don't care about
362 + * streaming - it outweighs any possible seek */
363 + if (ac->ac_o_ex.fe_len >= sbi->s_mb_large_req)
366 + size = ac->ac_o_ex.fe_logical + ac->ac_o_ex.fe_len;
367 + size = size << bsbits;
368 + if (size < i_size_read(ac->ac_inode))
369 + size = i_size_read(ac->ac_inode);
370 + size = (size + ac->ac_sb->s_blocksize - 1) >> bsbits;
372 + /* don't use group allocation for large files */
373 + if (size >= sbi->s_mb_large_req)
376 BUG_ON(ac->ac_lg != NULL);
378 * locality group prealloc space are per cpu. The reason for having
379 Index: linux-2.6.27.21-0.1/fs/ext4/inode.c
380 ===================================================================
381 --- linux-2.6.27.21-0.1.orig/fs/ext4/inode.c
382 +++ linux-2.6.27.21-0.1/fs/ext4/inode.c
383 @@ -2442,14 +2442,14 @@ static int ext4_da_writepages(struct add
387 - * Make sure nr_to_write is >= sbi->s_mb_stream_request
388 + * Make sure nr_to_write is >= sbi->s_mb_small_req
389 * This make sure small files blocks are allocated in
390 * single attempt. This ensure that small files
391 * get less fragmented.
393 - if (wbc->nr_to_write < sbi->s_mb_stream_request) {
394 - nr_to_writebump = sbi->s_mb_stream_request - wbc->nr_to_write;
395 - wbc->nr_to_write = sbi->s_mb_stream_request;
396 + if (wbc->nr_to_write < sbi->s_mb_small_req) {
397 + nr_to_writebump = sbi->s_mb_small_req - wbc->nr_to_write;
398 + wbc->nr_to_write = sbi->s_mb_small_req;
400 if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX)