commit d8d8fd9192a54c7b8caef8cca9b7a1eb5e5e3298
Author: Alex Zhuravlev <alex.zhuravlev@sun.com>
AuthorDate: Thu Oct 23 10:02:19 2008 +0000

Subject: ext4: support for tunable preallocation window

Add support for a tunable preallocation window and new tunables
for large/small requests.
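
The new table drives window selection in ext4_mb_normalize_request(): the
allocator walks the per-filesystem table and uses the smallest configured
window that covers the request, falling back to the largest non-zero entry
(rounded to that window's boundaries) for files bigger than every entry.
Below is a minimal userspace-style sketch of that lookup, assuming the
default table for a non-striped filesystem; pick_window() and the sample
values are illustrative only and not part of this patch:

	/*
	 * Illustrative sketch only: roughly mirrors the table lookup done by
	 * ext4_mb_normalize_request() against the new prealloc table.
	 */
	#include <stdio.h>

	#define MAX_TABLE	64

	static unsigned long pick_window(const unsigned long *table,
					 unsigned long blocks)
	{
		unsigned long last = 0;
		int i;

		for (i = 0; i < MAX_TABLE && table[i] != 0; i++) {
			last = table[i];
			if (blocks <= table[i])
				return table[i]; /* smallest window covering the request */
		}
		return last; /* larger than every entry: use the biggest window */
	}

	int main(void)
	{
		/* default table for a non-striped filesystem: 4..2048 blocks */
		unsigned long table[MAX_TABLE] = { 4, 8, 16, 32, 64, 128,
						   256, 512, 1024, 2048 };

		printf("%lu\n", pick_window(table, 100));  /* 128 */
		printf("%lu\n", pick_window(table, 5000)); /* 2048 */
		return 0;
	}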

Signed-off-by: Alex Zhuravlev <alex.zhuravlev@sun.com>
Reviewed-by: Kalpak Shah <kalpak@clusterfs.com>
Reviewed-by: Andreas Dilger <andreas.dilger@sun.com>

 fs/ext4/mballoc.c | 220 +++++++++++++++++++++++++++++++++++-----------
 fs/ext4/sysfs.c   |   8 +-
 4 files changed, 182 insertions(+), 56 deletions(-)

diff --git a/fs/ext4/ext4.h b/fs/ext4/ext4.h
index bdd72d46..7168e4e4 100644
--- a/fs/ext4/ext4.h
+++ b/fs/ext4/ext4.h
@@ -1290,6 +1290,8 @@ extern void ext4_set_bits(void *bm, int cur, int len);
 #define EXT4_DFL_MAX_MNT_COUNT		20	/* Allow 20 mounts */
 #define EXT4_DFL_CHECKINTERVAL		0	/* Don't use interval check */
 
+#define EXT4_MAX_PREALLOC_TABLE	64
+
 /*
  * Behaviour when detecting errors
@@ -1594,11 +1596,13 @@ struct ext4_sb_info {
 	unsigned long s_stripe;
 	unsigned int s_mb_max_linear_groups;
-	unsigned int s_mb_stream_request;
+	unsigned long s_mb_small_req;
+	unsigned long s_mb_large_req;
 	unsigned int s_mb_max_to_scan;
 	unsigned int s_mb_min_to_scan;
 	unsigned int s_mb_stats;
 	unsigned int s_mb_order2_reqs;
+	unsigned long *s_mb_prealloc_table;
 	unsigned int s_mb_group_prealloc;
 	unsigned int s_mb_max_inode_prealloc;
 	unsigned int s_max_dir_size_kb;
@@ -2939,6 +2943,7 @@ int ext4_fc_record_regions(struct super_block *sb, int ino,
+extern const struct proc_ops ext4_seq_prealloc_table_fops;
 extern const struct seq_operations ext4_mb_seq_groups_ops;
 extern const struct seq_operations ext4_mb_seq_structs_summary_ops;
 extern long ext4_mb_stats;
diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c
index 8fa8757e..bc7bcbc0 100644
--- a/fs/ext4/inode.c
+++ b/fs/ext4/inode.c
@@ -2743,6 +2743,9 @@ static int ext4_writepages(struct address_space *mapping,
					  PAGE_SIZE >> inode->i_blkbits);
 
+	if (wbc->nr_to_write < sbi->s_mb_small_req)
+		wbc->nr_to_write = sbi->s_mb_small_req;
+
 	if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX)
diff --git a/fs/ext4/mballoc.c b/fs/ext4/mballoc.c
index e8f5f05b..e1e3da73 100644
--- a/fs/ext4/mballoc.c
+++ b/fs/ext4/mballoc.c
@@ -3091,6 +3091,99 @@ const struct seq_operations ext4_mb_seq_structs_summary_ops = {
 	.show	= ext4_mb_seq_structs_summary_show,
+static int ext4_mb_check_and_update_prealloc(struct ext4_sb_info *sbi,
+					     char *str, size_t cnt,
+	unsigned long value;
+	unsigned long prev = 0;
+	while ((cur < end) && (*cur == ' ')) cur++;
+	value = simple_strtol(cur, &next, 0);
+	if (value > (sbi->s_blocks_per_group - 1 - 1 - sbi->s_itb_per_group))
+	/* values must be given in ascending order */
+	sbi->s_mb_prealloc_table[num] = value;
+	if (num > EXT4_MAX_PREALLOC_TABLE - 1)
+	sbi->s_mb_prealloc_table[num] = 0;
+static ssize_t ext4_mb_prealloc_table_proc_write(struct file *file,
+						 const char __user *buf,
+						 size_t cnt, loff_t *pos)
+	struct ext4_sb_info *sbi = EXT4_SB(pde_data(file_inode(file)));
+	if (cnt >= sizeof(str))
+	if (copy_from_user(str, buf, cnt))
+	rc = ext4_mb_check_and_update_prealloc(sbi, str, cnt, 0);
+	rc = ext4_mb_check_and_update_prealloc(sbi, str, cnt, 1);
+	return rc ? rc : cnt;
+static int mb_prealloc_table_seq_show(struct seq_file *m, void *v)
+	struct ext4_sb_info *sbi = EXT4_SB(m->private);
+	for (i = 0; i < EXT4_MAX_PREALLOC_TABLE &&
+		    sbi->s_mb_prealloc_table[i] != 0; i++)
+		seq_printf(m, "%lu ", sbi->s_mb_prealloc_table[i]);
+	seq_printf(m, "\n");
+static int mb_prealloc_table_seq_open(struct inode *inode, struct file *file)
+	return single_open(file, mb_prealloc_table_seq_show, pde_data(inode));
+const struct proc_ops ext4_seq_prealloc_table_fops = {
+	.proc_open	= mb_prealloc_table_seq_open,
+	.proc_read	= seq_read,
+	.proc_lseek	= seq_lseek,
+	.proc_release	= single_release,
+	.proc_write	= ext4_mb_prealloc_table_proc_write,
 static struct kmem_cache *get_groupinfo_cache(int blocksize_bits)
 	int cache_index = blocksize_bits - EXT4_MIN_BLOCK_LOG_SIZE;
@@ -3407,7 +3500,7 @@ static void ext4_discard_work(struct work_struct *work)
 int ext4_mb_init(struct super_block *sb)
 	struct ext4_sb_info *sbi = EXT4_SB(sb);
+	unsigned i, j, k, l;
 	unsigned offset, offset_incr;
@@ -3479,7 +3572,6 @@ int ext4_mb_init(struct super_block *sb)
 	sbi->s_mb_max_to_scan = MB_DEFAULT_MAX_TO_SCAN;
 	sbi->s_mb_min_to_scan = MB_DEFAULT_MIN_TO_SCAN;
 	sbi->s_mb_stats = MB_DEFAULT_STATS;
-	sbi->s_mb_stream_request = MB_DEFAULT_STREAM_THRESHOLD;
 	sbi->s_mb_order2_reqs = MB_DEFAULT_ORDER2_REQS;
 	sbi->s_mb_max_inode_prealloc = MB_DEFAULT_MAX_INODE_PREALLOC;
@@ -3504,9 +3596,29 @@ int ext4_mb_init(struct super_block *sb)
 	 * RAID stripe size so that preallocations don't fragment
-	if (sbi->s_stripe > 1) {
-		sbi->s_mb_group_prealloc = roundup(
-			sbi->s_mb_group_prealloc, sbi->s_stripe);
+	/* Allocate table once */
+	sbi->s_mb_prealloc_table = kzalloc(
+		EXT4_MAX_PREALLOC_TABLE * sizeof(unsigned long), GFP_NOFS);
+	if (sbi->s_mb_prealloc_table == NULL) {
+	if (sbi->s_stripe == 0) {
+		for (k = 0, l = 4; k <= 9; ++k, l *= 2)
+			sbi->s_mb_prealloc_table[k] = l;
+		sbi->s_mb_small_req = 256;
+		sbi->s_mb_large_req = 1024;
+		sbi->s_mb_group_prealloc = 512;
+		for (k = 0, l = sbi->s_stripe; k <= 2; ++k, l *= 2)
+			sbi->s_mb_prealloc_table[k] = l;
+		sbi->s_mb_small_req = sbi->s_stripe;
+		sbi->s_mb_large_req = sbi->s_stripe * 8;
+		sbi->s_mb_group_prealloc = sbi->s_stripe * 4;
 	sbi->s_locality_groups = alloc_percpu(struct ext4_locality_group);
@@ -3540,6 +3652,7 @@ out_free_locality_groups:
 	kfree(sbi->s_mb_largest_free_orders);
 	kfree(sbi->s_mb_largest_free_orders_locks);
+	kfree(sbi->s_mb_prealloc_table);
 	kfree(sbi->s_mb_offsets);
 	sbi->s_mb_offsets = NULL;
 	kfree(sbi->s_mb_maxs);
@@ -3600,6 +3640,7 @@ int ext4_mb_release(struct super_block *sb)
+	kfree(sbi->s_mb_prealloc_table);
 	kfree(sbi->s_mb_offsets);
 	kfree(sbi->s_mb_maxs);
 	iput(sbi->s_buddy_cache);
@@ -3810,7 +3923,6 @@ ext4_mb_mark_diskspace_used(struct ext4_allocation_context *ac,
 	BUG_ON(ac->ac_status != AC_STATUS_FOUND);
-	BUG_ON(ac->ac_b_ex.fe_len <= 0);
@@ -4046,13 +4158,14 @@ ext4_mb_normalize_request(struct ext4_allocation_context *ac,
 	struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb);
 	struct ext4_super_block *es = sbi->s_es;
+	int bsbits, i, wind;
-	loff_t size, start_off;
 	loff_t orig_size __maybe_unused;
 	struct ext4_inode_info *ei = EXT4_I(ac->ac_inode);
 	struct ext4_prealloc_space *pa;
+	unsigned long value, last_non_zero;
 	/* do normalize only data requests, metadata requests
 	   do not need preallocation */
@@ -4081,51 +4194,46 @@ ext4_mb_normalize_request(struct ext4_allocation_context *ac,
 	size = size << bsbits;
 	if (size < i_size_read(ac->ac_inode))
 		size = i_size_read(ac->ac_inode);
+	size = (size + ac->ac_sb->s_blocksize - 1) >> bsbits;
+	value = last_non_zero = 0;
-	/* max size of free chunks */
-#define NRL_CHECK_SIZE(req, size, max, chunk_size)	\
-		(req <= (size) || max <= (chunk_size))
-	/* first, try to predict filesize */
-	/* XXX: should this table be tunable? */
-	if (size <= 16 * 1024) {
-		size = 16 * 1024;
-	} else if (size <= 32 * 1024) {
-		size = 32 * 1024;
-	} else if (size <= 64 * 1024) {
-		size = 64 * 1024;
-	} else if (size <= 128 * 1024) {
-		size = 128 * 1024;
-	} else if (size <= 256 * 1024) {
-		size = 256 * 1024;
-	} else if (size <= 512 * 1024) {
-		size = 512 * 1024;
-	} else if (size <= 1024 * 1024) {
-		size = 1024 * 1024;
-	} else if (NRL_CHECK_SIZE(size, 4 * 1024 * 1024, max, 2 * 1024)) {
-		start_off = ((loff_t)ac->ac_o_ex.fe_logical >>
-						(21 - bsbits)) << 21;
-		size = 2 * 1024 * 1024;
-	} else if (NRL_CHECK_SIZE(size, 8 * 1024 * 1024, max, 4 * 1024)) {
-		start_off = ((loff_t)ac->ac_o_ex.fe_logical >>
-						(22 - bsbits)) << 22;
-		size = 4 * 1024 * 1024;
-	} else if (NRL_CHECK_SIZE(ac->ac_o_ex.fe_len,
-					(8<<20)>>bsbits, max, 8 * 1024)) {
-		start_off = ((loff_t)ac->ac_o_ex.fe_logical >>
-						(23 - bsbits)) << 23;
-		size = 8 * 1024 * 1024;
+	/* choose the preallocation window depending on file size */
+	for (i = 0; i < EXT4_MAX_PREALLOC_TABLE; i++) {
+		value = sbi->s_mb_prealloc_table[i];
+		last_non_zero = value;
+		if (size <= value) {
+	if (last_non_zero != 0) {
+		__u64 tstart, tend;
+		/* file is quite large, we now preallocate with
+		 * the biggest configured window with regard to
+		 * logical offset */
+		wind = last_non_zero;
+		tstart = ac->ac_o_ex.fe_logical;
+		do_div(tstart, wind);
+		start = tstart * wind;
+		tend = ac->ac_o_ex.fe_logical + ac->ac_o_ex.fe_len - 1;
+		do_div(tend, wind);
+		tend = tend * wind + wind;
+		size = tend - start;
-		start_off = (loff_t) ac->ac_o_ex.fe_logical << bsbits;
-		size	  = (loff_t) EXT4_C2B(EXT4_SB(ac->ac_sb),
-					      ac->ac_o_ex.fe_len) << bsbits;
-	size = size >> bsbits;
-	start = start_off >> bsbits;
 	 * For tiny groups (smaller than 8MB) the chosen allocation
@@ -4216,7 +4324,6 @@ ext4_mb_normalize_request(struct ext4_allocation_context *ac,
 			(unsigned long) ac->ac_o_ex.fe_logical);
-	BUG_ON(size <= 0 || size > EXT4_BLOCKS_PER_GROUP(ac->ac_sb));
 	/* now prepare goal request */
@@ -5249,8 +5356,8 @@ static void ext4_mb_group_or_file(struct ext4_allocation_context *ac)
 		inode_pa_eligible = false;
 	size = max(size, isize);
-	/* Don't use group allocation for large files */
-	if (size > sbi->s_mb_stream_request)
+	if ((ac->ac_o_ex.fe_len >= sbi->s_mb_small_req) ||
+	    (size >= sbi->s_mb_large_req))
 		group_pa_eligible = false;
 	if (!group_pa_eligible) {
@@ -5261,6 +5368,13 @@ static void ext4_mb_group_or_file(struct ext4_allocation_context *ac)
+	 * request is so large that we don't care about
+	 * streaming - it outweighs any possible seek
+	if (ac->ac_o_ex.fe_len >= sbi->s_mb_large_req)
 	BUG_ON(ac->ac_lg != NULL);
 	 * locality group prealloc space are per cpu. The reason for having
diff --git a/fs/ext4/sysfs.c b/fs/ext4/sysfs.c
index aa07b78b..eef2fadb 100644
--- a/fs/ext4/sysfs.c
+++ b/fs/ext4/sysfs.c
@@ -212,7 +212,8 @@ EXT4_RW_ATTR_SBI_UI(mb_stats, s_mb_stats);
 EXT4_RW_ATTR_SBI_UI(mb_max_to_scan, s_mb_max_to_scan);
 EXT4_RW_ATTR_SBI_UI(mb_min_to_scan, s_mb_min_to_scan);
 EXT4_RW_ATTR_SBI_UI(mb_order2_req, s_mb_order2_reqs);
-EXT4_RW_ATTR_SBI_UI(mb_stream_req, s_mb_stream_request);
+EXT4_RW_ATTR_SBI_UI(mb_small_req, s_mb_small_req);
+EXT4_RW_ATTR_SBI_UI(mb_large_req, s_mb_large_req);
 EXT4_RW_ATTR_SBI_UI(mb_group_prealloc, s_mb_group_prealloc);
 EXT4_RW_ATTR_SBI_UI(mb_max_inode_prealloc, s_mb_max_inode_prealloc);
 EXT4_RW_ATTR_SBI_UI(mb_max_linear_groups, s_mb_max_linear_groups);
@@ -261,7 +262,8 @@ static struct attribute *ext4_attrs[] = {
 	ATTR_LIST(mb_max_to_scan),
 	ATTR_LIST(mb_min_to_scan),
 	ATTR_LIST(mb_order2_req),
-	ATTR_LIST(mb_stream_req),
+	ATTR_LIST(mb_small_req),
+	ATTR_LIST(mb_large_req),
 	ATTR_LIST(mb_group_prealloc),
 	ATTR_LIST(mb_max_inode_prealloc),
 	ATTR_LIST(mb_max_linear_groups),
@@ -546,6 +548,8 @@ int ext4_register_sysfs(struct super_block *sb)
					ext4_fc_info_show, sb);
 		proc_create_seq_data("mb_groups", S_IRUGO, sbi->s_proc,
				&ext4_mb_seq_groups_ops, sb);
+		proc_create_data("prealloc_table", S_IRUGO, sbi->s_proc,
+				&ext4_seq_prealloc_table_fops, sb);
 		proc_create_single_data("mb_stats", 0444, sbi->s_proc,
				ext4_seq_mb_stats_show, sb);
 		proc_create_seq_data("mb_structs_summary", 0444, sbi->s_proc,
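
Usage note (illustrative, not part of the patch): each filesystem exports its
current table under its procfs directory, so the configured windows can be
read back from userspace. A minimal sketch, assuming a filesystem the kernel
knows as "sda1" (the device name is a placeholder):

	/* Read back the configured preallocation windows; path is an example. */
	#include <stdio.h>

	int main(void)
	{
		char line[256];
		FILE *f = fopen("/proc/fs/ext4/sda1/prealloc_table", "r");

		if (!f)
			return 1;
		if (fgets(line, sizeof(line), f))
			fputs(line, stdout); /* e.g. "4 8 16 32 64 128 256 512 1024 2048 " */
		fclose(f);
		return 0;
	}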