1 diff --git a/fs/ext4/ext4.h b/fs/ext4/ext4.h
2 index 3b9ec24..64dc5fd 100644
5 @@ -1450,6 +1450,9 @@ struct ext4_sb_info {
6 unsigned int s_mb_min_to_scan;
7 unsigned int s_mb_stats;
8 unsigned int s_mb_order2_reqs;
9 + ext4_fsblk_t s_mb_c1_blocks;
10 + ext4_fsblk_t s_mb_c2_blocks;
11 + ext4_fsblk_t s_mb_c3_blocks;
12 unsigned long *s_mb_prealloc_table;
13 unsigned int s_mb_group_prealloc;
14 unsigned int s_max_dir_size_kb;
15 @@ -1466,6 +1469,9 @@ struct ext4_sb_info {
16 atomic_t s_bal_goals; /* goal hits */
17 atomic_t s_bal_breaks; /* too long searches */
18 atomic_t s_bal_2orders; /* 2^order hits */
19 + /* cX loop didn't find blocks */
20 + atomic64_t s_bal_cX_failed[4];
21 + atomic64_t s_bal_cX_skipped[3];
22 spinlock_t s_bal_lock;
23 unsigned long s_mb_buddies_generated;
24 unsigned long long s_mb_generation_time;
25 @@ -2563,6 +2569,9 @@ extern void ext4_end_bitmap_read(struct buffer_head *bh, int uptodate);
27 extern const struct file_operations ext4_seq_prealloc_table_fops;
28 extern const struct seq_operations ext4_mb_seq_groups_ops;
29 +extern const struct file_operations ext4_mb_seq_alloc_fops;
30 +extern int save_threshold_percent(struct ext4_sb_info *sbi, const char *buf,
31 + ext4_fsblk_t *blocks);
32 extern const struct file_operations ext4_seq_mb_last_group_fops;
33 extern int ext4_mb_seq_last_start_seq_show(struct seq_file *m, void *v);
34 extern long ext4_mb_stats;
35 diff --git a/fs/ext4/mballoc.c b/fs/ext4/mballoc.c
36 index 15c962f..7870406 100644
37 --- a/fs/ext4/mballoc.c
38 +++ b/fs/ext4/mballoc.c
39 @@ -2104,6 +2104,20 @@ static int ext4_mb_good_group(struct ext4_allocation_context *ac,
43 +static u64 available_blocks_count(struct ext4_sb_info *sbi)
45 + ext4_fsblk_t resv_blocks;
47 + struct ext4_super_block *es = sbi->s_es;
49 + resv_blocks = EXT4_C2B(sbi, atomic64_read(&sbi->s_resv_clusters));
50 + bfree = percpu_counter_sum_positive(&sbi->s_freeclusters_counter) -
51 + percpu_counter_sum_positive(&sbi->s_dirtyclusters_counter);
53 + bfree = EXT4_C2B(sbi, max_t(s64, bfree, 0));
54 + return bfree - (ext4_r_blocks_count(es) + resv_blocks);
57 static noinline_for_stack int
58 ext4_mb_regular_allocator(struct ext4_allocation_context *ac)
60 @@ -2113,6 +2127,7 @@ ext4_mb_regular_allocator(struct ext4_allocation_context *ac)
61 struct ext4_sb_info *sbi;
62 struct super_block *sb;
63 struct ext4_buddy e4b;
64 + ext4_fsblk_t avail_blocks;
68 @@ -2165,6 +2180,21 @@ ext4_mb_regular_allocator(struct ext4_allocation_context *ac)
70 /* Let's just scan groups to find more-less suitable blocks */
71 cr = ac->ac_2order ? 0 : 1;
73 + /* Choose what loop to pass based on disk fullness */
74 + avail_blocks = available_blocks_count(sbi);
76 + if (avail_blocks < sbi->s_mb_c3_blocks) {
78 + atomic64_inc(&sbi->s_bal_cX_skipped[2]);
79 + } else if (avail_blocks < sbi->s_mb_c2_blocks) {
81 + atomic64_inc(&sbi->s_bal_cX_skipped[1]);
82 + } else if (avail_blocks < sbi->s_mb_c1_blocks) {
84 + atomic64_inc(&sbi->s_bal_cX_skipped[0]);
88 * cr == 0 try to get exact allocation,
89 * cr == 3 try to get anything
90 @@ -2230,6 +2260,9 @@ repeat:
91 if (ac->ac_status != AC_STATUS_CONTINUE)
94 + /* Processed all groups and haven't found blocks */
96 + atomic64_inc(&sbi->s_bal_cX_failed[cr]);
99 if (ac->ac_b_ex.fe_len > 0 && ac->ac_status != AC_STATUS_FOUND &&
100 @@ -2510,6 +2543,96 @@ const struct file_operations ext4_seq_mb_last_group_fops = {
101 .write = ext4_mb_last_group_write,
104 +static int mb_seq_alloc_show(struct seq_file *seq, void *v)
106 + struct super_block *sb = seq->private;
107 + struct ext4_sb_info *sbi = EXT4_SB(sb);
109 + seq_printf(seq, "mballoc:\n");
110 + seq_printf(seq, "\tblocks: %u\n", atomic_read(&sbi->s_bal_allocated));
111 + seq_printf(seq, "\treqs: %u\n", atomic_read(&sbi->s_bal_reqs));
112 + seq_printf(seq, "\tsuccess: %u\n", atomic_read(&sbi->s_bal_success));
114 + seq_printf(seq, "\textents_scanned: %u\n",
115 + atomic_read(&sbi->s_bal_ex_scanned));
116 + seq_printf(seq, "\t\tgoal_hits: %u\n", atomic_read(&sbi->s_bal_goals));
117 + seq_printf(seq, "\t\t2^n_hits: %u\n", atomic_read(&sbi->s_bal_2orders));
118 + seq_printf(seq, "\t\tbreaks: %u\n", atomic_read(&sbi->s_bal_breaks));
119 + seq_printf(seq, "\t\tlost: %u\n", atomic_read(&sbi->s_mb_lost_chunks));
121 + seq_printf(seq, "\tuseless_c0_loops: %llu\n",
122 + (unsigned long long)atomic64_read(&sbi->s_bal_cX_failed[0]));
123 + seq_printf(seq, "\tuseless_c1_loops: %llu\n",
124 + (unsigned long long)atomic64_read(&sbi->s_bal_cX_failed[1]));
125 + seq_printf(seq, "\tuseless_c2_loops: %llu\n",
126 + (unsigned long long)atomic64_read(&sbi->s_bal_cX_failed[2]));
127 + seq_printf(seq, "\tuseless_c3_loops: %llu\n",
128 + (unsigned long long)atomic64_read(&sbi->s_bal_cX_failed[3]));
129 + seq_printf(seq, "\tskipped_c0_loops: %llu\n",
130 + (unsigned long long)atomic64_read(&sbi->s_bal_cX_skipped[0]));
131 + seq_printf(seq, "\tskipped_c1_loops: %llu\n",
132 + (unsigned long long)atomic64_read(&sbi->s_bal_cX_skipped[1]));
133 + seq_printf(seq, "\tskipped_c2_loops: %llu\n",
134 + (unsigned long long)atomic64_read(&sbi->s_bal_cX_skipped[2]));
135 + seq_printf(seq, "\tbuddies_generated: %lu\n",
136 + sbi->s_mb_buddies_generated);
137 + seq_printf(seq, "\tbuddies_time_used: %llu\n", sbi->s_mb_generation_time);
138 + seq_printf(seq, "\tpreallocated: %u\n",
139 + atomic_read(&sbi->s_mb_preallocated));
140 + seq_printf(seq, "\tdiscarded: %u\n",
141 + atomic_read(&sbi->s_mb_discarded));
145 +static ssize_t mb_seq_alloc_write(struct file *file,
146 + const char __user *buf,
147 + size_t cnt, loff_t *pos)
149 + struct ext4_sb_info *sbi = EXT4_SB(PDE_DATA(file_inode(file)));
151 + atomic_set(&sbi->s_bal_allocated, 0);
152 + atomic_set(&sbi->s_bal_reqs, 0);
153 + atomic_set(&sbi->s_bal_success, 0);
155 + atomic_set(&sbi->s_bal_ex_scanned, 0);
156 + atomic_set(&sbi->s_bal_goals, 0);
157 + atomic_set(&sbi->s_bal_2orders, 0);
158 + atomic_set(&sbi->s_bal_breaks, 0);
159 + atomic_set(&sbi->s_mb_lost_chunks, 0);
161 + atomic64_set(&sbi->s_bal_cX_failed[0], 0);
162 + atomic64_set(&sbi->s_bal_cX_failed[1], 0);
163 + atomic64_set(&sbi->s_bal_cX_failed[2], 0);
164 + atomic64_set(&sbi->s_bal_cX_failed[3], 0);
166 + atomic64_set(&sbi->s_bal_cX_skipped[0], 0);
167 + atomic64_set(&sbi->s_bal_cX_skipped[1], 0);
168 + atomic64_set(&sbi->s_bal_cX_skipped[2], 0);
171 + sbi->s_mb_buddies_generated = 0;
172 + sbi->s_mb_generation_time = 0;
174 + atomic_set(&sbi->s_mb_preallocated, 0);
175 + atomic_set(&sbi->s_mb_discarded, 0);
180 +static int mb_seq_alloc_open(struct inode *inode, struct file *file)
182 + return single_open(file, mb_seq_alloc_show, PDE_DATA(inode));
185 +const struct file_operations ext4_mb_seq_alloc_fops = {
186 + .owner = THIS_MODULE,
187 + .open = mb_seq_alloc_open,
189 + .llseek = seq_lseek,
190 + .release = single_release,
191 + .write = mb_seq_alloc_write,
194 int ext4_mb_seq_last_start_seq_show(struct seq_file *m, void *v)
196 struct ext4_sb_info *sbi = EXT4_SB(m->private);
197 @@ -2734,6 +2854,8 @@ static int ext4_groupinfo_create_slab(size_t size)
201 +#define THRESHOLD_BLOCKS(sbi, percent) \
202 + (ext4_blocks_count((sbi)->s_es) / 100 * (percent))
203 int ext4_mb_init(struct super_block *sb)
205 struct ext4_sb_info *sbi = EXT4_SB(sb);
206 @@ -2787,6 +2908,15 @@ int ext4_mb_init(struct super_block *sb)
207 sbi->s_mb_min_to_scan = MB_DEFAULT_MIN_TO_SCAN;
208 sbi->s_mb_stats = MB_DEFAULT_STATS;
209 sbi->s_mb_order2_reqs = MB_DEFAULT_ORDER2_REQS;
210 + if (!sbi->s_mb_c1_blocks)
211 + sbi->s_mb_c1_blocks =
212 + THRESHOLD_BLOCKS(sbi, MB_DEFAULT_C1_THRESHOLD);
213 + if (!sbi->s_mb_c2_blocks)
214 + sbi->s_mb_c2_blocks =
215 + THRESHOLD_BLOCKS(sbi, MB_DEFAULT_C2_THRESHOLD);
216 + if (!sbi->s_mb_c3_blocks)
217 + sbi->s_mb_c3_blocks =
218 + THRESHOLD_BLOCKS(sbi, MB_DEFAULT_C3_THRESHOLD);
220 * The default group preallocation is 512, which for 4k block
221 * sizes translates to 2 megabytes. However for bigalloc file
222 @@ -2922,6 +3046,17 @@ int ext4_mb_release(struct super_block *sb)
223 atomic_read(&sbi->s_bal_allocated),
224 atomic_read(&sbi->s_bal_reqs),
225 atomic_read(&sbi->s_bal_success));
226 + ext4_msg(sb, KERN_INFO,
227 + "mballoc: (%llu, %llu, %llu, %llu) useless c(0,1,2,3) loops",
228 + (unsigned long long)atomic64_read(&sbi->s_bal_cX_failed[0]),
229 + (unsigned long long)atomic64_read(&sbi->s_bal_cX_failed[1]),
230 + (unsigned long long)atomic64_read(&sbi->s_bal_cX_failed[2]),
231 + (unsigned long long)atomic64_read(&sbi->s_bal_cX_failed[3]));
232 + ext4_msg(sb, KERN_INFO,
233 + "mballoc: (%llu, %llu, %llu) skipped c(0,1,2) loops",
234 + (unsigned long long)atomic64_read(&sbi->s_bal_cX_skipped[0]),
235 + (unsigned long long)atomic64_read(&sbi->s_bal_cX_skipped[1]),
236 + (unsigned long long)atomic64_read(&sbi->s_bal_cX_skipped[2]));
237 ext4_msg(sb, KERN_INFO,
238 "mballoc: %u extents scanned, %u goal hits, "
239 "%u 2^N hits, %u breaks, %u lost",
240 diff --git a/fs/ext4/mballoc.h b/fs/ext4/mballoc.h
241 index e00c3b7..d02daaf 100644
242 --- a/fs/ext4/mballoc.h
243 +++ b/fs/ext4/mballoc.h
244 @@ -72,6 +72,9 @@ do { \
245 * for which requests use 2^N search using buddies
247 #define MB_DEFAULT_ORDER2_REQS 8
248 +#define MB_DEFAULT_C1_THRESHOLD 25
249 +#define MB_DEFAULT_C2_THRESHOLD 15
250 +#define MB_DEFAULT_C3_THRESHOLD 5
253 * default group prealloc size 512 blocks
254 diff --git a/fs/ext4/super.c b/fs/ext4/super.c
255 ===================================================================
256 --- linux-stage.orig/fs/ext4/super.c
257 +++ linux-stage/fs/ext4/super.c
258 @@ -1450,6 +1450,7 @@ enum {
259 Opt_nomblk_io_submit, Opt_block_validity, Opt_noblock_validity,
260 Opt_inode_readahead_blks, Opt_journal_ioprio,
261 Opt_dioread_nolock, Opt_dioread_lock,
262 + Opt_mb_c1_threshold, Opt_mb_c2_threshold, Opt_mb_c3_threshold,
263 Opt_discard, Opt_nodiscard, Opt_init_itable, Opt_noinit_itable,
264 Opt_max_dir_size_kb, Opt_nojournal_checksum, Opt_nombcache,
266 @@ -1604,6 +1605,9 @@ static const match_table_t tokens = {
267 {Opt_init_itable, "init_itable"},
268 {Opt_noinit_itable, "noinit_itable"},
269 {Opt_max_dir_size_kb, "max_dir_size_kb=%u"},
270 + {Opt_mb_c1_threshold, "mb_c1_threshold=%s"},
271 + {Opt_mb_c2_threshold, "mb_c2_threshold=%s"},
272 + {Opt_mb_c3_threshold, "mb_c3_threshold=%s"},
273 {Opt_test_dummy_encryption, "test_dummy_encryption"},
274 {Opt_nombcache, "nombcache"},
275 {Opt_nombcache, "no_mbcache"}, /* for backward compatibility */
276 @@ -1748,6 +1752,9 @@ static const struct mount_opts {
277 {Opt_jqfmt_vfsv0, QFMT_VFS_V0, MOPT_QFMT},
278 {Opt_jqfmt_vfsv1, QFMT_VFS_V1, MOPT_QFMT},
279 {Opt_max_dir_size_kb, 0, MOPT_GTE0},
280 + {Opt_mb_c1_threshold, 0, MOPT_STRING},
281 + {Opt_mb_c2_threshold, 0, MOPT_STRING},
282 + {Opt_mb_c3_threshold, 0, MOPT_STRING},
283 {Opt_test_dummy_encryption, 0, MOPT_GTE0},
284 {Opt_nombcache, EXT4_MOUNT_NO_MBCACHE, MOPT_SET},
286 @@ -1874,6 +1881,12 @@ static const struct mount_opts {
287 sbi->s_max_dir_size_kb = arg;
288 /* reset s_warning_dir_size and make it re-calculated */
289 sbi->s_warning_dir_size = 0;
290 + } else if (token == Opt_mb_c1_threshold) {
291 + save_threshold_percent(sbi, args[0].from, &sbi->s_mb_c1_blocks);
292 + } else if (token == Opt_mb_c2_threshold) {
293 + save_threshold_percent(sbi, args[0].from, &sbi->s_mb_c2_blocks);
294 + } else if (token == Opt_mb_c3_threshold) {
295 + save_threshold_percent(sbi, args[0].from, &sbi->s_mb_c3_blocks);
296 } else if (token == Opt_stripe) {
298 } else if (token == Opt_resuid) {
299 diff --git a/fs/ext4/sysfs.c b/fs/ext4/sysfs.c
300 index 417b33a..f49821e 100644
301 --- a/fs/ext4/sysfs.c
302 +++ b/fs/ext4/sysfs.c
306 attr_delayed_allocation_blocks,
307 + attr_mb_c1_threshold,
308 + attr_mb_c2_threshold,
309 + attr_mb_c3_threshold,
310 attr_session_write_kbytes,
311 attr_lifetime_write_kbytes,
312 attr_reserved_clusters,
313 @@ -134,6 +137,32 @@ static ssize_t journal_task_show(struct ext4_sb_info *sbi, char *buf)
314 task_pid_vnr(sbi->s_journal->j_task));
317 +int save_threshold_percent(struct ext4_sb_info *sbi, const char *buf,
318 + ext4_fsblk_t *blocks)
320 + unsigned long long val;
324 + ret = kstrtoull(skip_spaces(buf), 0, &val);
325 + if (ret || val > 100)
328 + *blocks = val * ext4_blocks_count(sbi->s_es) / 100;
332 +#define THRESHOLD_PERCENT(sbi, blocks) \
333 + (((blocks) - 1) * 100 / ext4_blocks_count((sbi)->s_es) + 1)
334 +static ssize_t mb_threshold_store(struct ext4_sb_info *sbi,
335 + const char *buf, size_t count,
336 + ext4_fsblk_t *blocks)
338 + int ret = save_threshold_percent(sbi, buf, blocks);
340 + return ret ?: count;
343 #define EXT4_ATTR(_name,_mode,_id) \
344 static struct ext4_attr ext4_attr_##_name = { \
345 .attr = {.name = __stringify(_name), .mode = _mode }, \
346 @@ -176,6 +205,9 @@ EXT4_ATTR_FUNC(delayed_allocation_blocks, 0444);
347 EXT4_ATTR_FUNC(session_write_kbytes, 0444);
348 EXT4_ATTR_FUNC(lifetime_write_kbytes, 0444);
349 EXT4_ATTR_FUNC(reserved_clusters, 0644);
350 +EXT4_ATTR_FUNC(mb_c1_threshold, 0644);
351 +EXT4_ATTR_FUNC(mb_c2_threshold, 0644);
352 +EXT4_ATTR_FUNC(mb_c3_threshold, 0644);
354 EXT4_ATTR_OFFSET(inode_readahead_blks, 0644, inode_readahead,
355 ext4_sb_info, s_inode_readahead_blks);
356 @@ -211,6 +243,9 @@ static struct attribute *ext4_attrs[] = {
357 ATTR_LIST(session_write_kbytes),
358 ATTR_LIST(lifetime_write_kbytes),
359 ATTR_LIST(reserved_clusters),
360 + ATTR_LIST(mb_c1_threshold),
361 + ATTR_LIST(mb_c2_threshold),
362 + ATTR_LIST(mb_c3_threshold),
363 ATTR_LIST(inode_readahead_blks),
364 ATTR_LIST(inode_goal),
365 ATTR_LIST(max_dir_size),
366 @@ -294,6 +329,15 @@ static ssize_t ext4_attr_show(struct kobject *kobj,
367 return snprintf(buf, PAGE_SIZE, "%llu\n",
369 percpu_counter_sum(&sbi->s_dirtyclusters_counter)));
370 + case attr_mb_c1_threshold:
371 + return scnprintf(buf, PAGE_SIZE, "%llu\n",
372 + THRESHOLD_PERCENT(sbi, sbi->s_mb_c1_blocks));
373 + case attr_mb_c2_threshold:
374 + return scnprintf(buf, PAGE_SIZE, "%llu\n",
375 + THRESHOLD_PERCENT(sbi, sbi->s_mb_c2_blocks));
376 + case attr_mb_c3_threshold:
377 + return scnprintf(buf, PAGE_SIZE, "%llu\n",
378 + THRESHOLD_PERCENT(sbi, sbi->s_mb_c3_blocks));
379 case attr_session_write_kbytes:
380 return session_write_kbytes_show(sbi, buf);
381 case attr_lifetime_write_kbytes:
382 @@ -363,6 +407,12 @@ static ssize_t ext4_attr_store(struct kobject *kobj,
383 return inode_readahead_blks_store(sbi, buf, len);
384 case attr_trigger_test_error:
385 return trigger_test_error(sbi, buf, len);
386 + case attr_mb_c1_threshold:
387 + return mb_threshold_store(sbi, buf, len, &sbi->s_mb_c1_blocks);
388 + case attr_mb_c2_threshold:
389 + return mb_threshold_store(sbi, buf, len, &sbi->s_mb_c2_blocks);
390 + case attr_mb_c3_threshold:
391 + return mb_threshold_store(sbi, buf, len, &sbi->s_mb_c3_blocks);
395 @@ -425,6 +475,8 @@ int ext4_register_sysfs(struct super_block *sb)
396 &ext4_seq_mb_last_group_fops, sb);
397 proc_create_single_data("mb_last_start", S_IRUGO, sbi->s_proc,
398 ext4_mb_seq_last_start_seq_show, sb);
399 + proc_create_data("mb_alloc_stats", S_IFREG | S_IRUGO | S_IWUSR,
400 + sbi->s_proc, &ext4_mb_seq_alloc_fops, sb);