commit d8d8fd9192a54c7b8caef8cca9b7a1eb5e5e3298
Author: Alex Zhuravlev <alex.zhuravlev@sun.com>
AuthorDate: Thu Oct 23 10:02:19 2008 +0000

Subject: ext4: support for tunable preallocation window
Add support for a tunable preallocation window and new tunables
for large/small requests.
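
The new code in ext4_mb_normalize_request() picks the smallest table entry
that covers the request size; requests larger than every entry fall back to
the biggest configured window, aligned to the logical offset. The table is
runtime-tunable through the new prealloc_table proc file, and mb_small_req /
mb_large_req replace the old mb_stream_req tunable. A minimal user-space
sketch of the table lookup (illustration only, not part of the patch;
choose_window() is a made-up name, and the values mirror the defaults that
ext4_mb_init() installs when s_stripe == 0):

    #include <stdio.h>

    /* default table built by ext4_mb_init() for non-striped filesystems */
    static const unsigned long prealloc_table[] = {
            4, 8, 16, 32, 64, 128, 256, 512, 1024, 2048
    };

    /* pick the preallocation window (in blocks) for a request of 'blocks' blocks */
    static unsigned long choose_window(unsigned long blocks)
    {
            int i, n = sizeof(prealloc_table) / sizeof(prealloc_table[0]);

            for (i = 0; i < n; i++)
                    if (blocks <= prealloc_table[i])
                            return prealloc_table[i];
            /* larger than any entry: use the biggest configured window */
            return prealloc_table[n - 1];
    }

    int main(void)
    {
            printf("%lu\n", choose_window(5));    /* 8    */
            printf("%lu\n", choose_window(300));  /* 512  */
            printf("%lu\n", choose_window(5000)); /* 2048 */
            return 0;
    }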

Bugzilla-ID: b=12800
Signed-off-by: Alex Zhuravlev <alex.zhuravlev@sun.com>
Reviewed-by: Kalpak Shah <kalpak@clusterfs.com>
Reviewed-by: Andreas Dilger <andreas.dilger@sun.com>

Index: linux-3.10.0-123.el7.x86_64/fs/ext4/ext4.h
===================================================================
--- linux-3.10.0-123.el7.x86_64.orig/fs/ext4/ext4.h
+++ linux-3.10.0-123.el7.x86_64/fs/ext4/ext4.h
@@ -1243,11 +1243,14 @@ struct ext4_sb_info {
 
        /* tunables */
        unsigned long s_stripe;
-       unsigned int s_mb_stream_request;
+       unsigned long s_mb_small_req;
+       unsigned long s_mb_large_req;
        unsigned int s_mb_max_to_scan;
        unsigned int s_mb_min_to_scan;
        unsigned int s_mb_stats;
        unsigned int s_mb_order2_reqs;
+       unsigned long *s_mb_prealloc_table;
+       unsigned long s_mb_prealloc_table_size;
        unsigned int s_mb_group_prealloc;
        unsigned int s_max_writeback_mb_bump;
        unsigned int s_max_dir_size_kb;
@@ -2243,6 +2246,7 @@
 extern void ext4_end_bitmap_read(struct buffer_head *bh, int uptodate);
 
 /* mballoc.c */
+extern const struct file_operations ext4_seq_prealloc_table_fops;
 extern const struct file_operations ext4_seq_mb_groups_fops;
 extern long ext4_mb_stats;
 extern long ext4_mb_max_to_scan;
Index: linux-3.10.0-123.el7.x86_64/fs/ext4/mballoc.c
===================================================================
--- linux-3.10.0-123.el7.x86_64.orig/fs/ext4/mballoc.c
+++ linux-3.10.0-123.el7.x86_64/fs/ext4/mballoc.c
@@ -1828,6 +1828,26 @@ int ext4_mb_find_by_goal(struct ext4_all
        return 0;
 }
 
+static int ext4_mb_prealloc_table_add(struct ext4_sb_info *sbi, int value)
+{
+       int i;
+
+       if (value > (sbi->s_blocks_per_group - 1 - 1 - sbi->s_itb_per_group))
+               return -1;
+
+       for (i = 0; i < sbi->s_mb_prealloc_table_size; i++) {
+               if (sbi->s_mb_prealloc_table[i] == 0) {
+                       sbi->s_mb_prealloc_table[i] = value;
+                       return 0;
+               }
+
+               /* they should add values in order */
+               if (value <= sbi->s_mb_prealloc_table[i])
+                       return -1;
+       }
+       return -1;
+}
+
 /*
  * The routine scans buddy structures (not bitmap!) from given order
  * to max order and tries to find big enough chunk to satisfy the req
@@ -2263,6 +2282,88 @@ static const struct seq_operations ext4_
        .show   = ext4_mb_seq_groups_show,
 };
 
+static ssize_t ext4_mb_prealloc_table_proc_write(struct file *file,
+                                            const char __user *buf,
+                                            size_t cnt, loff_t *pos)
+{
+       struct ext4_sb_info *sbi = EXT4_SB(PDE_DATA(file_inode(file)));
+       unsigned long value;
+       unsigned long prev = 0;
+       char str[128];
+       char *cur;
+       char *end;
+       unsigned long *new_table;
+       int num = 0;
+       int i = 0;
+
+       if (cnt >= sizeof(str))
+               return -EINVAL;
+       if (copy_from_user(str, buf, cnt))
+               return -EFAULT;
+
+       num = 0;
+       cur = str;
+       end = str + cnt;
+       while (cur < end) {
+               while ((cur < end) && (*cur == ' '))
+                       cur++;
+               value = simple_strtol(cur, &cur, 0);
+               if (value == 0)
+                       break;
+               if (value <= prev)
+                       return -EINVAL;
+               prev = value;
+               num++;
+       }
+
+       new_table = kmalloc(num * sizeof(*new_table), GFP_KERNEL);
+       if (new_table == NULL)
+               return -ENOMEM;
+       kfree(sbi->s_mb_prealloc_table);
+       memset(new_table, 0, num * sizeof(*new_table));
+       sbi->s_mb_prealloc_table = new_table;
+       sbi->s_mb_prealloc_table_size = num;
+       cur = str;
+       end = str + cnt;
+       while (cur < end && i < num) {
+               while (cur < end && *cur == ' ')
+                       cur++;
+               value = simple_strtol(cur, &cur, 0);
+               if (ext4_mb_prealloc_table_add(sbi, value) == 0)
+                       ++i;
+       }
+       if (i != num)
+               sbi->s_mb_prealloc_table_size = i;
+
+       return cnt;
+}
+
+static int mb_prealloc_table_seq_show(struct seq_file *m, void *v)
+{
+       struct ext4_sb_info *sbi = EXT4_SB(m->private);
+       int i;
+
+       for (i = 0; i < sbi->s_mb_prealloc_table_size; i++)
+               seq_printf(m, "%ld ", sbi->s_mb_prealloc_table[i]);
+       seq_printf(m, "\n");
+
+       return 0;
+}
+
+static int mb_prealloc_table_seq_open(struct inode *inode, struct file *file)
+{
+       return single_open(file, mb_prealloc_table_seq_show, PDE_DATA(inode));
+}
+
+const struct file_operations ext4_seq_prealloc_table_fops = {
+       .owner   = THIS_MODULE,
+       .open    = mb_prealloc_table_seq_open,
+       .read    = seq_read,
+       .llseek  = seq_lseek,
+       .release = single_release,
+       .write   = ext4_mb_prealloc_table_proc_write,
+};
+
 static int ext4_mb_seq_groups_open(struct inode *inode, struct file *file)
 {
        struct super_block *sb = PDE_DATA(inode);
@@ -2550,7 +2657,7 @@ static int ext4_groupinfo_create_slab(si
 int ext4_mb_init(struct super_block *sb)
 {
        struct ext4_sb_info *sbi = EXT4_SB(sb);
-       unsigned i, j;
+       unsigned i, j, k, l;
        unsigned offset, offset_incr;
        unsigned max;
        int ret;
@@ -2595,7 +2702,6 @@ int ext4_mb_init(struct super_block *sb)
        sbi->s_mb_max_to_scan = MB_DEFAULT_MAX_TO_SCAN;
        sbi->s_mb_min_to_scan = MB_DEFAULT_MIN_TO_SCAN;
        sbi->s_mb_stats = MB_DEFAULT_STATS;
-       sbi->s_mb_stream_request = MB_DEFAULT_STREAM_THRESHOLD;
        sbi->s_mb_order2_reqs = MB_DEFAULT_ORDER2_REQS;
        /*
         * The default group preallocation is 512, which for 4k block
@@ -2619,9 +2725,47 @@ int ext4_mb_init(struct super_block *sb)
         * RAID stripe size so that preallocations don't fragment
         * the stripes.
         */
-       if (sbi->s_stripe > 1) {
-               sbi->s_mb_group_prealloc = roundup(
-                       sbi->s_mb_group_prealloc, sbi->s_stripe);
+
+       if (sbi->s_stripe == 0) {
+               sbi->s_mb_prealloc_table_size = 10;
+               i = sbi->s_mb_prealloc_table_size * sizeof(unsigned long);
+               sbi->s_mb_prealloc_table = kmalloc(i, GFP_NOFS);
+               if (sbi->s_mb_prealloc_table == NULL) {
+                       ret = -ENOMEM;
+                       goto out;
+               }
+               memset(sbi->s_mb_prealloc_table, 0, i);
+
+               for (k = 0, l = 4; k <= 9; ++k, l *= 2) {
+                       if (ext4_mb_prealloc_table_add(sbi, l) < 0) {
+                               sbi->s_mb_prealloc_table_size = k;
+                               break;
+                       }
+               }
+
+               sbi->s_mb_small_req = 256;
+               sbi->s_mb_large_req = 1024;
+               sbi->s_mb_group_prealloc = 512;
+       } else {
+               sbi->s_mb_prealloc_table_size = 3;
+               i = sbi->s_mb_prealloc_table_size * sizeof(unsigned long);
+               sbi->s_mb_prealloc_table = kmalloc(i, GFP_NOFS);
+               if (sbi->s_mb_prealloc_table == NULL) {
+                       ret = -ENOMEM;
+                       goto out;
+               }
+               memset(sbi->s_mb_prealloc_table, 0, i);
+
+               for (k = 0, l = sbi->s_stripe; k <= 2; ++k, l *= 2) {
+                       if (ext4_mb_prealloc_table_add(sbi, l) < 0) {
+                               sbi->s_mb_prealloc_table_size = k;
+                               break;
+                       }
+               }
+
+               sbi->s_mb_small_req = sbi->s_stripe;
+               sbi->s_mb_large_req = sbi->s_stripe * 8;
+               sbi->s_mb_group_prealloc = sbi->s_stripe * 4;
        }
 
        sbi->s_locality_groups = alloc_percpu(struct ext4_locality_group);
@@ -2800,6 +2840,7 @@ int ext4_mb_release(struct super_block *sb)
                kvfree(group_info);
                rcu_read_unlock();
        }
+       kfree(sbi->s_mb_prealloc_table);
        kfree(sbi->s_mb_offsets);
        kfree(sbi->s_mb_maxs);
        iput(sbi->s_buddy_cache);
@@ -2963,9 +3107,9 @@ ext4_mb_normalize_request(struct ext4_al
                                struct ext4_allocation_request *ar)
 {
        struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb);
-       int bsbits, max;
+       int bsbits, i, wind;
        ext4_lblk_t end;
-       loff_t size, start_off;
+       loff_t size;
        loff_t orig_size __maybe_unused;
        ext4_lblk_t start;
        struct ext4_inode_info *ei = EXT4_I(ac->ac_inode);
@@ -2998,51 +3142,34 @@ ext4_mb_normalize_request(struct ext4_al
        size = size << bsbits;
        if (size < i_size_read(ac->ac_inode))
                size = i_size_read(ac->ac_inode);
-       orig_size = size;
+       size = (size + ac->ac_sb->s_blocksize - 1) >> bsbits;
 
-       /* max size of free chunks */
-       max = 2 << bsbits;
+       start = wind = 0;
 
-#define NRL_CHECK_SIZE(req, size, max, chunk_size)     \
-               (req <= (size) || max <= (chunk_size))
+       /* let's choose preallocation window depending on file size */
+       for (i = 0; i < sbi->s_mb_prealloc_table_size; i++) {
+               if (size <= sbi->s_mb_prealloc_table[i]) {
+                       wind = sbi->s_mb_prealloc_table[i];
+                       break;
+               }
+       }
+       size = wind;
 
-       /* first, try to predict filesize */
-       /* XXX: should this table be tunable? */
-       start_off = 0;
-       if (size <= 16 * 1024) {
-               size = 16 * 1024;
-       } else if (size <= 32 * 1024) {
-               size = 32 * 1024;
-       } else if (size <= 64 * 1024) {
-               size = 64 * 1024;
-       } else if (size <= 128 * 1024) {
-               size = 128 * 1024;
-       } else if (size <= 256 * 1024) {
-               size = 256 * 1024;
-       } else if (size <= 512 * 1024) {
-               size = 512 * 1024;
-       } else if (size <= 1024 * 1024) {
-               size = 1024 * 1024;
-       } else if (NRL_CHECK_SIZE(size, 4 * 1024 * 1024, max, 2 * 1024)) {
-               start_off = ((loff_t)ac->ac_o_ex.fe_logical >>
-                                               (21 - bsbits)) << 21;
-               size = 2 * 1024 * 1024;
-       } else if (NRL_CHECK_SIZE(size, 8 * 1024 * 1024, max, 4 * 1024)) {
-               start_off = ((loff_t)ac->ac_o_ex.fe_logical >>
-                                                       (22 - bsbits)) << 22;
-               size = 4 * 1024 * 1024;
-       } else if (NRL_CHECK_SIZE(ac->ac_o_ex.fe_len,
-                                       (8<<20)>>bsbits, max, 8 * 1024)) {
-               start_off = ((loff_t)ac->ac_o_ex.fe_logical >>
-                                                       (23 - bsbits)) << 23;
-               size = 8 * 1024 * 1024;
-       } else {
-               start_off = (loff_t) ac->ac_o_ex.fe_logical << bsbits;
-               size      = (loff_t) EXT4_C2B(EXT4_SB(ac->ac_sb),
-                                             ac->ac_o_ex.fe_len) << bsbits;
+       if (wind == 0) {
+               __u64 tstart, tend;
+               /* file is quite large, we now preallocate with
+                * the biggest configured window with regard to
+                * logical offset */
+               wind = sbi->s_mb_prealloc_table[i - 1];
+               tstart = ac->ac_o_ex.fe_logical;
+               do_div(tstart, wind);
+               start = tstart * wind;
+               tend = ac->ac_o_ex.fe_logical + ac->ac_o_ex.fe_len - 1;
+               do_div(tend, wind);
+               tend = tend * wind + wind;
+               size = tend - start;
        }
-       size = size >> bsbits;
-       start = start_off >> bsbits;
+       orig_size = size;
 
        /* don't cover already allocated blocks in selected range */
        if (ar->pleft && start <= ar->lleft) {
@@ -3117,7 +3245,6 @@ ext4_mb_normalize_request(struct ext4_al
        BUG_ON(start + size <= ac->ac_o_ex.fe_logical &&
                        start > ac->ac_o_ex.fe_logical);
        }
-       BUG_ON(size <= 0 || size > EXT4_BLOCKS_PER_GROUP(ac->ac_sb));
 
        /* now prepare goal request */
 
@@ -4056,11 +4183,19 @@ static void ext4_mb_group_or_file(struct
 
        /* don't use group allocation for large files */
        size = max(size, isize);
-       if (size > sbi->s_mb_stream_request) {
+       if ((ac->ac_o_ex.fe_len >= sbi->s_mb_small_req) ||
+           (size >= sbi->s_mb_large_req)) {
                ac->ac_flags |= EXT4_MB_STREAM_ALLOC;
                return;
        }
 
+       /*
+        * request is so large that we don't care about
+        * streaming - it outweighs any possible seek
+        */
+       if (ac->ac_o_ex.fe_len >= sbi->s_mb_large_req)
+               return;
+
        BUG_ON(ac->ac_lg != NULL);
        /*
         * locality group prealloc space are per cpu. The reason for having
Index: linux-3.10.0-123.el7.x86_64/fs/ext4/sysfs.c
===================================================================
--- linux-3.10.0-123.el7.x86_64.orig/fs/ext4/sysfs.c
+++ linux-3.10.0-123.el7.x86_64/fs/ext4/sysfs.c
@@ -2555,7 +2555,8 @@ EXT4_RW_ATTR_SBI_UI(mb_stats, s_mb_stats
 EXT4_RW_ATTR_SBI_UI(mb_max_to_scan, s_mb_max_to_scan);
 EXT4_RW_ATTR_SBI_UI(mb_min_to_scan, s_mb_min_to_scan);
 EXT4_RW_ATTR_SBI_UI(mb_order2_req, s_mb_order2_reqs);
-EXT4_RW_ATTR_SBI_UI(mb_stream_req, s_mb_stream_request);
+EXT4_RW_ATTR_SBI_UI(mb_small_req, s_mb_small_req);
+EXT4_RW_ATTR_SBI_UI(mb_large_req, s_mb_large_req);
 EXT4_RW_ATTR_SBI_UI(mb_group_prealloc, s_mb_group_prealloc);
 EXT4_RW_ATTR_SBI_UI(max_writeback_mb_bump, s_max_writeback_mb_bump);
 EXT4_RW_ATTR_SBI_UI(extent_max_zeroout_kb, s_extent_max_zeroout_kb);
@@ -2578,7 +2579,8 @@ static struct attribute *ext4_attrs[] =
        ATTR_LIST(mb_max_to_scan),
        ATTR_LIST(mb_min_to_scan),
        ATTR_LIST(mb_order2_req),
-       ATTR_LIST(mb_stream_req),
+       ATTR_LIST(mb_small_req),
+       ATTR_LIST(mb_large_req),
        ATTR_LIST(mb_group_prealloc),
        ATTR_LIST(max_writeback_mb_bump),
        ATTR_LIST(extent_max_zeroout_kb),
Index: linux-3.10.0-123.el7.x86_64/fs/ext4/inode.c
===================================================================
--- linux-3.10.0-123.el7.x86_64.orig/fs/ext4/inode.c
+++ linux-3.10.0-123.el7.x86_64/fs/ext4/inode.c
@@ -2476,6 +2476,9 @@ static int ext4_da_writepages(struct add
        if (unlikely(sbi->s_mount_flags & EXT4_MF_FS_ABORTED))
                return -EROFS;
 
+       if (wbc->nr_to_write < sbi->s_mb_small_req)
+               wbc->nr_to_write = sbi->s_mb_small_req;
+
        if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX)
                range_whole = 1;
 
Index: linux-3.10.0-123.el7.x86_64/fs/ext4/sysfs.c
===================================================================
--- linux-3.10.0-123.el7.x86_64.orig/fs/ext4/sysfs.c
+++ linux-3.10.0-123.el7.x86_64/fs/ext4/sysfs.c
@@ -1243,6 +1243,7 @@
        PROC_FILE_LIST(options),
        PROC_FILE_LIST(es_shrinker_info),
        PROC_FILE_LIST(mb_groups),
+       PROC_FILE_LIST(prealloc_table),
        { NULL, NULL },
 };
 