LU-17672 ldiskfs: release s_mb_prealloc_table
fs/lustre-release.git: ldiskfs/kernel_patches/patches/rhel8/ext4-prealloc.patch
commit d8d8fd9192a54c7b8caef8cca9b7a1eb5e5e3298
Author: Alex Zhuravlev <alex.zhuravlev@sun.com>
AuthorDate: Thu Oct 23 10:02:19 2008 +0000

Subject: ext4: support for tunable preallocation window
Add support for tunable preallocation window and new tunables
for large/small requests.

Bugzilla-ID: b=12800
Signed-off-by: Alex Zhuravlev <alex.zhuravlev@sun.com>
Reviewed-by: Kalpak Shah <kalpak@clusterfs.com>
Reviewed-by: Andreas Dilger <andreas.dilger@sun.com>

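A quick usage sketch for the new interfaces (the device name "dm-0" below is
only a placeholder; all values are in filesystem blocks):

    # show the per-inode preallocation table; with s_stripe == 0 the
    # defaults set in ext4_mb_init() are "4 8 16 32 64 128 256 512 1024 2048"
    cat /proc/fs/ext4/dm-0/prealloc_table

    # install a custom table; entries must be strictly increasing and must
    # stay below the usable blocks per group, otherwise the write fails
    # with -EINVAL
    echo "16 64 256 1024" > /proc/fs/ext4/dm-0/prealloc_table

    # thresholds (replacing the old mb_stream_req tunable) at or above which
    # an allocation uses per-inode stream mode instead of group preallocation
    echo 256 > /sys/fs/ext4/dm-0/mb_small_req
    echo 1024 > /sys/fs/ext4/dm-0/mb_large_req

On striped filesystems (s_stripe != 0) the defaults are derived from the
stripe size instead: the table becomes {stripe, 2*stripe, 4*stripe},
mb_small_req = stripe, mb_large_req = 8 * stripe and
mb_group_prealloc = 4 * stripe.
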
Index: linux-4.18.0-80.1.2.el8_0/fs/ext4/ext4.h
===================================================================
--- linux-4.18.0-80.1.2.el8_0.orig/fs/ext4/ext4.h
+++ linux-4.18.0-80.1.2.el8_0/fs/ext4/ext4.h
@@ -1185,6 +1185,8 @@ extern void ext4_set_bits(void *bm, int
 /* Metadata checksum algorithm codes */
 #define EXT4_CRC32C_CHKSUM             1
 
+#define EXT4_MAX_PREALLOC_TABLE        64
+
 /*
  * Structure of the super block
  */
@@ -1418,11 +1420,13 @@ struct ext4_sb_info {
 
        /* tunables */
        unsigned long s_stripe;
-       unsigned int s_mb_stream_request;
+       unsigned long s_mb_small_req;
+       unsigned long s_mb_large_req;
       unsigned int s_mb_max_to_scan;
       unsigned int s_mb_min_to_scan;
       unsigned int s_mb_stats;
       unsigned int s_mb_order2_reqs;
+       unsigned long *s_mb_prealloc_table;
       unsigned int s_mb_group_prealloc;
       unsigned int s_max_dir_size_kb;
       /* where last allocation was done - for stream allocation */
@@ -2397,6 +2401,7 @@ extern int ext4_init_inode_table(struct
 extern void ext4_end_bitmap_read(struct buffer_head *bh, int uptodate);
 
 /* mballoc.c */
+extern const struct file_operations ext4_seq_prealloc_table_fops;
 extern const struct seq_operations ext4_mb_seq_groups_ops;
 extern long ext4_mb_stats;
 extern long ext4_mb_max_to_scan;
Index: linux-4.18.0-80.1.2.el8_0/fs/ext4/inode.c
===================================================================
--- linux-4.18.0-80.1.2.el8_0.orig/fs/ext4/inode.c
+++ linux-4.18.0-80.1.2.el8_0/fs/ext4/inode.c
@@ -2769,6 +2769,9 @@ static int ext4_writepages(struct addres
                ext4_journal_stop(handle);
        }
 
+       if (wbc->nr_to_write < sbi->s_mb_small_req)
+               wbc->nr_to_write = sbi->s_mb_small_req;
+
       if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX)
               range_whole = 1;
 
Index: linux-4.18.0-80.1.2.el8_0/fs/ext4/mballoc.c
===================================================================
--- linux-4.18.0-80.1.2.el8_0.orig/fs/ext4/mballoc.c
+++ linux-4.18.0-80.1.2.el8_0/fs/ext4/mballoc.c
@@ -2339,6 +2339,100 @@ const struct seq_operations ext4_mb_seq_
        .show   = ext4_mb_seq_groups_show,
 };
 
+static int ext4_mb_check_and_update_prealloc(struct ext4_sb_info *sbi,
+                                                char *str, size_t cnt,
+                                                int update)
+{
+       unsigned long value;
+       unsigned long prev = 0;
+       char *cur;
+       char *next;
+       char *end;
+       int num = 0;
+
+       cur = str;
+       end = str + cnt;
+       while (cur < end) {
+               while ((cur < end) && (*cur == ' ')) cur++;
+               value = simple_strtol(cur, &next, 0);
+               if (value == 0)
+                       break;
+               if (cur == next)
+                       return -EINVAL;
+
+               cur = next;
+
+               if (value > (sbi->s_blocks_per_group - 1 - 1 - sbi->s_itb_per_group))
+                       return -EINVAL;
+
+               /* they should add values in order */
+               if (value <= prev)
+                       return -EINVAL;
+
+               if (update)
+                       sbi->s_mb_prealloc_table[num] = value;
+
+               prev = value;
+               num++;
+       }
+
+       if (num > EXT4_MAX_PREALLOC_TABLE - 1)
+               return -EOVERFLOW;
+
+       if (update)
+               sbi->s_mb_prealloc_table[num] = 0;
+
+       return 0;
+}
+
+static ssize_t ext4_mb_prealloc_table_proc_write(struct file *file,
+                                            const char __user *buf,
+                                            size_t cnt, loff_t *pos)
+{
+       struct ext4_sb_info *sbi = EXT4_SB(PDE_DATA(file_inode(file)));
+       char str[128];
+       int rc;
+
+       if (cnt >= sizeof(str))
+               return -EINVAL;
+       if (copy_from_user(str, buf, cnt))
+               return -EFAULT;
+
+       rc = ext4_mb_check_and_update_prealloc(sbi, str, cnt, 0);
+       if (rc)
+               return rc;
+
+       rc = ext4_mb_check_and_update_prealloc(sbi, str, cnt, 1);
+       return rc ? rc : cnt;
+}
+
+static int mb_prealloc_table_seq_show(struct seq_file *m, void *v)
+{
+       struct ext4_sb_info *sbi = EXT4_SB(m->private);
+       int i;
+
+       for (i = 0; i < EXT4_MAX_PREALLOC_TABLE &&
+                       sbi->s_mb_prealloc_table[i] != 0; i++)
+               seq_printf(m, "%ld ", sbi->s_mb_prealloc_table[i]);
+       seq_printf(m, "\n");
+
+       return 0;
+}
+
+static int mb_prealloc_table_seq_open(struct inode *inode, struct file *file)
+{
+       return single_open(file, mb_prealloc_table_seq_show, PDE_DATA(inode));
+}
+
+const struct file_operations ext4_seq_prealloc_table_fops = {
+       .owner   = THIS_MODULE,
+       .open    = mb_prealloc_table_seq_open,
+       .read    = seq_read,
+       .llseek  = seq_lseek,
+       .release = single_release,
+       .write   = ext4_mb_prealloc_table_proc_write,
+};
+
 static struct kmem_cache *get_groupinfo_cache(int blocksize_bits)
 {
        int cache_index = blocksize_bits - EXT4_MIN_BLOCK_LOG_SIZE;
@@ -2566,7 +2660,7 @@ static int ext4_groupinfo_create_slab(si
 int ext4_mb_init(struct super_block *sb)
 {
        struct ext4_sb_info *sbi = EXT4_SB(sb);
-       unsigned i, j;
+       unsigned i, j, k, l;
        unsigned offset, offset_incr;
        unsigned max;
        int ret;
@@ -2615,7 +2709,6 @@ int ext4_mb_init(struct super_block *sb)
        sbi->s_mb_max_to_scan = MB_DEFAULT_MAX_TO_SCAN;
        sbi->s_mb_min_to_scan = MB_DEFAULT_MIN_TO_SCAN;
        sbi->s_mb_stats = MB_DEFAULT_STATS;
-       sbi->s_mb_stream_request = MB_DEFAULT_STREAM_THRESHOLD;
        sbi->s_mb_order2_reqs = MB_DEFAULT_ORDER2_REQS;
        /*
         * The default group preallocation is 512, which for 4k block
@@ -2639,9 +2732,29 @@ int ext4_mb_init(struct super_block *sb)
         * RAID stripe size so that preallocations don't fragment
         * the stripes.
         */
-       if (sbi->s_stripe > 1) {
-               sbi->s_mb_group_prealloc = roundup(
-                       sbi->s_mb_group_prealloc, sbi->s_stripe);
+
+       /* Allocate table once */
+       sbi->s_mb_prealloc_table = kzalloc(
+               EXT4_MAX_PREALLOC_TABLE * sizeof(unsigned long), GFP_NOFS);
+       if (sbi->s_mb_prealloc_table == NULL) {
+               ret = -ENOMEM;
+               goto out;
+       }
+
+       if (sbi->s_stripe == 0) {
+               for (k = 0, l = 4; k <= 9; ++k, l *= 2)
+                       sbi->s_mb_prealloc_table[k] = l;
+
+               sbi->s_mb_small_req = 256;
+               sbi->s_mb_large_req = 1024;
+               sbi->s_mb_group_prealloc = 512;
+       } else {
+               for (k = 0, l = sbi->s_stripe; k <= 2; ++k, l *= 2)
+                       sbi->s_mb_prealloc_table[k] = l;
+
+               sbi->s_mb_small_req = sbi->s_stripe;
+               sbi->s_mb_large_req = sbi->s_stripe * 8;
+               sbi->s_mb_group_prealloc = sbi->s_stripe * 4;
        }
 
        sbi->s_locality_groups = alloc_percpu(struct ext4_locality_group);
@@ -2669,6 +2782,7 @@ out_free_locality_groups:
        free_percpu(sbi->s_locality_groups);
        sbi->s_locality_groups = NULL;
 out:
+       kfree(sbi->s_mb_prealloc_table);
        kfree(sbi->s_mb_offsets);
        sbi->s_mb_offsets = NULL;
        kfree(sbi->s_mb_maxs);
@@ -2800,6 +2840,7 @@ int ext4_mb_release(struct super_block *sb)
                kvfree(group_info);
                rcu_read_unlock();
        }
+       kfree(sbi->s_mb_prealloc_table);
        kfree(sbi->s_mb_offsets);
        kfree(sbi->s_mb_maxs);
        iput(sbi->s_buddy_cache);
@@ -2930,7 +3044,6 @@ ext4_mb_mark_diskspace_used(struct ext4_
        int err, len;
 
        BUG_ON(ac->ac_status != AC_STATUS_FOUND);
-       BUG_ON(ac->ac_b_ex.fe_len <= 0);
 
        sb = ac->ac_sb;
        sbi = EXT4_SB(sb);
@@ -3060,13 +3173,14 @@ ext4_mb_normalize_request(struct ext4_al
                                struct ext4_allocation_request *ar)
 {
        struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb);
-       int bsbits, max;
+       int bsbits, i, wind;
        ext4_lblk_t end;
-       loff_t size, start_off;
+       loff_t size;
        loff_t orig_size __maybe_unused;
        ext4_lblk_t start;
        struct ext4_inode_info *ei = EXT4_I(ac->ac_inode);
        struct ext4_prealloc_space *pa;
+       unsigned long value, last_non_zero;
 
        /* do normalize only data requests, metadata requests
           do not need preallocation */
@@ -3095,51 +3209,46 @@ ext4_mb_normalize_request(struct ext4_al
        size = size << bsbits;
        if (size < i_size_read(ac->ac_inode))
                size = i_size_read(ac->ac_inode);
-       orig_size = size;
+       size = (size + ac->ac_sb->s_blocksize - 1) >> bsbits;
+
+       start = wind = 0;
+       value = last_non_zero = 0;
 
-       /* max size of free chunks */
-       max = 2 << bsbits;
+       /* let's choose preallocation window depending on file size */
+       for (i = 0; i < EXT4_MAX_PREALLOC_TABLE; i++) {
+               value = sbi->s_mb_prealloc_table[i];
+               if (value == 0)
+                       break;
+               else
+                       last_non_zero = value;
 
-#define NRL_CHECK_SIZE(req, size, max, chunk_size)     \
-               (req <= (size) || max <= (chunk_size))
+               if (size <= value) {
+                       wind = value;
+                       break;
+               }
+       }
 
-       /* first, try to predict filesize */
-       /* XXX: should this table be tunable? */
-       start_off = 0;
-       if (size <= 16 * 1024) {
-               size = 16 * 1024;
-       } else if (size <= 32 * 1024) {
-               size = 32 * 1024;
-       } else if (size <= 64 * 1024) {
-               size = 64 * 1024;
-       } else if (size <= 128 * 1024) {
-               size = 128 * 1024;
-       } else if (size <= 256 * 1024) {
-               size = 256 * 1024;
-       } else if (size <= 512 * 1024) {
-               size = 512 * 1024;
-       } else if (size <= 1024 * 1024) {
-               size = 1024 * 1024;
-       } else if (NRL_CHECK_SIZE(size, 4 * 1024 * 1024, max, 2 * 1024)) {
-               start_off = ((loff_t)ac->ac_o_ex.fe_logical >>
-                                               (21 - bsbits)) << 21;
-               size = 2 * 1024 * 1024;
-       } else if (NRL_CHECK_SIZE(size, 8 * 1024 * 1024, max, 4 * 1024)) {
-               start_off = ((loff_t)ac->ac_o_ex.fe_logical >>
-                                                       (22 - bsbits)) << 22;
-               size = 4 * 1024 * 1024;
-       } else if (NRL_CHECK_SIZE(ac->ac_o_ex.fe_len,
-                                       (8<<20)>>bsbits, max, 8 * 1024)) {
-               start_off = ((loff_t)ac->ac_o_ex.fe_logical >>
-                                                       (23 - bsbits)) << 23;
-               size = 8 * 1024 * 1024;
+       if (wind == 0) {
+               if (last_non_zero != 0) {
+                       __u64 tstart, tend;
+                       /* file is quite large, we now preallocate with
+                        * the biggest configured window with regard to
+                        * logical offset */
+                       wind = last_non_zero;
+                       tstart = ac->ac_o_ex.fe_logical;
+                       do_div(tstart, wind);
+                       start = tstart * wind;
+                       tend = ac->ac_o_ex.fe_logical + ac->ac_o_ex.fe_len - 1;
+                       do_div(tend, wind);
+                       tend = tend * wind + wind;
+                       size = tend - start;
+               }
        } else {
-               start_off = (loff_t) ac->ac_o_ex.fe_logical << bsbits;
-               size      = (loff_t) EXT4_C2B(EXT4_SB(ac->ac_sb),
-                                             ac->ac_o_ex.fe_len) << bsbits;
+               size = wind;
        }
-       size = size >> bsbits;
-       start = start_off >> bsbits;
+
+
+       orig_size = size;
 
        /* don't cover already allocated blocks in selected range */
        if (ar->pleft && start <= ar->lleft) {
@@ -3221,7 +3330,6 @@ ext4_mb_normalize_request(struct ext4_al
                         (unsigned long) ac->ac_o_ex.fe_logical);
                BUG();
        }
-       BUG_ON(size <= 0 || size > EXT4_BLOCKS_PER_GROUP(ac->ac_sb));
 
        /* now prepare goal request */
 
@@ -4190,11 +4298,19 @@ static void ext4_mb_group_or_file(struct
 
        /* don't use group allocation for large files */
        size = max(size, isize);
-       if (size > sbi->s_mb_stream_request) {
+       if ((ac->ac_o_ex.fe_len >= sbi->s_mb_small_req) ||
+           (size >= sbi->s_mb_large_req)) {
                ac->ac_flags |= EXT4_MB_STREAM_ALLOC;
                return;
        }
 
+       /*
+        * request is so large that we don't care about
+        * streaming - it outweighs any possible seek
+        */
+       if (ac->ac_o_ex.fe_len >= sbi->s_mb_large_req)
+               return;
+
        BUG_ON(ac->ac_lg != NULL);
        /*
         * locality group prealloc space are per cpu. The reason for having
Index: linux-4.18.0-80.1.2.el8_0/fs/ext4/sysfs.c
===================================================================
--- linux-4.18.0-80.1.2.el8_0.orig/fs/ext4/sysfs.c
+++ linux-4.18.0-80.1.2.el8_0/fs/ext4/sysfs.c
@@ -173,7 +173,8 @@ EXT4_RW_ATTR_SBI_UI(mb_stats, s_mb_stats
 EXT4_RW_ATTR_SBI_UI(mb_max_to_scan, s_mb_max_to_scan);
 EXT4_RW_ATTR_SBI_UI(mb_min_to_scan, s_mb_min_to_scan);
 EXT4_RW_ATTR_SBI_UI(mb_order2_req, s_mb_order2_reqs);
-EXT4_RW_ATTR_SBI_UI(mb_stream_req, s_mb_stream_request);
+EXT4_RW_ATTR_SBI_UI(mb_small_req, s_mb_small_req);
+EXT4_RW_ATTR_SBI_UI(mb_large_req, s_mb_large_req);
 EXT4_RW_ATTR_SBI_UI(mb_group_prealloc, s_mb_group_prealloc);
 EXT4_RW_ATTR_SBI_UI(extent_max_zeroout_kb, s_extent_max_zeroout_kb);
 EXT4_ATTR(trigger_fs_error, 0200, trigger_test_error);
@@ -201,7 +202,8 @@ static struct attribute *ext4_attrs[] =
        ATTR_LIST(mb_max_to_scan),
        ATTR_LIST(mb_min_to_scan),
        ATTR_LIST(mb_order2_req),
-       ATTR_LIST(mb_stream_req),
+       ATTR_LIST(mb_small_req),
+       ATTR_LIST(mb_large_req),
        ATTR_LIST(mb_group_prealloc),
        ATTR_LIST(max_writeback_mb_bump),
        ATTR_LIST(extent_max_zeroout_kb),
@@ -379,6 +381,8 @@ int ext4_register_sysfs(struct super_blo
                                sb);
                proc_create_seq_data("mb_groups", S_IRUGO, sbi->s_proc,
                                &ext4_mb_seq_groups_ops, sb);
+               proc_create_data("prealloc_table", S_IRUGO, sbi->s_proc,
+                               &ext4_seq_prealloc_table_fops, sb);
        }
        return 0;
 }