LU-12988 ldiskfs: mballoc to prefetch groups
[fs/lustre-release.git] ldiskfs/kernel_patches/patches/rhel8/ext4-mballoc-prefetch.patch
--- linux-4.18/fs/ext4/balloc.c 2019-11-28 14:55:26.506546036 +0300
+++ linux-4.18/fs/ext4/balloc.c 2019-12-02 11:21:50.565975537 +0300
@@ -404,7 +404,8 @@ verified:
  * Return buffer_head on success or NULL in case of failure.
  */
 struct buffer_head *
-ext4_read_block_bitmap_nowait(struct super_block *sb, ext4_group_t block_group)
+ext4_read_block_bitmap_nowait(struct super_block *sb, ext4_group_t block_group,
+                                int ignore_locked)
 {
        struct ext4_group_desc *desc;
        struct ext4_sb_info *sbi = EXT4_SB(sb);
@@ -435,6 +436,13 @@ ext4_read_block_bitmap_nowait(struct
        if (bitmap_uptodate(bh))
                goto verify;

+       if (ignore_locked && buffer_locked(bh)) {
+               /* buffer under IO already, do not wait
+                * if called for prefetching */
+               put_bh(bh);
+               return NULL;
+       }
+
        lock_buffer(bh);
        if (bitmap_uptodate(bh)) {
                unlock_buffer(bh);
@@ -524,7 +532,7 @@ ext4_read_block_bitmap(struct super_b
        struct buffer_head *bh;
        int err;

-       bh = ext4_read_block_bitmap_nowait(sb, block_group);
+       bh = ext4_read_block_bitmap_nowait(sb, block_group, 0);
        if (IS_ERR(bh))
                return bh;
        err = ext4_wait_block_bitmap(sb, block_group, bh);
--- linux-4.18/fs/ext4/ext4.h   2019-11-28 14:55:26.470545343 +0300
+++ linux-4.18/fs/ext4/ext4.h   2019-12-02 11:21:40.795779972 +0300
@@ -1446,6 +1446,8 @@ struct ext4_sb_info {
        /* where last allocation was done - for stream allocation */
        unsigned long s_mb_last_group;
        unsigned long s_mb_last_start;
+       unsigned int s_mb_prefetch;
+       unsigned int s_mb_prefetch_limit;

        /* stats for buddy allocator */
        atomic_t s_bal_reqs;    /* number of reqs with len > 1 */
@@ -2401,7 +2403,8 @@ extern struct ext4_group_desc * ldisk
 extern int ext4_should_retry_alloc(struct super_block *sb, int *retries);

 extern struct buffer_head *ext4_read_block_bitmap_nowait(struct super_block *sb,
-                                               ext4_group_t block_group);
+                                               ext4_group_t block_group,
+                                               int ignore_locked);
 extern int ext4_wait_block_bitmap(struct super_block *sb,
                                  ext4_group_t block_group,
                                  struct buffer_head *bh);
--- linux-4.18/fs/ext4/mballoc.c        2019-11-28 14:55:26.500545920 +0300
+++ linux-4.18/fs/ext4/mballoc.c        2019-12-02 11:21:46.656897291 +0300
@@ -868,7 +868,7 @@ static int ext4_mb_init_cache(struct
                        bh[i] = NULL;
                        continue;
                }
-               bh[i] = ext4_read_block_bitmap_nowait(sb, group);
+               bh[i] = ext4_read_block_bitmap_nowait(sb, group, 0);
                if (IS_ERR(bh[i])) {
                        err = PTR_ERR(bh[i]);
                        bh[i] = NULL;
@@ -2104,6 +2112,87 @@ static int ext4_mb_good_group(struct
        return 0;
 }

+/*
+ * each allocation context (i.e. a thread doing allocation) has its own
+ * sliding prefetch window of @s_mb_prefetch size which starts at the
+ * very first goal and moves ahead of scanning.
+ * a side effect is that subsequent allocations will likely find
+ * the bitmaps in cache or at least in-flight.
+ */
+static void
+ext4_mb_prefetch(struct ext4_allocation_context *ac,
+                   ext4_group_t start)
+{
+       struct super_block *sb = ac->ac_sb;
+       ext4_group_t ngroups = ext4_get_groups_count(sb);
+       struct ext4_sb_info *sbi = EXT4_SB(sb);
+       struct ext4_group_info *grp;
+       ext4_group_t group = start;
+       struct buffer_head *bh;
+       int nr;
+
+       /* limit prefetching at cr=0, otherwise mballoc can
+        * spend a lot of time loading imperfect groups */
+       if (ac->ac_criteria < 2 && ac->ac_prefetch_ios >= sbi->s_mb_prefetch_limit)
+               return;
+
+       /* batch prefetching to get a few READs in flight */
+       nr = ac->ac_prefetch - group;
+       if (ac->ac_prefetch < group)
+               /* wrapped to the first groups */
+               nr += ngroups;
+       if (nr > 0)
+               return;
+       BUG_ON(nr < 0);
+
+       nr = sbi->s_mb_prefetch;
+       if (ext4_has_feature_flex_bg(sb)) {
+               /* align to flex_bg to get more bitmaps with a single IO */
+               nr = (group / sbi->s_mb_prefetch) * sbi->s_mb_prefetch;
+               nr = nr + sbi->s_mb_prefetch - group;
+       }
+       while (nr-- > 0) {
+               grp = ext4_get_group_info(sb, group);
+               /* ignore empty groups - those will be skipped
+                * during the scanning as well */
+               if (grp->bb_free > 0 && EXT4_MB_GRP_NEED_INIT(grp)) {
+                       bh = ext4_read_block_bitmap_nowait(sb, group, 1);
+                       if (bh && !IS_ERR(bh)) {
+                               if (!buffer_uptodate(bh))
+                                       ac->ac_prefetch_ios++;
+                               brelse(bh);
+                       }
+               }
+               if (++group >= ngroups)
+                       group = 0;
+       }
+       ac->ac_prefetch = group;
+}
+
+static void
+ext4_mb_prefetch_fini(struct ext4_allocation_context *ac)
+{
+       struct ext4_group_info *grp;
+       ext4_group_t group;
+       int nr, rc;
+
+       /* initialize last window of prefetched groups */
+       nr = ac->ac_prefetch_ios;
+       if (nr > EXT4_SB(ac->ac_sb)->s_mb_prefetch)
+               nr = EXT4_SB(ac->ac_sb)->s_mb_prefetch;
+       group = ac->ac_prefetch;
+       while (nr-- > 0) {
+               grp = ext4_get_group_info(ac->ac_sb, group);
+               if (grp->bb_free > 0 && EXT4_MB_GRP_NEED_INIT(grp)) {
+                       rc = ext4_mb_init_group(ac->ac_sb, group, GFP_NOFS);
+                       if (rc)
+                               break;
+               }
+               if (group-- == 0)
+                       group = ext4_get_groups_count(ac->ac_sb) - 1;
+       }
+}
+
 static noinline_for_stack int
 ext4_mb_regular_allocator(struct ext4_allocation_context *ac)
 {
@@ -2176,6 +2264,7 @@ repeat:
                 * searching for the right group start
                 * from the goal value specified
                 */
                group = ac->ac_g_ex.fe_group + 1;
+               ac->ac_prefetch = group;

                for (i = 0; i < ngroups; group++, i++) {
                        int ret = 0;
@@ -2188,6 +2277,8 @@ repeat:
                        if (group >= ngroups)
                                group = 0;

+                       ext4_mb_prefetch(ac, group);
+
                        /* This now checks without needing the buddy page */
                        ret = ext4_mb_good_group(ac, group, cr);
                        if (ret <= 0) {
@@ -2260,6 +2351,8 @@ repeat:
 out:
        if (!err && ac->ac_status != AC_STATUS_FOUND && first_err)
                err = first_err;
+       /* use prefetched bitmaps to init buddy so that read info is not lost */
+       ext4_mb_prefetch_fini(ac);
        return err;
 }

@@ -2832,6 +2925,22 @@ int ext4_mb_init(struct super_block *
                sbi->s_mb_large_req = sbi->s_stripe * 8;
                sbi->s_mb_group_prealloc = sbi->s_stripe * 4;
        }
+       if (ext4_has_feature_flex_bg(sb)) {
+               /* a single flex group is supposed to be read by a single IO */
+               sbi->s_mb_prefetch = 1 << sbi->s_es->s_log_groups_per_flex;
+               sbi->s_mb_prefetch *= 8; /* 8 prefetch IOs in flight at most */
+       } else {
+               sbi->s_mb_prefetch = 32;
+       }
+       if (sbi->s_mb_prefetch >= ext4_get_groups_count(sb))
+               sbi->s_mb_prefetch = ext4_get_groups_count(sb);
+       /* how many real IOs to prefetch within a single allocation at cr=0.
+        * given cr=0 is a CPU-related optimization we shouldn't try to
+        * load too many groups, at some point we should start to use what
+        * we've got in memory.
+        * with an average random access time of 5ms, it'd take a second to get
+        * 200 groups (* N with flex_bg), so let's make this limit 32 */
+       sbi->s_mb_prefetch_limit = sbi->s_mb_prefetch * 32;

        sbi->s_locality_groups = alloc_percpu(struct ext4_locality_group);
        if (sbi->s_locality_groups == NULL) {
--- linux-4.18/fs/ext4/mballoc.h        2019-11-28 14:55:26.471545362 +0300
+++ linux-4.18/fs/ext4/mballoc.h        2019-12-02 11:21:57.028104886 +0300
@@ -177,6 +177,8 @@ struct ext4_allocation_context {
        struct page *ac_buddy_page;
        struct ext4_prealloc_space *ac_pa;
        struct ext4_locality_group *ac_lg;
+       ext4_group_t ac_prefetch;
+       int ac_prefetch_ios; /* number of initialized prefetch IOs */
 };

 #define AC_STATUS_CONTINUE     1
--- linux-4.18/fs/ext4/sysfs.c  2019-11-28 14:55:26.502545959 +0300
+++ linux-4.18/fs/ext4/sysfs.c  2019-11-28 20:07:48.104558177 +0300
@@ -190,6 +190,8 @@ EXT4_RW_ATTR_SBI_UI(msg_ratelimit_bur
 EXT4_RO_ATTR_ES_UI(errors_count, s_error_count);
 EXT4_RO_ATTR_ES_UI(first_error_time, s_first_error_time);
 EXT4_RO_ATTR_ES_UI(last_error_time, s_last_error_time);
+EXT4_RW_ATTR_SBI_UI(mb_prefetch, s_mb_prefetch);
+EXT4_RW_ATTR_SBI_UI(mb_prefetch_limit, s_mb_prefetch_limit);

 static unsigned int old_bump_val = 128;
 EXT4_ATTR_PTR(max_writeback_mb_bump, 0444, pointer_ui, &old_bump_val);
@@ -223,6 +224,8 @@ static struct attribute *ext4_attrs[]
        ATTR_LIST(errors_count),
        ATTR_LIST(first_error_time),
        ATTR_LIST(last_error_time),
+       ATTR_LIST(mb_prefetch),
+       ATTR_LIST(mb_prefetch_limit),
        NULL,
 };
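
Below is a minimal user-space sketch (not part of the patch) of the prefetch-window arithmetic added by the ext4_mb_prefetch() and ext4_mb_init() hunks above. The geometry is an assumption chosen only for illustration (a flex_bg of 16 groups, 8192 groups in total), and the helper names mb_prefetch_default() and prefetch_batch() are hypothetical; they exist only in this example.

/*
 * Illustration only: mirrors the prefetch-window arithmetic from the
 * ext4_mb_prefetch()/ext4_mb_init() hunks above.  All geometry is made up.
 */
#include <stdio.h>

/* default window size as chosen by the ext4_mb_init() hunk */
static unsigned int mb_prefetch_default(unsigned int groups_per_flex,
                                        unsigned int ngroups, int has_flex_bg)
{
        /* one flex group per IO, up to 8 prefetch IOs in flight */
        unsigned int nr = has_flex_bg ? groups_per_flex * 8 : 32;

        if (nr >= ngroups)
                nr = ngroups;
        return nr;
}

/* how many groups one ext4_mb_prefetch() batch covers, starting at @group */
static unsigned int prefetch_batch(unsigned int group,
                                   unsigned int s_mb_prefetch, int has_flex_bg)
{
        unsigned int nr = s_mb_prefetch;

        if (has_flex_bg) {
                /* align to the window start, then read to the window end */
                nr = (group / s_mb_prefetch) * s_mb_prefetch;
                nr = nr + s_mb_prefetch - group;
        }
        return nr;
}

int main(void)
{
        unsigned int groups_per_flex = 16;      /* assumed flex_bg size */
        unsigned int ngroups = 8192;            /* assumed number of groups */
        unsigned int prefetch = mb_prefetch_default(groups_per_flex, ngroups, 1);

        printf("s_mb_prefetch=%u s_mb_prefetch_limit=%u\n",
               prefetch, prefetch * 32);
        /* a mid-window start only covers the tail of the current window */
        printf("batch at group 70:  %u groups\n", prefetch_batch(70, prefetch, 1));
        /* a window-aligned start covers a full window */
        printf("batch at group 128: %u groups\n", prefetch_batch(128, prefetch, 1));
        return 0;
}

With that geometry s_mb_prefetch comes out at 128 groups and s_mb_prefetch_limit at 4096; both values can be tuned at runtime through the read-write mb_prefetch and mb_prefetch_limit attributes registered in the sysfs.c hunk.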