LU-13300 ldiskfs: port patches to improve extent status shrink
fs/lustre-release.git: ldiskfs/kernel_patches/patches/rhel7.6/ext4-change-LRU-to-round-robin-in-extent-status-tree.patch
From 4fac310a77c918d6a235a55cb76cf2f9bb22de71 Mon Sep 17 00:00:00 2001
From: Zheng Liu <wenqing.lz@taobao.com>
Date: Tue, 25 Nov 2014 11:45:37 -0500
Subject: [PATCH 3/7] ext4: change LRU to round-robin in extent status tree
 shrinker

In this commit we discard the LRU algorithm for inodes with an extent
status tree because it takes significant effort to maintain an LRU list
in the extent status tree shrinker, and the shrinker can take a long
time to scan this LRU list in order to reclaim some objects.

We replace the LRU ordering with a simple round-robin, so we no longer
need to keep an LRU list. That also means the list need not be sorted
if the shrinker cannot reclaim any objects in the first round.

Cc: Andreas Dilger <adilger.kernel@dilger.ca>
Signed-off-by: Zheng Liu <wenqing.lz@taobao.com>
Signed-off-by: Jan Kara <jack@suse.cz>
Signed-off-by: Theodore Ts'o <tytso@mit.edu>
---
 fs/ext4/ext4.h           |  10 +-
 fs/ext4/extents.c        |   4 +-
 fs/ext4/extents_status.c | 221 +++++++++++++++++----------------------
 fs/ext4/extents_status.h |   7 +-
 fs/ext4/inode.c          |   4 +-
 fs/ext4/ioctl.c          |   4 +-
 fs/ext4/super.c          |   7 +-
 7 files changed, 112 insertions(+), 145 deletions(-)

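For readers of this patch, the heart of the new round-robin scan in
__es_shrink() reduces to the condensed sketch below. It is assembled from
the extents_status.c hunks that follow and is for illustration only: the
precached-inode skip, the retry pass, the locked_ei fallback and the
statistics/ratelimit bookkeeping are omitted, and list_move_tail() stands
in for the patch's equivalent list_move(&ei->i_es_list, sbi->s_es_list.prev).

static int __es_shrink(struct ext4_sb_info *sbi, int nr_to_scan,
                       struct ext4_inode_info *locked_ei)
{
        struct ext4_inode_info *ei;
        int nr_to_walk, ret, nr_shrunk = 0;

        spin_lock(&sbi->s_es_lock);
        nr_to_walk = sbi->s_es_nr_inode;
        while (nr_to_walk-- > 0) {
                if (list_empty(&sbi->s_es_list))
                        break;
                /* Take the head inode and rotate it to the tail: this
                 * rotation is the round-robin that replaces LRU order. */
                ei = list_first_entry(&sbi->s_es_list, struct ext4_inode_info,
                                      i_es_list);
                list_move_tail(&ei->i_es_list, &sbi->s_es_list);

                /* Never block on a busy inode or the caller's own inode. */
                if (ei == locked_ei || !write_trylock(&ei->i_es_lock))
                        continue;

                /* Holding i_es_lock keeps inode reclaim away from ei, so
                 * s_es_lock can be dropped while extents are reclaimed. */
                spin_unlock(&sbi->s_es_lock);
                ret = __es_try_to_reclaim_extents(ei, nr_to_scan);
                write_unlock(&ei->i_es_lock);

                nr_shrunk += ret;
                nr_to_scan -= ret;
                if (nr_to_scan == 0)
                        return nr_shrunk;
                spin_lock(&sbi->s_es_lock);
        }
        spin_unlock(&sbi->s_es_lock);
        return nr_shrunk;
}

Each pass pops inodes from the head of s_es_list and rotates them to the
tail, so successive shrinker invocations simply cycle through all
registered inodes instead of maintaining and sorting an LRU list.
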
diff --git a/fs/ext4/ext4.h b/fs/ext4/ext4.h
index cc5ba587..0813afd6 100644
--- a/fs/ext4/ext4.h
+++ b/fs/ext4/ext4.h
@@ -1017,10 +1017,9 @@ struct ext4_inode_info {
        /* extents status tree */
        struct ext4_es_tree i_es_tree;
        rwlock_t i_es_lock;
-       struct list_head i_es_lru;
+       struct list_head i_es_list;
        unsigned int i_es_all_nr;       /* protected by i_es_lock */
-       unsigned int i_es_lru_nr;       /* protected by i_es_lock */
-       unsigned long i_touch_when;     /* jiffies of last accessing */
+       unsigned int i_es_shk_nr;       /* protected by i_es_lock */
 
        /* ialloc */
        ext4_group_t    i_last_alloc_group;
@@ -1482,9 +1481,10 @@ struct ext4_sb_info {
 
        /* Reclaim extents from extent status tree */
        struct shrinker s_es_shrinker;
-       struct list_head s_es_lru;
+       struct list_head s_es_list;
+       long s_es_nr_inode;
        struct ext4_es_stats s_es_stats;
-       spinlock_t s_es_lru_lock ____cacheline_aligned_in_smp;
+       spinlock_t s_es_lock ____cacheline_aligned_in_smp;
 
        /* Ratelimit ext4 messages. */
        struct ratelimit_state s_err_ratelimit_state;
diff --git a/fs/ext4/extents.c b/fs/ext4/extents.c
index f618d0ba..c012dc51 100644
--- a/fs/ext4/extents.c
+++ b/fs/ext4/extents.c
@@ -4689,7 +4689,7 @@ out2:
 
        trace_ext4_ext_map_blocks_exit(inode, flags, map,
                                       err ? err : allocated);
-       ext4_es_lru_add(inode);
+       ext4_es_list_add(inode);
        return err ? err : allocated;
 }
 
@@ -5263,7 +5263,7 @@ int ext4_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
                error = ext4_fill_fiemap_extents(inode, start_blk,
                                                 len_blks, fieinfo);
        }
-       ext4_es_lru_add(inode);
+       ext4_es_list_add(inode);
        return error;
 }
 
diff --git a/fs/ext4/extents_status.c b/fs/ext4/extents_status.c
index 7dfed27b..382a7bf9 100644
--- a/fs/ext4/extents_status.c
+++ b/fs/ext4/extents_status.c
@@ -149,8 +149,8 @@ static int __es_remove_extent(struct inode *inode, ext4_lblk_t lblk,
                              ext4_lblk_t end);
 static int __es_try_to_reclaim_extents(struct ext4_inode_info *ei,
                                       int nr_to_scan);
-static int __ext4_es_shrink(struct ext4_sb_info *sbi, int nr_to_scan,
-                           struct ext4_inode_info *locked_ei);
+static int __es_shrink(struct ext4_sb_info *sbi, int nr_to_scan,
+                      struct ext4_inode_info *locked_ei);
 
 int __init ext4_init_es(void)
 {
@@ -298,6 +298,36 @@ out:
        trace_ext4_es_find_delayed_extent_range_exit(inode, es);
 }
 
+void ext4_es_list_add(struct inode *inode)
+{
+       struct ext4_inode_info *ei = EXT4_I(inode);
+       struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
+
+       if (!list_empty(&ei->i_es_list))
+               return;
+
+       spin_lock(&sbi->s_es_lock);
+       if (list_empty(&ei->i_es_list)) {
+               list_add_tail(&ei->i_es_list, &sbi->s_es_list);
+               sbi->s_es_nr_inode++;
+       }
+       spin_unlock(&sbi->s_es_lock);
+}
+
+void ext4_es_list_del(struct inode *inode)
+{
+       struct ext4_inode_info *ei = EXT4_I(inode);
+       struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
+
+       spin_lock(&sbi->s_es_lock);
+       if (!list_empty(&ei->i_es_list)) {
+               list_del_init(&ei->i_es_list);
+               sbi->s_es_nr_inode--;
+               WARN_ON_ONCE(sbi->s_es_nr_inode < 0);
+       }
+       spin_unlock(&sbi->s_es_lock);
+}
+
 static struct extent_status *
 ext4_es_alloc_extent(struct inode *inode, ext4_lblk_t lblk, ext4_lblk_t len,
                     ext4_fsblk_t pblk)
@@ -314,9 +344,9 @@ ext4_es_alloc_extent(struct inode *inode, ext4_lblk_t lblk, ext4_lblk_t len,
         * We don't count delayed extent because we never try to reclaim them
         */
        if (!ext4_es_is_delayed(es)) {
-               EXT4_I(inode)->i_es_lru_nr++;
+               EXT4_I(inode)->i_es_shk_nr++;
                percpu_counter_inc(&EXT4_SB(inode->i_sb)->
-                                       s_es_stats.es_stats_lru_cnt);
+                                       s_es_stats.es_stats_shk_cnt);
        }
 
        EXT4_I(inode)->i_es_all_nr++;
@@ -330,12 +360,12 @@ static void ext4_es_free_extent(struct inode *inode, struct extent_status *es)
        EXT4_I(inode)->i_es_all_nr--;
        percpu_counter_dec(&EXT4_SB(inode->i_sb)->s_es_stats.es_stats_all_cnt);
 
-       /* Decrease the lru counter when this es is not delayed */
+       /* Decrease the shrink counter when this es is not delayed */
        if (!ext4_es_is_delayed(es)) {
-               BUG_ON(EXT4_I(inode)->i_es_lru_nr == 0);
-               EXT4_I(inode)->i_es_lru_nr--;
+               BUG_ON(EXT4_I(inode)->i_es_shk_nr == 0);
+               EXT4_I(inode)->i_es_shk_nr--;
                percpu_counter_dec(&EXT4_SB(inode->i_sb)->
-                                       s_es_stats.es_stats_lru_cnt);
+                                       s_es_stats.es_stats_shk_cnt);
        }
 
        kmem_cache_free(ext4_es_cachep, es);
@@ -693,8 +723,8 @@ int ext4_es_insert_extent(struct inode *inode, ext4_lblk_t lblk,
                goto error;
 retry:
        err = __es_insert_extent(inode, &newes);
-       if (err == -ENOMEM && __ext4_es_shrink(EXT4_SB(inode->i_sb), 1,
-                                              EXT4_I(inode)))
+       if (err == -ENOMEM && __es_shrink(EXT4_SB(inode->i_sb),
+                                         1, EXT4_I(inode)))
                goto retry;
        if (err == -ENOMEM && !ext4_es_is_delayed(&newes))
                err = 0;
@@ -851,8 +881,8 @@ retry:
                                es->es_lblk = orig_es.es_lblk;
                                es->es_len = orig_es.es_len;
                                if ((err == -ENOMEM) &&
-                                   __ext4_es_shrink(EXT4_SB(inode->i_sb), 1,
-                                                    EXT4_I(inode)))
+                                   __es_shrink(EXT4_SB(inode->i_sb),
+                                                       1, EXT4_I(inode)))
                                        goto retry;
                                goto out;
                        }
@@ -924,6 +954,11 @@ int ext4_es_remove_extent(struct inode *inode, ext4_lblk_t lblk,
        end = lblk + len - 1;
        BUG_ON(end < lblk);
 
+       /*
+        * ext4_clear_inode() depends on us taking i_es_lock unconditionally
+        * so that we are sure __es_shrink() is done with the inode before it
+        * is reclaimed.
+        */
        write_lock(&EXT4_I(inode)->i_es_lock);
        err = __es_remove_extent(inode, lblk, end);
        write_unlock(&EXT4_I(inode)->i_es_lock);
@@ -931,112 +966,77 @@ int ext4_es_remove_extent(struct inode *inode, ext4_lblk_t lblk,
        return err;
 }
 
-static int ext4_inode_touch_time_cmp(void *priv, struct list_head *a,
-                                    struct list_head *b)
-{
-       struct ext4_inode_info *eia, *eib;
-       eia = list_entry(a, struct ext4_inode_info, i_es_lru);
-       eib = list_entry(b, struct ext4_inode_info, i_es_lru);
-
-       if (ext4_test_inode_state(&eia->vfs_inode, EXT4_STATE_EXT_PRECACHED) &&
-           !ext4_test_inode_state(&eib->vfs_inode, EXT4_STATE_EXT_PRECACHED))
-               return 1;
-       if (!ext4_test_inode_state(&eia->vfs_inode, EXT4_STATE_EXT_PRECACHED) &&
-           ext4_test_inode_state(&eib->vfs_inode, EXT4_STATE_EXT_PRECACHED))
-               return -1;
-       if (eia->i_touch_when == eib->i_touch_when)
-               return 0;
-       if (time_after(eia->i_touch_when, eib->i_touch_when))
-               return 1;
-       else
-               return -1;
-}
-
-static int __ext4_es_shrink(struct ext4_sb_info *sbi, int nr_to_scan,
-                           struct ext4_inode_info *locked_ei)
+static int __es_shrink(struct ext4_sb_info *sbi, int nr_to_scan,
+                      struct ext4_inode_info *locked_ei)
 {
        struct ext4_inode_info *ei;
        struct ext4_es_stats *es_stats;
-       struct list_head *cur, *tmp;
-       LIST_HEAD(skipped);
        ktime_t start_time;
        u64 scan_time;
+       int nr_to_walk;
        int ret, nr_shrunk = 0;
-       int retried = 0, skip_precached = 1, nr_skipped = 0;
+       int retried = 0, nr_skipped = 0;
 
        es_stats = &sbi->s_es_stats;
        start_time = ktime_get();
-       spin_lock(&sbi->s_es_lru_lock);
 
 retry:
-       list_for_each_safe(cur, tmp, &sbi->s_es_lru) {
-               /*
-                * If we have already reclaimed all extents from extent
-                * status tree, just stop the loop immediately.
-                */
-               if (percpu_counter_read_positive(
-                               &es_stats->es_stats_lru_cnt) == 0)
-                       break;
-
-               ei = list_entry(cur, struct ext4_inode_info, i_es_lru);
+       spin_lock(&sbi->s_es_lock);
+       nr_to_walk = sbi->s_es_nr_inode;
+       while (nr_to_walk-- > 0) {
 
+               if (list_empty(&sbi->s_es_list)) {
+                       spin_unlock(&sbi->s_es_lock);
+                       goto out;
+               }
+               ei = list_first_entry(&sbi->s_es_list, struct ext4_inode_info,
+                                     i_es_list);
+               /* Move the inode to the tail */
+               list_move(&ei->i_es_list, sbi->s_es_list.prev);
                /*
-                * Skip the inode that is newer than the last_sorted
-                * time.  Normally we try hard to avoid shrinking
-                * precached inodes, but we will as a last resort.
+                * Normally we try hard to avoid shrinking precached inodes,
+                * but we will as a last resort.
                 */
-               if ((es_stats->es_stats_last_sorted < ei->i_touch_when) ||
-                   (skip_precached && ext4_test_inode_state(&ei->vfs_inode,
-                                               EXT4_STATE_EXT_PRECACHED))) {
+               if (!retried && ext4_test_inode_state(&ei->vfs_inode,
+                                               EXT4_STATE_EXT_PRECACHED)) {
                        nr_skipped++;
-                       list_move_tail(cur, &skipped);
                        continue;
                }
 
-               if (ei->i_es_lru_nr == 0 || ei == locked_ei ||
-                   !write_trylock(&ei->i_es_lock))
-                       continue;
+               if (ei == locked_ei || !write_trylock(&ei->i_es_lock)) {
+                       nr_skipped++;
+                       continue;
+               }
+               /*
+                * Now we hold i_es_lock which protects us from inode reclaim
+                * freeing inode under us
+                */
+               spin_unlock(&sbi->s_es_lock);
 
                ret = __es_try_to_reclaim_extents(ei, nr_to_scan);
-               if (ei->i_es_lru_nr == 0)
-                       list_del_init(&ei->i_es_lru);
                write_unlock(&ei->i_es_lock);
 
                nr_shrunk += ret;
                nr_to_scan -= ret;
                if (nr_to_scan == 0)
-                       break;
+                       goto out;
+               spin_lock(&sbi->s_es_lock);
        }
 
-       /* Move the newer inodes into the tail of the LRU list. */
-       list_splice_tail(&skipped, &sbi->s_es_lru);
-       INIT_LIST_HEAD(&skipped);
+       spin_unlock(&sbi->s_es_lock);
 
        /*
        * If we skipped any inodes, and we weren't able to make any
-        * forward progress, sort the list and try again.
+        * forward progress, try again to scan precached inodes.
        */
       if ((nr_shrunk == 0) && nr_skipped && !retried) {
               retried++;
-               list_sort(NULL, &sbi->s_es_lru, ext4_inode_touch_time_cmp);
-               es_stats->es_stats_last_sorted = jiffies;
-               ei = list_first_entry(&sbi->s_es_lru, struct ext4_inode_info,
-                                     i_es_lru);
-               /*
-                * If there are no non-precached inodes left on the
-                * list, start releasing precached extents.
-                */
-               if (ext4_test_inode_state(&ei->vfs_inode,
-                                         EXT4_STATE_EXT_PRECACHED))
-                       skip_precached = 0;
               goto retry;
       }
 
-       spin_unlock(&sbi->s_es_lru_lock);
-
       if (locked_ei && nr_shrunk == 0)
               nr_shrunk = __es_try_to_reclaim_extents(locked_ei, nr_to_scan);
-
+out:
       scan_time = ktime_to_ns(ktime_sub(ktime_get(), start_time));
       if (likely(es_stats->es_stats_scan_time))
               es_stats->es_stats_scan_time = (scan_time +
@@ -1061,15 +1061,15 @@ static int ext4_es_shrink(struct shrinker *shrink, struct shrink_control *sc)
       int nr_to_scan = sc->nr_to_scan;
       int ret, nr_shrunk;
 
-       ret = percpu_counter_read_positive(&sbi->s_es_stats.es_stats_lru_cnt);
+       ret = percpu_counter_read_positive(&sbi->s_es_stats.es_stats_shk_cnt);
       trace_ext4_es_shrink_enter(sbi->s_sb, nr_to_scan, ret);
 
       if (!nr_to_scan)
               return ret;
 
-       nr_shrunk = __ext4_es_shrink(sbi, nr_to_scan, NULL);
+       nr_shrunk = __es_shrink(sbi, nr_to_scan, NULL);
 
-       ret = percpu_counter_read_positive(&sbi->s_es_stats.es_stats_lru_cnt);
+       ret = percpu_counter_read_positive(&sbi->s_es_stats.es_stats_shk_cnt);
       trace_ext4_es_shrink_exit(sbi->s_sb, nr_shrunk, ret);
       return ret;
 }
@@ -1096,28 +1096,24 @@ static int ext4_es_seq_shrinker_info_show(struct seq_file *seq, void *v)
               return 0;
 
       /* here we just find an inode that has the max nr. of objects */
-       spin_lock(&sbi->s_es_lru_lock);
-       list_for_each_entry(ei, &sbi->s_es_lru, i_es_lru) {
+       spin_lock(&sbi->s_es_lock);
+       list_for_each_entry(ei, &sbi->s_es_list, i_es_list) {
               inode_cnt++;
               if (max && max->i_es_all_nr < ei->i_es_all_nr)
                       max = ei;
               else if (!max)
                       max = ei;
       }
-       spin_unlock(&sbi->s_es_lru_lock);
+       spin_unlock(&sbi->s_es_lock);
 
       seq_printf(seq, "stats:\n  %lld objects\n  %lld reclaimable objects\n",
                  percpu_counter_sum_positive(&es_stats->es_stats_all_cnt),
-                  percpu_counter_sum_positive(&es_stats->es_stats_lru_cnt));
+                  percpu_counter_sum_positive(&es_stats->es_stats_shk_cnt));
       seq_printf(seq, "  %lu/%lu cache hits/misses\n",
                  es_stats->es_stats_cache_hits,
                  es_stats->es_stats_cache_misses);
-       if (es_stats->es_stats_last_sorted != 0)
-               seq_printf(seq, "  %u ms last sorted interval\n",
-                          jiffies_to_msecs(jiffies -
-                                           es_stats->es_stats_last_sorted));
       if (inode_cnt)
-               seq_printf(seq, "  %d inodes on lru list\n", inode_cnt);
+               seq_printf(seq, "  %d inodes on list\n", inode_cnt);
 
       seq_printf(seq, "average:\n  %llu us scan time\n",
           div_u64(es_stats->es_stats_scan_time, 1000));
@@ -1126,7 +1122,7 @@ static int ext4_es_seq_shrinker_info_show(struct seq_file *seq, void *v)
               seq_printf(seq,
                   "maximum:\n  %lu inode (%u objects, %u reclaimable)\n"
                   "  %llu us max scan time\n",
-                   max->vfs_inode.i_ino, max->i_es_all_nr, max->i_es_lru_nr,
+                   max->vfs_inode.i_ino, max->i_es_all_nr, max->i_es_shk_nr,
                   div_u64(es_stats->es_stats_max_scan_time, 1000));
 
       return 0;
@@ -1175,9 +1171,9 @@ int ext4_es_register_shrinker(struct ext4_sb_info *sbi)
 {
       int err;
 
-       INIT_LIST_HEAD(&sbi->s_es_lru);
-       spin_lock_init(&sbi->s_es_lru_lock);
-       sbi->s_es_stats.es_stats_last_sorted = 0;
+       INIT_LIST_HEAD(&sbi->s_es_list);
+       sbi->s_es_nr_inode = 0;
+       spin_lock_init(&sbi->s_es_lock);
       sbi->s_es_stats.es_stats_shrunk = 0;
       sbi->s_es_stats.es_stats_cache_hits = 0;
       sbi->s_es_stats.es_stats_cache_misses = 0;
@@ -1187,7 +1183,7 @@ int ext4_es_register_shrinker(struct ext4_sb_info *sbi)
                                 0, GFP_KERNEL);
       if (err)
               return err;
-       err = percpu_counter_init(&sbi->s_es_stats.es_stats_lru_cnt,
+       err = percpu_counter_init(&sbi->s_es_stats.es_stats_shk_cnt,
                                 0, GFP_KERNEL);
       if (err)
               goto err;
@@ -1211,37 +1207,10 @@ void ext4_es_unregister_shrinker(struct ext4_sb_info *sbi)
       if (sbi->s_proc)
               remove_proc_entry("es_shrinker_info", sbi->s_proc);
       percpu_counter_destroy(&sbi->s_es_stats.es_stats_all_cnt);
-       percpu_counter_destroy(&sbi->s_es_stats.es_stats_lru_cnt);
+       percpu_counter_destroy(&sbi->s_es_stats.es_stats_shk_cnt);
       unregister_shrinker(&sbi->s_es_shrinker);
 }
 
-void ext4_es_lru_add(struct inode *inode)
-{
-       struct ext4_inode_info *ei = EXT4_I(inode);
-       struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
-
-       ei->i_touch_when = jiffies;
-
-       if (!list_empty(&ei->i_es_lru))
-               return;
-
-       spin_lock(&sbi->s_es_lru_lock);
-       if (list_empty(&ei->i_es_lru))
-               list_add_tail(&ei->i_es_lru, &sbi->s_es_lru);
-       spin_unlock(&sbi->s_es_lru_lock);
-}
-
-void ext4_es_lru_del(struct inode *inode)
-{
-       struct ext4_inode_info *ei = EXT4_I(inode);
-       struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
-
-       spin_lock(&sbi->s_es_lru_lock);
-       if (!list_empty(&ei->i_es_lru))
-               list_del_init(&ei->i_es_lru);
-       spin_unlock(&sbi->s_es_lru_lock);
-}
-
 static int __es_try_to_reclaim_extents(struct ext4_inode_info *ei,
                                       int nr_to_scan)
 {
@@ -1253,7 +1222,7 @@ static int __es_try_to_reclaim_extents(struct ext4_inode_info *ei,
       static DEFINE_RATELIMIT_STATE(_rs, DEFAULT_RATELIMIT_INTERVAL,
                                     DEFAULT_RATELIMIT_BURST);
 
-       if (ei->i_es_lru_nr == 0)
+       if (ei->i_es_shk_nr == 0)
               return 0;
 
       if (ext4_test_inode_state(inode, EXT4_STATE_EXT_PRECACHED) &&
diff --git a/fs/ext4/extents_status.h b/fs/ext4/extents_status.h
index efd5f970..0e6a33e8 100644
--- a/fs/ext4/extents_status.h
+++ b/fs/ext4/extents_status.h
@@ -65,14 +65,13 @@ struct ext4_es_tree {
 };
 
 struct ext4_es_stats {
-       unsigned long es_stats_last_sorted;
       unsigned long es_stats_shrunk;
       unsigned long es_stats_cache_hits;
       unsigned long es_stats_cache_misses;
       u64 es_stats_scan_time;
       u64 es_stats_max_scan_time;
       struct percpu_counter es_stats_all_cnt;
-       struct percpu_counter es_stats_lru_cnt;
+       struct percpu_counter es_stats_shk_cnt;
 };
 
 extern int __init ext4_init_es(void);
@@ -151,7 +150,7 @@ static inline void ext4_es_store_pblock_status(struct extent_status *es,
 
 extern int ext4_es_register_shrinker(struct ext4_sb_info *sbi);
 extern void ext4_es_unregister_shrinker(struct ext4_sb_info *sbi);
-extern void ext4_es_lru_add(struct inode *inode);
-extern void ext4_es_lru_del(struct inode *inode);
+extern void ext4_es_list_add(struct inode *inode);
+extern void ext4_es_list_del(struct inode *inode);
 
 #endif /* _EXT4_EXTENTS_STATUS_H */
diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c
index 21db5952..f6a2764c 100644
--- a/fs/ext4/inode.c
+++ b/fs/ext4/inode.c
@@ -523,7 +523,7 @@ int ext4_map_blocks(handle_t *handle, struct inode *inode,
 
       /* Lookup extent status tree firstly */
       if (ext4_es_lookup_extent(inode, map->m_lblk, &es)) {
-               ext4_es_lru_add(inode);
+               ext4_es_list_add(inode);
               if (ext4_es_is_written(&es) || ext4_es_is_unwritten(&es)) {
                       map->m_pblk = ext4_es_pblock(&es) +
                                       map->m_lblk - es.es_lblk;
@@ -1519,7 +1519,7 @@ static int ext4_da_map_blocks(struct inode *inode, sector_t iblock,
 
       /* Lookup extent status tree firstly */
       if (ext4_es_lookup_extent(inode, iblock, &es)) {
-               ext4_es_lru_add(inode);
+               ext4_es_list_add(inode);
               if (ext4_es_is_hole(&es)) {
                       retval = 0;
                       down_read(&EXT4_I(inode)->i_data_sem);
diff --git a/fs/ext4/ioctl.c b/fs/ext4/ioctl.c
index 858cf709..122d517c 100644
--- a/fs/ext4/ioctl.c
+++ b/fs/ext4/ioctl.c
@@ -80,8 +80,8 @@ static void swap_inode_data(struct inode *inode1, struct inode *inode2)
       memswap(&ei1->i_disksize, &ei2->i_disksize, sizeof(ei1->i_disksize));
       ext4_es_remove_extent(inode1, 0, EXT_MAX_BLOCKS);
       ext4_es_remove_extent(inode2, 0, EXT_MAX_BLOCKS);
-       ext4_es_lru_del(inode1);
-       ext4_es_lru_del(inode2);
+       ext4_es_list_del(inode1);
+       ext4_es_list_del(inode2);
 
       isize = i_size_read(inode1);
       i_size_write(inode1, i_size_read(inode2));
diff --git a/fs/ext4/super.c b/fs/ext4/super.c
index 95a01d56..ea2a1026 100644
--- a/fs/ext4/super.c
+++ b/fs/ext4/super.c
@@ -942,10 +942,9 @@ static struct inode *ext4_alloc_inode(struct super_block *sb)
       spin_lock_init(&ei->i_prealloc_lock);
       ext4_es_init_tree(&ei->i_es_tree);
       rwlock_init(&ei->i_es_lock);
-       INIT_LIST_HEAD(&ei->i_es_lru);
+       INIT_LIST_HEAD(&ei->i_es_list);
       ei->i_es_all_nr = 0;
-       ei->i_es_lru_nr = 0;
-       ei->i_touch_when = 0;
+       ei->i_es_shk_nr = 0;
       ei->i_reserved_data_blocks = 0;
       ei->i_reserved_meta_blocks = 0;
       ei->i_allocated_meta_blocks = 0;
@@ -1034,7 +1033,7 @@ void ext4_clear_inode(struct inode *inode)
       dquot_drop(inode);
       ext4_discard_preallocations(inode);
       ext4_es_remove_extent(inode, 0, EXT_MAX_BLOCKS);
-       ext4_es_lru_del(inode);
+       ext4_es_list_del(inode);
       if (EXT4_I(inode)->jinode) {
               jbd2_journal_release_jbd_inode(EXT4_JOURNAL(inode),
                                              EXT4_I(inode)->jinode);
-- 
2.24.1