Whamcloud - gitweb
LU-13839 kernel: RHEL 8.3 server support
[fs/lustre-release.git] / ldiskfs / kernel_patches / patches / rhel8.3 / ext4-pdirop.patch
1 Single directory performance is critical for HPC workloads. In a
2 typical use case an application creates a separate output file for
3 each node and task in a job. As nodes and tasks increase, hundreds
4 of thousands of files may be created in a single directory within
5 a short window of time.
6 Today, both filename lookup and file system modifying operations
7 (such as create and unlink) are protected with a single lock for
8 an entire ldiskfs directory. PDO project will remove this
9 bottleneck by introducing a parallel locking mechanism for entire
10 ldiskfs directories. This work will enable multiple application
11 threads to simultaneously lookup, create and unlink in parallel.
12
13 This patch contains:
14  - pdirops support for ldiskfs
15  - integrate with osd-ldiskfs
16
17 Index: linux-4.18.0-80.1.2.el8_0/fs/ext4/Makefile
18 ===================================================================
19 --- linux-4.18.0-80.1.2.el8_0.orig/fs/ext4/Makefile
20 +++ linux-4.18.0-80.1.2.el8_0/fs/ext4/Makefile
21 @@ -7,6 +7,7 @@ obj-$(CONFIG_EXT4_FS) += ext4.o
22  
23  ext4-y := balloc.o bitmap.o block_validity.o dir.o ext4_jbd2.o extents.o \
24                 extents_status.o file.o fsmap.o fsync.o hash.o ialloc.o \
25 +               htree_lock.o \
26                 indirect.o inline.o inode.o ioctl.o mballoc.o migrate.o \
27                 mmp.o move_extent.o namei.o page-io.o readpage.o resize.o \
28                 super.o symlink.o sysfs.o xattr.o xattr_trusted.o xattr_user.o
29 Index: linux-4.18.0-240.1.1.el8/fs/ext4/ext4.h
30 ===================================================================
31 --- linux-4.18.0-240.1.1.el8.orig/fs/ext4/ext4.h
32 +++ linux-4.18.0-240.1.1.el8/fs/ext4/ext4.h
33 @@ -29,6 +29,7 @@
34  #include <linux/timer.h>
35  #include <linux/version.h>
36  #include <linux/wait.h>
37 +#include <linux/htree_lock.h>
38  #include <linux/sched/signal.h>
39  #include <linux/blockgroup_lock.h>
40  #include <linux/percpu_counter.h>
41 @@ -946,6 +947,9 @@ struct ext4_inode_info {
42         __u32   i_dtime;
43         ext4_fsblk_t    i_file_acl;
44  
45 +       /* following fields for parallel directory operations -bzzz */
46 +       struct semaphore i_append_sem;
47 +
48         /*
49          * i_block_group is the number of the block group which contains
50          * this file's inode.  Constant across the lifetime of the inode,
51 @@ -2185,6 +2189,72 @@ struct dx_hash_info
52   */
53  #define HASH_NB_ALWAYS         1
54  
55 +/* assume name-hash is protected by upper layer */
56 +#define EXT4_HTREE_LOCK_HASH   0
57 +
58 +enum ext4_pdo_lk_types {
59 +#if EXT4_HTREE_LOCK_HASH
60 +       EXT4_LK_HASH,
61 +#endif
62 +       EXT4_LK_DX,             /* index block */
63 +       EXT4_LK_DE,             /* directory entry block */
64 +       EXT4_LK_SPIN,           /* spinlock */
65 +       EXT4_LK_MAX,
66 +};
67 +
68 +/* read-only bit */
69 +#define EXT4_LB_RO(b)          (1 << (b))
70 +/* read + write, high bits for writer */
71 +#define EXT4_LB_RW(b)          ((1 << (b)) | (1 << (EXT4_LK_MAX + (b))))
72 +
73 +enum ext4_pdo_lock_bits {
74 +       /* DX lock bits */
75 +       EXT4_LB_DX_RO           = EXT4_LB_RO(EXT4_LK_DX),
76 +       EXT4_LB_DX              = EXT4_LB_RW(EXT4_LK_DX),
77 +       /* DE lock bits */
78 +       EXT4_LB_DE_RO           = EXT4_LB_RO(EXT4_LK_DE),
79 +       EXT4_LB_DE              = EXT4_LB_RW(EXT4_LK_DE),
80 +       /* DX spinlock bits */
81 +       EXT4_LB_SPIN_RO         = EXT4_LB_RO(EXT4_LK_SPIN),
82 +       EXT4_LB_SPIN            = EXT4_LB_RW(EXT4_LK_SPIN),
83 +       /* accurate searching */
84 +       EXT4_LB_EXACT           = EXT4_LB_RO(EXT4_LK_MAX << 1),
85 +};
86 +
87 +enum ext4_pdo_lock_opc {
88 +       /* external */
89 +       EXT4_HLOCK_READDIR      = (EXT4_LB_DE_RO | EXT4_LB_DX_RO),
90 +       EXT4_HLOCK_LOOKUP       = (EXT4_LB_DE_RO | EXT4_LB_SPIN_RO |
91 +                                  EXT4_LB_EXACT),
92 +       EXT4_HLOCK_DEL          = (EXT4_LB_DE | EXT4_LB_SPIN_RO |
93 +                                  EXT4_LB_EXACT),
94 +       EXT4_HLOCK_ADD          = (EXT4_LB_DE | EXT4_LB_SPIN_RO),
95 +
96 +       /* internal */
97 +       EXT4_HLOCK_LOOKUP_SAFE  = (EXT4_LB_DE_RO | EXT4_LB_DX_RO |
98 +                                  EXT4_LB_EXACT),
99 +       EXT4_HLOCK_DEL_SAFE     = (EXT4_LB_DE | EXT4_LB_DX_RO | EXT4_LB_EXACT),
100 +       EXT4_HLOCK_SPLIT        = (EXT4_LB_DE | EXT4_LB_DX | EXT4_LB_SPIN),
101 +};
102 +
103 +extern struct htree_lock_head *ext4_htree_lock_head_alloc(unsigned hbits);
104 +#define ext4_htree_lock_head_free(lhead)       htree_lock_head_free(lhead)
105 +
106 +extern struct htree_lock *ext4_htree_lock_alloc(void);
107 +#define ext4_htree_lock_free(lck)              htree_lock_free(lck)
108 +
109 +extern void ext4_htree_lock(struct htree_lock *lck,
110 +                           struct htree_lock_head *lhead,
111 +                           struct inode *dir, unsigned flags);
112 +#define ext4_htree_unlock(lck)                  htree_unlock(lck)
113 +
114 +extern struct buffer_head *__ext4_find_entry(struct inode *dir,
115 +                                       const struct qstr *d_name,
116 +                                       struct ext4_dir_entry_2 **res_dir,
117 +                                       int *inlined, struct htree_lock *lck);
118 +extern int __ext4_add_entry(handle_t *handle, struct dentry *dentry,
119 +                     struct inode *inode, struct htree_lock *lck);
120 +
121  struct ext4_filename {
122         const struct qstr *usr_fname;
123         struct fscrypt_str disk_name;
124 @@ -2487,11 +2557,19 @@ void ext4_insert_dentry(struct inode *in
125                         struct ext4_filename *fname, void *data);
126  static inline void ext4_update_dx_flag(struct inode *inode)
127  {
128 +       /* Disable it for ldiskfs, because going from a DX directory to
129 +        * a non-DX directory while it is in use will completely break
130 +        * the htree-locking.
131 +        * If we really want to support this operation in the future,
132 +        * we need to exclusively lock the directory at here which will
133 +        * increase complexity of code */
134 +#if 0
135         if (!ext4_has_feature_dir_index(inode->i_sb)) {
136                 /* ext4_iget() should have caught this... */
137                 WARN_ON_ONCE(ext4_has_feature_metadata_csum(inode->i_sb));
138                 ext4_clear_inode_flag(inode, EXT4_INODE_INDEX);
139         }
140 +#endif
141  }
142  static const unsigned char ext4_filetype_table[] = {
143         DT_UNKNOWN, DT_REG, DT_DIR, DT_CHR, DT_BLK, DT_FIFO, DT_SOCK, DT_LNK
144 Index: linux-4.18.0-240.1.1.el8/fs/ext4/namei.c
145 ===================================================================
146 --- linux-4.18.0-240.1.1.el8.orig/fs/ext4/namei.c
147 +++ linux-4.18.0-240.1.1.el8/fs/ext4/namei.c
148 @@ -54,6 +54,7 @@ struct buffer_head *ext4_append(handle_t
149                                         ext4_lblk_t *block)
150  {
151         struct buffer_head *bh;
152 +       struct ext4_inode_info *ei = EXT4_I(inode);
153         int err;
154  
155         if (unlikely(EXT4_SB(inode->i_sb)->s_max_dir_size_kb &&
156 @@ -61,15 +62,22 @@ struct buffer_head *ext4_append(handle_t
157                       EXT4_SB(inode->i_sb)->s_max_dir_size_kb)))
158                 return ERR_PTR(-ENOSPC);
159  
160 +       /* with parallel dir operations all appends
161 +       * have to be serialized -bzzz */
162 +       down(&ei->i_append_sem);
163 +
164         *block = inode->i_size >> inode->i_sb->s_blocksize_bits;
165  
166         bh = ext4_bread(handle, inode, *block, EXT4_GET_BLOCKS_CREATE);
167 -       if (IS_ERR(bh))
168 +       if (IS_ERR(bh)) {
169 +               up(&ei->i_append_sem);
170                 return bh;
171 +       }
172         inode->i_size += inode->i_sb->s_blocksize;
173         EXT4_I(inode)->i_disksize = inode->i_size;
174         BUFFER_TRACE(bh, "get_write_access");
175         err = ext4_journal_get_write_access(handle, bh);
176 +       up(&ei->i_append_sem);
177         if (err) {
178                 brelse(bh);
179                 ext4_std_error(inode->i_sb, err);
180 @@ -252,7 +260,8 @@ static unsigned dx_node_limit(struct ino
181  static struct dx_frame *dx_probe(struct ext4_filename *fname,
182                                  struct inode *dir,
183                                  struct dx_hash_info *hinfo,
184 -                                struct dx_frame *frame);
185 +                                struct dx_frame *frame,
186 +                                struct htree_lock *lck);
187  static void dx_release(struct dx_frame *frames);
188  static int dx_make_map(struct inode *dir, struct ext4_dir_entry_2 *de,
189                        unsigned blocksize, struct dx_hash_info *hinfo,
190 @@ -266,12 +275,13 @@ static void dx_insert_block(struct dx_fr
191  static int ext4_htree_next_block(struct inode *dir, __u32 hash,
192                                  struct dx_frame *frame,
193                                  struct dx_frame *frames,
194 -                                __u32 *start_hash);
195 +                                __u32 *start_hash, struct htree_lock *lck);
196  static struct buffer_head * ext4_dx_find_entry(struct inode *dir,
197                 struct ext4_filename *fname,
198 -               struct ext4_dir_entry_2 **res_dir);
199 +               struct ext4_dir_entry_2 **res_dir, struct htree_lock *lck);
200  static int ext4_dx_add_entry(handle_t *handle, struct ext4_filename *fname,
201 -                            struct inode *dir, struct inode *inode);
202 +                            struct inode *dir, struct inode *inode,
203 +                            struct htree_lock *lck);
204  
205  /* checksumming functions */
206  void initialize_dirent_tail(struct ext4_dir_entry_tail *t,
207 @@ -735,6 +745,227 @@ struct stats dx_show_entries(struct dx_h
208  }
209  #endif /* DX_DEBUG */
210  
211 +/* private data for htree_lock */
212 +struct ext4_dir_lock_data {
213 +       unsigned                ld_flags;  /* bits-map for lock types */
214 +       unsigned                ld_count;  /* # entries of the last DX block */
215 +       struct dx_entry         ld_at_entry; /* copy of leaf dx_entry */
216 +       struct dx_entry         *ld_at;    /* position of leaf dx_entry */
217 +};
218 +
219 +#define ext4_htree_lock_data(l)        ((struct ext4_dir_lock_data *)(l)->lk_private)
220 +#define ext4_find_entry(dir, name, dirent, inline) \
221 +                       __ext4_find_entry(dir, name, dirent, inline, NULL)
222 +#define ext4_add_entry(handle, dentry, inode) \
223 +                       __ext4_add_entry(handle, dentry, inode, NULL)
224 +
225 +/* NB: ext4_lblk_t is 32 bits so we use high bits to identify invalid blk */
226 +#define EXT4_HTREE_NODE_CHANGED        (0xcafeULL << 32)
227 +
228 +static void ext4_htree_event_cb(void *target, void *event)
229 +{
230 +       u64 *block = (u64 *)target;
231 +
232 +       if (*block == dx_get_block((struct dx_entry *)event))
233 +               *block = EXT4_HTREE_NODE_CHANGED;
234 +}
235 +
236 +struct htree_lock_head *ext4_htree_lock_head_alloc(unsigned hbits)
237 +{
238 +       struct htree_lock_head *lhead;
239 +
240 +       lhead = htree_lock_head_alloc(EXT4_LK_MAX, hbits, 0);
241 +       if (lhead != NULL) {
242 +               htree_lock_event_attach(lhead, EXT4_LK_SPIN, HTREE_EVENT_WR,
243 +                                       ext4_htree_event_cb);
244 +       }
245 +       return lhead;
246 +}
247 +EXPORT_SYMBOL(ext4_htree_lock_head_alloc);
248 +
249 +struct htree_lock *ext4_htree_lock_alloc(void)
250 +{
251 +       return htree_lock_alloc(EXT4_LK_MAX,
252 +                               sizeof(struct ext4_dir_lock_data));
253 +}
254 +EXPORT_SYMBOL(ext4_htree_lock_alloc);
255 +
256 +static htree_lock_mode_t ext4_htree_mode(unsigned flags)
257 +{
258 +       switch (flags) {
259 +       default: /* 0 or unknown flags require EX lock */
260 +               return HTREE_LOCK_EX;
261 +       case EXT4_HLOCK_READDIR:
262 +               return HTREE_LOCK_PR;
263 +       case EXT4_HLOCK_LOOKUP:
264 +               return HTREE_LOCK_CR;
265 +       case EXT4_HLOCK_DEL:
266 +       case EXT4_HLOCK_ADD:
267 +               return HTREE_LOCK_CW;
268 +       }
269 +}
270 +
271 +/* return PR for read-only operations, otherwise return EX */
272 +static inline htree_lock_mode_t ext4_htree_safe_mode(unsigned flags)
273 +{
274 +       int writer = (flags & EXT4_LB_DE) == EXT4_LB_DE;
275 +
276 +       /* 0 requires EX lock */
277 +       return (flags == 0 || writer) ? HTREE_LOCK_EX : HTREE_LOCK_PR;
278 +}
279 +
280 +static int ext4_htree_safe_locked(struct htree_lock *lck)
281 +{
282 +       int writer;
283 +
284 +       if (lck == NULL || lck->lk_mode == HTREE_LOCK_EX)
285 +               return 1;
286 +
287 +       writer = (ext4_htree_lock_data(lck)->ld_flags & EXT4_LB_DE) ==
288 +                EXT4_LB_DE;
289 +       if (writer) /* all readers & writers are excluded? */
290 +               return lck->lk_mode == HTREE_LOCK_EX;
291 +
292 +       /* all writers are excluded? */
293 +       return lck->lk_mode == HTREE_LOCK_PR ||
294 +              lck->lk_mode == HTREE_LOCK_PW ||
295 +              lck->lk_mode == HTREE_LOCK_EX;
296 +}
297 +
298 +/* relock htree_lock with EX mode if it's change operation, otherwise
299 + * relock it with PR mode. It's noop if PDO is disabled. */
300 +static void ext4_htree_safe_relock(struct htree_lock *lck)
301 +{
302 +       if (!ext4_htree_safe_locked(lck)) {
303 +               unsigned flags = ext4_htree_lock_data(lck)->ld_flags;
304 +
305 +               htree_change_lock(lck, ext4_htree_safe_mode(flags));
306 +       }
307 +}
308 +
309 +void ext4_htree_lock(struct htree_lock *lck, struct htree_lock_head *lhead,
310 +                    struct inode *dir, unsigned flags)
311 +{
312 +       htree_lock_mode_t mode = is_dx(dir) ? ext4_htree_mode(flags) :
313 +                                             ext4_htree_safe_mode(flags);
314 +
315 +       ext4_htree_lock_data(lck)->ld_flags = flags;
316 +       htree_lock(lck, lhead, mode);
317 +       if (!is_dx(dir))
318 +               ext4_htree_safe_relock(lck); /* make sure it's safe locked */
319 +}
320 +EXPORT_SYMBOL(ext4_htree_lock);
321 +
322 +static int ext4_htree_node_lock(struct htree_lock *lck, struct dx_entry *at,
323 +                               unsigned lmask, int wait, void *ev)
324 +{
325 +       u32     key = (at == NULL) ? 0 : dx_get_block(at);
326 +       u32     mode;
327 +
328 +       /* NOOP if htree is well protected or caller doesn't require the lock */
329 +       if (ext4_htree_safe_locked(lck) ||
330 +          !(ext4_htree_lock_data(lck)->ld_flags & lmask))
331 +               return 1;
332 +
333 +       mode = (ext4_htree_lock_data(lck)->ld_flags & lmask) == lmask ?
334 +               HTREE_LOCK_PW : HTREE_LOCK_PR;
335 +       while (1) {
336 +               if (htree_node_lock_try(lck, mode, key, ffz(~lmask), wait, ev))
337 +                       return 1;
338 +               if (!(lmask & EXT4_LB_SPIN)) /* not a spinlock */
339 +                       return 0;
340 +               cpu_relax(); /* spin until granted */
341 +       }
342 +}
343 +
344 +static int ext4_htree_node_locked(struct htree_lock *lck, unsigned lmask)
345 +{
346 +       return ext4_htree_safe_locked(lck) ||
347 +              htree_node_is_granted(lck, ffz(~lmask));
348 +}
349 +
350 +static void ext4_htree_node_unlock(struct htree_lock *lck,
351 +                                  unsigned lmask, void *buf)
352 +{
353 +       /* NB: it's safe to call multiple times even if it's not locked */
354 +       if (!ext4_htree_safe_locked(lck) &&
355 +            htree_node_is_granted(lck, ffz(~lmask)))
356 +               htree_node_unlock(lck, ffz(~lmask), buf);
357 +}
358 +
359 +#define ext4_htree_dx_lock(lck, key)           \
360 +       ext4_htree_node_lock(lck, key, EXT4_LB_DX, 1, NULL)
361 +#define ext4_htree_dx_lock_try(lck, key)       \
362 +       ext4_htree_node_lock(lck, key, EXT4_LB_DX, 0, NULL)
363 +#define ext4_htree_dx_unlock(lck)              \
364 +       ext4_htree_node_unlock(lck, EXT4_LB_DX, NULL)
365 +#define ext4_htree_dx_locked(lck)              \
366 +       ext4_htree_node_locked(lck, EXT4_LB_DX)
367 +
368 +static void ext4_htree_dx_need_lock(struct htree_lock *lck)
369 +{
370 +       struct ext4_dir_lock_data *ld;
371 +
372 +       if (ext4_htree_safe_locked(lck))
373 +               return;
374 +
375 +       ld = ext4_htree_lock_data(lck);
376 +       switch (ld->ld_flags) {
377 +       default:
378 +               return;
379 +       case EXT4_HLOCK_LOOKUP:
380 +               ld->ld_flags = EXT4_HLOCK_LOOKUP_SAFE;
381 +               return;
382 +       case EXT4_HLOCK_DEL:
383 +               ld->ld_flags = EXT4_HLOCK_DEL_SAFE;
384 +               return;
385 +       case EXT4_HLOCK_ADD:
386 +               ld->ld_flags = EXT4_HLOCK_SPLIT;
387 +               return;
388 +       }
389 +}
390 +
391 +#define ext4_htree_de_lock(lck, key)           \
392 +       ext4_htree_node_lock(lck, key, EXT4_LB_DE, 1, NULL)
393 +#define ext4_htree_de_unlock(lck)              \
394 +       ext4_htree_node_unlock(lck, EXT4_LB_DE, NULL)
395 +
396 +#define ext4_htree_spin_lock(lck, key, event)  \
397 +       ext4_htree_node_lock(lck, key, EXT4_LB_SPIN, 0, event)
398 +#define ext4_htree_spin_unlock(lck)            \
399 +       ext4_htree_node_unlock(lck, EXT4_LB_SPIN, NULL)
400 +#define ext4_htree_spin_unlock_listen(lck, p)  \
401 +       ext4_htree_node_unlock(lck, EXT4_LB_SPIN, p)
402 +
403 +static void ext4_htree_spin_stop_listen(struct htree_lock *lck)
404 +{
405 +       if (!ext4_htree_safe_locked(lck) &&
406 +           htree_node_is_listening(lck, ffz(~EXT4_LB_SPIN)))
407 +               htree_node_stop_listen(lck, ffz(~EXT4_LB_SPIN));
408 +}
409 +
410 +enum {
411 +       DX_HASH_COL_IGNORE,     /* ignore collision while probing frames */
412 +       DX_HASH_COL_YES,        /* there is collision and it does matter */
413 +       DX_HASH_COL_NO,         /* there is no collision */
414 +};
415 +
416 +static int dx_probe_hash_collision(struct htree_lock *lck,
417 +                                  struct dx_entry *entries,
418 +                                  struct dx_entry *at, u32 hash)
419 +{
420 +       if (!(lck && ext4_htree_lock_data(lck)->ld_flags & EXT4_LB_EXACT)) {
421 +               return DX_HASH_COL_IGNORE; /* don't care about collision */
422 +
423 +       } else if (at == entries + dx_get_count(entries) - 1) {
424 +               return DX_HASH_COL_IGNORE; /* not in any leaf of this DX */
425 +
426 +       } else { /* hash collision? */
427 +               return ((dx_get_hash(at + 1) & ~1) == hash) ?
428 +                       DX_HASH_COL_YES : DX_HASH_COL_NO;
429 +       }
430 +}
431 +
432  /*
433   * Probe for a directory leaf block to search.
434   *
435 @@ -746,10 +977,11 @@ struct stats dx_show_entries(struct dx_h
436   */
437  static struct dx_frame *
438  dx_probe(struct ext4_filename *fname, struct inode *dir,
439 -        struct dx_hash_info *hinfo, struct dx_frame *frame_in)
440 +        struct dx_hash_info *hinfo, struct dx_frame *frame_in,
441 +        struct htree_lock *lck)
442  {
443         unsigned count, indirect;
444 -       struct dx_entry *at, *entries, *p, *q, *m;
445 +       struct dx_entry *at, *entries, *p, *q, *m, *dx = NULL;
446         struct dx_root_info *info;
447         struct dx_frame *frame = frame_in;
448         struct dx_frame *ret_err = ERR_PTR(ERR_BAD_DX_DIR);
449 @@ -811,8 +1043,15 @@ dx_probe(struct ext4_filename *fname, st
450  
451         dxtrace(printk("Look up %x", hash));
452         while (1) {
453 +               if (indirect == 0) { /* the last index level */
454 +                       /* NB: ext4_htree_dx_lock() could be noop if
455 +                        * DX-lock flag is not set for current operation */
456 +                       ext4_htree_dx_lock(lck, dx);
457 +                       ext4_htree_spin_lock(lck, dx, NULL);
458 +               }
459                 count = dx_get_count(entries);
460 -               if (!count || count > dx_get_limit(entries)) {
461 +               if (count == 0 || count > dx_get_limit(entries)) {
462 +                       ext4_htree_spin_unlock(lck); /* release spin */
463                         ext4_warning_inode(dir,
464                                            "dx entry: count %u beyond limit %u",
465                                            count, dx_get_limit(entries));
466 @@ -851,8 +1090,70 @@ dx_probe(struct ext4_filename *fname, st
467                                dx_get_block(at)));
468                 frame->entries = entries;
469                 frame->at = at;
470 -               if (!indirect--)
471 +
472 +               if (indirect == 0) { /* the last index level */
473 +                       struct ext4_dir_lock_data *ld;
474 +                       u64 myblock;
475 +
476 +                       /* By default we only lock DE-block, however, we will
477 +                        * also lock the last level DX-block if:
478 +                        * a) there is hash collision
479 +                        *    we will set DX-lock flag (a few lines below)
480 +                        *    and redo to lock DX-block
481 +                        *    see detail in dx_probe_hash_collision()
482 +                        * b) it's a retry from splitting
483 +                        *    we need to lock the last level DX-block so nobody
484 +                        *    else can split any leaf blocks under the same
485 +                        *    DX-block, see detail in ext4_dx_add_entry()
486 +                        */
487 +                       if (ext4_htree_dx_locked(lck)) {
488 +                               /* DX-block is locked, just lock DE-block
489 +                                * and return */
490 +                               ext4_htree_spin_unlock(lck);
491 +                               if (!ext4_htree_safe_locked(lck))
492 +                                       ext4_htree_de_lock(lck, frame->at);
493 +                               return frame;
494 +                       }
495 +                       /* it's pdirop and no DX lock */
496 +                       if (dx_probe_hash_collision(lck, entries, at, hash) ==
497 +                           DX_HASH_COL_YES) {
498 +                               /* found hash collision, set DX-lock flag
499 +                                * and retry to obtain DX-lock */
500 +                               ext4_htree_spin_unlock(lck);
501 +                               ext4_htree_dx_need_lock(lck);
502 +                               continue;
503 +                       }
504 +                       ld = ext4_htree_lock_data(lck);
505 +                       /* because I don't lock DX, so @at can't be trusted
506 +                        * after I release spinlock so I have to save it */
507 +                       ld->ld_at = at;
508 +                       ld->ld_at_entry = *at;
509 +                       ld->ld_count = dx_get_count(entries);
510 +
511 +                       frame->at = &ld->ld_at_entry;
512 +                       myblock = dx_get_block(at);
513 +
514 +                       /* NB: ordering locking */
515 +                       ext4_htree_spin_unlock_listen(lck, &myblock);
516 +                       /* other thread can split this DE-block because:
517 +                        * a) I don't have lock for the DE-block yet
518 +                        * b) I released spinlock on DX-block
519 +                        * if it happened I can detect it by listening
520 +                        * splitting event on this DE-block */
521 +                       ext4_htree_de_lock(lck, frame->at);
522 +                       ext4_htree_spin_stop_listen(lck);
523 +
524 +                       if (myblock == EXT4_HTREE_NODE_CHANGED) {
525 +                               /* someone split this DE-block before
526 +                                * I locked it, I need to retry and lock
527 +                                * valid DE-block */
528 +                               ext4_htree_de_unlock(lck);
529 +                               continue;
530 +                       }
531                         return frame;
532 +               }
533 +               dx = at;
534 +               indirect--;
535                 frame++;
536                 frame->bh = ext4_read_dirblock(dir, dx_get_block(at), INDEX);
537                 if (IS_ERR(frame->bh)) {
538 @@ -921,7 +1222,7 @@ static void dx_release(struct dx_frame *
539  static int ext4_htree_next_block(struct inode *dir, __u32 hash,
540                                  struct dx_frame *frame,
541                                  struct dx_frame *frames,
542 -                                __u32 *start_hash)
543 +                                __u32 *start_hash, struct htree_lock *lck)
544  {
545         struct dx_frame *p;
546         struct buffer_head *bh;
547 @@ -936,12 +1237,22 @@ static int ext4_htree_next_block(struct
548          * this loop, num_frames indicates the number of interior
549          * nodes need to be read.
550          */
551 +       ext4_htree_de_unlock(lck);
552         while (1) {
553 -               if (++(p->at) < p->entries + dx_get_count(p->entries))
554 -                       break;
555 +               if (num_frames > 0 || ext4_htree_dx_locked(lck)) {
556 +                       /* num_frames > 0 :
557 +                        *   DX block
558 +                        * ext4_htree_dx_locked:
559 +                        *   frame->at is reliable pointer returned by dx_probe,
560 +                        *   otherwise dx_probe already knew no collision */
561 +                       if (++(p->at) < p->entries + dx_get_count(p->entries))
562 +                               break;
563 +               }
564                 if (p == frames)
565                         return 0;
566                 num_frames++;
567 +               if (num_frames == 1)
568 +                       ext4_htree_dx_unlock(lck);
569                 p--;
570         }
571  
572 @@ -964,6 +1275,13 @@ static int ext4_htree_next_block(struct
573          * block so no check is necessary
574          */
575         while (num_frames--) {
576 +               if (num_frames == 0) {
577 +                       /* it's not always necessary, we just don't want to
578 +                        * detect hash collision again */
579 +                       ext4_htree_dx_need_lock(lck);
580 +                       ext4_htree_dx_lock(lck, p->at);
581 +               }
582 +
583                 bh = ext4_read_dirblock(dir, dx_get_block(p->at), INDEX);
584                 if (IS_ERR(bh))
585                         return PTR_ERR(bh);
586 @@ -972,6 +1290,7 @@ static int ext4_htree_next_block(struct
587                 p->bh = bh;
588                 p->at = p->entries = ((struct dx_node *) bh->b_data)->entries;
589         }
590 +       ext4_htree_de_lock(lck, p->at);
591         return 1;
592  }
593  
594 @@ -1119,10 +1438,10 @@ int ext4_htree_fill_tree(struct file *di
595         }
596         hinfo.hash = start_hash;
597         hinfo.minor_hash = 0;
598 -       frame = dx_probe(NULL, dir, &hinfo, frames);
599 +       /* assume it's PR locked */
600 +       frame = dx_probe(NULL, dir, &hinfo, frames, NULL);
601         if (IS_ERR(frame))
602                 return PTR_ERR(frame);
603 -
604         /* Add '.' and '..' from the htree header */
605         if (!start_hash && !start_minor_hash) {
606                 de = (struct ext4_dir_entry_2 *) frames[0].bh->b_data;
607 @@ -1162,7 +1481,7 @@ int ext4_htree_fill_tree(struct file *di
608                 count += ret;
609                 hashval = ~0;
610                 ret = ext4_htree_next_block(dir, HASH_NB_ALWAYS,
611 -                                           frame, frames, &hashval);
612 +                                           frame, frames, &hashval, NULL);
613                 *next_hash = hashval;
614                 if (ret < 0) {
615                         err = ret;
616 @@ -1354,10 +1673,10 @@ static int is_dx_internal_node(struct in
617   * The returned buffer_head has ->b_count elevated.  The caller is expected
618   * to brelse() it when appropriate.
619   */
620 -static struct buffer_head * ext4_find_entry (struct inode *dir,
621 +struct buffer_head *__ext4_find_entry(struct inode *dir,
622                                         const struct qstr *d_name,
623                                         struct ext4_dir_entry_2 **res_dir,
624 -                                       int *inlined)
625 +                                       int *inlined, struct htree_lock *lck)
626  {
627         struct super_block *sb;
628         struct buffer_head *bh_use[NAMEI_RA_SIZE];
629 @@ -1406,7 +1725,7 @@ static struct buffer_head * ext4_find_en
630                 goto restart;
631         }
632         if (is_dx(dir)) {
633 -               ret = ext4_dx_find_entry(dir, &fname, res_dir);
634 +               ret = ext4_dx_find_entry(dir, &fname, res_dir, lck);
635                 /*
636                  * On success, or if the error was file not found,
637                  * return.  Otherwise, fall back to doing a search the
638 @@ -1416,6 +1735,7 @@ static struct buffer_head * ext4_find_en
639                         goto cleanup_and_exit;
640                 dxtrace(printk(KERN_DEBUG "ext4_find_entry: dx failed, "
641                                "falling back\n"));
642 +               ext4_htree_safe_relock(lck);
643                 ret = NULL;
644         }
645         nblocks = dir->i_size >> EXT4_BLOCK_SIZE_BITS(sb);
646 @@ -1507,10 +1827,12 @@ cleanup_and_exit:
647         ext4_fname_free_filename(&fname);
648         return ret;
649  }
650 +EXPORT_SYMBOL(__ext4_find_entry);
651  
652  static struct buffer_head * ext4_dx_find_entry(struct inode *dir,
653                         struct ext4_filename *fname,
654 -                       struct ext4_dir_entry_2 **res_dir)
655 +                       struct ext4_dir_entry_2 **res_dir,
656 +                       struct htree_lock *lck)
657  {
658         struct super_block * sb = dir->i_sb;
659         struct dx_frame frames[EXT4_HTREE_LEVEL], *frame;
660 @@ -1521,7 +1843,7 @@ static struct buffer_head * ext4_dx_find
661  #ifdef CONFIG_EXT4_FS_ENCRYPTION
662         *res_dir = NULL;
663  #endif
664 -       frame = dx_probe(fname, dir, NULL, frames);
665 +       frame = dx_probe(fname, dir, NULL, frames, lck);
666         if (IS_ERR(frame))
667                 return (struct buffer_head *) frame;
668         do {
669 @@ -1543,7 +1865,7 @@ static struct buffer_head * ext4_dx_find
670  
671                 /* Check to see if we should continue to search */
672                 retval = ext4_htree_next_block(dir, fname->hinfo.hash, frame,
673 -                                              frames, NULL);
674 +                                              frames, NULL, lck);
675                 if (retval < 0) {
676                         ext4_warning_inode(dir,
677                                 "error %d reading directory index block",
678 @@ -1718,8 +2040,9 @@ static struct ext4_dir_entry_2* dx_pack_
679   * Returns pointer to de in block into which the new entry will be inserted.
680   */
681  static struct ext4_dir_entry_2 *do_split(handle_t *handle, struct inode *dir,
682 -                       struct buffer_head **bh,struct dx_frame *frame,
683 -                       struct dx_hash_info *hinfo)
684 +                       struct buffer_head **bh, struct dx_frame *frames,
685 +                       struct dx_frame *frame, struct dx_hash_info *hinfo,
686 +                       struct htree_lock *lck)
687  {
688         unsigned blocksize = dir->i_sb->s_blocksize;
689         unsigned count, continued;
690 @@ -1781,8 +2104,14 @@ static struct ext4_dir_entry_2 *do_split
691                                         hash2, split, count-split));
692  
693         /* Fancy dance to stay within two buffers */
694 -       de2 = dx_move_dirents(data1, data2, map + split, count - split,
695 -                             blocksize);
696 +       if (hinfo->hash < hash2) {
697 +               de2 = dx_move_dirents(data1, data2, map + split,
698 +                                     count - split, blocksize);
699 +       } else {
700 +               /* make sure we will add the entry to the same block
701 +                * which we have already locked */
702 +               de2 = dx_move_dirents(data1, data2, map, split, blocksize);
703 +       }
704         de = dx_pack_dirents(data1, blocksize);
705         de->rec_len = ext4_rec_len_to_disk(data1 + (blocksize - csum_size) -
706                                            (char *) de,
707 @@ -1803,12 +2132,21 @@ static struct ext4_dir_entry_2 *do_split
708         dxtrace(dx_show_leaf(dir, hinfo, (struct ext4_dir_entry_2 *) data2,
709                         blocksize, 1));
710  
711 -       /* Which block gets the new entry? */
712 -       if (hinfo->hash >= hash2) {
713 -               swap(*bh, bh2);
714 -               de = de2;
715 +       ext4_htree_spin_lock(lck, frame > frames ? (frame - 1)->at : NULL,
716 +                            frame->at); /* notify block is being split */
717 +       if (hinfo->hash < hash2) {
718 +               dx_insert_block(frame, hash2 + continued, newblock);
719 +
720 +       } else {
721 +               /* switch block number */
722 +               dx_insert_block(frame, hash2 + continued,
723 +                               dx_get_block(frame->at));
724 +               dx_set_block(frame->at, newblock);
725 +               (frame->at)++;
726         }
727 -       dx_insert_block(frame, hash2 + continued, newblock);
728 +       ext4_htree_spin_unlock(lck);
729 +       ext4_htree_dx_unlock(lck);
730 +
731         err = ext4_handle_dirty_dirent_node(handle, dir, bh2);
732         if (err)
733                 goto journal_error;
734 @@ -2082,7 +2420,7 @@ static int make_indexed_dir(handle_t *ha
735         if (retval)
736                 goto out_frames;        
737  
738 -       de = do_split(handle,dir, &bh2, frame, &fname->hinfo);
739 +       de = do_split(handle, dir, &bh2, frames, frame, &fname->hinfo, NULL);
740         if (IS_ERR(de)) {
741                 retval = PTR_ERR(de);
742                 goto out_frames;
743 @@ -2192,8 +2530,8 @@ out:
744   * may not sleep between calling this and putting something into
745   * the entry, as someone else might have used it while you slept.
746   */
747 -static int ext4_add_entry(handle_t *handle, struct dentry *dentry,
748 -                         struct inode *inode)
749 +int __ext4_add_entry(handle_t *handle, struct dentry *dentry,
750 +                     struct inode *inode, struct htree_lock *lck)
751  {
752         struct inode *dir = d_inode(dentry->d_parent);
753         struct buffer_head *bh = NULL;
754 @@ -2234,9 +2572,10 @@ static int ext4_add_entry(handle_t *hand
755                 if (dentry->d_name.len == 2 &&
756                     memcmp(dentry->d_name.name, "..", 2) == 0)
757                         return ext4_update_dotdot(handle, dentry, inode);
758 -               retval = ext4_dx_add_entry(handle, &fname, dir, inode);
759 +               retval = ext4_dx_add_entry(handle, &fname, dir, inode, lck);
760                 if (!retval || (retval != ERR_BAD_DX_DIR))
761                         goto out;
762 +               ext4_htree_safe_relock(lck);
763                 /* Can we just ignore htree data? */
764                 if (ext4_has_metadata_csum(sb)) {
765                         EXT4_ERROR_INODE(dir,
766 @@ -2293,12 +2632,14 @@ out:
767                 ext4_set_inode_state(inode, EXT4_STATE_NEWENTRY);
768         return retval;
769  }
770 +EXPORT_SYMBOL(__ext4_add_entry);
771  
772  /*
773   * Returns 0 for success, or a negative error value
774   */
775  static int ext4_dx_add_entry(handle_t *handle, struct ext4_filename *fname,
776 -                            struct inode *dir, struct inode *inode)
777 +                            struct inode *dir, struct inode *inode,
778 +                            struct htree_lock *lck)
779  {
780         struct dx_frame frames[EXT4_HTREE_LEVEL], *frame;
781         struct dx_entry *entries, *at;
782 @@ -2310,7 +2651,7 @@ static int ext4_dx_add_entry(handle_t *h
783  
784  again:
785         restart = 0;
786 -       frame = dx_probe(fname, dir, NULL, frames);
787 +       frame = dx_probe(fname, dir, NULL, frames, lck);
788         if (IS_ERR(frame))
789                 return PTR_ERR(frame);
790         entries = frame->entries;
791 @@ -2345,6 +2686,11 @@ again:
792                 struct dx_node *node2;
793                 struct buffer_head *bh2;
794  
795 +               if (!ext4_htree_safe_locked(lck)) { /* retry with EX lock */
796 +                       ext4_htree_safe_relock(lck);
797 +                       restart = 1;
798 +                       goto cleanup;
799 +               }
800                 while (frame > frames) {
801                         if (dx_get_count((frame - 1)->entries) <
802                             dx_get_limit((frame - 1)->entries)) {
803 @@ -2447,8 +2793,32 @@ again:
804                         restart = 1;
805                         goto journal_error;
806                 }
807 +       } else if (!ext4_htree_dx_locked(lck)) {
808 +               struct ext4_dir_lock_data *ld = ext4_htree_lock_data(lck);
809 +
810 +               /* not well protected, require DX lock */
811 +               ext4_htree_dx_need_lock(lck);
812 +               at = frame > frames ? (frame - 1)->at : NULL;
813 +
814 +               /* NB: no risk of deadlock because it's just a try.
815 +                *
816 +                * NB: we check ld_count twice: the first time before
817 +                * having the DX lock, the second time after holding it.
818 +                *
819 +                * NB: We never free directory blocks so far, which
820 +                * means the value returned by dx_get_count() should equal
821 +                * ld->ld_count if nobody split any DE-block under @at,
822 +                * and ld->ld_at still points to valid dx_entry. */
823 +               if ((ld->ld_count != dx_get_count(entries)) ||
824 +                   !ext4_htree_dx_lock_try(lck, at) ||
825 +                   (ld->ld_count != dx_get_count(entries))) {
826 +                       restart = 1;
827 +                       goto cleanup;
828 +               }
829 +               /* OK, I've got DX lock and nothing changed */
830 +               frame->at = ld->ld_at;
831         }
832 -       de = do_split(handle, dir, &bh, frame, &fname->hinfo);
833 +       de = do_split(handle, dir, &bh, frames, frame, &fname->hinfo, lck);
834         if (IS_ERR(de)) {
835                 err = PTR_ERR(de);
836                 goto cleanup;
837 @@ -2459,6 +2829,8 @@ again:
838  journal_error:
839         ext4_std_error(dir->i_sb, err); /* this is a no-op if err == 0 */
840  cleanup:
841 +       ext4_htree_dx_unlock(lck);
842 +       ext4_htree_de_unlock(lck);
843         brelse(bh);
844         dx_release(frames);
845         /* @restart is true means htree-path has been changed, we need to
846 Index: linux-4.18.0-80.1.2.el8_0/fs/ext4/super.c
847 ===================================================================
848 --- linux-4.18.0-80.1.2.el8_0.orig/fs/ext4/super.c
849 +++ linux-4.18.0-80.1.2.el8_0/fs/ext4/super.c
850 @@ -1009,6 +1009,7 @@ static struct inode *ext4_alloc_inode(st
851  
852         inode_set_iversion(&ei->vfs_inode, 1);
853         spin_lock_init(&ei->i_raw_lock);
854 +       sema_init(&ei->i_append_sem, 1);
855         INIT_LIST_HEAD(&ei->i_prealloc_list);
856         spin_lock_init(&ei->i_prealloc_lock);
857         ext4_es_init_tree(&ei->i_es_tree);