Whamcloud - gitweb
LU-17914 lnet: Fix erroneous net set error
[fs/lustre-release.git] / ldiskfs / kernel_patches / patches / rhel8.7 / ext4-pdirop.patch
1 LU-50 ldiskfs: pdirops patch for ldiskfs
2
3 Single directory performance is critical for HPC workloads. In a
4 typical use case an application creates a separate output file for
5 each node and task in a job. As nodes and tasks increase, hundreds
6 of thousands of files may be created in a single directory within
7 a short window of time.
8 Today, both filename lookup and file system modifying operations
9 (such as create and unlink) are protected with a single lock for
10 an entire ldiskfs directory. The PDO project will remove this
11 bottleneck by introducing a parallel locking mechanism for entire
12 ldiskfs directories. This work will enable multiple application
13 threads to simultaneously lookup, create and unlink in parallel.
14
15 This patch contains:
16   - pdirops support for ldiskfs
17   - N-level htree directory
18   - integrate with osd-ldiskfs
19
20 Signed-off-by: Liang Zhen <liang@whamcloud.com>
21 Change-Id: I269c0e3112e68f3acd79e860dab052a68c7d7aaa
22 Reviewed-on: http://review.whamcloud.com/375
23 Reviewed-by: Andreas Dilger <adilger@whamcloud.com>
24
25 Index: linux-4.18.0-423.el8/fs/ext4/Makefile
26 ===================================================================
27 --- linux-4.18.0-423.el8.orig/fs/ext4/Makefile
28 +++ linux-4.18.0-423.el8/fs/ext4/Makefile
29 @@ -7,6 +7,7 @@ obj-$(CONFIG_EXT4_FS) += ext4.o
30
31  ext4-y := balloc.o bitmap.o block_validity.o dir.o ext4_jbd2.o extents.o \
32                 extents_status.o file.o fsmap.o fsync.o hash.o ialloc.o \
33 +               htree_lock.o \
34                 indirect.o inline.o inode.o ioctl.o mballoc.o migrate.o \
35                 mmp.o move_extent.o namei.o page-io.o readpage.o resize.o \
36                 super.o symlink.o sysfs.o xattr.o xattr_trusted.o xattr_user.o
37 Index: linux-4.18.0-423.el8/fs/ext4/ext4.h
38 ===================================================================
39 --- linux-4.18.0-423.el8.orig/fs/ext4/ext4.h
40 +++ linux-4.18.0-423.el8/fs/ext4/ext4.h
41 @@ -29,6 +29,7 @@
42  #include <linux/timer.h>
43  #include <linux/version.h>
44  #include <linux/wait.h>
45 +#include <linux/htree_lock.h>
46  #include <linux/sched/signal.h>
47  #include <linux/blockgroup_lock.h>
48  #include <linux/percpu_counter.h>
49 @@ -966,6 +967,9 @@ struct ext4_inode_info {
50         __u32   i_dtime;
51         ext4_fsblk_t    i_file_acl;
52
53 +       /* following fields for parallel directory operations -bzzz */
54 +       struct semaphore i_append_sem;
55 +
56         /*
57          * i_block_group is the number of the block group which contains
58          * this file's inode.  Constant across the lifetime of the inode,
59 @@ -2217,6 +2221,72 @@ struct dx_hash_info
60   */
61  #define HASH_NB_ALWAYS         1
62
63 +/* assume name-hash is protected by upper layer */
64 +#define EXT4_HTREE_LOCK_HASH   0
65 +
66 +enum ext4_pdo_lk_types {
67 +#if EXT4_HTREE_LOCK_HASH
68 +       EXT4_LK_HASH,
69 +#endif
70 +       EXT4_LK_DX,             /* index block */
71 +       EXT4_LK_DE,             /* directory entry block */
72 +       EXT4_LK_SPIN,           /* spinlock */
73 +       EXT4_LK_MAX,
74 +};
75 +
76 +/* read-only bit */
77 +#define EXT4_LB_RO(b)          (1 << (b))
78 +/* read + write, high bits for writer */
79 +#define EXT4_LB_RW(b)          ((1 << (b)) | (1 << (EXT4_LK_MAX + (b))))
80 +
81 +enum ext4_pdo_lock_bits {
82 +       /* DX lock bits */
83 +       EXT4_LB_DX_RO           = EXT4_LB_RO(EXT4_LK_DX),
84 +       EXT4_LB_DX              = EXT4_LB_RW(EXT4_LK_DX),
85 +       /* DE lock bits */
86 +       EXT4_LB_DE_RO           = EXT4_LB_RO(EXT4_LK_DE),
87 +       EXT4_LB_DE              = EXT4_LB_RW(EXT4_LK_DE),
88 +       /* DX spinlock bits */
89 +       EXT4_LB_SPIN_RO         = EXT4_LB_RO(EXT4_LK_SPIN),
90 +       EXT4_LB_SPIN            = EXT4_LB_RW(EXT4_LK_SPIN),
91 +       /* accurate searching */
92 +       EXT4_LB_EXACT           = EXT4_LB_RO(EXT4_LK_MAX << 1),
93 +};
94 +
95 +enum ext4_pdo_lock_opc {
96 +       /* external */
97 +       EXT4_HLOCK_READDIR      = (EXT4_LB_DE_RO | EXT4_LB_DX_RO),
98 +       EXT4_HLOCK_LOOKUP       = (EXT4_LB_DE_RO | EXT4_LB_SPIN_RO |
99 +                                  EXT4_LB_EXACT),
100 +       EXT4_HLOCK_DEL          = (EXT4_LB_DE | EXT4_LB_SPIN_RO |
101 +                                  EXT4_LB_EXACT),
102 +       EXT4_HLOCK_ADD          = (EXT4_LB_DE | EXT4_LB_SPIN_RO),
103 +
104 +       /* internal */
105 +       EXT4_HLOCK_LOOKUP_SAFE  = (EXT4_LB_DE_RO | EXT4_LB_DX_RO |
106 +                                  EXT4_LB_EXACT),
107 +       EXT4_HLOCK_DEL_SAFE     = (EXT4_LB_DE | EXT4_LB_DX_RO | EXT4_LB_EXACT),
108 +       EXT4_HLOCK_SPLIT        = (EXT4_LB_DE | EXT4_LB_DX | EXT4_LB_SPIN),
109 +};
110 +
111 +extern struct htree_lock_head *ext4_htree_lock_head_alloc(unsigned hbits);
112 +#define ext4_htree_lock_head_free(lhead)       htree_lock_head_free(lhead)
113 +
114 +extern struct htree_lock *ext4_htree_lock_alloc(void);
115 +#define ext4_htree_lock_free(lck)              htree_lock_free(lck)
116 +
117 +extern void ext4_htree_lock(struct htree_lock *lck,
118 +                           struct htree_lock_head *lhead,
119 +                           struct inode *dir, unsigned flags);
120 +#define ext4_htree_unlock(lck)                  htree_unlock(lck)
121 +
122 +extern struct buffer_head *__ext4_find_entry(struct inode *dir,
123 +                                       const struct qstr *d_name,
124 +                                       struct ext4_dir_entry_2 **res_dir,
125 +                                       int *inlined, struct htree_lock *lck);
126 +extern int __ext4_add_entry(handle_t *handle, struct dentry *dentry,
127 +                     struct inode *inode, struct htree_lock *lck);
128 +
129  struct ext4_filename {
130         const struct qstr *usr_fname;
131         struct fscrypt_str disk_name;
132 @@ -2519,12 +2589,20 @@ void ext4_insert_dentry(struct inode *in
133                         struct ext4_filename *fname, void *data);
134  static inline void ext4_update_dx_flag(struct inode *inode)
135  {
136 +       /* Disable it for ldiskfs, because going from a DX directory to
137 +        * a non-DX directory while it is in use will completely break
138 +        * the htree-locking.
139 +        * If we really want to support this operation in the future,
140 +        * we need to exclusively lock the directory at here which will
141 +        * increase complexity of code */
142 +#if 0
143         if (!ext4_has_feature_dir_index(inode->i_sb) &&
144             ext4_test_inode_flag(inode, EXT4_INODE_INDEX)) {
145                 /* ext4_iget() should have caught this... */
146                 WARN_ON_ONCE(ext4_has_feature_metadata_csum(inode->i_sb));
147                 ext4_clear_inode_flag(inode, EXT4_INODE_INDEX);
148         }
149 +#endif
150  }
151  static const unsigned char ext4_filetype_table[] = {
152         DT_UNKNOWN, DT_REG, DT_DIR, DT_CHR, DT_BLK, DT_FIFO, DT_SOCK, DT_LNK
153 Index: linux-4.18.0-423.el8/fs/ext4/namei.c
154 ===================================================================
155 --- linux-4.18.0-423.el8.orig/fs/ext4/namei.c
156 +++ linux-4.18.0-423.el8/fs/ext4/namei.c
157 @@ -55,6 +55,7 @@ struct buffer_head *ext4_append(handle_t
158  {
159         struct ext4_map_blocks map;
160         struct buffer_head *bh;
161 +       struct ext4_inode_info *ei = EXT4_I(inode);
162         int err;
163
164         if (unlikely(EXT4_SB(inode->i_sb)->s_max_dir_size_kb &&
165 @@ -62,6 +63,10 @@ struct buffer_head *ext4_append(handle_t
166                       EXT4_SB(inode->i_sb)->s_max_dir_size_kb)))
167                 return ERR_PTR(-ENOSPC);
168
169 +       /* with parallel dir operations all appends
170 +       * have to be serialized -bzzz */
171 +       down(&ei->i_append_sem);
172 +
173         *block = inode->i_size >> inode->i_sb->s_blocksize_bits;
174         map.m_lblk = *block;
175         map.m_len = 1;
176 @@ -72,18 +77,24 @@ struct buffer_head *ext4_append(handle_t
177          * directory.
178          */
179         err = ext4_map_blocks(NULL, inode, &map, 0);
180 -       if (err < 0)
181 +       if (err < 0) {
182 +               up(&ei->i_append_sem);
183                 return ERR_PTR(err);
184 +       }
185         if (err) {
186 +               up(&ei->i_append_sem);
187                 EXT4_ERROR_INODE(inode, "Logical block already allocated");
188                 return ERR_PTR(-EFSCORRUPTED);
189         }
190
191         bh = ext4_bread(handle, inode, *block, EXT4_GET_BLOCKS_CREATE);
192 -       if (IS_ERR(bh))
193 +       if (IS_ERR(bh)) {
194 +               up(&ei->i_append_sem);
195                 return bh;
196 +       }
197         inode->i_size += inode->i_sb->s_blocksize;
198         EXT4_I(inode)->i_disksize = inode->i_size;
199 +       up(&ei->i_append_sem);
200         BUFFER_TRACE(bh, "get_write_access");
201         err = ext4_journal_get_write_access(handle, bh);
202         if (err) {
203 @@ -288,7 +299,8 @@ static unsigned dx_node_limit(struct ino
204  static struct dx_frame *dx_probe(struct ext4_filename *fname,
205                                  struct inode *dir,
206                                  struct dx_hash_info *hinfo,
207 -                                struct dx_frame *frame);
208 +                                struct dx_frame *frame,
209 +                                struct htree_lock *lck);
210  static void dx_release(struct dx_frame *frames);
211  static int dx_make_map(struct inode *dir, struct buffer_head *bh,
212                        struct dx_hash_info *hinfo,
213 @@ -302,12 +314,13 @@ static void dx_insert_block(struct dx_fr
214  static int ext4_htree_next_block(struct inode *dir, __u32 hash,
215                                  struct dx_frame *frame,
216                                  struct dx_frame *frames,
217 -                                __u32 *start_hash);
218 +                                __u32 *start_hash, struct htree_lock *lck);
219  static struct buffer_head * ext4_dx_find_entry(struct inode *dir,
220                 struct ext4_filename *fname,
221 -               struct ext4_dir_entry_2 **res_dir);
222 +               struct ext4_dir_entry_2 **res_dir, struct htree_lock *lck);
223  static int ext4_dx_add_entry(handle_t *handle, struct ext4_filename *fname,
224 -                            struct inode *dir, struct inode *inode);
225 +                            struct inode *dir, struct inode *inode,
226 +                            struct htree_lock *lck);
227
228  /* checksumming functions */
229  void initialize_dirent_tail(struct ext4_dir_entry_tail *t,
230 @@ -771,6 +784,227 @@ struct stats dx_show_entries(struct dx_h
231  }
232  #endif /* DX_DEBUG */
233
234 +/* private data for htree_lock */
235 +struct ext4_dir_lock_data {
236 +       unsigned                ld_flags;  /* bits-map for lock types */
237 +       unsigned                ld_count;  /* # entries of the last DX block */
238 +       struct dx_entry         ld_at_entry; /* copy of leaf dx_entry */
239 +       struct dx_entry         *ld_at;    /* position of leaf dx_entry */
240 +};
241 +
242 +#define ext4_htree_lock_data(l)        ((struct ext4_dir_lock_data *)(l)->lk_private)
243 +#define ext4_find_entry(dir, name, dirent, inline) \
244 +                       __ext4_find_entry(dir, name, dirent, inline, NULL)
245 +#define ext4_add_entry(handle, dentry, inode) \
246 +                       __ext4_add_entry(handle, dentry, inode, NULL)
247 +
248 +/* NB: ext4_lblk_t is 32 bits so we use high bits to identify invalid blk */
249 +#define EXT4_HTREE_NODE_CHANGED        (0xcafeULL << 32)
250 +
251 +static void ext4_htree_event_cb(void *target, void *event)
252 +{
253 +       u64 *block = (u64 *)target;
254 +
255 +       if (*block == dx_get_block((struct dx_entry *)event))
256 +               *block = EXT4_HTREE_NODE_CHANGED;
257 +}
258 +
259 +struct htree_lock_head *ext4_htree_lock_head_alloc(unsigned hbits)
260 +{
261 +       struct htree_lock_head *lhead;
262 +
263 +       lhead = htree_lock_head_alloc(EXT4_LK_MAX, hbits, 0);
264 +       if (lhead != NULL) {
265 +               htree_lock_event_attach(lhead, EXT4_LK_SPIN, HTREE_EVENT_WR,
266 +                                       ext4_htree_event_cb);
267 +       }
268 +       return lhead;
269 +}
270 +EXPORT_SYMBOL(ext4_htree_lock_head_alloc);
271 +
272 +struct htree_lock *ext4_htree_lock_alloc(void)
273 +{
274 +       return htree_lock_alloc(EXT4_LK_MAX,
275 +                               sizeof(struct ext4_dir_lock_data));
276 +}
277 +EXPORT_SYMBOL(ext4_htree_lock_alloc);
278 +
279 +static htree_lock_mode_t ext4_htree_mode(unsigned flags)
280 +{
281 +       switch (flags) {
282 +       default: /* 0 or unknown flags require EX lock */
283 +               return HTREE_LOCK_EX;
284 +       case EXT4_HLOCK_READDIR:
285 +               return HTREE_LOCK_PR;
286 +       case EXT4_HLOCK_LOOKUP:
287 +               return HTREE_LOCK_CR;
288 +       case EXT4_HLOCK_DEL:
289 +       case EXT4_HLOCK_ADD:
290 +               return HTREE_LOCK_CW;
291 +       }
292 +}
293 +
294 +/* return PR for read-only operations, otherwise return EX */
295 +static inline htree_lock_mode_t ext4_htree_safe_mode(unsigned flags)
296 +{
297 +       int writer = (flags & EXT4_LB_DE) == EXT4_LB_DE;
298 +
299 +       /* 0 requires EX lock */
300 +       return (flags == 0 || writer) ? HTREE_LOCK_EX : HTREE_LOCK_PR;
301 +}
302 +
303 +static int ext4_htree_safe_locked(struct htree_lock *lck)
304 +{
305 +       int writer;
306 +
307 +       if (lck == NULL || lck->lk_mode == HTREE_LOCK_EX)
308 +               return 1;
309 +
310 +       writer = (ext4_htree_lock_data(lck)->ld_flags & EXT4_LB_DE) ==
311 +                EXT4_LB_DE;
312 +       if (writer) /* all readers & writers are excluded? */
313 +               return lck->lk_mode == HTREE_LOCK_EX;
314 +
315 +       /* all writers are excluded? */
316 +       return lck->lk_mode == HTREE_LOCK_PR ||
317 +              lck->lk_mode == HTREE_LOCK_PW ||
318 +              lck->lk_mode == HTREE_LOCK_EX;
319 +}
320 +
321 +/* relock htree_lock with EX mode if it's change operation, otherwise
322 + * relock it with PR mode. It's noop if PDO is disabled. */
323 +static void ext4_htree_safe_relock(struct htree_lock *lck)
324 +{
325 +       if (!ext4_htree_safe_locked(lck)) {
326 +               unsigned flags = ext4_htree_lock_data(lck)->ld_flags;
327 +
328 +               htree_change_lock(lck, ext4_htree_safe_mode(flags));
329 +       }
330 +}
331 +
332 +void ext4_htree_lock(struct htree_lock *lck, struct htree_lock_head *lhead,
333 +                    struct inode *dir, unsigned flags)
334 +{
335 +       htree_lock_mode_t mode = is_dx(dir) ? ext4_htree_mode(flags) :
336 +                                             ext4_htree_safe_mode(flags);
337 +
338 +       ext4_htree_lock_data(lck)->ld_flags = flags;
339 +       htree_lock(lck, lhead, mode);
340 +       if (!is_dx(dir))
341 +               ext4_htree_safe_relock(lck); /* make sure it's safe locked */
342 +}
343 +EXPORT_SYMBOL(ext4_htree_lock);
344 +
345 +static int ext4_htree_node_lock(struct htree_lock *lck, struct dx_entry *at,
346 +                               unsigned lmask, int wait, void *ev)
347 +{
348 +       u32     key = (at == NULL) ? 0 : dx_get_block(at);
349 +       u32     mode;
350 +
351 +       /* NOOP if htree is well protected or caller doesn't require the lock */
352 +       if (ext4_htree_safe_locked(lck) ||
353 +          !(ext4_htree_lock_data(lck)->ld_flags & lmask))
354 +               return 1;
355 +
356 +       mode = (ext4_htree_lock_data(lck)->ld_flags & lmask) == lmask ?
357 +               HTREE_LOCK_PW : HTREE_LOCK_PR;
358 +       while (1) {
359 +               if (htree_node_lock_try(lck, mode, key, ffz(~lmask), wait, ev))
360 +                       return 1;
361 +               if (!(lmask & EXT4_LB_SPIN)) /* not a spinlock */
362 +                       return 0;
363 +               cpu_relax(); /* spin until granted */
364 +       }
365 +}
366 +
367 +static int ext4_htree_node_locked(struct htree_lock *lck, unsigned lmask)
368 +{
369 +       return ext4_htree_safe_locked(lck) ||
370 +              htree_node_is_granted(lck, ffz(~lmask));
371 +}
372 +
373 +static void ext4_htree_node_unlock(struct htree_lock *lck,
374 +                                  unsigned lmask, void *buf)
375 +{
376 +       /* NB: it's safe to call multiple times or even if it's not locked */
377 +       if (!ext4_htree_safe_locked(lck) &&
378 +            htree_node_is_granted(lck, ffz(~lmask)))
379 +               htree_node_unlock(lck, ffz(~lmask), buf);
380 +}
381 +
382 +#define ext4_htree_dx_lock(lck, key)           \
383 +       ext4_htree_node_lock(lck, key, EXT4_LB_DX, 1, NULL)
384 +#define ext4_htree_dx_lock_try(lck, key)       \
385 +       ext4_htree_node_lock(lck, key, EXT4_LB_DX, 0, NULL)
386 +#define ext4_htree_dx_unlock(lck)              \
387 +       ext4_htree_node_unlock(lck, EXT4_LB_DX, NULL)
388 +#define ext4_htree_dx_locked(lck)              \
389 +       ext4_htree_node_locked(lck, EXT4_LB_DX)
390 +
391 +static void ext4_htree_dx_need_lock(struct htree_lock *lck)
392 +{
393 +       struct ext4_dir_lock_data *ld;
394 +
395 +       if (ext4_htree_safe_locked(lck))
396 +               return;
397 +
398 +       ld = ext4_htree_lock_data(lck);
399 +       switch (ld->ld_flags) {
400 +       default:
401 +               return;
402 +       case EXT4_HLOCK_LOOKUP:
403 +               ld->ld_flags = EXT4_HLOCK_LOOKUP_SAFE;
404 +               return;
405 +       case EXT4_HLOCK_DEL:
406 +               ld->ld_flags = EXT4_HLOCK_DEL_SAFE;
407 +               return;
408 +       case EXT4_HLOCK_ADD:
409 +               ld->ld_flags = EXT4_HLOCK_SPLIT;
410 +               return;
411 +       }
412 +}
413 +
414 +#define ext4_htree_de_lock(lck, key)           \
415 +       ext4_htree_node_lock(lck, key, EXT4_LB_DE, 1, NULL)
416 +#define ext4_htree_de_unlock(lck)              \
417 +       ext4_htree_node_unlock(lck, EXT4_LB_DE, NULL)
418 +
419 +#define ext4_htree_spin_lock(lck, key, event)  \
420 +       ext4_htree_node_lock(lck, key, EXT4_LB_SPIN, 0, event)
421 +#define ext4_htree_spin_unlock(lck)            \
422 +       ext4_htree_node_unlock(lck, EXT4_LB_SPIN, NULL)
423 +#define ext4_htree_spin_unlock_listen(lck, p)  \
424 +       ext4_htree_node_unlock(lck, EXT4_LB_SPIN, p)
425 +
426 +static void ext4_htree_spin_stop_listen(struct htree_lock *lck)
427 +{
428 +       if (!ext4_htree_safe_locked(lck) &&
429 +           htree_node_is_listening(lck, ffz(~EXT4_LB_SPIN)))
430 +               htree_node_stop_listen(lck, ffz(~EXT4_LB_SPIN));
431 +}
432 +
433 +enum {
434 +       DX_HASH_COL_IGNORE,     /* ignore collision while probing frames */
435 +       DX_HASH_COL_YES,        /* there is collision and it does matter */
436 +       DX_HASH_COL_NO,         /* there is no collision */
437 +};
438 +
439 +static int dx_probe_hash_collision(struct htree_lock *lck,
440 +                                  struct dx_entry *entries,
441 +                                  struct dx_entry *at, u32 hash)
442 +{
443 +       if (!(lck && ext4_htree_lock_data(lck)->ld_flags & EXT4_LB_EXACT)) {
444 +               return DX_HASH_COL_IGNORE; /* don't care about collision */
445 +
446 +       } else if (at == entries + dx_get_count(entries) - 1) {
447 +               return DX_HASH_COL_IGNORE; /* not in any leaf of this DX */
448 +
449 +       } else { /* hash collision? */
450 +               return ((dx_get_hash(at + 1) & ~1) == hash) ?
451 +                       DX_HASH_COL_YES : DX_HASH_COL_NO;
452 +       }
453 +}
454 +
455  /*
456   * Probe for a directory leaf block to search.
457   *
458 @@ -782,10 +1016,11 @@ struct stats dx_show_entries(struct dx_h
459   */
460  static struct dx_frame *
461  dx_probe(struct ext4_filename *fname, struct inode *dir,
462 -        struct dx_hash_info *hinfo, struct dx_frame *frame_in)
463 +        struct dx_hash_info *hinfo, struct dx_frame *frame_in,
464 +        struct htree_lock *lck)
465  {
466         unsigned count, indirect, level, i;
467 -       struct dx_entry *at, *entries, *p, *q, *m;
468 +       struct dx_entry *at, *entries, *p, *q, *m, *dx = NULL;
469         struct dx_root_info *info;
470         struct dx_frame *frame = frame_in;
471         struct dx_frame *ret_err = ERR_PTR(ERR_BAD_DX_DIR);
472 @@ -851,8 +1086,15 @@ dx_probe(struct ext4_filename *fname, st
473         level = 0;
474         blocks[0] = 0;
475         while (1) {
476 +               if (indirect == 0) { /* the last index level */
477 +                       /* NB: ext4_htree_dx_lock() could be noop if
478 +                        * DX-lock flag is not set for current operation */
479 +                       ext4_htree_dx_lock(lck, dx);
480 +                       ext4_htree_spin_lock(lck, dx, NULL);
481 +               }
482                 count = dx_get_count(entries);
483 -               if (!count || count > dx_get_limit(entries)) {
484 +               if (count == 0 || count > dx_get_limit(entries)) {
485 +                       ext4_htree_spin_unlock(lck); /* release spin */
486                         ext4_warning_inode(dir,
487                                            "dx entry: count %u beyond limit %u",
488                                            count, dx_get_limit(entries));
489 @@ -901,8 +1143,70 @@ dx_probe(struct ext4_filename *fname, st
490                                 goto fail;
491                         }
492                 }
493 -               if (++level > indirect)
494 +
495 +               if (indirect == 0) { /* the last index level */
496 +                       struct ext4_dir_lock_data *ld;
497 +                       u64 myblock;
498 +
499 +                       /* By default we only lock DE-block, however, we will
500 +                        * also lock the last level DX-block if:
501 +                        * a) there is hash collision
502 +                        *    we will set DX-lock flag (a few lines below)
503 +                        *    and redo to lock DX-block
504 +                        *    see detail in dx_probe_hash_collision()
505 +                        * b) it's a retry from splitting
506 +                        *    we need to lock the last level DX-block so nobody
507 +                        *    else can split any leaf blocks under the same
508 +                        *    DX-block, see detail in ext4_dx_add_entry()
509 +                        */
510 +                       if (ext4_htree_dx_locked(lck)) {
511 +                               /* DX-block is locked, just lock DE-block
512 +                                * and return */
513 +                               ext4_htree_spin_unlock(lck);
514 +                               if (!ext4_htree_safe_locked(lck))
515 +                                       ext4_htree_de_lock(lck, frame->at);
516 +                               return frame;
517 +                       }
518 +                       /* it's pdirop and no DX lock */
519 +                       if (dx_probe_hash_collision(lck, entries, at, hash) ==
520 +                           DX_HASH_COL_YES) {
521 +                               /* found hash collision, set DX-lock flag
522 +                                * and retry to obtain DX-lock */
523 +                               ext4_htree_spin_unlock(lck);
524 +                               ext4_htree_dx_need_lock(lck);
525 +                               continue;
526 +                       }
527 +                       ld = ext4_htree_lock_data(lck);
528 +                       /* because I don't lock DX, so @at can't be trusted
529 +                        * after I release spinlock so I have to save it */
530 +                       ld->ld_at = at;
531 +                       ld->ld_at_entry = *at;
532 +                       ld->ld_count = dx_get_count(entries);
533 +
534 +                       frame->at = &ld->ld_at_entry;
535 +                       myblock = dx_get_block(at);
536 +
537 +                       /* NB: ordering locking */
538 +                       ext4_htree_spin_unlock_listen(lck, &myblock);
539 +                       /* other thread can split this DE-block because:
540 +                        * a) I don't have lock for the DE-block yet
541 +                        * b) I released spinlock on DX-block
542 +                        * if it happened I can detect it by listening
543 +                        * splitting event on this DE-block */
544 +                       ext4_htree_de_lock(lck, frame->at);
545 +                       ext4_htree_spin_stop_listen(lck);
546 +
547 +                       if (myblock == EXT4_HTREE_NODE_CHANGED) {
548 +                               /* someone split this DE-block before
549 +                                * I locked it, I need to retry and lock
550 +                                * valid DE-block */
551 +                               ext4_htree_de_unlock(lck);
552 +                               continue;
553 +                       }
554                         return frame;
555 +               }
556 +               dx = at;
557 +               indirect--;
558                 blocks[level] = block;
559                 frame++;
560                 frame->bh = ext4_read_dirblock(dir, block, INDEX);
561 @@ -973,7 +1277,7 @@ static void dx_release(struct dx_frame *
562  static int ext4_htree_next_block(struct inode *dir, __u32 hash,
563                                  struct dx_frame *frame,
564                                  struct dx_frame *frames,
565 -                                __u32 *start_hash)
566 +                                __u32 *start_hash, struct htree_lock *lck)
567  {
568         struct dx_frame *p;
569         struct buffer_head *bh;
570 @@ -988,12 +1292,22 @@ static int ext4_htree_next_block(struct
571          * this loop, num_frames indicates the number of interior
572          * nodes need to be read.
573          */
574 +       ext4_htree_de_unlock(lck);
575         while (1) {
576 -               if (++(p->at) < p->entries + dx_get_count(p->entries))
577 -                       break;
578 +               if (num_frames > 0 || ext4_htree_dx_locked(lck)) {
579 +                       /* num_frames > 0 :
580 +                        *   DX block
581 +                        * ext4_htree_dx_locked:
582 +                        *   frame->at is reliable pointer returned by dx_probe,
583 +                        *   otherwise dx_probe already knew no collision */
584 +                       if (++(p->at) < p->entries + dx_get_count(p->entries))
585 +                               break;
586 +               }
587                 if (p == frames)
588                         return 0;
589                 num_frames++;
590 +               if (num_frames == 1)
591 +                       ext4_htree_dx_unlock(lck);
592                 p--;
593         }
594
595 @@ -1016,6 +1330,13 @@ static int ext4_htree_next_block(struct
596          * block so no check is necessary
597          */
598         while (num_frames--) {
599 +               if (num_frames == 0) {
600 +                       /* it's not always necessary, we just don't want to
601 +                        * detect hash collision again */
602 +                       ext4_htree_dx_need_lock(lck);
603 +                       ext4_htree_dx_lock(lck, p->at);
604 +               }
605 +
606                 bh = ext4_read_dirblock(dir, dx_get_block(p->at), INDEX);
607                 if (IS_ERR(bh))
608                         return PTR_ERR(bh);
609 @@ -1024,6 +1345,7 @@ static int ext4_htree_next_block(struct
610                 p->bh = bh;
611                 p->at = p->entries = ((struct dx_node *) bh->b_data)->entries;
612         }
613 +       ext4_htree_de_lock(lck, p->at);
614         return 1;
615  }
616
617 @@ -1171,10 +1493,10 @@ int ext4_htree_fill_tree(struct file *di
618         }
619         hinfo.hash = start_hash;
620         hinfo.minor_hash = 0;
621 -       frame = dx_probe(NULL, dir, &hinfo, frames);
622 +       /* assume it's PR locked */
623 +       frame = dx_probe(NULL, dir, &hinfo, frames, NULL);
624         if (IS_ERR(frame))
625                 return PTR_ERR(frame);
626 -
627         /* Add '.' and '..' from the htree header */
628         if (!start_hash && !start_minor_hash) {
629                 de = (struct ext4_dir_entry_2 *) frames[0].bh->b_data;
630 @@ -1214,7 +1536,7 @@ int ext4_htree_fill_tree(struct file *di
631                 count += ret;
632                 hashval = ~0;
633                 ret = ext4_htree_next_block(dir, HASH_NB_ALWAYS,
634 -                                           frame, frames, &hashval);
635 +                                           frame, frames, &hashval, NULL);
636                 *next_hash = hashval;
637                 if (ret < 0) {
638                         err = ret;
639 @@ -1413,10 +1735,10 @@ static int is_dx_internal_node(struct in
640   * The returned buffer_head has ->b_count elevated.  The caller is expected
641   * to brelse() it when appropriate.
642   */
643 -static struct buffer_head * ext4_find_entry (struct inode *dir,
644 +struct buffer_head *__ext4_find_entry(struct inode *dir,
645                                         const struct qstr *d_name,
646                                         struct ext4_dir_entry_2 **res_dir,
647 -                                       int *inlined)
648 +                                       int *inlined, struct htree_lock *lck)
649  {
650         struct super_block *sb;
651         struct buffer_head *bh_use[NAMEI_RA_SIZE];
652 @@ -1465,7 +1787,7 @@ static struct buffer_head * ext4_find_en
653                 goto restart;
654         }
655         if (is_dx(dir)) {
656 -               ret = ext4_dx_find_entry(dir, &fname, res_dir);
657 +               ret = ext4_dx_find_entry(dir, &fname, res_dir, lck);
658                 /*
659                  * On success, or if the error was file not found,
660                  * return.  Otherwise, fall back to doing a search the
661 @@ -1475,6 +1797,7 @@ static struct buffer_head * ext4_find_en
662                         goto cleanup_and_exit;
663                 dxtrace(printk(KERN_DEBUG "ext4_find_entry: dx failed, "
664                                "falling back\n"));
665 +               ext4_htree_safe_relock(lck);
666                 ret = NULL;
667         }
668         nblocks = dir->i_size >> EXT4_BLOCK_SIZE_BITS(sb);
669 @@ -1566,10 +1889,12 @@ cleanup_and_exit:
670         ext4_fname_free_filename(&fname);
671         return ret;
672  }
673 +EXPORT_SYMBOL(__ext4_find_entry);
674
675  static struct buffer_head * ext4_dx_find_entry(struct inode *dir,
676                         struct ext4_filename *fname,
677 -                       struct ext4_dir_entry_2 **res_dir)
678 +                       struct ext4_dir_entry_2 **res_dir,
679 +                       struct htree_lock *lck)
680  {
681         struct super_block * sb = dir->i_sb;
682         struct dx_frame frames[EXT4_HTREE_LEVEL], *frame;
683 @@ -1580,7 +1905,7 @@ static struct buffer_head * ext4_dx_find
684  #ifdef CONFIG_EXT4_FS_ENCRYPTION
685         *res_dir = NULL;
686  #endif
687 -       frame = dx_probe(fname, dir, NULL, frames);
688 +       frame = dx_probe(fname, dir, NULL, frames, lck);
689         if (IS_ERR(frame))
690                 return (struct buffer_head *) frame;
691         do {
692 @@ -1602,7 +1927,7 @@ static struct buffer_head * ext4_dx_find
693
694                 /* Check to see if we should continue to search */
695                 retval = ext4_htree_next_block(dir, fname->hinfo.hash, frame,
696 -                                              frames, NULL);
697 +                                              frames, NULL, lck);
698                 if (retval < 0) {
699                         ext4_warning_inode(dir,
700                                 "error %d reading directory index block",
701 @@ -1777,8 +2102,9 @@ static struct ext4_dir_entry_2* dx_pack_
702   * Returns pointer to de in block into which the new entry will be inserted.
703   */
704  static struct ext4_dir_entry_2 *do_split(handle_t *handle, struct inode *dir,
705 -                       struct buffer_head **bh,struct dx_frame *frame,
706 -                       struct dx_hash_info *hinfo)
707 +                       struct buffer_head **bh, struct dx_frame *frames,
708 +                       struct dx_frame *frame, struct dx_hash_info *hinfo,
709 +                       struct htree_lock *lck)
710  {
711         unsigned blocksize = dir->i_sb->s_blocksize;
712         unsigned continued;
713 @@ -1854,8 +2180,14 @@ static struct ext4_dir_entry_2 *do_split
714                                         hash2, split, count-split));
715
716         /* Fancy dance to stay within two buffers */
717 -       de2 = dx_move_dirents(data1, data2, map + split, count - split,
718 -                             blocksize);
719 +       if (hinfo->hash < hash2) {
720 +               de2 = dx_move_dirents(data1, data2, map + split,
721 +                                     count - split, blocksize);
722 +       } else {
723 +               /* make sure we will add the entry to the same
724 +                * block which we have already locked */
725 +               de2 = dx_move_dirents(data1, data2, map, split, blocksize);
726 +       }
727         de = dx_pack_dirents(data1, blocksize);
728         de->rec_len = ext4_rec_len_to_disk(data1 + (blocksize - csum_size) -
729                                            (char *) de,
730 @@ -1876,12 +2208,21 @@ static struct ext4_dir_entry_2 *do_split
731         dxtrace(dx_show_leaf(dir, hinfo, (struct ext4_dir_entry_2 *) data2,
732                         blocksize, 1));
733
734 -       /* Which block gets the new entry? */
735 -       if (hinfo->hash >= hash2) {
736 -               swap(*bh, bh2);
737 -               de = de2;
738 +       ext4_htree_spin_lock(lck, frame > frames ? (frame - 1)->at : NULL,
739 +                            frame->at); /* notify block is being split */
740 +       if (hinfo->hash < hash2) {
741 +               dx_insert_block(frame, hash2 + continued, newblock);
742 +
743 +       } else {
744 +               /* switch block number */
745 +               dx_insert_block(frame, hash2 + continued,
746 +                               dx_get_block(frame->at));
747 +               dx_set_block(frame->at, newblock);
748 +               (frame->at)++;
749         }
750 -       dx_insert_block(frame, hash2 + continued, newblock);
751 +       ext4_htree_spin_unlock(lck);
752 +       ext4_htree_dx_unlock(lck);
753 +
754         err = ext4_handle_dirty_dirent_node(handle, dir, bh2);
755         if (err)
756                 goto journal_error;
757 @@ -2155,7 +2496,7 @@ static int make_indexed_dir(handle_t *ha
758         if (retval)
759                 goto out_frames;
760
761 -       de = do_split(handle,dir, &bh2, frame, &fname->hinfo);
762 +       de = do_split(handle, dir, &bh2, frames, frame, &fname->hinfo, NULL);
763         if (IS_ERR(de)) {
764                 retval = PTR_ERR(de);
765                 goto out_frames;
766 @@ -2265,8 +2606,8 @@ out:
767   * may not sleep between calling this and putting something into
768   * the entry, as someone else might have used it while you slept.
769   */
770 -static int ext4_add_entry(handle_t *handle, struct dentry *dentry,
771 -                         struct inode *inode)
772 +int __ext4_add_entry(handle_t *handle, struct dentry *dentry,
773 +                     struct inode *inode, struct htree_lock *lck)
774  {
775         struct inode *dir = d_inode(dentry->d_parent);
776         struct buffer_head *bh = NULL;
777 @@ -2307,9 +2648,10 @@ static int ext4_add_entry(handle_t *hand
778                 if (dentry->d_name.len == 2 &&
779                     memcmp(dentry->d_name.name, "..", 2) == 0)
780                         return ext4_update_dotdot(handle, dentry, inode);
781 -               retval = ext4_dx_add_entry(handle, &fname, dir, inode);
782 +               retval = ext4_dx_add_entry(handle, &fname, dir, inode, lck);
783                 if (!retval || (retval != ERR_BAD_DX_DIR))
784                         goto out;
785 +               ext4_htree_safe_relock(lck);
786                 /* Can we just ignore htree data? */
787                 if (ext4_has_metadata_csum(sb)) {
788                         EXT4_ERROR_INODE(dir,
789 @@ -2372,12 +2714,14 @@ out:
790                 ext4_set_inode_state(inode, EXT4_STATE_NEWENTRY);
791         return retval;
792  }
793 +EXPORT_SYMBOL(__ext4_add_entry);
794
795  /*
796   * Returns 0 for success, or a negative error value
797   */
798  static int ext4_dx_add_entry(handle_t *handle, struct ext4_filename *fname,
799 -                            struct inode *dir, struct inode *inode)
800 +                            struct inode *dir, struct inode *inode,
801 +                            struct htree_lock *lck)
802  {
803         struct dx_frame frames[EXT4_HTREE_LEVEL], *frame;
804         struct dx_entry *entries, *at;
805 @@ -2389,7 +2733,7 @@ static int ext4_dx_add_entry(handle_t *h
806
807  again:
808         restart = 0;
809 -       frame = dx_probe(fname, dir, NULL, frames);
810 +       frame = dx_probe(fname, dir, NULL, frames, lck);
811         if (IS_ERR(frame))
812                 return PTR_ERR(frame);
813         entries = frame->entries;
814 @@ -2424,6 +2768,11 @@ again:
815                 struct dx_node *node2;
816                 struct buffer_head *bh2;
817
818 +               if (!ext4_htree_safe_locked(lck)) { /* retry with EX lock */
819 +                       ext4_htree_safe_relock(lck);
820 +                       restart = 1;
821 +                       goto cleanup;
822 +               }
823                 while (frame > frames) {
824                         if (dx_get_count((frame - 1)->entries) <
825                             dx_get_limit((frame - 1)->entries)) {
826 @@ -2525,8 +2874,32 @@ again:
827                         restart = 1;
828                         goto journal_error;
829                 }
830 +       } else if (!ext4_htree_dx_locked(lck)) {
831 +               struct ext4_dir_lock_data *ld = ext4_htree_lock_data(lck);
832 +
833 +               /* not well protected, require DX lock */
834 +               ext4_htree_dx_need_lock(lck);
835 +               at = frame > frames ? (frame - 1)->at : NULL;
836 +
837 +               /* NB: no risk of deadlock because it's just a try.
838 +                *
839 +                * NB: we check ld_count twice, the first time before
840 +                * holding the DX lock, the second time after acquiring it.
841 +                *
842 +                * NB: we never free blocks of a directory so far, which
843 +                * means the value returned by dx_get_count() should equal
844 +                * ld->ld_count if nobody has split any DE-block under @at,
845 +                * and ld->ld_at still points to a valid dx_entry. */
846 +               if ((ld->ld_count != dx_get_count(entries)) ||
847 +                   !ext4_htree_dx_lock_try(lck, at) ||
848 +                   (ld->ld_count != dx_get_count(entries))) {
849 +                       restart = 1;
850 +                       goto cleanup;
851 +               }
852 +               /* OK, I've got DX lock and nothing changed */
853 +               frame->at = ld->ld_at;
854         }
855 -       de = do_split(handle, dir, &bh, frame, &fname->hinfo);
856 +       de = do_split(handle, dir, &bh, frames, frame, &fname->hinfo, lck);
857         if (IS_ERR(de)) {
858                 err = PTR_ERR(de);
859                 goto cleanup;
860 @@ -2537,6 +2910,8 @@ again:
861  journal_error:
862         ext4_std_error(dir->i_sb, err); /* this is a no-op if err == 0 */
863  cleanup:
864 +       ext4_htree_dx_unlock(lck);
865 +       ext4_htree_de_unlock(lck);
866         brelse(bh);
867         dx_release(frames);
868         /* @restart is true means htree-path has been changed, we need to
869 Index: linux-4.18.0-423.el8/fs/ext4/super.c
870 ===================================================================
871 --- linux-4.18.0-423.el8.orig/fs/ext4/super.c
872 +++ linux-4.18.0-423.el8/fs/ext4/super.c
873 @@ -1136,6 +1136,7 @@ static struct inode *ext4_alloc_inode(st
874
875         inode_set_iversion(&ei->vfs_inode, 1);
876         spin_lock_init(&ei->i_raw_lock);
877 +       sema_init(&ei->i_append_sem, 1);
878         INIT_LIST_HEAD(&ei->i_prealloc_list);
879         spin_lock_init(&ei->i_prealloc_lock);
880         ext4_es_init_tree(&ei->i_es_tree);