Whamcloud - gitweb
LU-17599 ldiskfs: restore ldiskfs patch attribution
[fs/lustre-release.git] / ldiskfs / kernel_patches / patches / ubuntu20 / ext4-pdirop.patch
1 LU-50 ldiskfs: pdirops patch for ldiskfs
2
3 Single directory performance is critical for HPC workloads. In a
4 typical use case an application creates a separate output file for
5 each node and task in a job. As nodes and tasks increase, hundreds
6 of thousands of files may be created in a single directory within
7 a short window of time.
8 Today, both filename lookup and file system modifying operations
9 (such as create and unlink) are protected with a single lock for
10 an entire ldiskfs directory. PDO project will remove this
11 bottleneck by introducing a parallel locking mechanism for entire
12 ldiskfs directories. This work will enable multiple application
13 threads to simultaneously lookup, create and unlink in parallel.
14
15 This patch contains:
16   - pdirops support for ldiskfs
17   - N-level htree directory
18   - integrate with osd-ldiskfs
19
20 Signed-off-by: Liang Zhen <liang@whamcloud.com>
21 Change-Id: I269c0e3112e68f3acd79e860dab052a68c7d7aaa
22 Reviewed-on: http://review.whamcloud.com/375
23 Reviewed-by: Andreas Dilger <adilger@whamcloud.com>
24 ---
25  fs/ext4/Makefile           |   1 +
26  fs/ext4/ext4.h             |  78 ++++
27  fs/ext4/htree_lock.c       | 891 +++++++++++++++++++++++++++++++++++++
28  fs/ext4/namei.c            | 454 +++++++++++++++++--
29  fs/ext4/super.c            |   1 +
30  include/linux/htree_lock.h | 187 ++++++++
31  6 files changed, 1572 insertions(+), 40 deletions(-)
32  create mode 100644 fs/ext4/htree_lock.c
33  create mode 100644 include/linux/htree_lock.h
34
35 diff -wur a/fs/ext4/ext4.h b/fs/ext4/ext4.h
36 --- a/fs/ext4/ext4.h    2020-08-30 12:06:02.782523259 -0600
37 +++ b/fs/ext4/ext4.h    2020-08-30 12:09:18.997212399 -0600
38 @@ -29,6 +29,7 @@
39  #include <linux/timer.h>
40  #include <linux/version.h>
41  #include <linux/wait.h>
42 +#include <linux/htree_lock.h>
43  #include <linux/sched/signal.h>
44  #include <linux/blockgroup_lock.h>
45  #include <linux/percpu_counter.h>
46 @@ -961,6 +962,9 @@
47         __u32   i_dtime;
48         ext4_fsblk_t    i_file_acl;
49
50 +       /* following fields for parallel directory operations -bzzz */
51 +       struct semaphore i_append_sem;
52 +
53         /*
54          * i_block_group is the number of the block group which contains
55          * this file's inode.  Constant across the lifetime of the inode,
56 @@ -2206,6 +2210,72 @@
57   */
58  #define HASH_NB_ALWAYS         1
59  
60 +/* assume name-hash is protected by upper layer */
61 +#define EXT4_HTREE_LOCK_HASH   0
62 +
63 +enum ext4_pdo_lk_types {
64 +#if EXT4_HTREE_LOCK_HASH
65 +       EXT4_LK_HASH,
66 +#endif
67 +       EXT4_LK_DX,             /* index block */
68 +       EXT4_LK_DE,             /* directory entry block */
69 +       EXT4_LK_SPIN,           /* spinlock */
70 +       EXT4_LK_MAX,
71 +};
72 +
73 +/* read-only bit */
74 +#define EXT4_LB_RO(b)          (1 << (b))
75 +/* read + write, high bits for writer */
76 +#define EXT4_LB_RW(b)          ((1 << (b)) | (1 << (EXT4_LK_MAX + (b))))
77 +
78 +enum ext4_pdo_lock_bits {
79 +       /* DX lock bits */
80 +       EXT4_LB_DX_RO           = EXT4_LB_RO(EXT4_LK_DX),
81 +       EXT4_LB_DX              = EXT4_LB_RW(EXT4_LK_DX),
82 +       /* DE lock bits */
83 +       EXT4_LB_DE_RO           = EXT4_LB_RO(EXT4_LK_DE),
84 +       EXT4_LB_DE              = EXT4_LB_RW(EXT4_LK_DE),
85 +       /* DX spinlock bits */
86 +       EXT4_LB_SPIN_RO         = EXT4_LB_RO(EXT4_LK_SPIN),
87 +       EXT4_LB_SPIN            = EXT4_LB_RW(EXT4_LK_SPIN),
88 +       /* accurate searching */
89 +       EXT4_LB_EXACT           = EXT4_LB_RO(EXT4_LK_MAX << 1),
90 +};
91 +
92 +enum ext4_pdo_lock_opc {
93 +       /* external */
94 +       EXT4_HLOCK_READDIR      = (EXT4_LB_DE_RO | EXT4_LB_DX_RO),
95 +       EXT4_HLOCK_LOOKUP       = (EXT4_LB_DE_RO | EXT4_LB_SPIN_RO |
96 +                                  EXT4_LB_EXACT),
97 +       EXT4_HLOCK_DEL          = (EXT4_LB_DE | EXT4_LB_SPIN_RO |
98 +                                  EXT4_LB_EXACT),
99 +       EXT4_HLOCK_ADD          = (EXT4_LB_DE | EXT4_LB_SPIN_RO),
100 +
101 +       /* internal */
102 +       EXT4_HLOCK_LOOKUP_SAFE  = (EXT4_LB_DE_RO | EXT4_LB_DX_RO |
103 +                                  EXT4_LB_EXACT),
104 +       EXT4_HLOCK_DEL_SAFE     = (EXT4_LB_DE | EXT4_LB_DX_RO | EXT4_LB_EXACT),
105 +       EXT4_HLOCK_SPLIT        = (EXT4_LB_DE | EXT4_LB_DX | EXT4_LB_SPIN),
106 +};
107 +
108 +extern struct htree_lock_head *ext4_htree_lock_head_alloc(unsigned hbits);
109 +#define ext4_htree_lock_head_free(lhead)       htree_lock_head_free(lhead)
110 +
111 +extern struct htree_lock *ext4_htree_lock_alloc(void);
112 +#define ext4_htree_lock_free(lck)              htree_lock_free(lck)
113 +
114 +extern void ext4_htree_lock(struct htree_lock *lck,
115 +                           struct htree_lock_head *lhead,
116 +                           struct inode *dir, unsigned flags);
117 +#define ext4_htree_unlock(lck)                  htree_unlock(lck)
118 +
119 +extern struct buffer_head *ext4_find_entry_locked(struct inode *dir,
120 +                                       const struct qstr *d_name,
121 +                                       struct ext4_dir_entry_2 **res_dir,
122 +                                       int *inlined, struct htree_lock *lck);
123 +extern int ext4_add_entry_locked(handle_t *handle, struct dentry *dentry,
124 +                     struct inode *inode, struct htree_lock *lck);
125 +
126  struct ext4_filename {
127         const struct qstr *usr_fname;
128         struct fscrypt_str disk_name;
129 @@ -2573,11 +2643,20 @@
130                         struct ext4_filename *fname, void *data);
131  static inline void ext4_update_dx_flag(struct inode *inode)
132  {
133 +       /* Disable it for ldiskfs, because going from a DX directory to
134 +        * a non-DX directory while it is in use will completely break
135 +        * the htree-locking.
136 +        * If we really want to support this operation in the future,
137 +        * we need to exclusively lock the directory at here which will
138 +        * increase complexity of code
139 +        */
140 +#if 0
141         if (!ext4_has_feature_dir_index(inode->i_sb)) {
142                 /* ext4_iget() should have caught this... */
143                 WARN_ON_ONCE(ext4_has_feature_metadata_csum(inode->i_sb));
144                 ext4_clear_inode_flag(inode, EXT4_INODE_INDEX);
145         }
146 +#endif
147  }
148  static const unsigned char ext4_filetype_table[] = {
149         DT_UNKNOWN, DT_REG, DT_DIR, DT_CHR, DT_BLK, DT_FIFO, DT_SOCK, DT_LNK
150 diff -wur a/fs/ext4/Makefile b/fs/ext4/Makefile
151 --- a/fs/ext4/Makefile  2020-08-30 12:06:02.378525933 -0600
152 +++ b/fs/ext4/Makefile  2020-08-30 12:07:32.337927838 -0600
153 @@ -7,6 +7,7 @@
154  
155  ext4-y := balloc.o bitmap.o block_validity.o dir.o ext4_jbd2.o extents.o \
156                 extents_status.o file.o fsmap.o fsync.o hash.o ialloc.o \
157 +               htree_lock.o \
158                 indirect.o inline.o inode.o ioctl.o mballoc.o migrate.o \
159                 mmp.o move_extent.o namei.o page-io.o readpage.o resize.o \
160                 super.o symlink.o sysfs.o xattr.o xattr_trusted.o xattr_user.o
161 diff -wur a/fs/ext4/namei.c b/fs/ext4/namei.c
162 --- a/fs/ext4/namei.c   2020-08-30 12:06:02.746523498 -0600
163 +++ b/fs/ext4/namei.c   2020-08-30 12:11:25.136359125 -0600
164 @@ -55,6 +55,7 @@
165                                         ext4_lblk_t *block)
166  {
167         struct buffer_head *bh;
168 +       struct ext4_inode_info *ei = EXT4_I(inode);
169         int err;
170  
171         if (unlikely(EXT4_SB(inode->i_sb)->s_max_dir_size_kb &&
172 @@ -62,15 +63,22 @@
173                       EXT4_SB(inode->i_sb)->s_max_dir_size_kb)))
174                 return ERR_PTR(-ENOSPC);
175  
176 +       /* with parallel dir operations all appends
177 +       * have to be serialized -bzzz */
178 +       down(&ei->i_append_sem);
179 +
180         *block = inode->i_size >> inode->i_sb->s_blocksize_bits;
181  
182         bh = ext4_bread(handle, inode, *block, EXT4_GET_BLOCKS_CREATE);
183 -       if (IS_ERR(bh))
184 +       if (IS_ERR(bh)) {
185 +               up(&ei->i_append_sem);
186                 return bh;
187 +       }
188         inode->i_size += inode->i_sb->s_blocksize;
189         EXT4_I(inode)->i_disksize = inode->i_size;
190         BUFFER_TRACE(bh, "get_write_access");
191         err = ext4_journal_get_write_access(handle, bh);
192 +       up(&ei->i_append_sem);
193         if (err) {
194                 brelse(bh);
195                 ext4_std_error(inode->i_sb, err);
196 @@ -264,7 +272,8 @@
197  static struct dx_frame *dx_probe(struct ext4_filename *fname,
198                                  struct inode *dir,
199                                  struct dx_hash_info *hinfo,
200 -                                struct dx_frame *frame);
201 +                                struct dx_frame *frame,
202 +                                struct htree_lock *lck);
203  static void dx_release(struct dx_frame *frames);
204  static int dx_make_map(struct inode *dir, struct ext4_dir_entry_2 *de,
205                        unsigned blocksize, struct dx_hash_info *hinfo,
206 @@ -278,12 +287,13 @@
207  static int ext4_htree_next_block(struct inode *dir, __u32 hash,
208                                  struct dx_frame *frame,
209                                  struct dx_frame *frames,
210 -                                __u32 *start_hash);
211 +                                __u32 *start_hash, struct htree_lock *lck);
212  static struct buffer_head * ext4_dx_find_entry(struct inode *dir,
213                 struct ext4_filename *fname,
214 -               struct ext4_dir_entry_2 **res_dir);
215 +               struct ext4_dir_entry_2 **res_dir, struct htree_lock *lck);
216  static int ext4_dx_add_entry(handle_t *handle, struct ext4_filename *fname,
217 -                            struct inode *dir, struct inode *inode);
218 +                            struct inode *dir, struct inode *inode,
219 +                            struct htree_lock *lck);
220  
221  /* checksumming functions */
222  void ext4_initialize_dirent_tail(struct buffer_head *bh,
223 @@ -748,6 +758,227 @@
224  }
225  #endif /* DX_DEBUG */
226  
227 +/* private data for htree_lock */
228 +struct ext4_dir_lock_data {
229 +       unsigned                ld_flags;  /* bits-map for lock types */
230 +       unsigned                ld_count;  /* # entries of the last DX block */
231 +       struct dx_entry         ld_at_entry; /* copy of leaf dx_entry */
232 +       struct dx_entry         *ld_at;    /* position of leaf dx_entry */
233 +};
234 +
235 +#define ext4_htree_lock_data(l)        ((struct ext4_dir_lock_data *)(l)->lk_private)
236 +#define ext4_find_entry(dir, name, dirent, inline) \
237 +                       ext4_find_entry_locked(dir, name, dirent, inline, NULL)
238 +#define ext4_add_entry(handle, dentry, inode) \
239 +                       ext4_add_entry_locked(handle, dentry, inode, NULL)
240 +
241 +/* NB: ext4_lblk_t is 32 bits so we use high bits to identify invalid blk */
242 +#define EXT4_HTREE_NODE_CHANGED        (0xcafeULL << 32)
243 +
244 +static void ext4_htree_event_cb(void *target, void *event)
245 +{
246 +       u64 *block = (u64 *)target;
247 +
248 +       if (*block == dx_get_block((struct dx_entry *)event))
249 +               *block = EXT4_HTREE_NODE_CHANGED;
250 +}
251 +
252 +struct htree_lock_head *ext4_htree_lock_head_alloc(unsigned hbits)
253 +{
254 +       struct htree_lock_head *lhead;
255 +
256 +       lhead = htree_lock_head_alloc(EXT4_LK_MAX, hbits, 0);
257 +       if (lhead != NULL) {
258 +               htree_lock_event_attach(lhead, EXT4_LK_SPIN, HTREE_EVENT_WR,
259 +                                       ext4_htree_event_cb);
260 +       }
261 +       return lhead;
262 +}
263 +EXPORT_SYMBOL(ext4_htree_lock_head_alloc);
264 +
265 +struct htree_lock *ext4_htree_lock_alloc(void)
266 +{
267 +       return htree_lock_alloc(EXT4_LK_MAX,
268 +                               sizeof(struct ext4_dir_lock_data));
269 +}
270 +EXPORT_SYMBOL(ext4_htree_lock_alloc);
271 +
272 +static htree_lock_mode_t ext4_htree_mode(unsigned flags)
273 +{
274 +       switch (flags) {
275 +       default: /* 0 or unknown flags require EX lock */
276 +               return HTREE_LOCK_EX;
277 +       case EXT4_HLOCK_READDIR:
278 +               return HTREE_LOCK_PR;
279 +       case EXT4_HLOCK_LOOKUP:
280 +               return HTREE_LOCK_CR;
281 +       case EXT4_HLOCK_DEL:
282 +       case EXT4_HLOCK_ADD:
283 +               return HTREE_LOCK_CW;
284 +       }
285 +}
286 +
287 +/* return PR for read-only operations, otherwise return EX */
288 +static inline htree_lock_mode_t ext4_htree_safe_mode(unsigned flags)
289 +{
290 +       int writer = (flags & EXT4_LB_DE) == EXT4_LB_DE;
291 +
292 +       /* 0 requires EX lock */
293 +       return (flags == 0 || writer) ? HTREE_LOCK_EX : HTREE_LOCK_PR;
294 +}
295 +
296 +static int ext4_htree_safe_locked(struct htree_lock *lck)
297 +{
298 +       int writer;
299 +
300 +       if (lck == NULL || lck->lk_mode == HTREE_LOCK_EX)
301 +               return 1;
302 +
303 +       writer = (ext4_htree_lock_data(lck)->ld_flags & EXT4_LB_DE) ==
304 +                EXT4_LB_DE;
305 +       if (writer) /* all readers & writers are excluded? */
306 +               return lck->lk_mode == HTREE_LOCK_EX;
307 +
308 +       /* all writers are excluded? */
309 +       return lck->lk_mode == HTREE_LOCK_PR ||
310 +              lck->lk_mode == HTREE_LOCK_PW ||
311 +              lck->lk_mode == HTREE_LOCK_EX;
312 +}
313 +
314 +/* relock htree_lock with EX mode if it's a change operation, otherwise
315 + * relock it with PR mode. It's noop if PDO is disabled. */
316 +static void ext4_htree_safe_relock(struct htree_lock *lck)
317 +{
318 +       if (!ext4_htree_safe_locked(lck)) {
319 +               unsigned flags = ext4_htree_lock_data(lck)->ld_flags;
320 +
321 +               htree_change_lock(lck, ext4_htree_safe_mode(flags));
322 +       }
323 +}
324 +
325 +void ext4_htree_lock(struct htree_lock *lck, struct htree_lock_head *lhead,
326 +                    struct inode *dir, unsigned flags)
327 +{
328 +       htree_lock_mode_t mode = is_dx(dir) ? ext4_htree_mode(flags) :
329 +                                             ext4_htree_safe_mode(flags);
330 +
331 +       ext4_htree_lock_data(lck)->ld_flags = flags;
332 +       htree_lock(lck, lhead, mode);
333 +       if (!is_dx(dir))
334 +               ext4_htree_safe_relock(lck); /* make sure it's safe locked */
335 +}
336 +EXPORT_SYMBOL(ext4_htree_lock);
337 +
338 +static int ext4_htree_node_lock(struct htree_lock *lck, struct dx_entry *at,
339 +                               unsigned lmask, int wait, void *ev)
340 +{
341 +       u32     key = (at == NULL) ? 0 : dx_get_block(at);
342 +       u32     mode;
343 +
344 +       /* NOOP if htree is well protected or caller doesn't require the lock */
345 +       if (ext4_htree_safe_locked(lck) ||
346 +          !(ext4_htree_lock_data(lck)->ld_flags & lmask))
347 +               return 1;
348 +
349 +       mode = (ext4_htree_lock_data(lck)->ld_flags & lmask) == lmask ?
350 +               HTREE_LOCK_PW : HTREE_LOCK_PR;
351 +       while (1) {
352 +               if (htree_node_lock_try(lck, mode, key, ffz(~lmask), wait, ev))
353 +                       return 1;
354 +               if (!(lmask & EXT4_LB_SPIN)) /* not a spinlock */
355 +                       return 0;
356 +               cpu_relax(); /* spin until granted */
357 +       }
358 +}
359 +
360 +static int ext4_htree_node_locked(struct htree_lock *lck, unsigned lmask)
361 +{
362 +       return ext4_htree_safe_locked(lck) ||
363 +              htree_node_is_granted(lck, ffz(~lmask));
364 +}
365 +
366 +static void ext4_htree_node_unlock(struct htree_lock *lck,
367 +                                  unsigned lmask, void *buf)
368 +{
369 +       /* NB: it's safe to call multiple times even if it's not locked */
370 +       if (!ext4_htree_safe_locked(lck) &&
371 +            htree_node_is_granted(lck, ffz(~lmask)))
372 +               htree_node_unlock(lck, ffz(~lmask), buf);
373 +}
374 +
375 +#define ext4_htree_dx_lock(lck, key)           \
376 +       ext4_htree_node_lock(lck, key, EXT4_LB_DX, 1, NULL)
377 +#define ext4_htree_dx_lock_try(lck, key)       \
378 +       ext4_htree_node_lock(lck, key, EXT4_LB_DX, 0, NULL)
379 +#define ext4_htree_dx_unlock(lck)              \
380 +       ext4_htree_node_unlock(lck, EXT4_LB_DX, NULL)
381 +#define ext4_htree_dx_locked(lck)              \
382 +       ext4_htree_node_locked(lck, EXT4_LB_DX)
383 +
384 +static void ext4_htree_dx_need_lock(struct htree_lock *lck)
385 +{
386 +       struct ext4_dir_lock_data *ld;
387 +
388 +       if (ext4_htree_safe_locked(lck))
389 +               return;
390 +
391 +       ld = ext4_htree_lock_data(lck);
392 +       switch (ld->ld_flags) {
393 +       default:
394 +               return;
395 +       case EXT4_HLOCK_LOOKUP:
396 +               ld->ld_flags = EXT4_HLOCK_LOOKUP_SAFE;
397 +               return;
398 +       case EXT4_HLOCK_DEL:
399 +               ld->ld_flags = EXT4_HLOCK_DEL_SAFE;
400 +               return;
401 +       case EXT4_HLOCK_ADD:
402 +               ld->ld_flags = EXT4_HLOCK_SPLIT;
403 +               return;
404 +       }
405 +}
406 +
407 +#define ext4_htree_de_lock(lck, key)           \
408 +       ext4_htree_node_lock(lck, key, EXT4_LB_DE, 1, NULL)
409 +#define ext4_htree_de_unlock(lck)              \
410 +       ext4_htree_node_unlock(lck, EXT4_LB_DE, NULL)
411 +
412 +#define ext4_htree_spin_lock(lck, key, event)  \
413 +       ext4_htree_node_lock(lck, key, EXT4_LB_SPIN, 0, event)
414 +#define ext4_htree_spin_unlock(lck)            \
415 +       ext4_htree_node_unlock(lck, EXT4_LB_SPIN, NULL)
416 +#define ext4_htree_spin_unlock_listen(lck, p)  \
417 +       ext4_htree_node_unlock(lck, EXT4_LB_SPIN, p)
418 +
419 +static void ext4_htree_spin_stop_listen(struct htree_lock *lck)
420 +{
421 +       if (!ext4_htree_safe_locked(lck) &&
422 +           htree_node_is_listening(lck, ffz(~EXT4_LB_SPIN)))
423 +               htree_node_stop_listen(lck, ffz(~EXT4_LB_SPIN));
424 +}
425 +
426 +enum {
427 +       DX_HASH_COL_IGNORE,     /* ignore collision while probing frames */
428 +       DX_HASH_COL_YES,        /* there is collision and it does matter */
429 +       DX_HASH_COL_NO,         /* there is no collision */
430 +};
431 +
432 +static int dx_probe_hash_collision(struct htree_lock *lck,
433 +                                  struct dx_entry *entries,
434 +                                  struct dx_entry *at, u32 hash)
435 +{
436 +       if (!(lck && ext4_htree_lock_data(lck)->ld_flags & EXT4_LB_EXACT)) {
437 +               return DX_HASH_COL_IGNORE; /* don't care about collision */
438 +
439 +       } else if (at == entries + dx_get_count(entries) - 1) {
440 +               return DX_HASH_COL_IGNORE; /* not in any leaf of this DX */
441 +
442 +       } else { /* hash collision? */
443 +               return ((dx_get_hash(at + 1) & ~1) == hash) ?
444 +                       DX_HASH_COL_YES : DX_HASH_COL_NO;
445 +       }
446 +}
447 +
448  /*
449   * Probe for a directory leaf block to search.
450   *
451 @@ -759,10 +990,11 @@
452   */
453  static struct dx_frame *
454  dx_probe(struct ext4_filename *fname, struct inode *dir,
455 -        struct dx_hash_info *hinfo, struct dx_frame *frame_in)
456 +        struct dx_hash_info *hinfo, struct dx_frame *frame_in,
457 +        struct htree_lock *lck)
458  {
459         unsigned count, indirect;
460 -       struct dx_entry *at, *entries, *p, *q, *m;
461 +       struct dx_entry *at, *entries, *p, *q, *m, *dx = NULL;
462         struct dx_root_info *info;
463         struct dx_frame *frame = frame_in;
464         struct dx_frame *ret_err = ERR_PTR(ERR_BAD_DX_DIR);
465 @@ -824,8 +1056,15 @@
466  
467         dxtrace(printk("Look up %x", hash));
468         while (1) {
469 +               if (indirect == 0) { /* the last index level */
470 +                       /* NB: ext4_htree_dx_lock() could be noop if
471 +                        * DX-lock flag is not set for current operation */
472 +                       ext4_htree_dx_lock(lck, dx);
473 +                       ext4_htree_spin_lock(lck, dx, NULL);
474 +               }
475                 count = dx_get_count(entries);
476 -               if (!count || count > dx_get_limit(entries)) {
477 +               if (count == 0 || count > dx_get_limit(entries)) {
478 +                       ext4_htree_spin_unlock(lck); /* release spin */
479                         ext4_warning_inode(dir,
480                                            "dx entry: count %u beyond limit %u",
481                                            count, dx_get_limit(entries));
482 @@ -864,8 +1103,70 @@
483                                dx_get_block(at)));
484                 frame->entries = entries;
485                 frame->at = at;
486 -               if (!indirect--)
487 +
488 +               if (indirect == 0) { /* the last index level */
489 +                       struct ext4_dir_lock_data *ld;
490 +                       u64 myblock;
491 +
492 +                       /* By default we only lock DE-block, however, we will
493 +                        * also lock the last level DX-block if:
494 +                        * a) there is hash collision
495 +                        *    we will set DX-lock flag (a few lines below)
496 +                        *    and redo to lock DX-block
497 +                        *    see detail in dx_probe_hash_collision()
498 +                        * b) it's a retry from splitting
499 +                        *    we need to lock the last level DX-block so nobody
500 +                        *    else can split any leaf blocks under the same
501 +                        *    DX-block, see detail in ext4_dx_add_entry()
502 +                        */
503 +                       if (ext4_htree_dx_locked(lck)) {
504 +                               /* DX-block is locked, just lock DE-block
505 +                                * and return */
506 +                               ext4_htree_spin_unlock(lck);
507 +                               if (!ext4_htree_safe_locked(lck))
508 +                                       ext4_htree_de_lock(lck, frame->at);
509 +                               return frame;
510 +                       }
511 +                       /* it's pdirop and no DX lock */
512 +                       if (dx_probe_hash_collision(lck, entries, at, hash) ==
513 +                           DX_HASH_COL_YES) {
514 +                               /* found hash collision, set DX-lock flag
515 +                                * and retry to obtain DX-lock */
516 +                               ext4_htree_spin_unlock(lck);
517 +                               ext4_htree_dx_need_lock(lck);
518 +                               continue;
519 +                       }
520 +                       ld = ext4_htree_lock_data(lck);
521 +                       /* because I don't lock DX, @at can't be trusted
522 +                        * after I release spinlock so I have to save it */
523 +                       ld->ld_at = at;
524 +                       ld->ld_at_entry = *at;
525 +                       ld->ld_count = dx_get_count(entries);
526 +
527 +                       frame->at = &ld->ld_at_entry;
528 +                       myblock = dx_get_block(at);
529 +
530 +                       /* NB: ordering locking */
531 +                       ext4_htree_spin_unlock_listen(lck, &myblock);
532 +                       /* other thread can split this DE-block because:
533 +                        * a) I don't have lock for the DE-block yet
534 +                        * b) I released spinlock on DX-block
535 +                        * if it happened I can detect it by listening
536 +                        * for the split event on this DE-block */
537 +                       ext4_htree_de_lock(lck, frame->at);
538 +                       ext4_htree_spin_stop_listen(lck);
539 +
540 +                       if (myblock == EXT4_HTREE_NODE_CHANGED) {
541 +                               /* someone split this DE-block before
542 +                                * I locked it, I need to retry and lock
543 +                                * valid DE-block */
544 +                               ext4_htree_de_unlock(lck);
545 +                               continue;
546 +                       }
547                         return frame;
548 +               }
549 +               dx = at;
550 +               indirect--;
551                 frame++;
552                 frame->bh = ext4_read_dirblock(dir, dx_get_block(at), INDEX);
553                 if (IS_ERR(frame->bh)) {
554 @@ -934,7 +1235,7 @@
555  static int ext4_htree_next_block(struct inode *dir, __u32 hash,
556                                  struct dx_frame *frame,
557                                  struct dx_frame *frames,
558 -                                __u32 *start_hash)
559 +                                __u32 *start_hash, struct htree_lock *lck)
560  {
561         struct dx_frame *p;
562         struct buffer_head *bh;
563 @@ -949,12 +1250,22 @@
564          * this loop, num_frames indicates the number of interior
565          * nodes need to be read.
566          */
567 +       ext4_htree_de_unlock(lck);
568         while (1) {
569 +               if (num_frames > 0 || ext4_htree_dx_locked(lck)) {
570 +                       /* num_frames > 0 :
571 +                        *   DX block
572 +                        * ext4_htree_dx_locked:
573 +                        *   frame->at is reliable pointer returned by dx_probe,
574 +                        *   otherwise dx_probe already knew no collision */
575                 if (++(p->at) < p->entries + dx_get_count(p->entries))
576                         break;
577 +               }
578                 if (p == frames)
579                         return 0;
580                 num_frames++;
581 +               if (num_frames == 1)
582 +                       ext4_htree_dx_unlock(lck);
583                 p--;
584         }
585  
586 @@ -977,6 +1288,13 @@
587          * block so no check is necessary
588          */
589         while (num_frames--) {
590 +               if (num_frames == 0) {
591 +                       /* it's not always necessary, we just don't want to
592 +                        * detect hash collision again */
593 +                       ext4_htree_dx_need_lock(lck);
594 +                       ext4_htree_dx_lock(lck, p->at);
595 +               }
596 +
597                 bh = ext4_read_dirblock(dir, dx_get_block(p->at), INDEX);
598                 if (IS_ERR(bh))
599                         return PTR_ERR(bh);
600 @@ -985,6 +1303,7 @@
601                 p->bh = bh;
602                 p->at = p->entries = ((struct dx_node *) bh->b_data)->entries;
603         }
604 +       ext4_htree_de_lock(lck, p->at);
605         return 1;
606  }
607  
608 @@ -1132,10 +1451,10 @@
609         }
610         hinfo.hash = start_hash;
611         hinfo.minor_hash = 0;
612 -       frame = dx_probe(NULL, dir, &hinfo, frames);
613 +       /* assume it's PR locked */
614 +       frame = dx_probe(NULL, dir, &hinfo, frames, NULL);
615         if (IS_ERR(frame))
616                 return PTR_ERR(frame);
617 -
618         /* Add '.' and '..' from the htree header */
619         if (!start_hash && !start_minor_hash) {
620                 de = (struct ext4_dir_entry_2 *) frames[0].bh->b_data;
621 @@ -1175,7 +1494,7 @@
622                 count += ret;
623                 hashval = ~0;
624                 ret = ext4_htree_next_block(dir, HASH_NB_ALWAYS,
625 -                                           frame, frames, &hashval);
626 +                                           frame, frames, &hashval, NULL);
627                 *next_hash = hashval;
628                 if (ret < 0) {
629                         err = ret;
630 @@ -1451,7 +1770,7 @@
631  static struct buffer_head *__ext4_find_entry(struct inode *dir,
632                                              struct ext4_filename *fname,
633                                              struct ext4_dir_entry_2 **res_dir,
634 -                                            int *inlined)
635 +                                            int *inlined, struct htree_lock *lck)
636  {
637         struct super_block *sb;
638         struct buffer_head *bh_use[NAMEI_RA_SIZE];
639 @@ -1493,7 +1812,7 @@
640                 goto restart;
641         }
642         if (is_dx(dir)) {
643 -               ret = ext4_dx_find_entry(dir, fname, res_dir);
644 +               ret = ext4_dx_find_entry(dir, fname, res_dir, lck);
645                 /*
646                  * On success, or if the error was file not found,
647                  * return.  Otherwise, fall back to doing a search the
648 @@ -1503,6 +1822,7 @@
649                         goto cleanup_and_exit;
650                 dxtrace(printk(KERN_DEBUG "ext4_find_entry: dx failed, "
651                                "falling back\n"));
652 +               ext4_htree_safe_relock(lck);
653                 ret = NULL;
654         }
655         nblocks = dir->i_size >> EXT4_BLOCK_SIZE_BITS(sb);
656 @@ -1591,10 +1911,10 @@
657         return ret;
658  }
659  
660 -static struct buffer_head *ext4_find_entry(struct inode *dir,
661 +struct buffer_head *ext4_find_entry_locked(struct inode *dir,
662                                            const struct qstr *d_name,
663                                            struct ext4_dir_entry_2 **res_dir,
664 -                                          int *inlined)
665 +                                          int *inlined, struct htree_lock *lck)
666  {
667         int err;
668         struct ext4_filename fname;
669 @@ -1606,12 +1926,14 @@
670         if (err)
671                 return ERR_PTR(err);
672  
673 -       bh = __ext4_find_entry(dir, &fname, res_dir, inlined);
674 +       bh = __ext4_find_entry(dir, &fname, res_dir, inlined, lck);
675  
676         ext4_fname_free_filename(&fname);
677         return bh;
678  }
679  
680 +EXPORT_SYMBOL(ext4_find_entry_locked);
681 +
682  static struct buffer_head *ext4_lookup_entry(struct inode *dir,
683                                              struct dentry *dentry,
684                                              struct ext4_dir_entry_2 **res_dir)
685 @@ -1626,7 +1948,7 @@
686         if (err)
687                 return ERR_PTR(err);
688  
689 -       bh = __ext4_find_entry(dir, &fname, res_dir, NULL);
690 +       bh = __ext4_find_entry(dir, &fname, res_dir, NULL, NULL);
691  
692         ext4_fname_free_filename(&fname);
693         return bh;
694 @@ -1634,7 +1956,8 @@
695  
696  static struct buffer_head * ext4_dx_find_entry(struct inode *dir,
697                         struct ext4_filename *fname,
698 -                       struct ext4_dir_entry_2 **res_dir)
699 +                       struct ext4_dir_entry_2 **res_dir,
700 +                       struct htree_lock *lck)
701  {
702         struct super_block * sb = dir->i_sb;
703         struct dx_frame frames[EXT4_HTREE_LEVEL], *frame;
704 @@ -1645,7 +1968,7 @@
705  #ifdef CONFIG_FS_ENCRYPTION
706         *res_dir = NULL;
707  #endif
708 -       frame = dx_probe(fname, dir, NULL, frames);
709 +       frame = dx_probe(fname, dir, NULL, frames, lck);
710         if (IS_ERR(frame))
711                 return (struct buffer_head *) frame;
712         do {
713 @@ -1667,7 +1990,7 @@
714  
715                 /* Check to see if we should continue to search */
716                 retval = ext4_htree_next_block(dir, fname->hinfo.hash, frame,
717 -                                              frames, NULL);
718 +                                              frames, NULL, lck);
719                 if (retval < 0) {
720                         ext4_warning_inode(dir,
721                                 "error %d reading directory index block",
722 @@ -1847,8 +2170,9 @@
723   * Returns pointer to de in block into which the new entry will be inserted.
724   */
725  static struct ext4_dir_entry_2 *do_split(handle_t *handle, struct inode *dir,
726 -                       struct buffer_head **bh,struct dx_frame *frame,
727 -                       struct dx_hash_info *hinfo)
728 +                       struct buffer_head **bh, struct dx_frame *frames,
729 +                       struct dx_frame *frame, struct dx_hash_info *hinfo,
730 +                       struct htree_lock *lck)
731  {
732         unsigned blocksize = dir->i_sb->s_blocksize;
733         unsigned count, continued;
734 @@ -1909,8 +2233,14 @@
735                                         hash2, split, count-split));
736  
737         /* Fancy dance to stay within two buffers */
738 -       de2 = dx_move_dirents(data1, data2, map + split, count - split,
739 -                             blocksize);
740 +       if (hinfo->hash < hash2) {
741 +               de2 = dx_move_dirents(data1, data2, map + split,
742 +                                     count - split, blocksize);
743 +       } else {
744 +               /* make sure we will add entry to the same block which
745 +                * we have already locked */
746 +               de2 = dx_move_dirents(data1, data2, map, split, blocksize);
747 +       }
748         de = dx_pack_dirents(data1, blocksize);
749         de->rec_len = ext4_rec_len_to_disk(data1 + (blocksize - csum_size) -
750                                            (char *) de,
751 @@ -1928,12 +2258,21 @@
752         dxtrace(dx_show_leaf(dir, hinfo, (struct ext4_dir_entry_2 *) data2,
753                         blocksize, 1));
754  
755 -       /* Which block gets the new entry? */
756 -       if (hinfo->hash >= hash2) {
757 -               swap(*bh, bh2);
758 -               de = de2;
759 -       }
760 +       ext4_htree_spin_lock(lck, frame > frames ? (frame - 1)->at : NULL,
761 +                            frame->at); /* notify block is being split */
762 +       if (hinfo->hash < hash2) {
763         dx_insert_block(frame, hash2 + continued, newblock);
764 +
765 +       } else {
766 +               /* switch block number */
767 +               dx_insert_block(frame, hash2 + continued,
768 +                               dx_get_block(frame->at));
769 +               dx_set_block(frame->at, newblock);
770 +               (frame->at)++;
771 +       }
772 +       ext4_htree_spin_unlock(lck);
773 +       ext4_htree_dx_unlock(lck);
774 +
775         err = ext4_handle_dirty_dirblock(handle, dir, bh2);
776         if (err)
777                 goto journal_error;
778 @@ -2203,7 +2542,7 @@
779         if (retval)
780                 goto out_frames;        
781  
782 -       de = do_split(handle,dir, &bh2, frame, &fname->hinfo);
783 +       de = do_split(handle, dir, &bh2, frames, frame, &fname->hinfo, NULL);
784         if (IS_ERR(de)) {
785                 retval = PTR_ERR(de);
786                 goto out_frames;
787 @@ -2313,8 +2652,8 @@
788   * may not sleep between calling this and putting something into
789   * the entry, as someone else might have used it while you slept.
790   */
791 -static int ext4_add_entry(handle_t *handle, struct dentry *dentry,
792 -                         struct inode *inode)
793 +int ext4_add_entry_locked(handle_t *handle, struct dentry *dentry,
794 +                         struct inode *inode, struct htree_lock *lck)
795  {
796         struct inode *dir = d_inode(dentry->d_parent);
797         struct buffer_head *bh = NULL;
798 @@ -2362,9 +2701,10 @@
799                 if (dentry->d_name.len == 2 &&
800                     memcmp(dentry->d_name.name, "..", 2) == 0)
801                         return ext4_update_dotdot(handle, dentry, inode);
802 -               retval = ext4_dx_add_entry(handle, &fname, dir, inode);
803 +               retval = ext4_dx_add_entry(handle, &fname, dir, inode, lck);
804                 if (!retval || (retval != ERR_BAD_DX_DIR))
805                         goto out;
806 +               ext4_htree_safe_relock(lck);
807                 /* Can we just ignore htree data? */
808                 if (ext4_has_metadata_csum(sb)) {
809                         EXT4_ERROR_INODE(dir,
810 @@ -2425,12 +2765,14 @@
811                 ext4_set_inode_state(inode, EXT4_STATE_NEWENTRY);
812         return retval;
813  }
814 +EXPORT_SYMBOL(ext4_add_entry_locked);
815  
816  /*
817   * Returns 0 for success, or a negative error value
818   */
819  static int ext4_dx_add_entry(handle_t *handle, struct ext4_filename *fname,
820 -                            struct inode *dir, struct inode *inode)
821 +                            struct inode *dir, struct inode *inode,
822 +                            struct htree_lock *lck)
823  {
824         struct dx_frame frames[EXT4_HTREE_LEVEL], *frame;
825         struct dx_entry *entries, *at;
826 @@ -2442,7 +2784,7 @@
827  
828  again:
829         restart = 0;
830 -       frame = dx_probe(fname, dir, NULL, frames);
831 +       frame = dx_probe(fname, dir, NULL, frames, lck);
832         if (IS_ERR(frame))
833                 return PTR_ERR(frame);
834         entries = frame->entries;
835 @@ -2477,6 +2819,12 @@
836                 struct dx_node *node2;
837                 struct buffer_head *bh2;
838  
839 +               if (!ext4_htree_safe_locked(lck)) { /* retry with EX lock */
840 +                       ext4_htree_safe_relock(lck);
841 +                       restart = 1;
842 +                       goto cleanup;
843 +               }
844 +
845                 while (frame > frames) {
846                         if (dx_get_count((frame - 1)->entries) <
847                             dx_get_limit((frame - 1)->entries)) {
848 @@ -2579,8 +2927,32 @@
849                         restart = 1;
850                         goto journal_error;
851                 }
852 +       } else if (!ext4_htree_dx_locked(lck)) {
853 +               struct ext4_dir_lock_data *ld = ext4_htree_lock_data(lck);
854 +
855 +               /* not well protected, require DX lock */
856 +               ext4_htree_dx_need_lock(lck);
857 +               at = frame > frames ? (frame - 1)->at : NULL;
858 +
859 +               /* NB: no risk of deadlock because it's just a try.
860 +                *
861 +                * NB: we check ld_count for twice, the first time before
862 +                * having DX lock, the second time after holding DX lock.
863 +                *
864 +                * NB: We never free blocks for directory so far, which
865 +                * means value returned by dx_get_count() should equal to
866 +                * ld->ld_count if nobody split any DE-block under @at,
867 +                * and ld->ld_at still points to valid dx_entry. */
868 +               if ((ld->ld_count != dx_get_count(entries)) ||
869 +                   !ext4_htree_dx_lock_try(lck, at) ||
870 +                   (ld->ld_count != dx_get_count(entries))) {
871 +                       restart = 1;
872 +                       goto cleanup;
873 +               }
874 +               /* OK, I've got DX lock and nothing changed */
875 +               frame->at = ld->ld_at;
876         }
877 -       de = do_split(handle, dir, &bh, frame, &fname->hinfo);
878 +       de = do_split(handle, dir, &bh, frames, frame, &fname->hinfo, lck);
879         if (IS_ERR(de)) {
880                 err = PTR_ERR(de);
881                 goto cleanup;
882 @@ -2591,6 +2963,8 @@
883  journal_error:
884         ext4_std_error(dir->i_sb, err); /* this is a no-op if err == 0 */
885  cleanup:
886 +       ext4_htree_dx_unlock(lck);
887 +       ext4_htree_de_unlock(lck);
888         brelse(bh);
889         dx_release(frames);
890         /* @restart is true means htree-path has been changed, we need to
891 diff -wur a/fs/ext4/super.c b/fs/ext4/super.c
892 --- a/fs/ext4/super.c   2020-08-30 12:06:02.746523498 -0600
893 +++ b/fs/ext4/super.c   2020-08-30 12:07:32.345927785 -0600
894 @@ -1087,6 +1087,7 @@
895  
896         inode_set_iversion(&ei->vfs_inode, 1);
897         spin_lock_init(&ei->i_raw_lock);
898 +       sema_init(&ei->i_append_sem, 1);
899         INIT_LIST_HEAD(&ei->i_prealloc_list);
900         spin_lock_init(&ei->i_prealloc_lock);
901         ext4_es_init_tree(&ei->i_es_tree);