1 Single directory performance is critical for HPC workloads. In a
2 typical use case an application creates a separate output file for
3 each node and task in a job. As nodes and tasks increase, hundreds
4 of thousands of files may be created in a single directory within
5 a short window of time.
6 Today, both filename lookup and file system modifying operations
7 (such as create and unlink) are protected with a single lock for
8 an entire ldiskfs directory. The PDO project will remove this
9 bottleneck by introducing a parallel locking mechanism for entire
10 ldiskfs directories. This work will enable multiple application
11 threads to simultaneously lookup, create and unlink in parallel.
14 - pdirops support for ldiskfs
15 - integrate with osd-ldiskfs
17 Index: linux-4.18.0-423.el8/fs/ext4/Makefile
18 ===================================================================
19 --- linux-4.18.0-423.el8.orig/fs/ext4/Makefile
20 +++ linux-4.18.0-423.el8/fs/ext4/Makefile
21 @@ -7,6 +7,7 @@ obj-$(CONFIG_EXT4_FS) += ext4.o
23 ext4-y := balloc.o bitmap.o block_validity.o dir.o ext4_jbd2.o extents.o \
24 extents_status.o file.o fsmap.o fsync.o hash.o ialloc.o \
26 indirect.o inline.o inode.o ioctl.o mballoc.o migrate.o \
27 mmp.o move_extent.o namei.o page-io.o readpage.o resize.o \
28 super.o symlink.o sysfs.o xattr.o xattr_trusted.o xattr_user.o
29 Index: linux-4.18.0-423.el8/fs/ext4/ext4.h
30 ===================================================================
31 --- linux-4.18.0-423.el8.orig/fs/ext4/ext4.h
32 +++ linux-4.18.0-423.el8/fs/ext4/ext4.h
34 #include <linux/timer.h>
35 #include <linux/version.h>
36 #include <linux/wait.h>
37 +#include <linux/htree_lock.h>
38 #include <linux/sched/signal.h>
39 #include <linux/blockgroup_lock.h>
40 #include <linux/percpu_counter.h>
41 @@ -966,6 +967,9 @@ struct ext4_inode_info {
43 ext4_fsblk_t i_file_acl;
45 + /* following fields for parallel directory operations -bzzz */
46 + struct semaphore i_append_sem;
49 * i_block_group is the number of the block group which contains
50 * this file's inode. Constant across the lifetime of the inode,
51 @@ -2217,6 +2221,72 @@ struct dx_hash_info
53 #define HASH_NB_ALWAYS 1
55 +/* assume name-hash is protected by upper layer */
56 +#define EXT4_HTREE_LOCK_HASH 0
58 +enum ext4_pdo_lk_types {
59 +#if EXT4_HTREE_LOCK_HASH
62 + EXT4_LK_DX, /* index block */
63 + EXT4_LK_DE, /* directory entry block */
64 + EXT4_LK_SPIN, /* spinlock */
69 +#define EXT4_LB_RO(b) (1 << (b))
70 +/* read + write, high bits for writer */
71 +#define EXT4_LB_RW(b) ((1 << (b)) | (1 << (EXT4_LK_MAX + (b))))
73 +enum ext4_pdo_lock_bits {
75 + EXT4_LB_DX_RO = EXT4_LB_RO(EXT4_LK_DX),
76 + EXT4_LB_DX = EXT4_LB_RW(EXT4_LK_DX),
78 + EXT4_LB_DE_RO = EXT4_LB_RO(EXT4_LK_DE),
79 + EXT4_LB_DE = EXT4_LB_RW(EXT4_LK_DE),
80 + /* DX spinlock bits */
81 + EXT4_LB_SPIN_RO = EXT4_LB_RO(EXT4_LK_SPIN),
82 + EXT4_LB_SPIN = EXT4_LB_RW(EXT4_LK_SPIN),
83 + /* accurate searching */
84 + EXT4_LB_EXACT = EXT4_LB_RO(EXT4_LK_MAX << 1),
87 +enum ext4_pdo_lock_opc {
89 + EXT4_HLOCK_READDIR = (EXT4_LB_DE_RO | EXT4_LB_DX_RO),
90 + EXT4_HLOCK_LOOKUP = (EXT4_LB_DE_RO | EXT4_LB_SPIN_RO |
92 + EXT4_HLOCK_DEL = (EXT4_LB_DE | EXT4_LB_SPIN_RO |
94 + EXT4_HLOCK_ADD = (EXT4_LB_DE | EXT4_LB_SPIN_RO),
97 + EXT4_HLOCK_LOOKUP_SAFE = (EXT4_LB_DE_RO | EXT4_LB_DX_RO |
99 + EXT4_HLOCK_DEL_SAFE = (EXT4_LB_DE | EXT4_LB_DX_RO | EXT4_LB_EXACT),
100 + EXT4_HLOCK_SPLIT = (EXT4_LB_DE | EXT4_LB_DX | EXT4_LB_SPIN),
103 +extern struct htree_lock_head *ext4_htree_lock_head_alloc(unsigned hbits);
104 +#define ext4_htree_lock_head_free(lhead) htree_lock_head_free(lhead)
106 +extern struct htree_lock *ext4_htree_lock_alloc(void);
107 +#define ext4_htree_lock_free(lck) htree_lock_free(lck)
109 +extern void ext4_htree_lock(struct htree_lock *lck,
110 + struct htree_lock_head *lhead,
111 + struct inode *dir, unsigned flags);
112 +#define ext4_htree_unlock(lck) htree_unlock(lck)
114 +extern struct buffer_head *__ext4_find_entry(struct inode *dir,
115 + const struct qstr *d_name,
116 + struct ext4_dir_entry_2 **res_dir,
117 + int *inlined, struct htree_lock *lck);
118 +extern int __ext4_add_entry(handle_t *handle, struct dentry *dentry,
119 + struct inode *inode, struct htree_lock *lck);
121 struct ext4_filename {
122 const struct qstr *usr_fname;
123 struct fscrypt_str disk_name;
124 @@ -2519,12 +2589,20 @@ void ext4_insert_dentry(struct inode *in
125 struct ext4_filename *fname, void *data);
126 static inline void ext4_update_dx_flag(struct inode *inode)
128 + /* Disable it for ldiskfs, because going from a DX directory to
129 + * a non-DX directory while it is in use will completely break
130 + * the htree-locking.
131 + * If we really want to support this operation in the future,
132 + * we need to exclusively lock the directory here, which will
133 + * increase complexity of code */
135 if (!ext4_has_feature_dir_index(inode->i_sb) &&
136 ext4_test_inode_flag(inode, EXT4_INODE_INDEX)) {
137 /* ext4_iget() should have caught this... */
138 WARN_ON_ONCE(ext4_has_feature_metadata_csum(inode->i_sb));
139 ext4_clear_inode_flag(inode, EXT4_INODE_INDEX);
143 static const unsigned char ext4_filetype_table[] = {
144 DT_UNKNOWN, DT_REG, DT_DIR, DT_CHR, DT_BLK, DT_FIFO, DT_SOCK, DT_LNK
145 Index: linux-4.18.0-423.el8/fs/ext4/namei.c
146 ===================================================================
147 --- linux-4.18.0-423.el8.orig/fs/ext4/namei.c
148 +++ linux-4.18.0-423.el8/fs/ext4/namei.c
149 @@ -55,6 +55,7 @@ struct buffer_head *ext4_append(handle_t
151 struct ext4_map_blocks map;
152 struct buffer_head *bh;
153 + struct ext4_inode_info *ei = EXT4_I(inode);
156 if (unlikely(EXT4_SB(inode->i_sb)->s_max_dir_size_kb &&
157 @@ -62,6 +63,10 @@ struct buffer_head *ext4_append(handle_t
158 EXT4_SB(inode->i_sb)->s_max_dir_size_kb)))
159 return ERR_PTR(-ENOSPC);
161 + /* with parallel dir operations all appends
162 + * have to be serialized -bzzz */
163 + down(&ei->i_append_sem);
165 *block = inode->i_size >> inode->i_sb->s_blocksize_bits;
168 @@ -72,18 +77,24 @@ struct buffer_head *ext4_append(handle_t
171 err = ext4_map_blocks(NULL, inode, &map, 0);
174 + up(&ei->i_append_sem);
178 + up(&ei->i_append_sem);
179 EXT4_ERROR_INODE(inode, "Logical block already allocated");
180 return ERR_PTR(-EFSCORRUPTED);
183 bh = ext4_bread(handle, inode, *block, EXT4_GET_BLOCKS_CREATE);
186 + up(&ei->i_append_sem);
189 inode->i_size += inode->i_sb->s_blocksize;
190 EXT4_I(inode)->i_disksize = inode->i_size;
191 + up(&ei->i_append_sem);
192 BUFFER_TRACE(bh, "get_write_access");
193 err = ext4_journal_get_write_access(handle, bh);
195 @@ -288,7 +299,8 @@ static unsigned dx_node_limit(struct ino
196 static struct dx_frame *dx_probe(struct ext4_filename *fname,
198 struct dx_hash_info *hinfo,
199 - struct dx_frame *frame);
200 + struct dx_frame *frame,
201 + struct htree_lock *lck);
202 static void dx_release(struct dx_frame *frames);
203 static int dx_make_map(struct inode *dir, struct buffer_head *bh,
204 struct dx_hash_info *hinfo,
205 @@ -302,12 +314,13 @@ static void dx_insert_block(struct dx_fr
206 static int ext4_htree_next_block(struct inode *dir, __u32 hash,
207 struct dx_frame *frame,
208 struct dx_frame *frames,
209 - __u32 *start_hash);
210 + __u32 *start_hash, struct htree_lock *lck);
211 static struct buffer_head * ext4_dx_find_entry(struct inode *dir,
212 struct ext4_filename *fname,
213 - struct ext4_dir_entry_2 **res_dir);
214 + struct ext4_dir_entry_2 **res_dir, struct htree_lock *lck);
215 static int ext4_dx_add_entry(handle_t *handle, struct ext4_filename *fname,
216 - struct inode *dir, struct inode *inode);
217 + struct inode *dir, struct inode *inode,
218 + struct htree_lock *lck);
220 /* checksumming functions */
221 void initialize_dirent_tail(struct ext4_dir_entry_tail *t,
222 @@ -771,6 +784,227 @@ struct stats dx_show_entries(struct dx_h
224 #endif /* DX_DEBUG */
226 +/* private data for htree_lock */
227 +struct ext4_dir_lock_data {
228 + unsigned ld_flags; /* bits-map for lock types */
229 + unsigned ld_count; /* # entries of the last DX block */
230 + struct dx_entry ld_at_entry; /* copy of leaf dx_entry */
231 + struct dx_entry *ld_at; /* position of leaf dx_entry */
234 +#define ext4_htree_lock_data(l) ((struct ext4_dir_lock_data *)(l)->lk_private)
235 +#define ext4_find_entry(dir, name, dirent, inline) \
236 + __ext4_find_entry(dir, name, dirent, inline, NULL)
237 +#define ext4_add_entry(handle, dentry, inode) \
238 + __ext4_add_entry(handle, dentry, inode, NULL)
240 +/* NB: ext4_lblk_t is 32 bits so we use high bits to identify invalid blk */
241 +#define EXT4_HTREE_NODE_CHANGED (0xcafeULL << 32)
243 +static void ext4_htree_event_cb(void *target, void *event)
245 + u64 *block = (u64 *)target;
247 + if (*block == dx_get_block((struct dx_entry *)event))
248 + *block = EXT4_HTREE_NODE_CHANGED;
251 +struct htree_lock_head *ext4_htree_lock_head_alloc(unsigned hbits)
253 + struct htree_lock_head *lhead;
255 + lhead = htree_lock_head_alloc(EXT4_LK_MAX, hbits, 0);
256 + if (lhead != NULL) {
257 + htree_lock_event_attach(lhead, EXT4_LK_SPIN, HTREE_EVENT_WR,
258 + ext4_htree_event_cb);
262 +EXPORT_SYMBOL(ext4_htree_lock_head_alloc);
264 +struct htree_lock *ext4_htree_lock_alloc(void)
266 + return htree_lock_alloc(EXT4_LK_MAX,
267 + sizeof(struct ext4_dir_lock_data));
269 +EXPORT_SYMBOL(ext4_htree_lock_alloc);
271 +static htree_lock_mode_t ext4_htree_mode(unsigned flags)
274 + default: /* 0 or unknown flags require EX lock */
275 + return HTREE_LOCK_EX;
276 + case EXT4_HLOCK_READDIR:
277 + return HTREE_LOCK_PR;
278 + case EXT4_HLOCK_LOOKUP:
279 + return HTREE_LOCK_CR;
280 + case EXT4_HLOCK_DEL:
281 + case EXT4_HLOCK_ADD:
282 + return HTREE_LOCK_CW;
286 +/* return PR for read-only operations, otherwise return EX */
287 +static inline htree_lock_mode_t ext4_htree_safe_mode(unsigned flags)
289 + int writer = (flags & EXT4_LB_DE) == EXT4_LB_DE;
291 + /* 0 requires EX lock */
292 + return (flags == 0 || writer) ? HTREE_LOCK_EX : HTREE_LOCK_PR;
295 +static int ext4_htree_safe_locked(struct htree_lock *lck)
299 + if (lck == NULL || lck->lk_mode == HTREE_LOCK_EX)
302 + writer = (ext4_htree_lock_data(lck)->ld_flags & EXT4_LB_DE) ==
304 + if (writer) /* all readers & writers are excluded? */
305 + return lck->lk_mode == HTREE_LOCK_EX;
307 + /* all writers are excluded? */
308 + return lck->lk_mode == HTREE_LOCK_PR ||
309 + lck->lk_mode == HTREE_LOCK_PW ||
310 + lck->lk_mode == HTREE_LOCK_EX;
313 +/* relock htree_lock with EX mode if it's change operation, otherwise
314 + * relock it with PR mode. It's noop if PDO is disabled. */
315 +static void ext4_htree_safe_relock(struct htree_lock *lck)
317 + if (!ext4_htree_safe_locked(lck)) {
318 + unsigned flags = ext4_htree_lock_data(lck)->ld_flags;
320 + htree_change_lock(lck, ext4_htree_safe_mode(flags));
324 +void ext4_htree_lock(struct htree_lock *lck, struct htree_lock_head *lhead,
325 + struct inode *dir, unsigned flags)
327 + htree_lock_mode_t mode = is_dx(dir) ? ext4_htree_mode(flags) :
328 + ext4_htree_safe_mode(flags);
330 + ext4_htree_lock_data(lck)->ld_flags = flags;
331 + htree_lock(lck, lhead, mode);
333 + ext4_htree_safe_relock(lck); /* make sure it's safe locked */
335 +EXPORT_SYMBOL(ext4_htree_lock);
337 +static int ext4_htree_node_lock(struct htree_lock *lck, struct dx_entry *at,
338 + unsigned lmask, int wait, void *ev)
340 + u32 key = (at == NULL) ? 0 : dx_get_block(at);
343 + /* NOOP if htree is well protected or caller doesn't require the lock */
344 + if (ext4_htree_safe_locked(lck) ||
345 + !(ext4_htree_lock_data(lck)->ld_flags & lmask))
348 + mode = (ext4_htree_lock_data(lck)->ld_flags & lmask) == lmask ?
349 + HTREE_LOCK_PW : HTREE_LOCK_PR;
351 + if (htree_node_lock_try(lck, mode, key, ffz(~lmask), wait, ev))
353 + if (!(lmask & EXT4_LB_SPIN)) /* not a spinlock */
355 + cpu_relax(); /* spin until granted */
359 +static int ext4_htree_node_locked(struct htree_lock *lck, unsigned lmask)
361 + return ext4_htree_safe_locked(lck) ||
362 + htree_node_is_granted(lck, ffz(~lmask));
365 +static void ext4_htree_node_unlock(struct htree_lock *lck,
366 + unsigned lmask, void *buf)
368 + /* NB: it's safe to call multiple times even if it's not locked */
369 + if (!ext4_htree_safe_locked(lck) &&
370 + htree_node_is_granted(lck, ffz(~lmask)))
371 + htree_node_unlock(lck, ffz(~lmask), buf);
374 +#define ext4_htree_dx_lock(lck, key) \
375 + ext4_htree_node_lock(lck, key, EXT4_LB_DX, 1, NULL)
376 +#define ext4_htree_dx_lock_try(lck, key) \
377 + ext4_htree_node_lock(lck, key, EXT4_LB_DX, 0, NULL)
378 +#define ext4_htree_dx_unlock(lck) \
379 + ext4_htree_node_unlock(lck, EXT4_LB_DX, NULL)
380 +#define ext4_htree_dx_locked(lck) \
381 + ext4_htree_node_locked(lck, EXT4_LB_DX)
383 +static void ext4_htree_dx_need_lock(struct htree_lock *lck)
385 + struct ext4_dir_lock_data *ld;
387 + if (ext4_htree_safe_locked(lck))
390 + ld = ext4_htree_lock_data(lck);
391 + switch (ld->ld_flags) {
394 + case EXT4_HLOCK_LOOKUP:
395 + ld->ld_flags = EXT4_HLOCK_LOOKUP_SAFE;
397 + case EXT4_HLOCK_DEL:
398 + ld->ld_flags = EXT4_HLOCK_DEL_SAFE;
400 + case EXT4_HLOCK_ADD:
401 + ld->ld_flags = EXT4_HLOCK_SPLIT;
406 +#define ext4_htree_de_lock(lck, key) \
407 + ext4_htree_node_lock(lck, key, EXT4_LB_DE, 1, NULL)
408 +#define ext4_htree_de_unlock(lck) \
409 + ext4_htree_node_unlock(lck, EXT4_LB_DE, NULL)
411 +#define ext4_htree_spin_lock(lck, key, event) \
412 + ext4_htree_node_lock(lck, key, EXT4_LB_SPIN, 0, event)
413 +#define ext4_htree_spin_unlock(lck) \
414 + ext4_htree_node_unlock(lck, EXT4_LB_SPIN, NULL)
415 +#define ext4_htree_spin_unlock_listen(lck, p) \
416 + ext4_htree_node_unlock(lck, EXT4_LB_SPIN, p)
418 +static void ext4_htree_spin_stop_listen(struct htree_lock *lck)
420 + if (!ext4_htree_safe_locked(lck) &&
421 + htree_node_is_listening(lck, ffz(~EXT4_LB_SPIN)))
422 + htree_node_stop_listen(lck, ffz(~EXT4_LB_SPIN));
426 + DX_HASH_COL_IGNORE, /* ignore collision while probing frames */
427 + DX_HASH_COL_YES, /* there is collision and it does matter */
428 + DX_HASH_COL_NO, /* there is no collision */
431 +static int dx_probe_hash_collision(struct htree_lock *lck,
432 + struct dx_entry *entries,
433 + struct dx_entry *at, u32 hash)
435 + if (!(lck && ext4_htree_lock_data(lck)->ld_flags & EXT4_LB_EXACT)) {
436 + return DX_HASH_COL_IGNORE; /* don't care about collision */
438 + } else if (at == entries + dx_get_count(entries) - 1) {
439 + return DX_HASH_COL_IGNORE; /* not in any leaf of this DX */
441 + } else { /* hash collision? */
442 + return ((dx_get_hash(at + 1) & ~1) == hash) ?
443 + DX_HASH_COL_YES : DX_HASH_COL_NO;
448 * Probe for a directory leaf block to search.
450 @@ -782,10 +1016,11 @@ struct stats dx_show_entries(struct dx_h
452 static struct dx_frame *
453 dx_probe(struct ext4_filename *fname, struct inode *dir,
454 - struct dx_hash_info *hinfo, struct dx_frame *frame_in)
455 + struct dx_hash_info *hinfo, struct dx_frame *frame_in,
456 + struct htree_lock *lck)
458 unsigned count, indirect, level, i;
459 - struct dx_entry *at, *entries, *p, *q, *m;
460 + struct dx_entry *at, *entries, *p, *q, *m, *dx = NULL;
461 struct dx_root_info *info;
462 struct dx_frame *frame = frame_in;
463 struct dx_frame *ret_err = ERR_PTR(ERR_BAD_DX_DIR);
464 @@ -851,8 +1086,15 @@ dx_probe(struct ext4_filename *fname, st
468 + if (indirect == 0) { /* the last index level */
469 + /* NB: ext4_htree_dx_lock() could be noop if
470 + * DX-lock flag is not set for current operation */
471 + ext4_htree_dx_lock(lck, dx);
472 + ext4_htree_spin_lock(lck, dx, NULL);
474 count = dx_get_count(entries);
475 - if (!count || count > dx_get_limit(entries)) {
476 + if (count == 0 || count > dx_get_limit(entries)) {
477 + ext4_htree_spin_unlock(lck); /* release spin */
478 ext4_warning_inode(dir,
479 "dx entry: count %u beyond limit %u",
480 count, dx_get_limit(entries));
481 @@ -901,8 +1143,70 @@ dx_probe(struct ext4_filename *fname, st
485 - if (++level > indirect)
487 + if (indirect == 0) { /* the last index level */
488 + struct ext4_dir_lock_data *ld;
491 + /* By default we only lock DE-block, however, we will
492 + * also lock the last level DX-block if:
493 + * a) there is hash collision
494 + * we will set DX-lock flag (a few lines below)
495 + * and redo to lock DX-block
496 + * see detail in dx_probe_hash_collision()
497 + * b) it's a retry from splitting
498 + * we need to lock the last level DX-block so nobody
499 + * else can split any leaf blocks under the same
500 + * DX-block, see detail in ext4_dx_add_entry()
502 + if (ext4_htree_dx_locked(lck)) {
503 + /* DX-block is locked, just lock DE-block
505 + ext4_htree_spin_unlock(lck);
506 + if (!ext4_htree_safe_locked(lck))
507 + ext4_htree_de_lock(lck, frame->at);
510 + /* it's pdirop and no DX lock */
511 + if (dx_probe_hash_collision(lck, entries, at, hash) ==
513 + /* found hash collision, set DX-lock flag
514 + * and retry to obtain DX-lock */
515 + ext4_htree_spin_unlock(lck);
516 + ext4_htree_dx_need_lock(lck);
519 + ld = ext4_htree_lock_data(lck);
520 + /* because I don't lock DX, so @at can't be trusted
521 + * after I release spinlock so I have to save it */
523 + ld->ld_at_entry = *at;
524 + ld->ld_count = dx_get_count(entries);
526 + frame->at = &ld->ld_at_entry;
527 + myblock = dx_get_block(at);
529 + /* NB: ordering locking */
530 + ext4_htree_spin_unlock_listen(lck, &myblock);
531 + /* other thread can split this DE-block because:
532 + * a) I don't have lock for the DE-block yet
533 + * b) I released spinlock on DX-block
534 + * if it happened I can detect it by listening
535 + * splitting event on this DE-block */
536 + ext4_htree_de_lock(lck, frame->at);
537 + ext4_htree_spin_stop_listen(lck);
539 + if (myblock == EXT4_HTREE_NODE_CHANGED) {
540 + /* someone split this DE-block before
541 + * I locked it, I need to retry and lock
542 + * valid DE-block */
543 + ext4_htree_de_unlock(lck);
550 blocks[level] = block;
552 frame->bh = ext4_read_dirblock(dir, block, INDEX);
553 @@ -973,7 +1277,7 @@ static void dx_release(struct dx_frame *
554 static int ext4_htree_next_block(struct inode *dir, __u32 hash,
555 struct dx_frame *frame,
556 struct dx_frame *frames,
558 + __u32 *start_hash, struct htree_lock *lck)
561 struct buffer_head *bh;
562 @@ -988,12 +1292,22 @@ static int ext4_htree_next_block(struct
563 * this loop, num_frames indicates the number of interior
564 * nodes need to be read.
566 + ext4_htree_de_unlock(lck);
568 - if (++(p->at) < p->entries + dx_get_count(p->entries))
570 + if (num_frames > 0 || ext4_htree_dx_locked(lck)) {
571 + /* num_frames > 0 :
573 + * ext4_htree_dx_locked:
574 + * frame->at is reliable pointer returned by dx_probe,
575 + * otherwise dx_probe already knew no collision */
576 + if (++(p->at) < p->entries + dx_get_count(p->entries))
582 + if (num_frames == 1)
583 + ext4_htree_dx_unlock(lck);
587 @@ -1016,6 +1330,13 @@ static int ext4_htree_next_block(struct
588 * block so no check is necessary
590 while (num_frames--) {
591 + if (num_frames == 0) {
592 + /* it's not always necessary, we just don't want to
593 + * detect hash collision again */
594 + ext4_htree_dx_need_lock(lck);
595 + ext4_htree_dx_lock(lck, p->at);
598 bh = ext4_read_dirblock(dir, dx_get_block(p->at), INDEX);
601 @@ -1024,6 +1345,7 @@ static int ext4_htree_next_block(struct
603 p->at = p->entries = ((struct dx_node *) bh->b_data)->entries;
605 + ext4_htree_de_lock(lck, p->at);
609 @@ -1171,10 +1493,10 @@ int ext4_htree_fill_tree(struct file *di
611 hinfo.hash = start_hash;
612 hinfo.minor_hash = 0;
613 - frame = dx_probe(NULL, dir, &hinfo, frames);
614 + /* assume it's PR locked */
615 + frame = dx_probe(NULL, dir, &hinfo, frames, NULL);
617 return PTR_ERR(frame);
619 /* Add '.' and '..' from the htree header */
620 if (!start_hash && !start_minor_hash) {
621 de = (struct ext4_dir_entry_2 *) frames[0].bh->b_data;
622 @@ -1214,7 +1536,7 @@ int ext4_htree_fill_tree(struct file *di
625 ret = ext4_htree_next_block(dir, HASH_NB_ALWAYS,
626 - frame, frames, &hashval);
627 + frame, frames, &hashval, NULL);
628 *next_hash = hashval;
631 @@ -1413,10 +1735,10 @@ static int is_dx_internal_node(struct in
632 * The returned buffer_head has ->b_count elevated. The caller is expected
633 * to brelse() it when appropriate.
635 -static struct buffer_head * ext4_find_entry (struct inode *dir,
636 +struct buffer_head *__ext4_find_entry(struct inode *dir,
637 const struct qstr *d_name,
638 struct ext4_dir_entry_2 **res_dir,
640 + int *inlined, struct htree_lock *lck)
642 struct super_block *sb;
643 struct buffer_head *bh_use[NAMEI_RA_SIZE];
644 @@ -1465,7 +1787,7 @@ static struct buffer_head * ext4_find_en
648 - ret = ext4_dx_find_entry(dir, &fname, res_dir);
649 + ret = ext4_dx_find_entry(dir, &fname, res_dir, lck);
651 * On success, or if the error was file not found,
652 * return. Otherwise, fall back to doing a search the
653 @@ -1475,6 +1797,7 @@ static struct buffer_head * ext4_find_en
654 goto cleanup_and_exit;
655 dxtrace(printk(KERN_DEBUG "ext4_find_entry: dx failed, "
657 + ext4_htree_safe_relock(lck);
660 nblocks = dir->i_size >> EXT4_BLOCK_SIZE_BITS(sb);
661 @@ -1566,10 +1889,12 @@ cleanup_and_exit:
662 ext4_fname_free_filename(&fname);
665 +EXPORT_SYMBOL(__ext4_find_entry);
667 static struct buffer_head * ext4_dx_find_entry(struct inode *dir,
668 struct ext4_filename *fname,
669 - struct ext4_dir_entry_2 **res_dir)
670 + struct ext4_dir_entry_2 **res_dir,
671 + struct htree_lock *lck)
673 struct super_block * sb = dir->i_sb;
674 struct dx_frame frames[EXT4_HTREE_LEVEL], *frame;
675 @@ -1580,7 +1905,7 @@ static struct buffer_head * ext4_dx_find
676 #ifdef CONFIG_EXT4_FS_ENCRYPTION
679 - frame = dx_probe(fname, dir, NULL, frames);
680 + frame = dx_probe(fname, dir, NULL, frames, lck);
682 return (struct buffer_head *) frame;
684 @@ -1602,7 +1927,7 @@ static struct buffer_head * ext4_dx_find
686 /* Check to see if we should continue to search */
687 retval = ext4_htree_next_block(dir, fname->hinfo.hash, frame,
689 + frames, NULL, lck);
691 ext4_warning_inode(dir,
692 "error %d reading directory index block",
693 @@ -1777,8 +2102,9 @@ static struct ext4_dir_entry_2* dx_pack_
694 * Returns pointer to de in block into which the new entry will be inserted.
696 static struct ext4_dir_entry_2 *do_split(handle_t *handle, struct inode *dir,
697 - struct buffer_head **bh,struct dx_frame *frame,
698 - struct dx_hash_info *hinfo)
699 + struct buffer_head **bh, struct dx_frame *frames,
700 + struct dx_frame *frame, struct dx_hash_info *hinfo,
701 + struct htree_lock *lck)
703 unsigned blocksize = dir->i_sb->s_blocksize;
705 @@ -1854,8 +2180,14 @@ static struct ext4_dir_entry_2 *do_split
706 hash2, split, count-split));
708 /* Fancy dance to stay within two buffers */
709 - de2 = dx_move_dirents(data1, data2, map + split, count - split,
711 + if (hinfo->hash < hash2) {
712 + de2 = dx_move_dirents(data1, data2, map + split,
713 + count - split, blocksize);
715 + /* make sure we will add entry to the same block which
716 + * we have already locked */
717 + de2 = dx_move_dirents(data1, data2, map, split, blocksize);
719 de = dx_pack_dirents(data1, blocksize);
720 de->rec_len = ext4_rec_len_to_disk(data1 + (blocksize - csum_size) -
722 @@ -1876,12 +2208,21 @@ static struct ext4_dir_entry_2 *do_split
723 dxtrace(dx_show_leaf(dir, hinfo, (struct ext4_dir_entry_2 *) data2,
726 - /* Which block gets the new entry? */
727 - if (hinfo->hash >= hash2) {
730 + ext4_htree_spin_lock(lck, frame > frames ? (frame - 1)->at : NULL,
731 + frame->at); /* notify block is being split */
732 + if (hinfo->hash < hash2) {
733 + dx_insert_block(frame, hash2 + continued, newblock);
736 + /* switch block number */
737 + dx_insert_block(frame, hash2 + continued,
738 + dx_get_block(frame->at));
739 + dx_set_block(frame->at, newblock);
742 - dx_insert_block(frame, hash2 + continued, newblock);
743 + ext4_htree_spin_unlock(lck);
744 + ext4_htree_dx_unlock(lck);
746 err = ext4_handle_dirty_dirent_node(handle, dir, bh2);
749 @@ -2155,7 +2496,7 @@ static int make_indexed_dir(handle_t *ha
753 - de = do_split(handle,dir, &bh2, frame, &fname->hinfo);
754 + de = do_split(handle, dir, &bh2, frames, frame, &fname->hinfo, NULL);
756 retval = PTR_ERR(de);
758 @@ -2265,8 +2606,8 @@ out:
759 * may not sleep between calling this and putting something into
760 * the entry, as someone else might have used it while you slept.
762 -static int ext4_add_entry(handle_t *handle, struct dentry *dentry,
763 - struct inode *inode)
764 +int __ext4_add_entry(handle_t *handle, struct dentry *dentry,
765 + struct inode *inode, struct htree_lock *lck)
767 struct inode *dir = d_inode(dentry->d_parent);
768 struct buffer_head *bh = NULL;
769 @@ -2307,9 +2648,10 @@ static int ext4_add_entry(handle_t *hand
770 if (dentry->d_name.len == 2 &&
771 memcmp(dentry->d_name.name, "..", 2) == 0)
772 return ext4_update_dotdot(handle, dentry, inode);
773 - retval = ext4_dx_add_entry(handle, &fname, dir, inode);
774 + retval = ext4_dx_add_entry(handle, &fname, dir, inode, lck);
775 if (!retval || (retval != ERR_BAD_DX_DIR))
777 + ext4_htree_safe_relock(lck);
778 /* Can we just ignore htree data? */
779 if (ext4_has_metadata_csum(sb)) {
780 EXT4_ERROR_INODE(dir,
781 @@ -2372,12 +2714,14 @@ out:
782 ext4_set_inode_state(inode, EXT4_STATE_NEWENTRY);
785 +EXPORT_SYMBOL(__ext4_add_entry);
788 * Returns 0 for success, or a negative error value
790 static int ext4_dx_add_entry(handle_t *handle, struct ext4_filename *fname,
791 - struct inode *dir, struct inode *inode)
792 + struct inode *dir, struct inode *inode,
793 + struct htree_lock *lck)
795 struct dx_frame frames[EXT4_HTREE_LEVEL], *frame;
796 struct dx_entry *entries, *at;
797 @@ -2389,7 +2733,7 @@ static int ext4_dx_add_entry(handle_t *h
801 - frame = dx_probe(fname, dir, NULL, frames);
802 + frame = dx_probe(fname, dir, NULL, frames, lck);
804 return PTR_ERR(frame);
805 entries = frame->entries;
806 @@ -2424,6 +2768,11 @@ again:
807 struct dx_node *node2;
808 struct buffer_head *bh2;
810 + if (!ext4_htree_safe_locked(lck)) { /* retry with EX lock */
811 + ext4_htree_safe_relock(lck);
815 while (frame > frames) {
816 if (dx_get_count((frame - 1)->entries) <
817 dx_get_limit((frame - 1)->entries)) {
818 @@ -2525,8 +2874,32 @@ again:
822 + } else if (!ext4_htree_dx_locked(lck)) {
823 + struct ext4_dir_lock_data *ld = ext4_htree_lock_data(lck);
825 + /* not well protected, require DX lock */
826 + ext4_htree_dx_need_lock(lck);
827 + at = frame > frames ? (frame - 1)->at : NULL;
829 + /* NB: no risk of deadlock because it's just a try.
831 + * NB: we check ld_count twice, the first time before
832 + * having DX lock, the second time after holding DX lock.
834 + * NB: We never free blocks for directory so far, which
835 + * means value returned by dx_get_count() should equal to
836 + * ld->ld_count if nobody split any DE-block under @at,
837 + * and ld->ld_at still points to valid dx_entry. */
838 + if ((ld->ld_count != dx_get_count(entries)) ||
839 + !ext4_htree_dx_lock_try(lck, at) ||
840 + (ld->ld_count != dx_get_count(entries))) {
844 + /* OK, I've got DX lock and nothing changed */
845 + frame->at = ld->ld_at;
847 - de = do_split(handle, dir, &bh, frame, &fname->hinfo);
848 + de = do_split(handle, dir, &bh, frames, frame, &fname->hinfo, lck);
852 @@ -2537,6 +2910,8 @@ again:
854 ext4_std_error(dir->i_sb, err); /* this is a no-op if err == 0 */
856 + ext4_htree_dx_unlock(lck);
857 + ext4_htree_de_unlock(lck);
860 /* @restart is true means htree-path has been changed, we need to
861 Index: linux-4.18.0-423.el8/fs/ext4/super.c
862 ===================================================================
863 --- linux-4.18.0-423.el8.orig/fs/ext4/super.c
864 +++ linux-4.18.0-423.el8/fs/ext4/super.c
865 @@ -1136,6 +1136,7 @@ static struct inode *ext4_alloc_inode(st
867 inode_set_iversion(&ei->vfs_inode, 1);
868 spin_lock_init(&ei->i_raw_lock);
869 + sema_init(&ei->i_append_sem, 1);
870 INIT_LIST_HEAD(&ei->i_prealloc_list);
871 spin_lock_init(&ei->i_prealloc_lock);
872 ext4_es_init_tree(&ei->i_es_tree);