1 From 1a0f7f0b9c13ef0aa86e125f350b6733bff8db3c Mon Sep 17 00:00:00 2001
2 From: Liang Zhen <liang.zhen@intel.com>
3 Date: Wed, 15 Jan 2020 07:35:13 -0600
4 Subject: [PATCH] LU-50 ldiskfs: parallel directory operations for ext4
6 In a typical use case an application creates a separate output file for each
7 node and task in a job. As nodes and tasks increase, hundreds of thousands of
8 files may be created in a single directory within a short window of time.
9 Today, both filename lookup and file system modifying operations (such as
10 create and unlink) are protected with a single lock for an entire ldiskfs
11 directory. PDO project will remove this bottleneck by introducing a parallel
12 locking mechanism for entire ldiskfs directories. This work will enable
13 multiple application threads to simultaneously lookup, create and unlink in
17 - pdirops support for ldiskfs
18 - integrate with osd-ldiskfs
19 Signed-off-by: Liang Zhen <liang.zhen@intel.com>
20 Change-Id: I269c0e3112e68f3acd79e860dab052a68c7d7aaa
22 fs/ext4/Makefile | 1 +
23 fs/ext4/ext4.h | 78 ++++
24 fs/ext4/htree_lock.c | 891 +++++++++++++++++++++++++++++++++++++
25 fs/ext4/namei.c | 454 +++++++++++++++++--
27 include/linux/htree_lock.h | 187 ++++++++
28 6 files changed, 1572 insertions(+), 40 deletions(-)
29 create mode 100644 fs/ext4/htree_lock.c
30 create mode 100644 include/linux/htree_lock.h
32 diff -wur a/fs/ext4/ext4.h b/fs/ext4/ext4.h
33 --- a/fs/ext4/ext4.h 2020-08-30 12:06:02.782523259 -0600
34 +++ b/fs/ext4/ext4.h 2020-08-30 12:09:18.997212399 -0600
36 #include <linux/timer.h>
37 #include <linux/version.h>
38 #include <linux/wait.h>
39 +#include <linux/htree_lock.h>
40 #include <linux/sched/signal.h>
41 #include <linux/blockgroup_lock.h>
42 #include <linux/percpu_counter.h>
45 ext4_fsblk_t i_file_acl;
47 + /* following fields for parallel directory operations -bzzz */
48 + struct semaphore i_append_sem;
51 * i_block_group is the number of the block group which contains
52 * this file's inode. Constant across the lifetime of the inode,
53 @@ -2206,6 +2210,72 @@
55 #define HASH_NB_ALWAYS 1
57 +/* assume name-hash is protected by upper layer */
58 +#define EXT4_HTREE_LOCK_HASH 0
60 +enum ext4_pdo_lk_types {
61 +#if EXT4_HTREE_LOCK_HASH
64 + EXT4_LK_DX, /* index block */
65 + EXT4_LK_DE, /* directory entry block */
66 + EXT4_LK_SPIN, /* spinlock */
71 +#define EXT4_LB_RO(b) (1 << (b))
72 +/* read + write, high bits for writer */
73 +#define EXT4_LB_RW(b) ((1 << (b)) | (1 << (EXT4_LK_MAX + (b))))
75 +enum ext4_pdo_lock_bits {
77 + EXT4_LB_DX_RO = EXT4_LB_RO(EXT4_LK_DX),
78 + EXT4_LB_DX = EXT4_LB_RW(EXT4_LK_DX),
80 + EXT4_LB_DE_RO = EXT4_LB_RO(EXT4_LK_DE),
81 + EXT4_LB_DE = EXT4_LB_RW(EXT4_LK_DE),
82 + /* DX spinlock bits */
83 + EXT4_LB_SPIN_RO = EXT4_LB_RO(EXT4_LK_SPIN),
84 + EXT4_LB_SPIN = EXT4_LB_RW(EXT4_LK_SPIN),
85 + /* accurate searching */
86 + EXT4_LB_EXACT = EXT4_LB_RO(EXT4_LK_MAX << 1),
89 +enum ext4_pdo_lock_opc {
91 + EXT4_HLOCK_READDIR = (EXT4_LB_DE_RO | EXT4_LB_DX_RO),
92 + EXT4_HLOCK_LOOKUP = (EXT4_LB_DE_RO | EXT4_LB_SPIN_RO |
94 + EXT4_HLOCK_DEL = (EXT4_LB_DE | EXT4_LB_SPIN_RO |
96 + EXT4_HLOCK_ADD = (EXT4_LB_DE | EXT4_LB_SPIN_RO),
99 + EXT4_HLOCK_LOOKUP_SAFE = (EXT4_LB_DE_RO | EXT4_LB_DX_RO |
101 + EXT4_HLOCK_DEL_SAFE = (EXT4_LB_DE | EXT4_LB_DX_RO | EXT4_LB_EXACT),
102 + EXT4_HLOCK_SPLIT = (EXT4_LB_DE | EXT4_LB_DX | EXT4_LB_SPIN),
105 +extern struct htree_lock_head *ext4_htree_lock_head_alloc(unsigned hbits);
106 +#define ext4_htree_lock_head_free(lhead) htree_lock_head_free(lhead)
108 +extern struct htree_lock *ext4_htree_lock_alloc(void);
109 +#define ext4_htree_lock_free(lck) htree_lock_free(lck)
111 +extern void ext4_htree_lock(struct htree_lock *lck,
112 + struct htree_lock_head *lhead,
113 + struct inode *dir, unsigned flags);
114 +#define ext4_htree_unlock(lck) htree_unlock(lck)
116 +extern struct buffer_head *ext4_find_entry_locked(struct inode *dir,
117 + const struct qstr *d_name,
118 + struct ext4_dir_entry_2 **res_dir,
119 + int *inlined, struct htree_lock *lck);
120 +extern int ext4_add_entry_locked(handle_t *handle, struct dentry *dentry,
121 + struct inode *inode, struct htree_lock *lck);
123 struct ext4_filename {
124 const struct qstr *usr_fname;
125 struct fscrypt_str disk_name;
126 @@ -2573,11 +2643,20 @@
127 struct ext4_filename *fname, void *data);
128 static inline void ext4_update_dx_flag(struct inode *inode)
130 + /* Disable it for ldiskfs, because going from a DX directory to
131 + * a non-DX directory while it is in use will completely break
132 + * the htree-locking.
133 + * If we really want to support this operation in the future,
134 + * we need to exclusively lock the directory here, which will
135 + * increase complexity of code
138 if (!ext4_has_feature_dir_index(inode->i_sb)) {
139 /* ext4_iget() should have caught this... */
140 WARN_ON_ONCE(ext4_has_feature_metadata_csum(inode->i_sb));
141 ext4_clear_inode_flag(inode, EXT4_INODE_INDEX);
145 static const unsigned char ext4_filetype_table[] = {
146 DT_UNKNOWN, DT_REG, DT_DIR, DT_CHR, DT_BLK, DT_FIFO, DT_SOCK, DT_LNK
147 diff -wur a/fs/ext4/Makefile b/fs/ext4/Makefile
148 --- a/fs/ext4/Makefile 2020-08-30 12:06:02.378525933 -0600
149 +++ b/fs/ext4/Makefile 2020-08-30 12:07:32.337927838 -0600
152 ext4-y := balloc.o bitmap.o block_validity.o dir.o ext4_jbd2.o extents.o \
153 extents_status.o file.o fsmap.o fsync.o hash.o ialloc.o \
155 indirect.o inline.o inode.o ioctl.o mballoc.o migrate.o \
156 mmp.o move_extent.o namei.o page-io.o readpage.o resize.o \
157 super.o symlink.o sysfs.o xattr.o xattr_trusted.o xattr_user.o
158 diff -wur a/fs/ext4/namei.c b/fs/ext4/namei.c
159 --- a/fs/ext4/namei.c 2020-08-30 12:06:02.746523498 -0600
160 +++ b/fs/ext4/namei.c 2020-08-30 12:11:25.136359125 -0600
164 struct buffer_head *bh;
165 + struct ext4_inode_info *ei = EXT4_I(inode);
168 if (unlikely(EXT4_SB(inode->i_sb)->s_max_dir_size_kb &&
170 EXT4_SB(inode->i_sb)->s_max_dir_size_kb)))
171 return ERR_PTR(-ENOSPC);
173 + /* with parallel dir operations all appends
174 + * have to be serialized -bzzz */
175 + down(&ei->i_append_sem);
177 *block = inode->i_size >> inode->i_sb->s_blocksize_bits;
179 bh = ext4_bread(handle, inode, *block, EXT4_GET_BLOCKS_CREATE);
182 + up(&ei->i_append_sem);
185 inode->i_size += inode->i_sb->s_blocksize;
186 EXT4_I(inode)->i_disksize = inode->i_size;
187 BUFFER_TRACE(bh, "get_write_access");
188 err = ext4_journal_get_write_access(handle, bh);
189 + up(&ei->i_append_sem);
192 ext4_std_error(inode->i_sb, err);
194 static struct dx_frame *dx_probe(struct ext4_filename *fname,
196 struct dx_hash_info *hinfo,
197 - struct dx_frame *frame);
198 + struct dx_frame *frame,
199 + struct htree_lock *lck);
200 static void dx_release(struct dx_frame *frames);
201 static int dx_make_map(struct inode *dir, struct ext4_dir_entry_2 *de,
202 unsigned blocksize, struct dx_hash_info *hinfo,
203 @@ -278,12 +287,13 @@
204 static int ext4_htree_next_block(struct inode *dir, __u32 hash,
205 struct dx_frame *frame,
206 struct dx_frame *frames,
207 - __u32 *start_hash);
208 + __u32 *start_hash, struct htree_lock *lck);
209 static struct buffer_head * ext4_dx_find_entry(struct inode *dir,
210 struct ext4_filename *fname,
211 - struct ext4_dir_entry_2 **res_dir);
212 + struct ext4_dir_entry_2 **res_dir, struct htree_lock *lck);
213 static int ext4_dx_add_entry(handle_t *handle, struct ext4_filename *fname,
214 - struct inode *dir, struct inode *inode);
215 + struct inode *dir, struct inode *inode,
216 + struct htree_lock *lck);
218 /* checksumming functions */
219 void ext4_initialize_dirent_tail(struct buffer_head *bh,
220 @@ -748,6 +758,227 @@
222 #endif /* DX_DEBUG */
224 +/* private data for htree_lock */
225 +struct ext4_dir_lock_data {
226 + unsigned ld_flags; /* bits-map for lock types */
227 + unsigned ld_count; /* # entries of the last DX block */
228 + struct dx_entry ld_at_entry; /* copy of leaf dx_entry */
229 + struct dx_entry *ld_at; /* position of leaf dx_entry */
232 +#define ext4_htree_lock_data(l) ((struct ext4_dir_lock_data *)(l)->lk_private)
233 +#define ext4_find_entry(dir, name, dirent, inline) \
234 + ext4_find_entry_locked(dir, name, dirent, inline, NULL)
235 +#define ext4_add_entry(handle, dentry, inode) \
236 + ext4_add_entry_locked(handle, dentry, inode, NULL)
238 +/* NB: ext4_lblk_t is 32 bits so we use high bits to identify invalid blk */
239 +#define EXT4_HTREE_NODE_CHANGED (0xcafeULL << 32)
241 +static void ext4_htree_event_cb(void *target, void *event)
243 + u64 *block = (u64 *)target;
245 + if (*block == dx_get_block((struct dx_entry *)event))
246 + *block = EXT4_HTREE_NODE_CHANGED;
249 +struct htree_lock_head *ext4_htree_lock_head_alloc(unsigned hbits)
251 + struct htree_lock_head *lhead;
253 + lhead = htree_lock_head_alloc(EXT4_LK_MAX, hbits, 0);
254 + if (lhead != NULL) {
255 + htree_lock_event_attach(lhead, EXT4_LK_SPIN, HTREE_EVENT_WR,
256 + ext4_htree_event_cb);
260 +EXPORT_SYMBOL(ext4_htree_lock_head_alloc);
262 +struct htree_lock *ext4_htree_lock_alloc(void)
264 + return htree_lock_alloc(EXT4_LK_MAX,
265 + sizeof(struct ext4_dir_lock_data));
267 +EXPORT_SYMBOL(ext4_htree_lock_alloc);
269 +static htree_lock_mode_t ext4_htree_mode(unsigned flags)
272 + default: /* 0 or unknown flags require EX lock */
273 + return HTREE_LOCK_EX;
274 + case EXT4_HLOCK_READDIR:
275 + return HTREE_LOCK_PR;
276 + case EXT4_HLOCK_LOOKUP:
277 + return HTREE_LOCK_CR;
278 + case EXT4_HLOCK_DEL:
279 + case EXT4_HLOCK_ADD:
280 + return HTREE_LOCK_CW;
284 +/* return PR for read-only operations, otherwise return EX */
285 +static inline htree_lock_mode_t ext4_htree_safe_mode(unsigned flags)
287 + int writer = (flags & EXT4_LB_DE) == EXT4_LB_DE;
289 + /* 0 requires EX lock */
290 + return (flags == 0 || writer) ? HTREE_LOCK_EX : HTREE_LOCK_PR;
293 +static int ext4_htree_safe_locked(struct htree_lock *lck)
297 + if (lck == NULL || lck->lk_mode == HTREE_LOCK_EX)
300 + writer = (ext4_htree_lock_data(lck)->ld_flags & EXT4_LB_DE) ==
302 + if (writer) /* all readers & writers are excluded? */
303 + return lck->lk_mode == HTREE_LOCK_EX;
305 + /* all writers are excluded? */
306 + return lck->lk_mode == HTREE_LOCK_PR ||
307 + lck->lk_mode == HTREE_LOCK_PW ||
308 + lck->lk_mode == HTREE_LOCK_EX;
311 +/* relock htree_lock with EX mode if it's change operation, otherwise
312 + * relock it with PR mode. It's noop if PDO is disabled. */
313 +static void ext4_htree_safe_relock(struct htree_lock *lck)
315 + if (!ext4_htree_safe_locked(lck)) {
316 + unsigned flags = ext4_htree_lock_data(lck)->ld_flags;
318 + htree_change_lock(lck, ext4_htree_safe_mode(flags));
322 +void ext4_htree_lock(struct htree_lock *lck, struct htree_lock_head *lhead,
323 + struct inode *dir, unsigned flags)
325 + htree_lock_mode_t mode = is_dx(dir) ? ext4_htree_mode(flags) :
326 + ext4_htree_safe_mode(flags);
328 + ext4_htree_lock_data(lck)->ld_flags = flags;
329 + htree_lock(lck, lhead, mode);
331 + ext4_htree_safe_relock(lck); /* make sure it's safe locked */
333 +EXPORT_SYMBOL(ext4_htree_lock);
335 +static int ext4_htree_node_lock(struct htree_lock *lck, struct dx_entry *at,
336 + unsigned lmask, int wait, void *ev)
338 + u32 key = (at == NULL) ? 0 : dx_get_block(at);
341 + /* NOOP if htree is well protected or caller doesn't require the lock */
342 + if (ext4_htree_safe_locked(lck) ||
343 + !(ext4_htree_lock_data(lck)->ld_flags & lmask))
346 + mode = (ext4_htree_lock_data(lck)->ld_flags & lmask) == lmask ?
347 + HTREE_LOCK_PW : HTREE_LOCK_PR;
349 + if (htree_node_lock_try(lck, mode, key, ffz(~lmask), wait, ev))
351 + if (!(lmask & EXT4_LB_SPIN)) /* not a spinlock */
353 + cpu_relax(); /* spin until granted */
357 +static int ext4_htree_node_locked(struct htree_lock *lck, unsigned lmask)
359 + return ext4_htree_safe_locked(lck) ||
360 + htree_node_is_granted(lck, ffz(~lmask));
363 +static void ext4_htree_node_unlock(struct htree_lock *lck,
364 + unsigned lmask, void *buf)
366 + /* NB: it's safe to call multiple times, even if it's not locked */
367 + if (!ext4_htree_safe_locked(lck) &&
368 + htree_node_is_granted(lck, ffz(~lmask)))
369 + htree_node_unlock(lck, ffz(~lmask), buf);
372 +#define ext4_htree_dx_lock(lck, key) \
373 + ext4_htree_node_lock(lck, key, EXT4_LB_DX, 1, NULL)
374 +#define ext4_htree_dx_lock_try(lck, key) \
375 + ext4_htree_node_lock(lck, key, EXT4_LB_DX, 0, NULL)
376 +#define ext4_htree_dx_unlock(lck) \
377 + ext4_htree_node_unlock(lck, EXT4_LB_DX, NULL)
378 +#define ext4_htree_dx_locked(lck) \
379 + ext4_htree_node_locked(lck, EXT4_LB_DX)
381 +static void ext4_htree_dx_need_lock(struct htree_lock *lck)
383 + struct ext4_dir_lock_data *ld;
385 + if (ext4_htree_safe_locked(lck))
388 + ld = ext4_htree_lock_data(lck);
389 + switch (ld->ld_flags) {
392 + case EXT4_HLOCK_LOOKUP:
393 + ld->ld_flags = EXT4_HLOCK_LOOKUP_SAFE;
395 + case EXT4_HLOCK_DEL:
396 + ld->ld_flags = EXT4_HLOCK_DEL_SAFE;
398 + case EXT4_HLOCK_ADD:
399 + ld->ld_flags = EXT4_HLOCK_SPLIT;
404 +#define ext4_htree_de_lock(lck, key) \
405 + ext4_htree_node_lock(lck, key, EXT4_LB_DE, 1, NULL)
406 +#define ext4_htree_de_unlock(lck) \
407 + ext4_htree_node_unlock(lck, EXT4_LB_DE, NULL)
409 +#define ext4_htree_spin_lock(lck, key, event) \
410 + ext4_htree_node_lock(lck, key, EXT4_LB_SPIN, 0, event)
411 +#define ext4_htree_spin_unlock(lck) \
412 + ext4_htree_node_unlock(lck, EXT4_LB_SPIN, NULL)
413 +#define ext4_htree_spin_unlock_listen(lck, p) \
414 + ext4_htree_node_unlock(lck, EXT4_LB_SPIN, p)
416 +static void ext4_htree_spin_stop_listen(struct htree_lock *lck)
418 + if (!ext4_htree_safe_locked(lck) &&
419 + htree_node_is_listening(lck, ffz(~EXT4_LB_SPIN)))
420 + htree_node_stop_listen(lck, ffz(~EXT4_LB_SPIN));
424 + DX_HASH_COL_IGNORE, /* ignore collision while probing frames */
425 + DX_HASH_COL_YES, /* there is collision and it does matter */
426 + DX_HASH_COL_NO, /* there is no collision */
429 +static int dx_probe_hash_collision(struct htree_lock *lck,
430 + struct dx_entry *entries,
431 + struct dx_entry *at, u32 hash)
433 + if (!(lck && ext4_htree_lock_data(lck)->ld_flags & EXT4_LB_EXACT)) {
434 + return DX_HASH_COL_IGNORE; /* don't care about collision */
436 + } else if (at == entries + dx_get_count(entries) - 1) {
437 + return DX_HASH_COL_IGNORE; /* not in any leaf of this DX */
439 + } else { /* hash collision? */
440 + return ((dx_get_hash(at + 1) & ~1) == hash) ?
441 + DX_HASH_COL_YES : DX_HASH_COL_NO;
446 * Probe for a directory leaf block to search.
448 @@ -759,10 +990,11 @@
450 static struct dx_frame *
451 dx_probe(struct ext4_filename *fname, struct inode *dir,
452 - struct dx_hash_info *hinfo, struct dx_frame *frame_in)
453 + struct dx_hash_info *hinfo, struct dx_frame *frame_in,
454 + struct htree_lock *lck)
456 unsigned count, indirect;
457 - struct dx_entry *at, *entries, *p, *q, *m;
458 + struct dx_entry *at, *entries, *p, *q, *m, *dx = NULL;
459 struct dx_root_info *info;
460 struct dx_frame *frame = frame_in;
461 struct dx_frame *ret_err = ERR_PTR(ERR_BAD_DX_DIR);
462 @@ -824,8 +1056,15 @@
464 dxtrace(printk("Look up %x", hash));
466 + if (indirect == 0) { /* the last index level */
467 + /* NB: ext4_htree_dx_lock() could be noop if
468 + * DX-lock flag is not set for current operation */
469 + ext4_htree_dx_lock(lck, dx);
470 + ext4_htree_spin_lock(lck, dx, NULL);
472 count = dx_get_count(entries);
473 - if (!count || count > dx_get_limit(entries)) {
474 + if (count == 0 || count > dx_get_limit(entries)) {
475 + ext4_htree_spin_unlock(lck); /* release spin */
476 ext4_warning_inode(dir,
477 "dx entry: count %u beyond limit %u",
478 count, dx_get_limit(entries));
479 @@ -864,8 +1103,70 @@
481 frame->entries = entries;
485 + if (indirect == 0) { /* the last index level */
486 + struct ext4_dir_lock_data *ld;
489 + /* By default we only lock DE-block, however, we will
490 + * also lock the last level DX-block if:
491 + * a) there is hash collision
492 + * we will set DX-lock flag (a few lines below)
493 + * and redo to lock DX-block
494 + * see detail in dx_probe_hash_collision()
495 + * b) it's a retry from splitting
496 + * we need to lock the last level DX-block so nobody
497 + * else can split any leaf blocks under the same
498 + * DX-block, see detail in ext4_dx_add_entry()
500 + if (ext4_htree_dx_locked(lck)) {
501 + /* DX-block is locked, just lock DE-block
503 + ext4_htree_spin_unlock(lck);
504 + if (!ext4_htree_safe_locked(lck))
505 + ext4_htree_de_lock(lck, frame->at);
508 + /* it's pdirop and no DX lock */
509 + if (dx_probe_hash_collision(lck, entries, at, hash) ==
511 + /* found hash collision, set DX-lock flag
512 + * and retry to obtain DX-lock */
513 + ext4_htree_spin_unlock(lck);
514 + ext4_htree_dx_need_lock(lck);
517 + ld = ext4_htree_lock_data(lck);
518 + /* because I don't lock DX, @at can't be trusted
519 + * after I release spinlock so I have to save it */
521 + ld->ld_at_entry = *at;
522 + ld->ld_count = dx_get_count(entries);
524 + frame->at = &ld->ld_at_entry;
525 + myblock = dx_get_block(at);
527 + /* NB: ordering locking */
528 + ext4_htree_spin_unlock_listen(lck, &myblock);
529 + /* other thread can split this DE-block because:
530 + * a) I don't have lock for the DE-block yet
531 + * b) I released spinlock on DX-block
532 + * if it happened I can detect it by listening
533 + * splitting event on this DE-block */
534 + ext4_htree_de_lock(lck, frame->at);
535 + ext4_htree_spin_stop_listen(lck);
537 + if (myblock == EXT4_HTREE_NODE_CHANGED) {
538 + /* someone split this DE-block before
539 + * I locked it, I need to retry and lock
540 + * valid DE-block */
541 + ext4_htree_de_unlock(lck);
549 frame->bh = ext4_read_dirblock(dir, dx_get_block(at), INDEX);
550 if (IS_ERR(frame->bh)) {
552 static int ext4_htree_next_block(struct inode *dir, __u32 hash,
553 struct dx_frame *frame,
554 struct dx_frame *frames,
556 + __u32 *start_hash, struct htree_lock *lck)
559 struct buffer_head *bh;
560 @@ -949,12 +1250,22 @@
561 * this loop, num_frames indicates the number of interior
562 * nodes need to be read.
564 + ext4_htree_de_unlock(lck);
566 + if (num_frames > 0 || ext4_htree_dx_locked(lck)) {
567 + /* num_frames > 0 :
569 + * ext4_htree_dx_locked:
570 + * frame->at is reliable pointer returned by dx_probe,
571 + * otherwise dx_probe already knew no collision */
572 if (++(p->at) < p->entries + dx_get_count(p->entries))
578 + if (num_frames == 1)
579 + ext4_htree_dx_unlock(lck);
583 @@ -977,6 +1288,13 @@
584 * block so no check is necessary
586 while (num_frames--) {
587 + if (num_frames == 0) {
588 + /* it's not always necessary, we just don't want to
589 + * detect hash collision again */
590 + ext4_htree_dx_need_lock(lck);
591 + ext4_htree_dx_lock(lck, p->at);
594 bh = ext4_read_dirblock(dir, dx_get_block(p->at), INDEX);
599 p->at = p->entries = ((struct dx_node *) bh->b_data)->entries;
601 + ext4_htree_de_lock(lck, p->at);
605 @@ -1132,10 +1451,10 @@
607 hinfo.hash = start_hash;
608 hinfo.minor_hash = 0;
609 - frame = dx_probe(NULL, dir, &hinfo, frames);
610 + /* assume it's PR locked */
611 + frame = dx_probe(NULL, dir, &hinfo, frames, NULL);
613 return PTR_ERR(frame);
615 /* Add '.' and '..' from the htree header */
616 if (!start_hash && !start_minor_hash) {
617 de = (struct ext4_dir_entry_2 *) frames[0].bh->b_data;
618 @@ -1175,7 +1494,7 @@
621 ret = ext4_htree_next_block(dir, HASH_NB_ALWAYS,
622 - frame, frames, &hashval);
623 + frame, frames, &hashval, NULL);
624 *next_hash = hashval;
627 @@ -1451,7 +1770,7 @@
628 static struct buffer_head *__ext4_find_entry(struct inode *dir,
629 struct ext4_filename *fname,
630 struct ext4_dir_entry_2 **res_dir,
632 + int *inlined, struct htree_lock *lck)
634 struct super_block *sb;
635 struct buffer_head *bh_use[NAMEI_RA_SIZE];
636 @@ -1493,7 +1812,7 @@
640 - ret = ext4_dx_find_entry(dir, fname, res_dir);
641 + ret = ext4_dx_find_entry(dir, fname, res_dir, lck);
643 * On success, or if the error was file not found,
644 * return. Otherwise, fall back to doing a search the
645 @@ -1503,6 +1822,7 @@
646 goto cleanup_and_exit;
647 dxtrace(printk(KERN_DEBUG "ext4_find_entry: dx failed, "
649 + ext4_htree_safe_relock(lck);
652 nblocks = dir->i_size >> EXT4_BLOCK_SIZE_BITS(sb);
653 @@ -1591,10 +1911,10 @@
657 -static struct buffer_head *ext4_find_entry(struct inode *dir,
658 +struct buffer_head *ext4_find_entry_locked(struct inode *dir,
659 const struct qstr *d_name,
660 struct ext4_dir_entry_2 **res_dir,
662 + int *inlined, struct htree_lock *lck)
665 struct ext4_filename fname;
666 @@ -1606,12 +1926,14 @@
670 - bh = __ext4_find_entry(dir, &fname, res_dir, inlined);
671 + bh = __ext4_find_entry(dir, &fname, res_dir, inlined, lck);
673 ext4_fname_free_filename(&fname);
677 +EXPORT_SYMBOL(ext4_find_entry_locked);
679 static struct buffer_head *ext4_lookup_entry(struct inode *dir,
680 struct dentry *dentry,
681 struct ext4_dir_entry_2 **res_dir)
682 @@ -1626,7 +1948,7 @@
686 - bh = __ext4_find_entry(dir, &fname, res_dir, NULL);
687 + bh = __ext4_find_entry(dir, &fname, res_dir, NULL, NULL);
689 ext4_fname_free_filename(&fname);
691 @@ -1634,7 +1956,8 @@
693 static struct buffer_head * ext4_dx_find_entry(struct inode *dir,
694 struct ext4_filename *fname,
695 - struct ext4_dir_entry_2 **res_dir)
696 + struct ext4_dir_entry_2 **res_dir,
697 + struct htree_lock *lck)
699 struct super_block * sb = dir->i_sb;
700 struct dx_frame frames[EXT4_HTREE_LEVEL], *frame;
701 @@ -1645,7 +1968,7 @@
702 #ifdef CONFIG_FS_ENCRYPTION
705 - frame = dx_probe(fname, dir, NULL, frames);
706 + frame = dx_probe(fname, dir, NULL, frames, lck);
708 return (struct buffer_head *) frame;
710 @@ -1667,7 +1990,7 @@
712 /* Check to see if we should continue to search */
713 retval = ext4_htree_next_block(dir, fname->hinfo.hash, frame,
715 + frames, NULL, lck);
717 ext4_warning_inode(dir,
718 "error %d reading directory index block",
719 @@ -1847,8 +2170,9 @@
720 * Returns pointer to de in block into which the new entry will be inserted.
722 static struct ext4_dir_entry_2 *do_split(handle_t *handle, struct inode *dir,
723 - struct buffer_head **bh,struct dx_frame *frame,
724 - struct dx_hash_info *hinfo)
725 + struct buffer_head **bh, struct dx_frame *frames,
726 + struct dx_frame *frame, struct dx_hash_info *hinfo,
727 + struct htree_lock *lck)
729 unsigned blocksize = dir->i_sb->s_blocksize;
730 unsigned count, continued;
731 @@ -1909,8 +2233,14 @@
732 hash2, split, count-split));
734 /* Fancy dance to stay within two buffers */
735 - de2 = dx_move_dirents(data1, data2, map + split, count - split,
737 + if (hinfo->hash < hash2) {
738 + de2 = dx_move_dirents(data1, data2, map + split,
739 + count - split, blocksize);
741 + /* make sure we will add entry to the same block which
742 + * we have already locked */
743 + de2 = dx_move_dirents(data1, data2, map, split, blocksize);
745 de = dx_pack_dirents(data1, blocksize);
746 de->rec_len = ext4_rec_len_to_disk(data1 + (blocksize - csum_size) -
748 @@ -1928,12 +2258,21 @@
749 dxtrace(dx_show_leaf(dir, hinfo, (struct ext4_dir_entry_2 *) data2,
752 - /* Which block gets the new entry? */
753 - if (hinfo->hash >= hash2) {
757 + ext4_htree_spin_lock(lck, frame > frames ? (frame - 1)->at : NULL,
758 + frame->at); /* notify block is being split */
759 + if (hinfo->hash < hash2) {
760 dx_insert_block(frame, hash2 + continued, newblock);
763 + /* switch block number */
764 + dx_insert_block(frame, hash2 + continued,
765 + dx_get_block(frame->at));
766 + dx_set_block(frame->at, newblock);
769 + ext4_htree_spin_unlock(lck);
770 + ext4_htree_dx_unlock(lck);
772 err = ext4_handle_dirty_dirblock(handle, dir, bh2);
775 @@ -2203,7 +2542,7 @@
779 - de = do_split(handle,dir, &bh2, frame, &fname->hinfo);
780 + de = do_split(handle, dir, &bh2, frames, frame, &fname->hinfo, NULL);
782 retval = PTR_ERR(de);
784 @@ -2313,8 +2652,8 @@
785 * may not sleep between calling this and putting something into
786 * the entry, as someone else might have used it while you slept.
788 -static int ext4_add_entry(handle_t *handle, struct dentry *dentry,
789 - struct inode *inode)
790 +int ext4_add_entry_locked(handle_t *handle, struct dentry *dentry,
791 + struct inode *inode, struct htree_lock *lck)
793 struct inode *dir = d_inode(dentry->d_parent);
794 struct buffer_head *bh = NULL;
795 @@ -2362,9 +2701,10 @@
796 if (dentry->d_name.len == 2 &&
797 memcmp(dentry->d_name.name, "..", 2) == 0)
798 return ext4_update_dotdot(handle, dentry, inode);
799 - retval = ext4_dx_add_entry(handle, &fname, dir, inode);
800 + retval = ext4_dx_add_entry(handle, &fname, dir, inode, lck);
801 if (!retval || (retval != ERR_BAD_DX_DIR))
803 + ext4_htree_safe_relock(lck);
804 /* Can we just ignore htree data? */
805 if (ext4_has_metadata_csum(sb)) {
806 EXT4_ERROR_INODE(dir,
807 @@ -2425,12 +2765,14 @@
808 ext4_set_inode_state(inode, EXT4_STATE_NEWENTRY);
811 +EXPORT_SYMBOL(ext4_add_entry_locked);
814 * Returns 0 for success, or a negative error value
816 static int ext4_dx_add_entry(handle_t *handle, struct ext4_filename *fname,
817 - struct inode *dir, struct inode *inode)
818 + struct inode *dir, struct inode *inode,
819 + struct htree_lock *lck)
821 struct dx_frame frames[EXT4_HTREE_LEVEL], *frame;
822 struct dx_entry *entries, *at;
823 @@ -2442,7 +2784,7 @@
827 - frame = dx_probe(fname, dir, NULL, frames);
828 + frame = dx_probe(fname, dir, NULL, frames, lck);
830 return PTR_ERR(frame);
831 entries = frame->entries;
832 @@ -2477,6 +2819,12 @@
833 struct dx_node *node2;
834 struct buffer_head *bh2;
836 + if (!ext4_htree_safe_locked(lck)) { /* retry with EX lock */
837 + ext4_htree_safe_relock(lck);
842 while (frame > frames) {
843 if (dx_get_count((frame - 1)->entries) <
844 dx_get_limit((frame - 1)->entries)) {
845 @@ -2579,8 +2927,32 @@
849 + } else if (!ext4_htree_dx_locked(lck)) {
850 + struct ext4_dir_lock_data *ld = ext4_htree_lock_data(lck);
852 + /* not well protected, require DX lock */
853 + ext4_htree_dx_need_lock(lck);
854 + at = frame > frames ? (frame - 1)->at : NULL;
856 + /* NB: no risk of deadlock because it's just a try.
858 + * NB: we check ld_count for twice, the first time before
859 + * having DX lock, the second time after holding DX lock.
861 + * NB: We never free blocks for directory so far, which
862 + * means value returned by dx_get_count() should equal to
863 + * ld->ld_count if nobody split any DE-block under @at,
864 + * and ld->ld_at still points to valid dx_entry. */
865 + if ((ld->ld_count != dx_get_count(entries)) ||
866 + !ext4_htree_dx_lock_try(lck, at) ||
867 + (ld->ld_count != dx_get_count(entries))) {
871 + /* OK, I've got DX lock and nothing changed */
872 + frame->at = ld->ld_at;
874 - de = do_split(handle, dir, &bh, frame, &fname->hinfo);
875 + de = do_split(handle, dir, &bh, frames, frame, &fname->hinfo, lck);
879 @@ -2591,6 +2963,8 @@
881 ext4_std_error(dir->i_sb, err); /* this is a no-op if err == 0 */
883 + ext4_htree_dx_unlock(lck);
884 + ext4_htree_de_unlock(lck);
887 /* @restart is true means htree-path has been changed, we need to
888 diff -wur a/fs/ext4/super.c b/fs/ext4/super.c
889 --- a/fs/ext4/super.c 2020-08-30 12:06:02.746523498 -0600
890 +++ b/fs/ext4/super.c 2020-08-30 12:07:32.345927785 -0600
891 @@ -1087,6 +1087,7 @@
893 inode_set_iversion(&ei->vfs_inode, 1);
894 spin_lock_init(&ei->i_raw_lock);
895 + sema_init(&ei->i_append_sem, 1);
896 INIT_LIST_HEAD(&ei->i_prealloc_list);
897 spin_lock_init(&ei->i_prealloc_lock);
898 ext4_es_init_tree(&ei->i_es_tree);