1 LU-50 ldiskfs: pdirops patch for ldiskfs
3 Single directory performance is critical for HPC workloads. In a
4 typical use case an application creates a separate output file for
5 each node and task in a job. As nodes and tasks increase, hundreds
6 of thousands of files may be created in a single directory within
7 a short window of time.
8 Today, both filename lookup and file system modifying operations
9 (such as create and unlink) are protected with a single lock for
10 an entire ldiskfs directory. The PDO project will remove this
11 bottleneck by introducing a parallel locking mechanism for entire
12 ldiskfs directories. This work will enable multiple application
13 threads to simultaneously lookup, create and unlink in parallel.
16 - pdirops support for ldiskfs
17 - N-level htree directory
18 - integrate with osd-ldiskfs
20 Signed-off-by: Liang Zhen <liang@whamcloud.com>
21 Change-Id: I269c0e3112e68f3acd79e860dab052a68c7d7aaa
22 Reviewed-on: http://review.whamcloud.com/375
23 Reviewed-by: Andreas Dilger <adilger@whamcloud.com>
25 fs/ext4/Makefile | 1 +
26 fs/ext4/ext4.h | 78 ++++++++
27 fs/ext4/namei.c | 465 ++++++++++++++++++++++++++++++++++++++++++-----
29 4 files changed, 504 insertions(+), 41 deletions(-)
31 diff --git a/fs/ext4/Makefile b/fs/ext4/Makefile
32 index 49e7af6..f7ced03 100644
33 --- a/fs/ext4/Makefile
34 +++ b/fs/ext4/Makefile
35 @@ -7,6 +7,7 @@ obj-$(CONFIG_EXT4_FS) += ext4.o
37 ext4-y := balloc.o bitmap.o block_validity.o dir.o ext4_jbd2.o extents.o \
38 extents_status.o file.o fsmap.o fsync.o hash.o ialloc.o \
40 indirect.o inline.o inode.o ioctl.o mballoc.o migrate.o \
41 mmp.o move_extent.o namei.o page-io.o readpage.o resize.o \
42 super.o symlink.o sysfs.o xattr.o xattr_hurd.o xattr_trusted.o \
43 diff --git a/fs/ext4/ext4.h b/fs/ext4/ext4.h
44 index 54734be..fa5d5d6 100644
48 #include <linux/mutex.h>
49 #include <linux/timer.h>
50 #include <linux/wait.h>
51 +#include <linux/htree_lock.h>
52 #include <linux/sched/signal.h>
53 #include <linux/blockgroup_lock.h>
54 #include <linux/percpu_counter.h>
55 @@ -1020,6 +1021,9 @@ struct ext4_inode_info {
57 ext4_fsblk_t i_file_acl;
59 + /* following fields for parallel directory operations -bzzz */
60 + struct semaphore i_append_sem;
63 * i_block_group is the number of the block group which contains
64 * this file's inode. Constant across the lifetime of the inode,
65 @@ -2509,6 +2513,72 @@ struct dx_hash_info
67 #define HASH_NB_ALWAYS 1
69 +/* assume name-hash is protected by upper layer */
70 +#define EXT4_HTREE_LOCK_HASH 0
72 +enum ext4_pdo_lk_types {
73 +#if EXT4_HTREE_LOCK_HASH
76 + EXT4_LK_DX, /* index block */
77 + EXT4_LK_DE, /* directory entry block */
78 + EXT4_LK_SPIN, /* spinlock */
83 +#define EXT4_LB_RO(b) (1 << (b))
84 +/* read + write, high bits for writer */
85 +#define EXT4_LB_RW(b) ((1 << (b)) | (1 << (EXT4_LK_MAX + (b))))
87 +enum ext4_pdo_lock_bits {
89 + EXT4_LB_DX_RO = EXT4_LB_RO(EXT4_LK_DX),
90 + EXT4_LB_DX = EXT4_LB_RW(EXT4_LK_DX),
92 + EXT4_LB_DE_RO = EXT4_LB_RO(EXT4_LK_DE),
93 + EXT4_LB_DE = EXT4_LB_RW(EXT4_LK_DE),
94 + /* DX spinlock bits */
95 + EXT4_LB_SPIN_RO = EXT4_LB_RO(EXT4_LK_SPIN),
96 + EXT4_LB_SPIN = EXT4_LB_RW(EXT4_LK_SPIN),
97 + /* accurate searching */
98 + EXT4_LB_EXACT = EXT4_LB_RO(EXT4_LK_MAX << 1),
101 +enum ext4_pdo_lock_opc {
103 + EXT4_HLOCK_READDIR = (EXT4_LB_DE_RO | EXT4_LB_DX_RO),
104 + EXT4_HLOCK_LOOKUP = (EXT4_LB_DE_RO | EXT4_LB_SPIN_RO |
106 + EXT4_HLOCK_DEL = (EXT4_LB_DE | EXT4_LB_SPIN_RO |
108 + EXT4_HLOCK_ADD = (EXT4_LB_DE | EXT4_LB_SPIN_RO),
111 + EXT4_HLOCK_LOOKUP_SAFE = (EXT4_LB_DE_RO | EXT4_LB_DX_RO |
113 + EXT4_HLOCK_DEL_SAFE = (EXT4_LB_DE | EXT4_LB_DX_RO | EXT4_LB_EXACT),
114 + EXT4_HLOCK_SPLIT = (EXT4_LB_DE | EXT4_LB_DX | EXT4_LB_SPIN),
117 +extern struct htree_lock_head *ext4_htree_lock_head_alloc(unsigned hbits);
118 +#define ext4_htree_lock_head_free(lhead) htree_lock_head_free(lhead)
120 +extern struct htree_lock *ext4_htree_lock_alloc(void);
121 +#define ext4_htree_lock_free(lck) htree_lock_free(lck)
123 +extern void ext4_htree_lock(struct htree_lock *lck,
124 + struct htree_lock_head *lhead,
125 + struct inode *dir, unsigned flags);
126 +#define ext4_htree_unlock(lck) htree_unlock(lck)
128 +extern struct buffer_head *ext4_find_entry_locked(struct inode *dir,
129 + const struct qstr *d_name,
130 + struct ext4_dir_entry_2 **res_dir,
131 + int *inlined, struct htree_lock *lck);
132 +extern int ext4_add_entry_locked(handle_t *handle, struct dentry *dentry,
133 + struct inode *inode, struct htree_lock *lck);
135 struct ext4_filename {
136 const struct qstr *usr_fname;
137 struct fscrypt_str disk_name;
138 @@ -2887,12 +2957,20 @@ void ext4_insert_dentry(struct inode *dir, struct inode *inode,
140 static inline void ext4_update_dx_flag(struct inode *inode)
142 + /* Disable it for ldiskfs, because going from a DX directory to
143 + * a non-DX directory while it is in use will completely break
144 + * the htree-locking.
145 + * If we really want to support this operation in the future,
146 + * we need to exclusively lock the directory at here which will
147 + * increase complexity of code */
149 if (!ext4_has_feature_dir_index(inode->i_sb) &&
150 ext4_test_inode_flag(inode, EXT4_INODE_INDEX)) {
151 /* ext4_iget() should have caught this... */
152 WARN_ON_ONCE(ext4_has_feature_metadata_csum(inode->i_sb));
153 ext4_clear_inode_flag(inode, EXT4_INODE_INDEX);
157 static const unsigned char ext4_filetype_table[] = {
158 DT_UNKNOWN, DT_REG, DT_DIR, DT_CHR, DT_BLK, DT_FIFO, DT_SOCK, DT_LNK
159 diff --git a/fs/ext4/namei.c b/fs/ext4/namei.c
160 index 51c950b..1b8c80e 100644
161 --- a/fs/ext4/namei.c
162 +++ b/fs/ext4/namei.c
163 @@ -56,6 +56,7 @@ struct buffer_head *ext4_append(handle_t *handle,
165 struct ext4_map_blocks map;
166 struct buffer_head *bh;
167 + struct ext4_inode_info *ei = EXT4_I(inode);
170 if (unlikely(EXT4_SB(inode->i_sb)->s_max_dir_size_kb &&
171 @@ -63,6 +64,10 @@ struct buffer_head *ext4_append(handle_t *handle,
172 EXT4_SB(inode->i_sb)->s_max_dir_size_kb)))
173 return ERR_PTR(-ENOSPC);
175 + /* with parallel dir operations all appends
176 + * have to be serialized -bzzz */
177 + down(&ei->i_append_sem);
179 *block = inode->i_size >> inode->i_sb->s_blocksize_bits;
182 @@ -73,21 +78,27 @@ struct buffer_head *ext4_append(handle_t *handle,
185 err = ext4_map_blocks(NULL, inode, &map, 0);
188 + up(&ei->i_append_sem);
192 + up(&ei->i_append_sem);
193 EXT4_ERROR_INODE(inode, "Logical block already allocated");
194 return ERR_PTR(-EFSCORRUPTED);
197 bh = ext4_bread(handle, inode, *block, EXT4_GET_BLOCKS_CREATE);
200 + up(&ei->i_append_sem);
203 inode->i_size += inode->i_sb->s_blocksize;
204 EXT4_I(inode)->i_disksize = inode->i_size;
205 BUFFER_TRACE(bh, "get_write_access");
206 err = ext4_journal_get_write_access(handle, inode->i_sb, bh,
208 + up(&ei->i_append_sem);
211 ext4_std_error(inode->i_sb, err);
212 @@ -291,7 +302,8 @@ static unsigned dx_node_limit(struct inode *dir);
213 static struct dx_frame *dx_probe(struct ext4_filename *fname,
215 struct dx_hash_info *hinfo,
216 - struct dx_frame *frame);
217 + struct dx_frame *frame,
218 + struct htree_lock *lck);
219 static void dx_release(struct dx_frame *frames, struct inode *dir);
220 static int dx_make_map(struct inode *dir, struct buffer_head *bh,
221 struct dx_hash_info *hinfo,
222 @@ -307,12 +319,13 @@ static void dx_insert_block(struct dx_frame *frame,
223 static int ext4_htree_next_block(struct inode *dir, __u32 hash,
224 struct dx_frame *frame,
225 struct dx_frame *frames,
226 - __u32 *start_hash);
227 + __u32 *start_hash, struct htree_lock *lck);
228 static struct buffer_head * ext4_dx_find_entry(struct inode *dir,
229 struct ext4_filename *fname,
230 - struct ext4_dir_entry_2 **res_dir);
231 + struct ext4_dir_entry_2 **res_dir, struct htree_lock *lck);
232 static int ext4_dx_add_entry(handle_t *handle, struct ext4_filename *fname,
233 - struct inode *dir, struct inode *inode);
234 + struct inode *dir, struct inode *inode,
235 + struct htree_lock *lck);
237 /* checksumming functions */
238 void ext4_initialize_dirent_tail(struct buffer_head *bh,
239 @@ -797,6 +810,227 @@ static inline void htree_rep_invariant_check(struct dx_entry *at,
241 #endif /* DX_DEBUG */
243 +/* private data for htree_lock */
244 +struct ext4_dir_lock_data {
245 + unsigned ld_flags; /* bits-map for lock types */
246 + unsigned ld_count; /* # entries of the last DX block */
247 + struct dx_entry ld_at_entry; /* copy of leaf dx_entry */
248 + struct dx_entry *ld_at; /* position of leaf dx_entry */
251 +#define ext4_htree_lock_data(l) ((struct ext4_dir_lock_data *)(l)->lk_private)
252 +#define ext4_find_entry(dir, name, dirent, inline) \
253 + ext4_find_entry_locked(dir, name, dirent, inline, NULL)
254 +#define ext4_add_entry(handle, dentry, inode) \
255 + ext4_add_entry_locked(handle, dentry, inode, NULL)
257 +/* NB: ext4_lblk_t is 32 bits so we use high bits to identify invalid blk */
258 +#define EXT4_HTREE_NODE_CHANGED (0xcafeULL << 32)
260 +static void ext4_htree_event_cb(void *target, void *event)
262 + u64 *block = (u64 *)target;
264 + if (*block == dx_get_block((struct dx_entry *)event))
265 + *block = EXT4_HTREE_NODE_CHANGED;
268 +struct htree_lock_head *ext4_htree_lock_head_alloc(unsigned hbits)
270 + struct htree_lock_head *lhead;
272 + lhead = htree_lock_head_alloc(EXT4_LK_MAX, hbits, 0);
273 + if (lhead != NULL) {
274 + htree_lock_event_attach(lhead, EXT4_LK_SPIN, HTREE_EVENT_WR,
275 + ext4_htree_event_cb);
279 +EXPORT_SYMBOL(ext4_htree_lock_head_alloc);
281 +struct htree_lock *ext4_htree_lock_alloc(void)
283 + return htree_lock_alloc(EXT4_LK_MAX,
284 + sizeof(struct ext4_dir_lock_data));
286 +EXPORT_SYMBOL(ext4_htree_lock_alloc);
288 +static htree_lock_mode_t ext4_htree_mode(unsigned flags)
291 + default: /* 0 or unknown flags require EX lock */
292 + return HTREE_LOCK_EX;
293 + case EXT4_HLOCK_READDIR:
294 + return HTREE_LOCK_PR;
295 + case EXT4_HLOCK_LOOKUP:
296 + return HTREE_LOCK_CR;
297 + case EXT4_HLOCK_DEL:
298 + case EXT4_HLOCK_ADD:
299 + return HTREE_LOCK_CW;
303 +/* return PR for read-only operations, otherwise return EX */
304 +static inline htree_lock_mode_t ext4_htree_safe_mode(unsigned flags)
306 + int writer = (flags & EXT4_LB_DE) == EXT4_LB_DE;
308 + /* 0 requires EX lock */
309 + return (flags == 0 || writer) ? HTREE_LOCK_EX : HTREE_LOCK_PR;
312 +static int ext4_htree_safe_locked(struct htree_lock *lck)
316 + if (lck == NULL || lck->lk_mode == HTREE_LOCK_EX)
319 + writer = (ext4_htree_lock_data(lck)->ld_flags & EXT4_LB_DE) ==
321 + if (writer) /* all readers & writers are excluded? */
322 + return lck->lk_mode == HTREE_LOCK_EX;
324 + /* all writers are excluded? */
325 + return lck->lk_mode == HTREE_LOCK_PR ||
326 + lck->lk_mode == HTREE_LOCK_PW ||
327 + lck->lk_mode == HTREE_LOCK_EX;
330 +/* relock htree_lock with EX mode if it's change operation, otherwise
331 + * relock it with PR mode. It's noop if PDO is disabled. */
332 +static void ext4_htree_safe_relock(struct htree_lock *lck)
334 + if (!ext4_htree_safe_locked(lck)) {
335 + unsigned flags = ext4_htree_lock_data(lck)->ld_flags;
337 + htree_change_lock(lck, ext4_htree_safe_mode(flags));
341 +void ext4_htree_lock(struct htree_lock *lck, struct htree_lock_head *lhead,
342 + struct inode *dir, unsigned flags)
344 + htree_lock_mode_t mode = is_dx(dir) ? ext4_htree_mode(flags) :
345 + ext4_htree_safe_mode(flags);
347 + ext4_htree_lock_data(lck)->ld_flags = flags;
348 + htree_lock(lck, lhead, mode);
350 + ext4_htree_safe_relock(lck); /* make sure it's safe locked */
352 +EXPORT_SYMBOL(ext4_htree_lock);
354 +static int ext4_htree_node_lock(struct htree_lock *lck, struct dx_entry *at,
355 + unsigned lmask, int wait, void *ev)
357 + u32 key = (at == NULL) ? 0 : dx_get_block(at);
360 + /* NOOP if htree is well protected or caller doesn't require the lock */
361 + if (ext4_htree_safe_locked(lck) ||
362 + !(ext4_htree_lock_data(lck)->ld_flags & lmask))
365 + mode = (ext4_htree_lock_data(lck)->ld_flags & lmask) == lmask ?
366 + HTREE_LOCK_PW : HTREE_LOCK_PR;
368 + if (htree_node_lock_try(lck, mode, key, ffz(~lmask), wait, ev))
370 + if (!(lmask & EXT4_LB_SPIN)) /* not a spinlock */
372 + cpu_relax(); /* spin until granted */
376 +static int ext4_htree_node_locked(struct htree_lock *lck, unsigned lmask)
378 + return ext4_htree_safe_locked(lck) ||
379 + htree_node_is_granted(lck, ffz(~lmask));
382 +static void ext4_htree_node_unlock(struct htree_lock *lck,
383 + unsigned lmask, void *buf)
385 + /* NB: it's safe to call multiple times, even if it's not locked */
386 + if (!ext4_htree_safe_locked(lck) &&
387 + htree_node_is_granted(lck, ffz(~lmask)))
388 + htree_node_unlock(lck, ffz(~lmask), buf);
391 +#define ext4_htree_dx_lock(lck, key) \
392 + ext4_htree_node_lock(lck, key, EXT4_LB_DX, 1, NULL)
393 +#define ext4_htree_dx_lock_try(lck, key) \
394 + ext4_htree_node_lock(lck, key, EXT4_LB_DX, 0, NULL)
395 +#define ext4_htree_dx_unlock(lck) \
396 + ext4_htree_node_unlock(lck, EXT4_LB_DX, NULL)
397 +#define ext4_htree_dx_locked(lck) \
398 + ext4_htree_node_locked(lck, EXT4_LB_DX)
400 +static void ext4_htree_dx_need_lock(struct htree_lock *lck)
402 + struct ext4_dir_lock_data *ld;
404 + if (ext4_htree_safe_locked(lck))
407 + ld = ext4_htree_lock_data(lck);
408 + switch (ld->ld_flags) {
411 + case EXT4_HLOCK_LOOKUP:
412 + ld->ld_flags = EXT4_HLOCK_LOOKUP_SAFE;
414 + case EXT4_HLOCK_DEL:
415 + ld->ld_flags = EXT4_HLOCK_DEL_SAFE;
417 + case EXT4_HLOCK_ADD:
418 + ld->ld_flags = EXT4_HLOCK_SPLIT;
423 +#define ext4_htree_de_lock(lck, key) \
424 + ext4_htree_node_lock(lck, key, EXT4_LB_DE, 1, NULL)
425 +#define ext4_htree_de_unlock(lck) \
426 + ext4_htree_node_unlock(lck, EXT4_LB_DE, NULL)
428 +#define ext4_htree_spin_lock(lck, key, event) \
429 + ext4_htree_node_lock(lck, key, EXT4_LB_SPIN, 0, event)
430 +#define ext4_htree_spin_unlock(lck) \
431 + ext4_htree_node_unlock(lck, EXT4_LB_SPIN, NULL)
432 +#define ext4_htree_spin_unlock_listen(lck, p) \
433 + ext4_htree_node_unlock(lck, EXT4_LB_SPIN, p)
435 +static void ext4_htree_spin_stop_listen(struct htree_lock *lck)
437 + if (!ext4_htree_safe_locked(lck) &&
438 + htree_node_is_listening(lck, ffz(~EXT4_LB_SPIN)))
439 + htree_node_stop_listen(lck, ffz(~EXT4_LB_SPIN));
443 + DX_HASH_COL_IGNORE, /* ignore collision while probing frames */
444 + DX_HASH_COL_YES, /* there is collision and it does matter */
445 + DX_HASH_COL_NO, /* there is no collision */
448 +static int dx_probe_hash_collision(struct htree_lock *lck,
449 + struct dx_entry *entries,
450 + struct dx_entry *at, u32 hash)
452 + if (!(lck && ext4_htree_lock_data(lck)->ld_flags & EXT4_LB_EXACT)) {
453 + return DX_HASH_COL_IGNORE; /* don't care about collision */
455 + } else if (at == entries + dx_get_count(entries) - 1) {
456 + return DX_HASH_COL_IGNORE; /* not in any leaf of this DX */
458 + } else { /* hash collision? */
459 + return ((dx_get_hash(at + 1) & ~1) == hash) ?
460 + DX_HASH_COL_YES : DX_HASH_COL_NO;
465 * Probe for a directory leaf block to search.
467 @@ -808,10 +1042,11 @@ static inline void htree_rep_invariant_check(struct dx_entry *at,
469 static struct dx_frame *
470 dx_probe(struct ext4_filename *fname, struct inode *dir,
471 - struct dx_hash_info *hinfo, struct dx_frame *frame_in)
472 + struct dx_hash_info *hinfo, struct dx_frame *frame_in,
473 + struct htree_lock *lck)
475 unsigned count, indirect, level, i;
476 - struct dx_entry *at, *entries, *p, *q, *m;
477 + struct dx_entry *at, *entries, *p, *q, *m, *dx = NULL;
478 struct dx_root_info *info;
479 struct dx_frame *frame = frame_in;
480 struct dx_frame *ret_err = ERR_PTR(ERR_BAD_DX_DIR);
481 @@ -895,8 +1130,16 @@ dx_probe(struct ext4_filename *fname, struct inode *dir,
485 + if (indirect == level) { /* the last index level */
486 + /* NB: ext4_htree_dx_lock() could be noop if
487 + * DX-lock flag is not set for current operation
489 + ext4_htree_dx_lock(lck, dx);
490 + ext4_htree_spin_lock(lck, dx, NULL);
492 count = dx_get_count(entries);
493 if (!count || count > dx_get_limit(entries)) {
494 + ext4_htree_spin_unlock(lck); /* release spin */
495 ext4_warning_inode(dir,
496 "dx entry: count %u beyond limit %u",
497 count, dx_get_limit(entries));
498 @@ -923,6 +1166,74 @@ dx_probe(struct ext4_filename *fname, struct inode *dir,
499 frame->entries = entries;
502 + if (indirect == level) { /* the last index level */
503 + struct ext4_dir_lock_data *ld;
506 + /* By default we only lock DE-block, however, we will
507 + * also lock the last level DX-block if:
508 + * a) there is hash collision
509 + * we will set DX-lock flag (a few lines below)
510 + * and redo to lock DX-block
511 + * see detail in dx_probe_hash_collision()
512 + * b) it's a retry from splitting
513 + * we need to lock the last level DX-block so nobody
514 + * else can split any leaf blocks under the same
515 + * DX-block, see detail in ext4_dx_add_entry()
517 + if (ext4_htree_dx_locked(lck)) {
518 + /* DX-block is locked, just lock DE-block
521 + ext4_htree_spin_unlock(lck);
522 + if (!ext4_htree_safe_locked(lck))
523 + ext4_htree_de_lock(lck, frame->at);
526 + /* it's pdirop and no DX lock */
527 + if (dx_probe_hash_collision(lck, entries, at, hash) ==
529 + /* found hash collision, set DX-lock flag
530 + * and retry to obtain DX-lock
532 + ext4_htree_spin_unlock(lck);
533 + ext4_htree_dx_need_lock(lck);
536 + ld = ext4_htree_lock_data(lck);
537 + /* because I don't lock DX, so @at can't be trusted
538 + * after I release spinlock so I have to save it
541 + ld->ld_at_entry = *at;
542 + ld->ld_count = dx_get_count(entries);
544 + frame->at = &ld->ld_at_entry;
545 + myblock = dx_get_block(at);
547 + /* NB: ordering locking */
548 + ext4_htree_spin_unlock_listen(lck, &myblock);
549 + /* other thread can split this DE-block because:
550 + * a) I don't have lock for the DE-block yet
551 + * b) I released spinlock on DX-block
552 + * if it happened I can detect it by listening
553 + * splitting event on this DE-block
555 + ext4_htree_de_lock(lck, frame->at);
556 + ext4_htree_spin_stop_listen(lck);
558 + if (myblock == EXT4_HTREE_NODE_CHANGED) {
559 + /* someone split this DE-block before
560 + * I locked it, I need to retry and lock
563 + ext4_htree_de_unlock(lck);
570 block = dx_get_block(at);
571 for (i = 0; i <= level; i++) {
572 if (blocks[i] == block) {
573 @@ -932,8 +1243,7 @@ dx_probe(struct ext4_filename *fname, struct inode *dir,
577 - if (++level > indirect)
580 blocks[level] = block;
582 frame->bh = ext4_read_dirblock(dir, block, INDEX);
583 @@ -1004,7 +1314,7 @@ static void dx_release(struct dx_frame *frames, struct inode *dir)
584 static int ext4_htree_next_block(struct inode *dir, __u32 hash,
585 struct dx_frame *frame,
586 struct dx_frame *frames,
588 + __u32 *start_hash, struct htree_lock *lck)
591 struct buffer_head *bh;
592 @@ -1019,12 +1329,22 @@ static int ext4_htree_next_block(struct inode *dir, __u32 hash,
593 * this loop, num_frames indicates the number of interior
594 * nodes need to be read.
596 + ext4_htree_de_unlock(lck);
598 - if (++(p->at) < p->entries + dx_get_count(p->entries))
600 + if (num_frames > 0 || ext4_htree_dx_locked(lck)) {
601 + /* num_frames > 0 :
603 + * ext4_htree_dx_locked:
604 + * frame->at is reliable pointer returned by dx_probe,
605 + * otherwise dx_probe already knew no collision */
606 + if (++(p->at) < p->entries + dx_get_count(p->entries))
612 + if (num_frames == 1)
613 + ext4_htree_dx_unlock(lck);
617 @@ -1047,6 +1367,13 @@ static int ext4_htree_next_block(struct inode *dir, __u32 hash,
618 * block so no check is necessary
620 while (num_frames--) {
621 + if (num_frames == 0) {
622 + /* it's not always necessary, we just don't want to
623 + * detect hash collision again */
624 + ext4_htree_dx_need_lock(lck);
625 + ext4_htree_dx_lock(lck, p->at);
628 bh = ext4_read_dirblock(dir, dx_get_block(p->at), INDEX);
631 @@ -1055,6 +1382,7 @@ static int ext4_htree_next_block(struct inode *dir, __u32 hash,
633 p->at = p->entries = ((struct dx_node *) bh->b_data)->entries;
635 + ext4_htree_de_lock(lck, p->at);
639 @@ -1216,10 +1544,10 @@ int ext4_htree_fill_tree(struct file *dir_file, __u32 start_hash,
641 hinfo.hash = start_hash;
642 hinfo.minor_hash = 0;
643 - frame = dx_probe(NULL, dir, &hinfo, frames);
644 + /* assume it's PR locked */
645 + frame = dx_probe(NULL, dir, &hinfo, frames, NULL);
647 return PTR_ERR(frame);
649 /* Add '.' and '..' from the htree header */
650 if (!start_hash && !start_minor_hash) {
651 de = (struct ext4_dir_entry_2 *) frames[0].bh->b_data;
652 @@ -1259,7 +1587,7 @@ int ext4_htree_fill_tree(struct file *dir_file, __u32 start_hash,
655 ret = ext4_htree_next_block(dir, HASH_NB_ALWAYS,
656 - frame, frames, &hashval);
657 + frame, frames, &hashval, NULL);
658 *next_hash = hashval;
661 @@ -1579,7 +1907,7 @@ static int is_dx_internal_node(struct inode *dir, ext4_lblk_t block,
662 static struct buffer_head *__ext4_find_entry(struct inode *dir,
663 struct ext4_filename *fname,
664 struct ext4_dir_entry_2 **res_dir,
666 + int *inlined, struct htree_lock *lck)
668 struct super_block *sb;
669 struct buffer_head *bh_use[NAMEI_RA_SIZE];
670 @@ -1621,7 +1949,7 @@ static struct buffer_head *__ext4_find_entry(struct inode *dir,
674 - ret = ext4_dx_find_entry(dir, fname, res_dir);
675 + ret = ext4_dx_find_entry(dir, fname, res_dir, lck);
677 * On success, or if the error was file not found,
678 * return. Otherwise, fall back to doing a search the
679 @@ -1631,6 +1959,7 @@ static struct buffer_head *__ext4_find_entry(struct inode *dir,
680 goto cleanup_and_exit;
681 dxtrace(printk(KERN_DEBUG "ext4_find_entry: dx failed, "
683 + ext4_htree_safe_relock(lck);
686 nblocks = dir->i_size >> EXT4_BLOCK_SIZE_BITS(sb);
687 @@ -1721,10 +2050,10 @@ cleanup_and_exit:
691 -static struct buffer_head *ext4_find_entry(struct inode *dir,
692 +struct buffer_head *ext4_find_entry_locked(struct inode *dir,
693 const struct qstr *d_name,
694 struct ext4_dir_entry_2 **res_dir,
696 + int *inlined, struct htree_lock *lck)
699 struct ext4_filename fname;
700 @@ -1736,12 +2065,14 @@ static struct buffer_head *ext4_find_entry(struct inode *dir,
704 - bh = __ext4_find_entry(dir, &fname, res_dir, inlined);
705 + bh = __ext4_find_entry(dir, &fname, res_dir, inlined, lck);
707 ext4_fname_free_filename(&fname);
711 +EXPORT_SYMBOL(ext4_find_entry_locked);
713 static struct buffer_head *ext4_lookup_entry(struct inode *dir,
714 struct dentry *dentry,
715 struct ext4_dir_entry_2 **res_dir)
716 @@ -1757,7 +2088,7 @@ static struct buffer_head *ext4_lookup_entry(struct inode *dir,
720 - bh = __ext4_find_entry(dir, &fname, res_dir, NULL);
721 + bh = __ext4_find_entry(dir, &fname, res_dir, NULL, NULL);
723 ext4_fname_free_filename(&fname);
725 @@ -1765,7 +2096,8 @@ static struct buffer_head *ext4_lookup_entry(struct inode *dir,
727 static struct buffer_head * ext4_dx_find_entry(struct inode *dir,
728 struct ext4_filename *fname,
729 - struct ext4_dir_entry_2 **res_dir)
730 + struct ext4_dir_entry_2 **res_dir,
731 + struct htree_lock *lck)
733 struct super_block * sb = dir->i_sb;
734 struct dx_frame frames[EXT4_HTREE_LEVEL], *frame;
735 @@ -1776,7 +2108,7 @@ static struct buffer_head * ext4_dx_find_entry(struct inode *dir,
736 #ifdef CONFIG_FS_ENCRYPTION
739 - frame = dx_probe(fname, dir, NULL, frames);
740 + frame = dx_probe(fname, dir, NULL, frames, lck);
742 return (struct buffer_head *) frame;
744 @@ -1798,7 +2130,7 @@ static struct buffer_head * ext4_dx_find_entry(struct inode *dir,
746 /* Check to see if we should continue to search */
747 retval = ext4_htree_next_block(dir, fname->hinfo.hash, frame,
749 + frames, NULL, lck);
751 ext4_warning_inode(dir,
752 "error %d reading directory index block",
753 @@ -1987,8 +2319,9 @@ static struct ext4_dir_entry_2 *dx_pack_dirents(struct inode *dir, char *base,
754 * Returns pointer to de in block into which the new entry will be inserted.
756 static struct ext4_dir_entry_2 *do_split(handle_t *handle, struct inode *dir,
757 - struct buffer_head **bh,struct dx_frame *frame,
758 - struct dx_hash_info *hinfo)
759 + struct buffer_head **bh, struct dx_frame *frames,
760 + struct dx_frame *frame, struct dx_hash_info *hinfo,
761 + struct htree_lock *lck)
763 unsigned blocksize = dir->i_sb->s_blocksize;
765 @@ -2065,8 +2398,14 @@ static struct ext4_dir_entry_2 *do_split(handle_t *handle, struct inode *dir,
766 hash2, split, count-split));
768 /* Fancy dance to stay within two buffers */
769 - de2 = dx_move_dirents(dir, data1, data2, map + split, count - split,
771 + if (hinfo->hash < hash2) {
772 + de2 = dx_move_dirents(dir, data1, data2, map + split,
773 + count - split, blocksize);
775 + /* make sure we will add entry to the same block which
776 + * we have already locked */
777 + de2 = dx_move_dirents(dir, data1, data2, map, split, blocksize);
779 de = dx_pack_dirents(dir, data1, blocksize);
780 de->rec_len = ext4_rec_len_to_disk(data1 + (blocksize - csum_size) -
782 @@ -2084,12 +2423,21 @@ static struct ext4_dir_entry_2 *do_split(handle_t *handle, struct inode *dir,
783 dxtrace(dx_show_leaf(dir, hinfo, (struct ext4_dir_entry_2 *) data2,
786 - /* Which block gets the new entry? */
787 - if (hinfo->hash >= hash2) {
790 + ext4_htree_spin_lock(lck, frame > frames ? (frame - 1)->at : NULL,
791 + frame->at); /* notify block is being split */
792 + if (hinfo->hash < hash2) {
793 + dx_insert_block(frame, hash2 + continued, newblock);
796 + /* switch block number */
797 + dx_insert_block(frame, hash2 + continued,
798 + dx_get_block(frame->at));
799 + dx_set_block(frame->at, newblock);
802 - dx_insert_block(frame, hash2 + continued, newblock);
803 + ext4_htree_spin_unlock(lck);
804 + ext4_htree_dx_unlock(lck);
806 err = ext4_handle_dirty_dirblock(handle, dir, bh2);
809 @@ -2388,7 +2736,7 @@ static int make_indexed_dir(handle_t *handle, struct ext4_filename *fname,
813 - de = do_split(handle,dir, &bh2, frame, &fname->hinfo);
814 + de = do_split(handle, dir, &bh2, frames, frame, &fname->hinfo, NULL);
816 retval = PTR_ERR(de);
818 @@ -2497,8 +2845,8 @@ out:
819 * may not sleep between calling this and putting something into
820 * the entry, as someone else might have used it while you slept.
822 -static int ext4_add_entry(handle_t *handle, struct dentry *dentry,
823 - struct inode *inode)
824 +int ext4_add_entry_locked(handle_t *handle, struct dentry *dentry,
825 + struct inode *inode, struct htree_lock *lck)
827 struct inode *dir = d_inode(dentry->d_parent);
828 struct buffer_head *bh = NULL;
829 @@ -2547,9 +2895,10 @@ static int ext4_add_entry(handle_t *handle, struct dentry *dentry,
830 return ext4_update_dotdot(handle, dentry, inode);
833 - retval = ext4_dx_add_entry(handle, &fname, dir, inode);
834 + retval = ext4_dx_add_entry(handle, &fname, dir, inode, lck);
835 if (!retval || (retval != ERR_BAD_DX_DIR))
837 + ext4_htree_safe_relock(lck);
838 /* Can we just ignore htree data? */
839 if (ext4_has_metadata_csum(sb)) {
840 EXT4_ERROR_INODE(dir,
841 @@ -2612,12 +2961,14 @@ out:
842 ext4_set_inode_state(inode, EXT4_STATE_NEWENTRY);
845 +EXPORT_SYMBOL(ext4_add_entry_locked);
848 * Returns 0 for success, or a negative error value
850 static int ext4_dx_add_entry(handle_t *handle, struct ext4_filename *fname,
851 - struct inode *dir, struct inode *inode)
852 + struct inode *dir, struct inode *inode,
853 + struct htree_lock *lck)
855 struct dx_frame frames[EXT4_HTREE_LEVEL], *frame;
856 struct dx_entry *entries, *at;
857 @@ -2629,7 +2980,7 @@ static int ext4_dx_add_entry(handle_t *handle, struct ext4_filename *fname,
861 - frame = dx_probe(fname, dir, NULL, frames);
862 + frame = dx_probe(fname, dir, NULL, frames, lck);
864 return PTR_ERR(frame);
865 entries = frame->entries;
866 @@ -2664,6 +3015,12 @@ again:
867 struct dx_node *node2;
868 struct buffer_head *bh2;
870 + if (!ext4_htree_safe_locked(lck)) { /* retry with EX lock */
871 + ext4_htree_safe_relock(lck);
876 while (frame > frames) {
877 if (dx_get_count((frame - 1)->entries) <
878 dx_get_limit((frame - 1)->entries)) {
879 @@ -2767,8 +3124,32 @@ again:
883 + } else if (!ext4_htree_dx_locked(lck)) {
884 + struct ext4_dir_lock_data *ld = ext4_htree_lock_data(lck);
886 + /* not well protected, require DX lock */
887 + ext4_htree_dx_need_lock(lck);
888 + at = frame > frames ? (frame - 1)->at : NULL;
890 + /* NB: no risk of deadlock because it's just a try.
892 + * NB: we check ld_count for twice, the first time before
893 + * having DX lock, the second time after holding DX lock.
895 + * NB: We never free blocks for directory so far, which
896 + * means value returned by dx_get_count() should equal to
897 + * ld->ld_count if nobody split any DE-block under @at,
898 + * and ld->ld_at still points to valid dx_entry. */
899 + if ((ld->ld_count != dx_get_count(entries)) ||
900 + !ext4_htree_dx_lock_try(lck, at) ||
901 + (ld->ld_count != dx_get_count(entries))) {
905 + /* OK, I've got DX lock and nothing changed */
906 + frame->at = ld->ld_at;
908 - de = do_split(handle, dir, &bh, frame, &fname->hinfo);
909 + de = do_split(handle, dir, &bh, frames, frame, &fname->hinfo, lck);
913 @@ -2779,6 +3160,8 @@ again:
915 ext4_std_error(dir->i_sb, err); /* this is a no-op if err == 0 */
917 + ext4_htree_dx_unlock(lck);
918 + ext4_htree_de_unlock(lck);
920 dx_release(frames, dir);
921 /* @restart is true means htree-path has been changed, we need to
922 diff --git a/fs/ext4/super.c b/fs/ext4/super.c
923 index a2fcbf8..82ea5f6 100644
924 --- a/fs/ext4/super.c
925 +++ b/fs/ext4/super.c
926 @@ -1291,6 +1291,7 @@ static struct inode *ext4_alloc_inode(struct super_block *sb)
928 inode_set_iversion(&ei->vfs_inode, 1);
929 spin_lock_init(&ei->i_raw_lock);
930 + sema_init(&ei->i_append_sem, 1);
931 INIT_LIST_HEAD(&ei->i_prealloc_list);
932 atomic_set(&ei->i_prealloc_active, 0);
933 spin_lock_init(&ei->i_prealloc_lock);