1 LU-50 ldiskfs: pdirops patch for ldiskfs
3 Single directory performance is critical for HPC workloads. In a
4 typical use case an application creates a separate output file for
5 each node and task in a job. As nodes and tasks increase, hundreds
6 of thousands of files may be created in a single directory within
7 a short window of time.
8 Today, both filename lookup and file system modifying operations
9 (such as create and unlink) are protected with a single lock for
10 an entire ldiskfs directory. The PDO project will remove this
11 bottleneck by introducing a parallel locking mechanism for entire
12 ldiskfs directories. This work will enable multiple application
13 threads to simultaneously lookup, create and unlink in parallel.
16 - pdirops support for ldiskfs
17 - N-level htree directory
18 - integrate with osd-ldiskfs
20 Signed-off-by: Liang Zhen <liang@whamcloud.com>
21 Change-Id: I269c0e3112e68f3acd79e860dab052a68c7d7aaa
22 Reviewed-on: http://review.whamcloud.com/375
23 Reviewed-by: Andreas Dilger <adilger@whamcloud.com>
25 Index: linux-3.10.0-229.1.2.fc21.x86_64/fs/ext4/Makefile
26 ===================================================================
27 --- linux-3.10.0-229.1.2.fc21.x86_64.orig/fs/ext4/Makefile
28 +++ linux-3.10.0-229.1.2.fc21.x86_64/fs/ext4/Makefile
29 @@ -6,6 +6,7 @@ obj-$(CONFIG_EXT4_FS) += ext4.o
31 ext4-y := balloc.o bitmap.o dir.o file.o fsync.o ialloc.o inode.o page-io.o \
32 ioctl.o namei.o super.o symlink.o hash.o resize.o extents.o \
34 ext4_jbd2.o migrate.o mballoc.o block_validity.o move_extent.o \
35 mmp.o indirect.o extents_status.o xattr.o xattr_user.o \
36 xattr_trusted.o inline.o
37 Index: linux-3.10.0-229.1.2.fc21.x86_64/fs/ext4/ext4.h
38 ===================================================================
39 --- linux-3.10.0-229.1.2.fc21.x86_64.orig/fs/ext4/ext4.h
40 +++ linux-3.10.0-229.1.2.fc21.x86_64/fs/ext4/ext4.h
42 #include <linux/mutex.h>
43 #include <linux/timer.h>
44 #include <linux/wait.h>
45 +#include <linux/htree_lock.h>
46 #include <linux/blockgroup_lock.h>
47 #include <linux/percpu_counter.h>
48 #include <linux/ratelimit.h>
49 @@ -821,6 +822,9 @@ struct ext4_inode_info {
51 ext4_fsblk_t i_file_acl;
53 + /* following fields for parallel directory operations -bzzz */
54 + struct semaphore i_append_sem;
57 * i_block_group is the number of the block group which contains
58 * this file's inode. Constant across the lifetime of the inode,
59 @@ -1846,6 +1850,71 @@ struct dx_hash_info
61 #define HASH_NB_ALWAYS 1
63 +/* assume name-hash is protected by upper layer */
64 +#define EXT4_HTREE_LOCK_HASH 0
66 +enum ext4_pdo_lk_types {
67 +#if EXT4_HTREE_LOCK_HASH
70 + EXT4_LK_DX, /* index block */
71 + EXT4_LK_DE, /* directory entry block */
72 + EXT4_LK_SPIN, /* spinlock */
77 +#define EXT4_LB_RO(b) (1 << (b))
78 +/* read + write, high bits for writer */
79 +#define EXT4_LB_RW(b) ((1 << (b)) | (1 << (EXT4_LK_MAX + (b))))
81 +enum ext4_pdo_lock_bits {
83 + EXT4_LB_DX_RO = EXT4_LB_RO(EXT4_LK_DX),
84 + EXT4_LB_DX = EXT4_LB_RW(EXT4_LK_DX),
86 + EXT4_LB_DE_RO = EXT4_LB_RO(EXT4_LK_DE),
87 + EXT4_LB_DE = EXT4_LB_RW(EXT4_LK_DE),
88 + /* DX spinlock bits */
89 + EXT4_LB_SPIN_RO = EXT4_LB_RO(EXT4_LK_SPIN),
90 + EXT4_LB_SPIN = EXT4_LB_RW(EXT4_LK_SPIN),
91 + /* accurate searching */
92 + EXT4_LB_EXACT = EXT4_LB_RO(EXT4_LK_MAX << 1),
95 +enum ext4_pdo_lock_opc {
97 + EXT4_HLOCK_READDIR = (EXT4_LB_DE_RO | EXT4_LB_DX_RO),
98 + EXT4_HLOCK_LOOKUP = (EXT4_LB_DE_RO | EXT4_LB_SPIN_RO |
100 + EXT4_HLOCK_DEL = (EXT4_LB_DE | EXT4_LB_SPIN_RO |
102 + EXT4_HLOCK_ADD = (EXT4_LB_DE | EXT4_LB_SPIN_RO),
105 + EXT4_HLOCK_LOOKUP_SAFE = (EXT4_LB_DE_RO | EXT4_LB_DX_RO |
107 + EXT4_HLOCK_DEL_SAFE = (EXT4_LB_DE | EXT4_LB_DX_RO | EXT4_LB_EXACT),
108 + EXT4_HLOCK_SPLIT = (EXT4_LB_DE | EXT4_LB_DX | EXT4_LB_SPIN),
111 +extern struct htree_lock_head *ext4_htree_lock_head_alloc(unsigned hbits);
112 +#define ext4_htree_lock_head_free(lhead) htree_lock_head_free(lhead)
114 +extern struct htree_lock *ext4_htree_lock_alloc(void);
115 +#define ext4_htree_lock_free(lck) htree_lock_free(lck)
117 +extern void ext4_htree_lock(struct htree_lock *lck,
118 + struct htree_lock_head *lhead,
119 + struct inode *dir, unsigned flags);
120 +#define ext4_htree_unlock(lck) htree_unlock(lck)
122 +extern struct buffer_head *__ext4_find_entry(struct inode *dir,
123 + const struct qstr *d_name,
124 + struct ext4_dir_entry_2 **res_dir,
125 + int *inlined, struct htree_lock *lck);
126 +extern int __ext4_add_entry(handle_t *handle, struct dentry *dentry,
127 + struct inode *inode, struct htree_lock *lck);
130 * Describe an inode's exact location on disk and in memory
131 @@ -2088,9 +2157,17 @@ void ext4_insert_dentry(struct inode *in
132 const char *name, int namelen, void *data);
133 static inline void ext4_update_dx_flag(struct inode *inode)
135 + /* Disable it for ldiskfs, because going from a DX directory to
136 + * a non-DX directory while it is in use will completely break
137 + * the htree-locking.
138 + * If we really want to support this operation in the future,
139 + * we need to exclusively lock the directory at here which will
140 + * increase complexity of code */
142 if (!EXT4_HAS_COMPAT_FEATURE(inode->i_sb,
143 EXT4_FEATURE_COMPAT_DIR_INDEX))
144 ext4_clear_inode_flag(inode, EXT4_INODE_INDEX);
147 static unsigned char ext4_filetype_table[] = {
148 DT_UNKNOWN, DT_REG, DT_DIR, DT_CHR, DT_BLK, DT_FIFO, DT_SOCK, DT_LNK
149 Index: linux-3.10.0-1160.2.1.el7.x86_64/fs/ext4/namei.c
150 ===================================================================
151 --- linux-3.10.0-1160.2.1.el7.x86_64.orig/fs/ext4/namei.c
152 +++ linux-3.10.0-1160.2.1.el7.x86_64/fs/ext4/namei.c
153 @@ -53,6 +53,7 @@ struct buffer_head *ext4_append(handle_t
156 struct buffer_head *bh;
157 + struct ext4_inode_info *ei = EXT4_I(inode);
160 if (unlikely(EXT4_SB(inode->i_sb)->s_max_dir_size_kb &&
161 @@ -60,15 +61,22 @@ struct buffer_head *ext4_append(handle_t
162 EXT4_SB(inode->i_sb)->s_max_dir_size_kb)))
163 return ERR_PTR(-ENOSPC);
165 + /* with parallel dir operations all appends
166 + * have to be serialized -bzzz */
167 + down(&ei->i_append_sem);
169 *block = inode->i_size >> inode->i_sb->s_blocksize_bits;
171 bh = ext4_bread(handle, inode, *block, EXT4_GET_BLOCKS_CREATE, &err);
174 + up(&ei->i_append_sem);
177 inode->i_size += inode->i_sb->s_blocksize;
178 EXT4_I(inode)->i_disksize = inode->i_size;
179 BUFFER_TRACE(bh, "get_write_access");
180 err = ext4_journal_get_write_access(handle, bh);
181 + up(&ei->i_append_sem);
184 ext4_std_error(inode->i_sb, err);
185 @@ -247,7 +255,7 @@ static struct dx_frame *dx_probe(const s
187 struct dx_hash_info *hinfo,
188 struct dx_frame *frame,
190 + struct htree_lock *lck, int *err);
191 static void dx_release(struct dx_frame *frames);
192 static int dx_make_map(struct ext4_dir_entry_2 *de, unsigned blocksize,
193 struct dx_hash_info *hinfo, struct dx_map_entry map[]);
194 @@ -260,13 +268,13 @@ static void dx_insert_block(struct dx_fr
195 static int ext4_htree_next_block(struct inode *dir, __u32 hash,
196 struct dx_frame *frame,
197 struct dx_frame *frames,
198 - __u32 *start_hash);
199 + __u32 *start_hash, struct htree_lock *lck);
200 static struct buffer_head * ext4_dx_find_entry(struct inode *dir,
201 const struct qstr *d_name,
202 struct ext4_dir_entry_2 **res_dir,
204 + struct htree_lock *lck, int *err);
205 static int ext4_dx_add_entry(handle_t *handle, struct dentry *dentry,
206 - struct inode *inode);
207 + struct inode *inode, struct htree_lock *lck);
209 /* checksumming functions */
210 void initialize_dirent_tail(struct ext4_dir_entry_tail *t,
211 @@ -670,6 +678,227 @@ struct stats dx_show_entries(struct dx_h
213 #endif /* DX_DEBUG */
215 +/* private data for htree_lock */
216 +struct ext4_dir_lock_data {
217 + unsigned ld_flags; /* bits-map for lock types */
218 + unsigned ld_count; /* # entries of the last DX block */
219 + struct dx_entry ld_at_entry; /* copy of leaf dx_entry */
220 + struct dx_entry *ld_at; /* position of leaf dx_entry */
223 +#define ext4_htree_lock_data(l) ((struct ext4_dir_lock_data *)(l)->lk_private)
224 +#define ext4_find_entry(dir, name, dirent, inline) \
225 + __ext4_find_entry(dir, name, dirent, inline, NULL)
226 +#define ext4_add_entry(handle, dentry, inode) \
227 + __ext4_add_entry(handle, dentry, inode, NULL)
229 +/* NB: ext4_lblk_t is 32 bits so we use high bits to identify invalid blk */
230 +#define EXT4_HTREE_NODE_CHANGED (0xcafeULL << 32)
232 +static void ext4_htree_event_cb(void *target, void *event)
234 + u64 *block = (u64 *)target;
236 + if (*block == dx_get_block((struct dx_entry *)event))
237 + *block = EXT4_HTREE_NODE_CHANGED;
240 +struct htree_lock_head *ext4_htree_lock_head_alloc(unsigned hbits)
242 + struct htree_lock_head *lhead;
244 + lhead = htree_lock_head_alloc(EXT4_LK_MAX, hbits, 0);
245 + if (lhead != NULL) {
246 + htree_lock_event_attach(lhead, EXT4_LK_SPIN, HTREE_EVENT_WR,
247 + ext4_htree_event_cb);
251 +EXPORT_SYMBOL(ext4_htree_lock_head_alloc);
253 +struct htree_lock *ext4_htree_lock_alloc(void)
255 + return htree_lock_alloc(EXT4_LK_MAX,
256 + sizeof(struct ext4_dir_lock_data));
258 +EXPORT_SYMBOL(ext4_htree_lock_alloc);
260 +static htree_lock_mode_t ext4_htree_mode(unsigned flags)
263 + default: /* 0 or unknown flags require EX lock */
264 + return HTREE_LOCK_EX;
265 + case EXT4_HLOCK_READDIR:
266 + return HTREE_LOCK_PR;
267 + case EXT4_HLOCK_LOOKUP:
268 + return HTREE_LOCK_CR;
269 + case EXT4_HLOCK_DEL:
270 + case EXT4_HLOCK_ADD:
271 + return HTREE_LOCK_CW;
275 +/* return PR for read-only operations, otherwise return EX */
276 +static inline htree_lock_mode_t ext4_htree_safe_mode(unsigned flags)
278 + int writer = (flags & EXT4_LB_DE) == EXT4_LB_DE;
280 + /* 0 requires EX lock */
281 + return (flags == 0 || writer) ? HTREE_LOCK_EX : HTREE_LOCK_PR;
284 +static int ext4_htree_safe_locked(struct htree_lock *lck)
288 + if (lck == NULL || lck->lk_mode == HTREE_LOCK_EX)
291 + writer = (ext4_htree_lock_data(lck)->ld_flags & EXT4_LB_DE) ==
293 + if (writer) /* all readers & writers are excluded? */
294 + return lck->lk_mode == HTREE_LOCK_EX;
296 + /* all writers are excluded? */
297 + return lck->lk_mode == HTREE_LOCK_PR ||
298 + lck->lk_mode == HTREE_LOCK_PW ||
299 + lck->lk_mode == HTREE_LOCK_EX;
302 +/* relock htree_lock with EX mode if it's change operation, otherwise
303 + * relock it with PR mode. It's noop if PDO is disabled. */
304 +static void ext4_htree_safe_relock(struct htree_lock *lck)
306 + if (!ext4_htree_safe_locked(lck)) {
307 + unsigned flags = ext4_htree_lock_data(lck)->ld_flags;
309 + htree_change_lock(lck, ext4_htree_safe_mode(flags));
313 +void ext4_htree_lock(struct htree_lock *lck, struct htree_lock_head *lhead,
314 + struct inode *dir, unsigned flags)
316 + htree_lock_mode_t mode = is_dx(dir) ? ext4_htree_mode(flags) :
317 + ext4_htree_safe_mode(flags);
319 + ext4_htree_lock_data(lck)->ld_flags = flags;
320 + htree_lock(lck, lhead, mode);
322 + ext4_htree_safe_relock(lck); /* make sure it's safe locked */
324 +EXPORT_SYMBOL(ext4_htree_lock);
326 +static int ext4_htree_node_lock(struct htree_lock *lck, struct dx_entry *at,
327 + unsigned lmask, int wait, void *ev)
329 + u32 key = (at == NULL) ? 0 : dx_get_block(at);
332 + /* NOOP if htree is well protected or caller doesn't require the lock */
333 + if (ext4_htree_safe_locked(lck) ||
334 + !(ext4_htree_lock_data(lck)->ld_flags & lmask))
337 + mode = (ext4_htree_lock_data(lck)->ld_flags & lmask) == lmask ?
338 + HTREE_LOCK_PW : HTREE_LOCK_PR;
340 + if (htree_node_lock_try(lck, mode, key, ffz(~lmask), wait, ev))
342 + if (!(lmask & EXT4_LB_SPIN)) /* not a spinlock */
344 + cpu_relax(); /* spin until granted */
348 +static int ext4_htree_node_locked(struct htree_lock *lck, unsigned lmask)
350 + return ext4_htree_safe_locked(lck) ||
351 + htree_node_is_granted(lck, ffz(~lmask));
354 +static void ext4_htree_node_unlock(struct htree_lock *lck,
355 + unsigned lmask, void *buf)
357 + /* NB: it's safe to call multiple times even if it's not locked */
358 + if (!ext4_htree_safe_locked(lck) &&
359 + htree_node_is_granted(lck, ffz(~lmask)))
360 + htree_node_unlock(lck, ffz(~lmask), buf);
363 +#define ext4_htree_dx_lock(lck, key) \
364 + ext4_htree_node_lock(lck, key, EXT4_LB_DX, 1, NULL)
365 +#define ext4_htree_dx_lock_try(lck, key) \
366 + ext4_htree_node_lock(lck, key, EXT4_LB_DX, 0, NULL)
367 +#define ext4_htree_dx_unlock(lck) \
368 + ext4_htree_node_unlock(lck, EXT4_LB_DX, NULL)
369 +#define ext4_htree_dx_locked(lck) \
370 + ext4_htree_node_locked(lck, EXT4_LB_DX)
372 +static void ext4_htree_dx_need_lock(struct htree_lock *lck)
374 + struct ext4_dir_lock_data *ld;
376 + if (ext4_htree_safe_locked(lck))
379 + ld = ext4_htree_lock_data(lck);
380 + switch (ld->ld_flags) {
383 + case EXT4_HLOCK_LOOKUP:
384 + ld->ld_flags = EXT4_HLOCK_LOOKUP_SAFE;
386 + case EXT4_HLOCK_DEL:
387 + ld->ld_flags = EXT4_HLOCK_DEL_SAFE;
389 + case EXT4_HLOCK_ADD:
390 + ld->ld_flags = EXT4_HLOCK_SPLIT;
395 +#define ext4_htree_de_lock(lck, key) \
396 + ext4_htree_node_lock(lck, key, EXT4_LB_DE, 1, NULL)
397 +#define ext4_htree_de_unlock(lck) \
398 + ext4_htree_node_unlock(lck, EXT4_LB_DE, NULL)
400 +#define ext4_htree_spin_lock(lck, key, event) \
401 + ext4_htree_node_lock(lck, key, EXT4_LB_SPIN, 0, event)
402 +#define ext4_htree_spin_unlock(lck) \
403 + ext4_htree_node_unlock(lck, EXT4_LB_SPIN, NULL)
404 +#define ext4_htree_spin_unlock_listen(lck, p) \
405 + ext4_htree_node_unlock(lck, EXT4_LB_SPIN, p)
407 +static void ext4_htree_spin_stop_listen(struct htree_lock *lck)
409 + if (!ext4_htree_safe_locked(lck) &&
410 + htree_node_is_listening(lck, ffz(~EXT4_LB_SPIN)))
411 + htree_node_stop_listen(lck, ffz(~EXT4_LB_SPIN));
415 + DX_HASH_COL_IGNORE, /* ignore collision while probing frames */
416 + DX_HASH_COL_YES, /* there is collision and it does matter */
417 + DX_HASH_COL_NO, /* there is no collision */
420 +static int dx_probe_hash_collision(struct htree_lock *lck,
421 + struct dx_entry *entries,
422 + struct dx_entry *at, u32 hash)
424 + if (!(ext4_htree_lock_data(lck)->ld_flags & EXT4_LB_EXACT)) {
425 + return DX_HASH_COL_IGNORE; /* don't care about collision */
427 + } else if (at == entries + dx_get_count(entries) - 1) {
428 + return DX_HASH_COL_IGNORE; /* not in any leaf of this DX */
430 + } else { /* hash collision? */
431 + return ((dx_get_hash(at + 1) & ~1) == hash) ?
432 + DX_HASH_COL_YES : DX_HASH_COL_NO;
437 * Probe for a directory leaf block to search.
439 @@ -681,10 +910,11 @@ struct stats dx_show_entries(struct dx_h
441 static struct dx_frame *
442 dx_probe(const struct qstr *d_name, struct inode *dir,
443 - struct dx_hash_info *hinfo, struct dx_frame *frame_in, int *err)
444 + struct dx_hash_info *hinfo, struct dx_frame *frame_in,
445 + struct htree_lock *lck, int *err)
447 unsigned count, indirect;
448 - struct dx_entry *at, *entries, *p, *q, *m;
449 + struct dx_entry *at, *entries, *p, *q, *m, *dx = NULL;
450 struct dx_root_info *info;
451 struct buffer_head *bh;
452 struct dx_frame *frame = frame_in;
453 @@ -758,8 +988,15 @@ dx_probe(const struct qstr *d_name, stru
454 dxtrace(printk("Look up %x", hash));
457 + if (indirect == 0) { /* the last index level */
458 + /* NB: ext4_htree_dx_lock() could be noop if
459 + * DX-lock flag is not set for current operation */
460 + ext4_htree_dx_lock(lck, dx);
461 + ext4_htree_spin_lock(lck, dx, NULL);
463 count = dx_get_count(entries);
464 - if (!count || count > dx_get_limit(entries)) {
465 + if (count == 0 || count > dx_get_limit(entries)) {
466 + ext4_htree_spin_unlock(lck); /* release spin */
467 ext4_warning(dir->i_sb,
468 "dx entry: no count or count > limit");
470 @@ -800,7 +1037,70 @@ dx_probe(const struct qstr *d_name, stru
472 frame->entries = entries;
474 - if (!indirect--) return frame;
476 + if (indirect == 0) { /* the last index level */
477 + struct ext4_dir_lock_data *ld;
480 + /* By default we only lock DE-block, however, we will
481 + * also lock the last level DX-block if:
482 + * a) there is hash collision
483 + * we will set DX-lock flag (a few lines below)
484 + * and redo to lock DX-block
485 + * see detail in dx_probe_hash_collision()
486 + * b) it's a retry from splitting
487 + * we need to lock the last level DX-block so nobody
488 + * else can split any leaf blocks under the same
489 + * DX-block, see detail in ext4_dx_add_entry()
491 + if (ext4_htree_dx_locked(lck)) {
492 + /* DX-block is locked, just lock DE-block
494 + ext4_htree_spin_unlock(lck);
495 + if (!ext4_htree_safe_locked(lck))
496 + ext4_htree_de_lock(lck, frame->at);
499 + /* it's pdirop and no DX lock */
500 + if (dx_probe_hash_collision(lck, entries, at, hash) ==
502 + /* found hash collision, set DX-lock flag
503 + * and retry to obtain DX-lock */
504 + ext4_htree_spin_unlock(lck);
505 + ext4_htree_dx_need_lock(lck);
508 + ld = ext4_htree_lock_data(lck);
509 + /* because I don't lock DX, @at can't be trusted
510 + * after I release spinlock so I have to save it */
512 + ld->ld_at_entry = *at;
513 + ld->ld_count = dx_get_count(entries);
515 + frame->at = &ld->ld_at_entry;
516 + myblock = dx_get_block(at);
518 + /* NB: ordering locking */
519 + ext4_htree_spin_unlock_listen(lck, &myblock);
520 + /* other thread can split this DE-block because:
521 + * a) I don't have lock for the DE-block yet
522 + * b) I released spinlock on DX-block
523 + * if it happened I can detect it by listening
524 + * splitting event on this DE-block */
525 + ext4_htree_de_lock(lck, frame->at);
526 + ext4_htree_spin_stop_listen(lck);
528 + if (myblock == EXT4_HTREE_NODE_CHANGED) {
529 + /* someone split this DE-block before
530 + * I locked it, I need to retry and lock
531 + * valid DE-block */
532 + ext4_htree_de_unlock(lck);
539 bh = ext4_read_dirblock(dir, dx_get_block(at), INDEX);
542 @@ -868,7 +1168,7 @@ static void dx_release (struct dx_frame
543 static int ext4_htree_next_block(struct inode *dir, __u32 hash,
544 struct dx_frame *frame,
545 struct dx_frame *frames,
547 + __u32 *start_hash, struct htree_lock *lck)
550 struct buffer_head *bh;
551 @@ -883,12 +1183,22 @@ static int ext4_htree_next_block(struct
552 * this loop, num_frames indicates the number of interior
553 * nodes need to be read.
555 + ext4_htree_de_unlock(lck);
557 - if (++(p->at) < p->entries + dx_get_count(p->entries))
559 + if (num_frames > 0 || ext4_htree_dx_locked(lck)) {
560 + /* num_frames > 0 :
562 + * ext4_htree_dx_locked:
563 + * frame->at is reliable pointer returned by dx_probe,
564 + * otherwise dx_probe already knew no collision */
565 + if (++(p->at) < p->entries + dx_get_count(p->entries))
571 + if (num_frames == 1)
572 + ext4_htree_dx_unlock(lck);
576 @@ -911,6 +1221,13 @@ static int ext4_htree_next_block(struct
577 * block so no check is necessary
579 while (num_frames--) {
580 + if (num_frames == 0) {
581 + /* it's not always necessary, we just don't want to
582 + * detect hash collision again */
583 + ext4_htree_dx_need_lock(lck);
584 + ext4_htree_dx_lock(lck, p->at);
587 bh = ext4_read_dirblock(dir, dx_get_block(p->at), INDEX);
590 @@ -919,6 +1236,7 @@ static int ext4_htree_next_block(struct
592 p->at = p->entries = ((struct dx_node *) bh->b_data)->entries;
594 + ext4_htree_de_lock(lck, p->at);
598 @@ -1021,10 +1339,10 @@ int ext4_htree_fill_tree(struct file *di
600 hinfo.hash = start_hash;
601 hinfo.minor_hash = 0;
602 - frame = dx_probe(NULL, dir, &hinfo, frames, &err);
603 + /* assume it's PR locked */
604 + frame = dx_probe(NULL, dir, &hinfo, frames, NULL, &err);
608 /* Add '.' and '..' from the htree header */
609 if (!start_hash && !start_minor_hash) {
610 de = (struct ext4_dir_entry_2 *) frames[0].bh->b_data;
611 @@ -1051,7 +1369,7 @@ int ext4_htree_fill_tree(struct file *di
614 ret = ext4_htree_next_block(dir, HASH_NB_ALWAYS,
615 - frame, frames, &hashval);
616 + frame, frames, &hashval, NULL);
617 *next_hash = hashval;
620 @@ -1244,10 +1562,10 @@ static int is_dx_internal_node(struct in
621 * The returned buffer_head has ->b_count elevated. The caller is expected
622 * to brelse() it when appropriate.
624 -static struct buffer_head * ext4_find_entry (struct inode *dir,
625 +struct buffer_head *__ext4_find_entry(struct inode *dir,
626 const struct qstr *d_name,
627 struct ext4_dir_entry_2 **res_dir,
629 + int *inlined, struct htree_lock *lck)
631 struct super_block *sb;
632 struct buffer_head *bh_use[NAMEI_RA_SIZE];
633 @@ -1291,7 +1609,7 @@ static struct buffer_head * ext4_find_en
637 - bh = ext4_dx_find_entry(dir, d_name, res_dir, &err);
638 + bh = ext4_dx_find_entry(dir, d_name, res_dir, lck, &err);
640 * On success, or if the error was file not found,
641 * return. Otherwise, fall back to doing a search the
642 @@ -1305,6 +1623,7 @@ static struct buffer_head * ext4_find_en
644 dxtrace(printk(KERN_DEBUG "ext4_find_entry: dx failed, "
646 + ext4_htree_safe_relock(lck);
649 nblocks = dir->i_size >> EXT4_BLOCK_SIZE_BITS(sb);
650 @@ -1402,9 +1721,12 @@ cleanup_and_exit:
651 brelse(bh_use[ra_ptr]);
654 +EXPORT_SYMBOL(__ext4_find_entry);
656 -static struct buffer_head * ext4_dx_find_entry(struct inode *dir, const struct qstr *d_name,
657 - struct ext4_dir_entry_2 **res_dir, int *err)
658 +static struct buffer_head *ext4_dx_find_entry(struct inode *dir,
659 + const struct qstr *d_name,
660 + struct ext4_dir_entry_2 **res_dir,
661 + struct htree_lock *lck, int *err)
663 struct super_block * sb = dir->i_sb;
664 struct dx_hash_info hinfo;
665 @@ -1413,7 +1735,7 @@ static struct buffer_head * ext4_dx_find
669 - if (!(frame = dx_probe(d_name, dir, &hinfo, frames, err)))
670 + if (!(frame = dx_probe(d_name, dir, &hinfo, frames, lck, err)))
673 block = dx_get_block(frame->at);
674 @@ -1437,7 +1759,7 @@ static struct buffer_head * ext4_dx_find
676 /* Check to see if we should continue to search */
677 retval = ext4_htree_next_block(dir, hinfo.hash, frame,
679 + frames, NULL, lck);
682 "error reading index page in directory #%lu",
683 @@ -1597,8 +1919,9 @@ static struct ext4_dir_entry_2* dx_pack_
684 * Returns pointer to de in block into which the new entry will be inserted.
686 static struct ext4_dir_entry_2 *do_split(handle_t *handle, struct inode *dir,
687 - struct buffer_head **bh,struct dx_frame *frame,
688 - struct dx_hash_info *hinfo, int *error)
689 + struct buffer_head **bh, struct dx_frame *frames,
690 + struct dx_frame *frame, struct dx_hash_info *hinfo,
691 + struct htree_lock *lck, int *error)
693 unsigned blocksize = dir->i_sb->s_blocksize;
694 unsigned count, continued;
695 @@ -1661,7 +1984,14 @@ static struct ext4_dir_entry_2 *do_split
696 hash2, split, count-split));
698 /* Fancy dance to stay within two buffers */
699 - de2 = dx_move_dirents(data1, data2, map + split, count - split, blocksize);
700 + if (hinfo->hash < hash2) {
701 + de2 = dx_move_dirents(data1, data2, map + split,
702 + count - split, blocksize);
704 + /* make sure we will add entry to the same block which
705 + * we have already locked */
706 + de2 = dx_move_dirents(data1, data2, map, split, blocksize);
708 de = dx_pack_dirents(data1, blocksize);
709 de->rec_len = ext4_rec_len_to_disk(data1 + (blocksize - csum_size) -
711 @@ -1680,13 +2010,21 @@ static struct ext4_dir_entry_2 *do_split
712 dxtrace(dx_show_leaf (hinfo, (struct ext4_dir_entry_2 *) data1, blocksize, 1));
713 dxtrace(dx_show_leaf (hinfo, (struct ext4_dir_entry_2 *) data2, blocksize, 1));
715 - /* Which block gets the new entry? */
716 - if (hinfo->hash >= hash2)
720 + ext4_htree_spin_lock(lck, frame > frames ? (frame - 1)->at : NULL,
721 + frame->at); /* notify block is being split */
722 + if (hinfo->hash < hash2) {
723 + dx_insert_block(frame, hash2 + continued, newblock);
726 + /* switch block number */
727 + dx_insert_block(frame, hash2 + continued,
728 + dx_get_block(frame->at));
729 + dx_set_block(frame->at, newblock);
732 - dx_insert_block(frame, hash2 + continued, newblock);
733 + ext4_htree_spin_unlock(lck);
734 + ext4_htree_dx_unlock(lck);
736 err = ext4_handle_dirty_dirent_node(handle, dir, bh2);
739 @@ -1965,7 +2303,7 @@ static int make_indexed_dir(handle_t *ha
743 - de = do_split(handle,dir, &bh2, frame, &hinfo, &retval);
744 + de = do_split(handle, dir, &bh2, frames, frame, &hinfo, NULL, &retval);
748 @@ -2072,8 +2410,8 @@ out:
749 * may not sleep between calling this and putting something into
750 * the entry, as someone else might have used it while you slept.
752 -static int ext4_add_entry(handle_t *handle, struct dentry *dentry,
753 - struct inode *inode)
754 +int __ext4_add_entry(handle_t *handle, struct dentry *dentry,
755 + struct inode *inode, struct htree_lock *lck)
757 struct inode *dir = dentry->d_parent->d_inode;
758 struct buffer_head *bh = NULL;
759 @@ -2108,9 +2446,10 @@ static int ext4_add_entry(handle_t *hand
760 if (dentry->d_name.len == 2 &&
761 memcmp(dentry->d_name.name, "..", 2) == 0)
762 return ext4_update_dotdot(handle, dentry, inode);
763 - retval = ext4_dx_add_entry(handle, dentry, inode);
764 + retval = ext4_dx_add_entry(handle, dentry, inode, lck);
765 if (!retval || (retval != ERR_BAD_DX_DIR))
767 + ext4_htree_safe_relock(lck);
768 ext4_clear_inode_flag(dir, EXT4_INODE_INDEX);
770 ext4_mark_inode_dirty(handle, dir);
771 @@ -2152,12 +2491,13 @@ out:
772 ext4_set_inode_state(inode, EXT4_STATE_NEWENTRY);
775 +EXPORT_SYMBOL(__ext4_add_entry);
778 * Returns 0 for success, or a negative error value
780 static int ext4_dx_add_entry(handle_t *handle, struct dentry *dentry,
781 - struct inode *inode)
782 + struct inode *inode, struct htree_lock *lck)
784 struct dx_frame frames[EXT4_HTREE_LEVEL], *frame;
785 struct dx_entry *entries, *at;
786 @@ -2171,7 +2511,7 @@ static int ext4_dx_add_entry(handle_t *h
790 - frame = dx_probe(&dentry->d_name, dir, &hinfo, frames, &err);
791 + frame = dx_probe(&dentry->d_name, dir, &hinfo, frames, lck, &err);
794 entries = frame->entries;
795 @@ -2201,6 +2541,11 @@ again:
796 struct dx_node *node2;
797 struct buffer_head *bh2;
799 + if (!ext4_htree_safe_locked(lck)) { /* retry with EX lock */
800 + ext4_htree_safe_relock(lck);
804 while (frame > frames) {
805 if (dx_get_count((frame - 1)->entries) <
806 dx_get_limit((frame - 1)->entries)) {
807 @@ -2304,16 +2649,43 @@ again:
811 + } else if (!ext4_htree_dx_locked(lck)) {
812 + struct ext4_dir_lock_data *ld = ext4_htree_lock_data(lck);
814 + /* not well protected, require DX lock */
815 + ext4_htree_dx_need_lock(lck);
816 + at = frame > frames ? (frame - 1)->at : NULL;
818 + /* NB: no risk of deadlock because it's just a try.
820 + * NB: we check ld_count for twice, the first time before
821 + * having DX lock, the second time after holding DX lock.
823 + * NB: We never free blocks for directory so far, which
824 + * means value returned by dx_get_count() should equal to
825 + * ld->ld_count if nobody split any DE-block under @at,
826 + * and ld->ld_at still points to valid dx_entry. */
827 + if ((ld->ld_count != dx_get_count(entries)) ||
828 + !ext4_htree_dx_lock_try(lck, at) ||
829 + (ld->ld_count != dx_get_count(entries))) {
833 + /* OK, I've got DX lock and nothing changed */
834 + frame->at = ld->ld_at;
836 - de = do_split(handle, dir, &bh, frame, &hinfo, &err);
837 + de = do_split(handle, dir, &bh, frames, frame, &hinfo, lck, &err);
841 err = add_dirent_to_buf(handle, dentry, inode, de, bh);
845 ext4_std_error(dir->i_sb, err); /* this is a no-op if err == 0 */
847 + ext4_htree_dx_unlock(lck);
848 + ext4_htree_de_unlock(lck);
851 /* @restart is true means htree-path has been changed, we need to
852 Index: linux-3.10.0-229.1.2.fc21.x86_64/fs/ext4/super.c
853 ===================================================================
854 --- linux-3.10.0-229.1.2.fc21.x86_64.orig/fs/ext4/super.c
855 +++ linux-3.10.0-229.1.2.fc21.x86_64/fs/ext4/super.c
856 @@ -875,6 +875,7 @@ static struct inode *ext4_alloc_inode(st
858 ei->vfs_inode.i_version = 1;
859 spin_lock_init(&ei->i_raw_lock);
860 + sema_init(&ei->i_append_sem, 1);
861 INIT_LIST_HEAD(&ei->i_prealloc_list);
862 spin_lock_init(&ei->i_prealloc_lock);
863 ext4_es_init_tree(&ei->i_es_tree);