[fs/lustre-release.git] / ldiskfs / kernel_patches / patches / linux-5.8 / ext4-pdirop.patch
1 From 1a0f7f0b9c13ef0aa86e125f350b6733bff8db3c Mon Sep 17 00:00:00 2001
2 From: Shaun Tancheff <stancheff@cray.com>
3 Date: Wed, 15 Jan 2020 07:35:13 -0600
4 Subject: [PATCH] Single directory performance is critical for HPC workloads.
5  In a typical use case an application creates a separate output file for each
6  node and task in a job. As nodes and tasks increase, hundreds of thousands of
7  files may be created in a single directory within a short window of time.
8  Today, both filename lookup and file system modifying operations (such as
9  create and unlink) are protected with a single lock for an entire ldiskfs
10  directory. The PDO project will remove this bottleneck by introducing a
11  parallel locking mechanism for entire ldiskfs directories. This work will
12  enable multiple application threads to look up, create and unlink entries
13  in parallel.
14
15 This patch contains:
16  - pdirops support for ldiskfs
17  - integrate with osd-ldiskfs
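
As an illustration of how a consumer such as osd-ldiskfs is expected to drive
the new API, here is a minimal lookup sketch. Only the ext4_htree_* calls and
the EXT4_HLOCK_LOOKUP flag come from this patch; the wrapper function, its
arguments and the error handling are assumptions made for the example. The
per-directory lock head would be allocated once with
ext4_htree_lock_head_alloc() and released with ext4_htree_lock_head_free().

/* illustrative only, not part of the patch */
static struct buffer_head *
pdo_lookup_sketch(struct inode *dir, struct htree_lock_head *lhead,
                  const struct qstr *name, struct ext4_dir_entry_2 **de)
{
        struct htree_lock *lck;
        struct buffer_head *bh;

        /* per-thread lock handle, released once the operation is done */
        lck = ext4_htree_lock_alloc();
        if (lck == NULL)
                return ERR_PTR(-ENOMEM);

        /* EXT4_HLOCK_LOOKUP maps to CR mode, so concurrent lookups in
         * the same directory can proceed in parallel */
        ext4_htree_lock(lck, lhead, dir, EXT4_HLOCK_LOOKUP);
        bh = ext4_find_entry_locked(dir, name, de, NULL, lck);
        ext4_htree_unlock(lck);

        ext4_htree_lock_free(lck);
        return bh;
}

A create would pass EXT4_HLOCK_ADD and call ext4_add_entry_locked(), an unlink
EXT4_HLOCK_DEL; both map to CW mode, so non-conflicting modifications of the
same directory can also run concurrently.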
18 ---
19  fs/ext4/Makefile |    1 
20  fs/ext4/ext4.h   |   78 +++++++++
21  fs/ext4/namei.c  |  454 ++++++++++++++++++++++++++++++++++++++++++++++++++-----
22  fs/ext4/super.c  |    1 
23  4 files changed, 494 insertions(+), 40 deletions(-)
24  create mode 100644 fs/ext4/htree_lock.c
25  create mode 100644 include/linux/htree_lock.h
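
The patch also creates fs/ext4/htree_lock.c and include/linux/htree_lock.h
(listed above), which provide the generic htree lock used by the ext4 hooks.
The lock head is a per-directory object; the rough lifecycle sketch below is
illustrative only. The structure, field and function names are assumptions,
and hbits is simply forwarded to htree_lock_head_alloc() by
ext4_htree_lock_head_alloc().

/* hypothetical per-directory container, not part of the patch */
struct osd_dir_private {
        struct htree_lock_head *odp_lhead;  /* one lock head per directory */
};

static int osd_dir_private_init(struct osd_dir_private *odp, unsigned hbits)
{
        /* hbits is passed straight through to htree_lock_head_alloc();
         * the caller chooses the value */
        odp->odp_lhead = ext4_htree_lock_head_alloc(hbits);
        return odp->odp_lhead != NULL ? 0 : -ENOMEM;
}

static void osd_dir_private_fini(struct osd_dir_private *odp)
{
        if (odp->odp_lhead != NULL) {
                ext4_htree_lock_head_free(odp->odp_lhead);
                odp->odp_lhead = NULL;
        }
}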
26
27 --- a/fs/ext4/Makefile
28 +++ b/fs/ext4/Makefile
29 @@ -7,6 +7,7 @@ obj-$(CONFIG_EXT4_FS) += ext4.o
30  
31  ext4-y := balloc.o bitmap.o block_validity.o dir.o ext4_jbd2.o extents.o \
32                 extents_status.o file.o fsmap.o fsync.o hash.o ialloc.o \
33 +               htree_lock.o \
34                 indirect.o inline.o inode.o ioctl.o mballoc.o migrate.o \
35                 mmp.o move_extent.o namei.o page-io.o readpage.o resize.o \
36                 super.o symlink.o sysfs.o xattr.o xattr_hurd.o xattr_trusted.o \
37 --- a/fs/ext4/ext4.h
38 +++ b/fs/ext4/ext4.h
39 @@ -29,6 +29,7 @@
40  #include <linux/timer.h>
41  #include <linux/version.h>
42  #include <linux/wait.h>
43 +#include <linux/htree_lock.h>
44  #include <linux/sched/signal.h>
45  #include <linux/blockgroup_lock.h>
46  #include <linux/percpu_counter.h>
47 @@ -987,6 +988,9 @@ struct ext4_inode_info {
48         __u32   i_dtime;
49         ext4_fsblk_t    i_file_acl;
50  
51 +       /* following fields for parallel directory operations -bzzz */
52 +       struct semaphore i_append_sem;
53 +
54         /*
55          * i_block_group is the number of the block group which contains
56          * this file's inode.  Constant across the lifetime of the inode,
57 @@ -2299,6 +2303,72 @@ struct dx_hash_info
58   */
59  #define HASH_NB_ALWAYS         1
60  
61 +/* assume name-hash is protected by upper layer */
62 +#define EXT4_HTREE_LOCK_HASH   0
63 +
64 +enum ext4_pdo_lk_types {
65 +#if EXT4_HTREE_LOCK_HASH
66 +       EXT4_LK_HASH,
67 +#endif
68 +       EXT4_LK_DX,             /* index block */
69 +       EXT4_LK_DE,             /* directory entry block */
70 +       EXT4_LK_SPIN,           /* spinlock */
71 +       EXT4_LK_MAX,
72 +};
73 +
74 +/* read-only bit */
75 +#define EXT4_LB_RO(b)          (1 << (b))
76 +/* read + write, high bits for writer */
77 +#define EXT4_LB_RW(b)          ((1 << (b)) | (1 << (EXT4_LK_MAX + (b))))
78 +
79 +enum ext4_pdo_lock_bits {
80 +       /* DX lock bits */
81 +       EXT4_LB_DX_RO           = EXT4_LB_RO(EXT4_LK_DX),
82 +       EXT4_LB_DX              = EXT4_LB_RW(EXT4_LK_DX),
83 +       /* DE lock bits */
84 +       EXT4_LB_DE_RO           = EXT4_LB_RO(EXT4_LK_DE),
85 +       EXT4_LB_DE              = EXT4_LB_RW(EXT4_LK_DE),
86 +       /* DX spinlock bits */
87 +       EXT4_LB_SPIN_RO         = EXT4_LB_RO(EXT4_LK_SPIN),
88 +       EXT4_LB_SPIN            = EXT4_LB_RW(EXT4_LK_SPIN),
89 +       /* accurate searching */
90 +       EXT4_LB_EXACT           = EXT4_LB_RO(EXT4_LK_MAX << 1),
91 +};
92 +
93 +enum ext4_pdo_lock_opc {
94 +       /* external */
95 +       EXT4_HLOCK_READDIR      = (EXT4_LB_DE_RO | EXT4_LB_DX_RO),
96 +       EXT4_HLOCK_LOOKUP       = (EXT4_LB_DE_RO | EXT4_LB_SPIN_RO |
97 +                                  EXT4_LB_EXACT),
98 +       EXT4_HLOCK_DEL          = (EXT4_LB_DE | EXT4_LB_SPIN_RO |
99 +                                  EXT4_LB_EXACT),
100 +       EXT4_HLOCK_ADD          = (EXT4_LB_DE | EXT4_LB_SPIN_RO),
101 +
102 +       /* internal */
103 +       EXT4_HLOCK_LOOKUP_SAFE  = (EXT4_LB_DE_RO | EXT4_LB_DX_RO |
104 +                                  EXT4_LB_EXACT),
105 +       EXT4_HLOCK_DEL_SAFE     = (EXT4_LB_DE | EXT4_LB_DX_RO | EXT4_LB_EXACT),
106 +       EXT4_HLOCK_SPLIT        = (EXT4_LB_DE | EXT4_LB_DX | EXT4_LB_SPIN),
107 +};
108 +
109 +extern struct htree_lock_head *ext4_htree_lock_head_alloc(unsigned hbits);
110 +#define ext4_htree_lock_head_free(lhead)       htree_lock_head_free(lhead)
111 +
112 +extern struct htree_lock *ext4_htree_lock_alloc(void);
113 +#define ext4_htree_lock_free(lck)              htree_lock_free(lck)
114 +
115 +extern void ext4_htree_lock(struct htree_lock *lck,
116 +                           struct htree_lock_head *lhead,
117 +                           struct inode *dir, unsigned flags);
118 +#define ext4_htree_unlock(lck)                  htree_unlock(lck)
119 +
120 +extern struct buffer_head *ext4_find_entry_locked(struct inode *dir,
121 +                                       const struct qstr *d_name,
122 +                                       struct ext4_dir_entry_2 **res_dir,
123 +                                       int *inlined, struct htree_lock *lck);
124 +extern int ext4_add_entry_locked(handle_t *handle, struct dentry *dentry,
125 +                     struct inode *inode, struct htree_lock *lck);
126 +
127  struct ext4_filename {
128         const struct qstr *usr_fname;
129         struct fscrypt_str disk_name;
130 @@ -2666,11 +2736,19 @@ void ext4_insert_dentry(struct inode *in
131                         struct ext4_filename *fname, void *data);
132  static inline void ext4_update_dx_flag(struct inode *inode)
133  {
134 +       /* Disable it for ldiskfs, because going from a DX directory to
135 +        * a non-DX directory while it is in use will completely break
136 +        * the htree-locking.
137 +        * If we really want to support this operation in the future,
138 +        * we need to exclusively lock the directory here, which will
139 +        * increase the complexity of the code */
140 +#if 0
141         if (!ext4_has_feature_dir_index(inode->i_sb)) {
142                 /* ext4_iget() should have caught this... */
143                 WARN_ON_ONCE(ext4_has_feature_metadata_csum(inode->i_sb));
144                 ext4_clear_inode_flag(inode, EXT4_INODE_INDEX);
145         }
146 +#endif
147  }
148  static const unsigned char ext4_filetype_table[] = {
149         DT_UNKNOWN, DT_REG, DT_DIR, DT_CHR, DT_BLK, DT_FIFO, DT_SOCK, DT_LNK
150 --- a/fs/ext4/namei.c
151 +++ b/fs/ext4/namei.c
152 @@ -55,6 +55,7 @@ struct buffer_head *ext4_append(handle_t
153                                         ext4_lblk_t *block)
154  {
155         struct buffer_head *bh;
156 +       struct ext4_inode_info *ei = EXT4_I(inode);
157         int err;
158  
159         if (unlikely(EXT4_SB(inode->i_sb)->s_max_dir_size_kb &&
160 @@ -62,15 +63,22 @@ struct buffer_head *ext4_append(handle_t
161                       EXT4_SB(inode->i_sb)->s_max_dir_size_kb)))
162                 return ERR_PTR(-ENOSPC);
163  
164 +       /* with parallel dir operations all appends
165 +       * have to be serialized -bzzz */
166 +       down(&ei->i_append_sem);
167 +
168         *block = inode->i_size >> inode->i_sb->s_blocksize_bits;
169  
170         bh = ext4_bread(handle, inode, *block, EXT4_GET_BLOCKS_CREATE);
171 -       if (IS_ERR(bh))
172 +       if (IS_ERR(bh)) {
173 +               up(&ei->i_append_sem);
174                 return bh;
175 +       }
176         inode->i_size += inode->i_sb->s_blocksize;
177         EXT4_I(inode)->i_disksize = inode->i_size;
178         BUFFER_TRACE(bh, "get_write_access");
179         err = ext4_journal_get_write_access(handle, bh);
180 +       up(&ei->i_append_sem);
181         if (err) {
182                 brelse(bh);
183                 ext4_std_error(inode->i_sb, err);
184 @@ -271,7 +279,8 @@ static unsigned dx_node_limit(struct ino
185  static struct dx_frame *dx_probe(struct ext4_filename *fname,
186                                  struct inode *dir,
187                                  struct dx_hash_info *hinfo,
188 -                                struct dx_frame *frame);
189 +                                struct dx_frame *frame,
190 +                                struct htree_lock *lck);
191  static void dx_release(struct dx_frame *frames);
192  static int dx_make_map(struct inode *dir, struct ext4_dir_entry_2 *de,
193                        unsigned blocksize, struct dx_hash_info *hinfo,
194 @@ -285,12 +294,13 @@ static void dx_insert_block(struct dx_fr
195  static int ext4_htree_next_block(struct inode *dir, __u32 hash,
196                                  struct dx_frame *frame,
197                                  struct dx_frame *frames,
198 -                                __u32 *start_hash);
199 +                                __u32 *start_hash, struct htree_lock *lck);
200  static struct buffer_head * ext4_dx_find_entry(struct inode *dir,
201                 struct ext4_filename *fname,
202 -               struct ext4_dir_entry_2 **res_dir);
203 +               struct ext4_dir_entry_2 **res_dir, struct htree_lock *lck);
204  static int ext4_dx_add_entry(handle_t *handle, struct ext4_filename *fname,
205 -                            struct inode *dir, struct inode *inode);
206 +                            struct inode *dir, struct inode *inode,
207 +                            struct htree_lock *lck);
208  
209  /* checksumming functions */
210  void ext4_initialize_dirent_tail(struct buffer_head *bh,
211 @@ -755,6 +765,227 @@ struct stats dx_show_entries(struct dx_h
212  }
213  #endif /* DX_DEBUG */
214  
215 +/* private data for htree_lock */
216 +struct ext4_dir_lock_data {
217 +       unsigned                ld_flags;  /* bits-map for lock types */
218 +       unsigned                ld_count;  /* # entries of the last DX block */
219 +       struct dx_entry         ld_at_entry; /* copy of leaf dx_entry */
220 +       struct dx_entry         *ld_at;    /* position of leaf dx_entry */
221 +};
222 +
223 +#define ext4_htree_lock_data(l)        ((struct ext4_dir_lock_data *)(l)->lk_private)
224 +#define ext4_find_entry(dir, name, dirent, inline) \
225 +                       ext4_find_entry_locked(dir, name, dirent, inline, NULL)
226 +#define ext4_add_entry(handle, dentry, inode) \
227 +                       ext4_add_entry_locked(handle, dentry, inode, NULL)
228 +
229 +/* NB: ext4_lblk_t is 32 bits so we use high bits to identify invalid blk */
230 +#define EXT4_HTREE_NODE_CHANGED        (0xcafeULL << 32)
231 +
232 +static void ext4_htree_event_cb(void *target, void *event)
233 +{
234 +       u64 *block = (u64 *)target;
235 +
236 +       if (*block == dx_get_block((struct dx_entry *)event))
237 +               *block = EXT4_HTREE_NODE_CHANGED;
238 +}
239 +
240 +struct htree_lock_head *ext4_htree_lock_head_alloc(unsigned hbits)
241 +{
242 +       struct htree_lock_head *lhead;
243 +
244 +       lhead = htree_lock_head_alloc(EXT4_LK_MAX, hbits, 0);
245 +       if (lhead != NULL) {
246 +               htree_lock_event_attach(lhead, EXT4_LK_SPIN, HTREE_EVENT_WR,
247 +                                       ext4_htree_event_cb);
248 +       }
249 +       return lhead;
250 +}
251 +EXPORT_SYMBOL(ext4_htree_lock_head_alloc);
252 +
253 +struct htree_lock *ext4_htree_lock_alloc(void)
254 +{
255 +       return htree_lock_alloc(EXT4_LK_MAX,
256 +                               sizeof(struct ext4_dir_lock_data));
257 +}
258 +EXPORT_SYMBOL(ext4_htree_lock_alloc);
259 +
260 +static htree_lock_mode_t ext4_htree_mode(unsigned flags)
261 +{
262 +       switch (flags) {
263 +       default: /* 0 or unknown flags require EX lock */
264 +               return HTREE_LOCK_EX;
265 +       case EXT4_HLOCK_READDIR:
266 +               return HTREE_LOCK_PR;
267 +       case EXT4_HLOCK_LOOKUP:
268 +               return HTREE_LOCK_CR;
269 +       case EXT4_HLOCK_DEL:
270 +       case EXT4_HLOCK_ADD:
271 +               return HTREE_LOCK_CW;
272 +       }
273 +}
274 +
275 +/* return PR for read-only operations, otherwise return EX */
276 +static inline htree_lock_mode_t ext4_htree_safe_mode(unsigned flags)
277 +{
278 +       int writer = (flags & EXT4_LB_DE) == EXT4_LB_DE;
279 +
280 +       /* 0 requires EX lock */
281 +       return (flags == 0 || writer) ? HTREE_LOCK_EX : HTREE_LOCK_PR;
282 +}
283 +
284 +static int ext4_htree_safe_locked(struct htree_lock *lck)
285 +{
286 +       int writer;
287 +
288 +       if (lck == NULL || lck->lk_mode == HTREE_LOCK_EX)
289 +               return 1;
290 +
291 +       writer = (ext4_htree_lock_data(lck)->ld_flags & EXT4_LB_DE) ==
292 +                EXT4_LB_DE;
293 +       if (writer) /* all readers & writers are excluded? */
294 +               return lck->lk_mode == HTREE_LOCK_EX;
295 +
296 +       /* all writers are excluded? */
297 +       return lck->lk_mode == HTREE_LOCK_PR ||
298 +              lck->lk_mode == HTREE_LOCK_PW ||
299 +              lck->lk_mode == HTREE_LOCK_EX;
300 +}
301 +
302 +/* relock htree_lock with EX mode if it's a change operation, otherwise
303 + * relock it with PR mode. It's a no-op if PDO is disabled. */
304 +static void ext4_htree_safe_relock(struct htree_lock *lck)
305 +{
306 +       if (!ext4_htree_safe_locked(lck)) {
307 +               unsigned flags = ext4_htree_lock_data(lck)->ld_flags;
308 +
309 +               htree_change_lock(lck, ext4_htree_safe_mode(flags));
310 +       }
311 +}
312 +
313 +void ext4_htree_lock(struct htree_lock *lck, struct htree_lock_head *lhead,
314 +                    struct inode *dir, unsigned flags)
315 +{
316 +       htree_lock_mode_t mode = is_dx(dir) ? ext4_htree_mode(flags) :
317 +                                             ext4_htree_safe_mode(flags);
318 +
319 +       ext4_htree_lock_data(lck)->ld_flags = flags;
320 +       htree_lock(lck, lhead, mode);
321 +       if (!is_dx(dir))
322 +               ext4_htree_safe_relock(lck); /* make sure it's safe locked */
323 +}
324 +EXPORT_SYMBOL(ext4_htree_lock);
325 +
326 +static int ext4_htree_node_lock(struct htree_lock *lck, struct dx_entry *at,
327 +                               unsigned lmask, int wait, void *ev)
328 +{
329 +       u32     key = (at == NULL) ? 0 : dx_get_block(at);
330 +       u32     mode;
331 +
332 +       /* NOOP if htree is well protected or caller doesn't require the lock */
333 +       if (ext4_htree_safe_locked(lck) ||
334 +          !(ext4_htree_lock_data(lck)->ld_flags & lmask))
335 +               return 1;
336 +
337 +       mode = (ext4_htree_lock_data(lck)->ld_flags & lmask) == lmask ?
338 +               HTREE_LOCK_PW : HTREE_LOCK_PR;
339 +       while (1) {
340 +               if (htree_node_lock_try(lck, mode, key, ffz(~lmask), wait, ev))
341 +                       return 1;
342 +               if (!(lmask & EXT4_LB_SPIN)) /* not a spinlock */
343 +                       return 0;
344 +               cpu_relax(); /* spin until granted */
345 +       }
346 +}
347 +
348 +static int ext4_htree_node_locked(struct htree_lock *lck, unsigned lmask)
349 +{
350 +       return ext4_htree_safe_locked(lck) ||
351 +              htree_node_is_granted(lck, ffz(~lmask));
352 +}
353 +
354 +static void ext4_htree_node_unlock(struct htree_lock *lck,
355 +                                  unsigned lmask, void *buf)
356 +{
357 +       /* NB: it's safe to call multiple times, even if it's not locked */
358 +       if (!ext4_htree_safe_locked(lck) &&
359 +            htree_node_is_granted(lck, ffz(~lmask)))
360 +               htree_node_unlock(lck, ffz(~lmask), buf);
361 +}
362 +
363 +#define ext4_htree_dx_lock(lck, key)           \
364 +       ext4_htree_node_lock(lck, key, EXT4_LB_DX, 1, NULL)
365 +#define ext4_htree_dx_lock_try(lck, key)       \
366 +       ext4_htree_node_lock(lck, key, EXT4_LB_DX, 0, NULL)
367 +#define ext4_htree_dx_unlock(lck)              \
368 +       ext4_htree_node_unlock(lck, EXT4_LB_DX, NULL)
369 +#define ext4_htree_dx_locked(lck)              \
370 +       ext4_htree_node_locked(lck, EXT4_LB_DX)
371 +
372 +static void ext4_htree_dx_need_lock(struct htree_lock *lck)
373 +{
374 +       struct ext4_dir_lock_data *ld;
375 +
376 +       if (ext4_htree_safe_locked(lck))
377 +               return;
378 +
379 +       ld = ext4_htree_lock_data(lck);
380 +       switch (ld->ld_flags) {
381 +       default:
382 +               return;
383 +       case EXT4_HLOCK_LOOKUP:
384 +               ld->ld_flags = EXT4_HLOCK_LOOKUP_SAFE;
385 +               return;
386 +       case EXT4_HLOCK_DEL:
387 +               ld->ld_flags = EXT4_HLOCK_DEL_SAFE;
388 +               return;
389 +       case EXT4_HLOCK_ADD:
390 +               ld->ld_flags = EXT4_HLOCK_SPLIT;
391 +               return;
392 +       }
393 +}
394 +
395 +#define ext4_htree_de_lock(lck, key)           \
396 +       ext4_htree_node_lock(lck, key, EXT4_LB_DE, 1, NULL)
397 +#define ext4_htree_de_unlock(lck)              \
398 +       ext4_htree_node_unlock(lck, EXT4_LB_DE, NULL)
399 +
400 +#define ext4_htree_spin_lock(lck, key, event)  \
401 +       ext4_htree_node_lock(lck, key, EXT4_LB_SPIN, 0, event)
402 +#define ext4_htree_spin_unlock(lck)            \
403 +       ext4_htree_node_unlock(lck, EXT4_LB_SPIN, NULL)
404 +#define ext4_htree_spin_unlock_listen(lck, p)  \
405 +       ext4_htree_node_unlock(lck, EXT4_LB_SPIN, p)
406 +
407 +static void ext4_htree_spin_stop_listen(struct htree_lock *lck)
408 +{
409 +       if (!ext4_htree_safe_locked(lck) &&
410 +           htree_node_is_listening(lck, ffz(~EXT4_LB_SPIN)))
411 +               htree_node_stop_listen(lck, ffz(~EXT4_LB_SPIN));
412 +}
413 +
414 +enum {
415 +       DX_HASH_COL_IGNORE,     /* ignore collision while probing frames */
416 +       DX_HASH_COL_YES,        /* there is collision and it does matter */
417 +       DX_HASH_COL_NO,         /* there is no collision */
418 +};
419 +
420 +static int dx_probe_hash_collision(struct htree_lock *lck,
421 +                                  struct dx_entry *entries,
422 +                                  struct dx_entry *at, u32 hash)
423 +{
424 +       if (!(lck && ext4_htree_lock_data(lck)->ld_flags & EXT4_LB_EXACT)) {
425 +               return DX_HASH_COL_IGNORE; /* don't care about collision */
426 +
427 +       } else if (at == entries + dx_get_count(entries) - 1) {
428 +               return DX_HASH_COL_IGNORE; /* not in any leaf of this DX */
429 +
430 +       } else { /* hash collision? */
431 +               return ((dx_get_hash(at + 1) & ~1) == hash) ?
432 +                       DX_HASH_COL_YES : DX_HASH_COL_NO;
433 +       }
434 +}
435 +
436  /*
437   * Probe for a directory leaf block to search.
438   *
439 @@ -766,10 +997,11 @@ struct stats dx_show_entries(struct dx_h
440   */
441  static struct dx_frame *
442  dx_probe(struct ext4_filename *fname, struct inode *dir,
443 -        struct dx_hash_info *hinfo, struct dx_frame *frame_in)
444 +        struct dx_hash_info *hinfo, struct dx_frame *frame_in,
445 +        struct htree_lock *lck)
446  {
447         unsigned count, indirect;
448 -       struct dx_entry *at, *entries, *p, *q, *m;
449 +       struct dx_entry *at, *entries, *p, *q, *m, *dx = NULL;
450         struct dx_root_info *info;
451         struct dx_frame *frame = frame_in;
452         struct dx_frame *ret_err = ERR_PTR(ERR_BAD_DX_DIR);
453 @@ -831,8 +1063,15 @@ dx_probe(struct ext4_filename *fname, st
454  
455         dxtrace(printk("Look up %x", hash));
456         while (1) {
457 +               if (indirect == 0) { /* the last index level */
458 +                       /* NB: ext4_htree_dx_lock() could be a no-op if
459 +                        * the DX-lock flag is not set for the current operation */
460 +                       ext4_htree_dx_lock(lck, dx);
461 +                       ext4_htree_spin_lock(lck, dx, NULL);
462 +               }
463                 count = dx_get_count(entries);
464 -               if (!count || count > dx_get_limit(entries)) {
465 +               if (count == 0 || count > dx_get_limit(entries)) {
466 +                       ext4_htree_spin_unlock(lck); /* release spin */
467                         ext4_warning_inode(dir,
468                                            "dx entry: count %u beyond limit %u",
469                                            count, dx_get_limit(entries));
470 @@ -871,8 +1110,70 @@ dx_probe(struct ext4_filename *fname, st
471                                dx_get_block(at)));
472                 frame->entries = entries;
473                 frame->at = at;
474 -               if (!indirect--)
475 +
476 +               if (indirect == 0) { /* the last index level */
477 +                       struct ext4_dir_lock_data *ld;
478 +                       u64 myblock;
479 +
480 +                       /* By default we only lock the DE-block; however, we will
481 +                        * also lock the last-level DX-block if:
482 +                        * a) there is a hash collision:
483 +                        *    we will set the DX-lock flag (a few lines below)
484 +                        *    and retry to lock the DX-block,
485 +                        *    see details in dx_probe_hash_collision()
486 +                        * b) it's a retry after splitting:
487 +                        *    we need to lock the last-level DX-block so nobody
488 +                        *    else can split any leaf blocks under the same
489 +                        *    DX-block, see details in ext4_dx_add_entry()
490 +                        */
491 +                       if (ext4_htree_dx_locked(lck)) {
492 +                               /* DX-block is locked, just lock DE-block
493 +                                * and return */
494 +                               ext4_htree_spin_unlock(lck);
495 +                               if (!ext4_htree_safe_locked(lck))
496 +                                       ext4_htree_de_lock(lck, frame->at);
497 +                               return frame;
498 +                       }
499 +                       /* it's pdirop and no DX lock */
500 +                       if (dx_probe_hash_collision(lck, entries, at, hash) ==
501 +                           DX_HASH_COL_YES) {
502 +                               /* found a hash collision, set the DX-lock flag
503 +                                * and retry to obtain the DX-lock */
504 +                               ext4_htree_spin_unlock(lck);
505 +                               ext4_htree_dx_need_lock(lck);
506 +                               continue;
507 +                       }
508 +                       ld = ext4_htree_lock_data(lck);
509 +                       /* because I don't lock DX, @at can't be trusted
510 +                        * after I release the spinlock, so I have to save it */
511 +                       ld->ld_at = at;
512 +                       ld->ld_at_entry = *at;
513 +                       ld->ld_count = dx_get_count(entries);
514 +
515 +                       frame->at = &ld->ld_at_entry;
516 +                       myblock = dx_get_block(at);
517 +
518 +                       /* NB: the ordering of the locking calls matters */
519 +                       ext4_htree_spin_unlock_listen(lck, &myblock);
520 +                       /* another thread can split this DE-block because:
521 +                        * a) I don't have a lock on the DE-block yet
522 +                        * b) I released the spinlock on the DX-block
523 +                        * if that happens I can detect it by listening for
524 +                        * the splitting event on this DE-block */
525 +                       ext4_htree_de_lock(lck, frame->at);
526 +                       ext4_htree_spin_stop_listen(lck);
527 +
528 +                       if (myblock == EXT4_HTREE_NODE_CHANGED) {
529 +                               /* someone split this DE-block before
530 +                                * I locked it; I need to retry and lock
531 +                                * the valid DE-block */
532 +                               ext4_htree_de_unlock(lck);
533 +                               continue;
534 +                       }
535                         return frame;
536 +               }
537 +               dx = at;
538 +               indirect--;
539                 frame++;
540                 frame->bh = ext4_read_dirblock(dir, dx_get_block(at), INDEX);
541                 if (IS_ERR(frame->bh)) {
542 @@ -941,7 +1242,7 @@ static void dx_release(struct dx_frame *
543  static int ext4_htree_next_block(struct inode *dir, __u32 hash,
544                                  struct dx_frame *frame,
545                                  struct dx_frame *frames,
546 -                                __u32 *start_hash)
547 +                                __u32 *start_hash, struct htree_lock *lck)
548  {
549         struct dx_frame *p;
550         struct buffer_head *bh;
551 @@ -956,12 +1257,22 @@ static int ext4_htree_next_block(struct
552          * this loop, num_frames indicates the number of interior
553          * nodes need to be read.
554          */
555 +       ext4_htree_de_unlock(lck);
556         while (1) {
557 -               if (++(p->at) < p->entries + dx_get_count(p->entries))
558 -                       break;
559 +               if (num_frames > 0 || ext4_htree_dx_locked(lck)) {
560 +                       /* num_frames > 0 :
561 +                        *   DX block
562 +                        * ext4_htree_dx_locked:
563 +                        *   frame->at is a reliable pointer returned by dx_probe,
564 +                        * otherwise dx_probe already knew there was no collision */
565 +                       if (++(p->at) < p->entries + dx_get_count(p->entries))
566 +                               break;
567 +               }
568                 if (p == frames)
569                         return 0;
570                 num_frames++;
571 +               if (num_frames == 1)
572 +                       ext4_htree_dx_unlock(lck);
573                 p--;
574         }
575  
576 @@ -984,6 +1295,13 @@ static int ext4_htree_next_block(struct
577          * block so no check is necessary
578          */
579         while (num_frames--) {
580 +               if (num_frames == 0) {
581 +                       /* it's not always necessary; we just don't want to
582 +                        * detect the hash collision again */
583 +                       ext4_htree_dx_need_lock(lck);
584 +                       ext4_htree_dx_lock(lck, p->at);
585 +               }
586 +
587                 bh = ext4_read_dirblock(dir, dx_get_block(p->at), INDEX);
588                 if (IS_ERR(bh))
589                         return PTR_ERR(bh);
590 @@ -992,6 +1310,7 @@ static int ext4_htree_next_block(struct
591                 p->bh = bh;
592                 p->at = p->entries = ((struct dx_node *) bh->b_data)->entries;
593         }
594 +       ext4_htree_de_lock(lck, p->at);
595         return 1;
596  }
597  
598 @@ -1136,10 +1455,10 @@ int ext4_htree_fill_tree(struct file *di
599         }
600         hinfo.hash = start_hash;
601         hinfo.minor_hash = 0;
602 -       frame = dx_probe(NULL, dir, &hinfo, frames);
603 +       /* assume it's PR locked */
604 +       frame = dx_probe(NULL, dir, &hinfo, frames, NULL);
605         if (IS_ERR(frame))
606                 return PTR_ERR(frame);
607 -
608         /* Add '.' and '..' from the htree header */
609         if (!start_hash && !start_minor_hash) {
610                 de = (struct ext4_dir_entry_2 *) frames[0].bh->b_data;
611 @@ -1179,7 +1498,7 @@ int ext4_htree_fill_tree(struct file *di
612                 count += ret;
613                 hashval = ~0;
614                 ret = ext4_htree_next_block(dir, HASH_NB_ALWAYS,
615 -                                           frame, frames, &hashval);
616 +                                           frame, frames, &hashval, NULL);
617                 *next_hash = hashval;
618                 if (ret < 0) {
619                         err = ret;
620 @@ -1455,7 +1774,7 @@ static int is_dx_internal_node(struct in
621  static struct buffer_head *__ext4_find_entry(struct inode *dir,
622                                              struct ext4_filename *fname,
623                                              struct ext4_dir_entry_2 **res_dir,
624 -                                            int *inlined)
625 +                                            int *inlined, struct htree_lock *lck)
626  {
627         struct super_block *sb;
628         struct buffer_head *bh_use[NAMEI_RA_SIZE];
629 @@ -1497,7 +1816,7 @@ static struct buffer_head *__ext4_find_e
630                 goto restart;
631         }
632         if (is_dx(dir)) {
633 -               ret = ext4_dx_find_entry(dir, fname, res_dir);
634 +               ret = ext4_dx_find_entry(dir, fname, res_dir, lck);
635                 /*
636                  * On success, or if the error was file not found,
637                  * return.  Otherwise, fall back to doing a search the
638 @@ -1507,6 +1826,7 @@ static struct buffer_head *__ext4_find_e
639                         goto cleanup_and_exit;
640                 dxtrace(printk(KERN_DEBUG "ext4_find_entry: dx failed, "
641                                "falling back\n"));
642 +               ext4_htree_safe_relock(lck);
643                 ret = NULL;
644         }
645         nblocks = dir->i_size >> EXT4_BLOCK_SIZE_BITS(sb);
646 @@ -1597,10 +1917,10 @@ cleanup_and_exit:
647         return ret;
648  }
649  
650 -static struct buffer_head *ext4_find_entry(struct inode *dir,
651 +struct buffer_head *ext4_find_entry_locked(struct inode *dir,
652                                            const struct qstr *d_name,
653                                            struct ext4_dir_entry_2 **res_dir,
654 -                                          int *inlined)
655 +                                          int *inlined, struct htree_lock *lck)
656  {
657         int err;
658         struct ext4_filename fname;
659 @@ -1612,12 +1932,14 @@ static struct buffer_head *ext4_find_ent
660         if (err)
661                 return ERR_PTR(err);
662  
663 -       bh = __ext4_find_entry(dir, &fname, res_dir, inlined);
664 +       bh = __ext4_find_entry(dir, &fname, res_dir, inlined, lck);
665  
666         ext4_fname_free_filename(&fname);
667         return bh;
668  }
669  
670 +EXPORT_SYMBOL(ext4_find_entry_locked);
671 +
672  static struct buffer_head *ext4_lookup_entry(struct inode *dir,
673                                              struct dentry *dentry,
674                                              struct ext4_dir_entry_2 **res_dir)
675 @@ -1632,7 +1954,7 @@ static struct buffer_head *ext4_lookup_e
676         if (err)
677                 return ERR_PTR(err);
678  
679 -       bh = __ext4_find_entry(dir, &fname, res_dir, NULL);
680 +       bh = __ext4_find_entry(dir, &fname, res_dir, NULL, NULL);
681  
682         ext4_fname_free_filename(&fname);
683         return bh;
684 @@ -1640,7 +1962,8 @@ static struct buffer_head *ext4_lookup_e
685  
686  static struct buffer_head * ext4_dx_find_entry(struct inode *dir,
687                         struct ext4_filename *fname,
688 -                       struct ext4_dir_entry_2 **res_dir)
689 +                       struct ext4_dir_entry_2 **res_dir,
690 +                       struct htree_lock *lck)
691  {
692         struct super_block * sb = dir->i_sb;
693         struct dx_frame frames[EXT4_HTREE_LEVEL], *frame;
694 @@ -1651,7 +1974,7 @@ static struct buffer_head * ext4_dx_find
695  #ifdef CONFIG_FS_ENCRYPTION
696         *res_dir = NULL;
697  #endif
698 -       frame = dx_probe(fname, dir, NULL, frames);
699 +       frame = dx_probe(fname, dir, NULL, frames, lck);
700         if (IS_ERR(frame))
701                 return (struct buffer_head *) frame;
702         do {
703 @@ -1673,7 +1996,7 @@ static struct buffer_head * ext4_dx_find
704  
705                 /* Check to see if we should continue to search */
706                 retval = ext4_htree_next_block(dir, fname->hinfo.hash, frame,
707 -                                              frames, NULL);
708 +                                              frames, NULL, lck);
709                 if (retval < 0) {
710                         ext4_warning_inode(dir,
711                                 "error %d reading directory index block",
712 @@ -1853,8 +2176,9 @@ static struct ext4_dir_entry_2* dx_pack_
713   * Returns pointer to de in block into which the new entry will be inserted.
714   */
715  static struct ext4_dir_entry_2 *do_split(handle_t *handle, struct inode *dir,
716 -                       struct buffer_head **bh,struct dx_frame *frame,
717 -                       struct dx_hash_info *hinfo)
718 +                       struct buffer_head **bh, struct dx_frame *frames,
719 +                       struct dx_frame *frame, struct dx_hash_info *hinfo,
720 +                       struct htree_lock *lck)
721  {
722         unsigned blocksize = dir->i_sb->s_blocksize;
723         unsigned count, continued;
724 @@ -1915,8 +2239,14 @@ static struct ext4_dir_entry_2 *do_split
725                                         hash2, split, count-split));
726  
727         /* Fancy dance to stay within two buffers */
728 -       de2 = dx_move_dirents(data1, data2, map + split, count - split,
729 -                             blocksize);
730 +       if (hinfo->hash < hash2) {
731 +               de2 = dx_move_dirents(data1, data2, map + split,
732 +                                     count - split, blocksize);
733 +       } else {
734 +               /* make sure we will add the entry to the same block that
735 +                * we have already locked */
736 +               de2 = dx_move_dirents(data1, data2, map, split, blocksize);
737 +       }
738         de = dx_pack_dirents(data1, blocksize);
739         de->rec_len = ext4_rec_len_to_disk(data1 + (blocksize - csum_size) -
740                                            (char *) de,
741 @@ -1934,12 +2264,21 @@ static struct ext4_dir_entry_2 *do_split
742         dxtrace(dx_show_leaf(dir, hinfo, (struct ext4_dir_entry_2 *) data2,
743                         blocksize, 1));
744  
745 -       /* Which block gets the new entry? */
746 -       if (hinfo->hash >= hash2) {
747 -               swap(*bh, bh2);
748 -               de = de2;
749 +       ext4_htree_spin_lock(lck, frame > frames ? (frame - 1)->at : NULL,
750 +                            frame->at); /* notify block is being split */
751 +       if (hinfo->hash < hash2) {
752 +               dx_insert_block(frame, hash2 + continued, newblock);
753 +
754 +       } else {
755 +               /* switch block number */
756 +               dx_insert_block(frame, hash2 + continued,
757 +                               dx_get_block(frame->at));
758 +               dx_set_block(frame->at, newblock);
759 +               (frame->at)++;
760         }
761 -       dx_insert_block(frame, hash2 + continued, newblock);
762 +       ext4_htree_spin_unlock(lck);
763 +       ext4_htree_dx_unlock(lck);
764 +
765         err = ext4_handle_dirty_dirblock(handle, dir, bh2);
766         if (err)
767                 goto journal_error;
768 @@ -2209,7 +2548,7 @@ static int make_indexed_dir(handle_t *ha
769         if (retval)
770                 goto out_frames;        
771  
772 -       de = do_split(handle,dir, &bh2, frame, &fname->hinfo);
773 +       de = do_split(handle, dir, &bh2, frames, frame, &fname->hinfo, NULL);
774         if (IS_ERR(de)) {
775                 retval = PTR_ERR(de);
776                 goto out_frames;
777 @@ -2319,8 +2658,8 @@ out:
778   * may not sleep between calling this and putting something into
779   * the entry, as someone else might have used it while you slept.
780   */
781 -static int ext4_add_entry(handle_t *handle, struct dentry *dentry,
782 -                         struct inode *inode)
783 +int ext4_add_entry_locked(handle_t *handle, struct dentry *dentry,
784 +                         struct inode *inode, struct htree_lock *lck)
785  {
786         struct inode *dir = d_inode(dentry->d_parent);
787         struct buffer_head *bh = NULL;
788 @@ -2370,9 +2709,10 @@ static int ext4_add_entry(handle_t *hand
789                 if (dentry->d_name.len == 2 &&
790                     memcmp(dentry->d_name.name, "..", 2) == 0)
791                         return ext4_update_dotdot(handle, dentry, inode);
792 -               retval = ext4_dx_add_entry(handle, &fname, dir, inode);
793 +               retval = ext4_dx_add_entry(handle, &fname, dir, inode, lck);
794                 if (!retval || (retval != ERR_BAD_DX_DIR))
795                         goto out;
796 +               ext4_htree_safe_relock(lck);
797                 /* Can we just ignore htree data? */
798                 if (ext4_has_metadata_csum(sb)) {
799                         EXT4_ERROR_INODE(dir,
800 @@ -2435,12 +2775,14 @@ out:
801                 ext4_set_inode_state(inode, EXT4_STATE_NEWENTRY);
802         return retval;
803  }
804 +EXPORT_SYMBOL(ext4_add_entry_locked);
805  
806  /*
807   * Returns 0 for success, or a negative error value
808   */
809  static int ext4_dx_add_entry(handle_t *handle, struct ext4_filename *fname,
810 -                            struct inode *dir, struct inode *inode)
811 +                            struct inode *dir, struct inode *inode,
812 +                            struct htree_lock *lck)
813  {
814         struct dx_frame frames[EXT4_HTREE_LEVEL], *frame;
815         struct dx_entry *entries, *at;
816 @@ -2452,7 +2794,7 @@ static int ext4_dx_add_entry(handle_t *h
817  
818  again:
819         restart = 0;
820 -       frame = dx_probe(fname, dir, NULL, frames);
821 +       frame = dx_probe(fname, dir, NULL, frames, lck);
822         if (IS_ERR(frame))
823                 return PTR_ERR(frame);
824         entries = frame->entries;
825 @@ -2487,6 +2829,12 @@ again:
826                 struct dx_node *node2;
827                 struct buffer_head *bh2;
828  
829 +               if (!ext4_htree_safe_locked(lck)) { /* retry with EX lock */
830 +                       ext4_htree_safe_relock(lck);
831 +                       restart = 1;
832 +                       goto cleanup;
833 +               }
834 +
835                 while (frame > frames) {
836                         if (dx_get_count((frame - 1)->entries) <
837                             dx_get_limit((frame - 1)->entries)) {
838 @@ -2589,8 +2937,32 @@ again:
839                         restart = 1;
840                         goto journal_error;
841                 }
842 +       } else if (!ext4_htree_dx_locked(lck)) {
843 +               struct ext4_dir_lock_data *ld = ext4_htree_lock_data(lck);
844 +
845 +               /* not well protected, require DX lock */
846 +               ext4_htree_dx_need_lock(lck);
847 +               at = frame > frames ? (frame - 1)->at : NULL;
848 +
849 +               /* NB: no risk of deadlock because it's just a try.
850 +                *
851 +                * NB: we check ld_count twice, the first time before
852 +                * taking the DX lock, the second time after holding it.
853 +                *
854 +                * NB: we never free directory blocks so far, which
855 +                * means the value returned by dx_get_count() should equal
856 +                * ld->ld_count if nobody split any DE-block under @at,
857 +                * and ld->ld_at still points to a valid dx_entry. */
858 +               if ((ld->ld_count != dx_get_count(entries)) ||
859 +                   !ext4_htree_dx_lock_try(lck, at) ||
860 +                   (ld->ld_count != dx_get_count(entries))) {
861 +                       restart = 1;
862 +                       goto cleanup;
863 +               }
864 +               /* OK, I've got DX lock and nothing changed */
865 +               frame->at = ld->ld_at;
866         }
867 -       de = do_split(handle, dir, &bh, frame, &fname->hinfo);
868 +       de = do_split(handle, dir, &bh, frames, frame, &fname->hinfo, lck);
869         if (IS_ERR(de)) {
870                 err = PTR_ERR(de);
871                 goto cleanup;
872 @@ -2601,6 +2973,8 @@ again:
873  journal_error:
874         ext4_std_error(dir->i_sb, err); /* this is a no-op if err == 0 */
875  cleanup:
876 +       ext4_htree_dx_unlock(lck);
877 +       ext4_htree_de_unlock(lck);
878         brelse(bh);
879         dx_release(frames);
880         /* @restart is true means htree-path has been changed, we need to
881 --- a/fs/ext4/super.c
882 +++ b/fs/ext4/super.c
883 @@ -1122,6 +1122,7 @@ static struct inode *ext4_alloc_inode(st
884  
885         inode_set_iversion(&ei->vfs_inode, 1);
886         spin_lock_init(&ei->i_raw_lock);
887 +       sema_init(&ei->i_append_sem, 1);
888         INIT_LIST_HEAD(&ei->i_prealloc_list);
889         spin_lock_init(&ei->i_prealloc_lock);
890         ext4_es_init_tree(&ei->i_es_tree);