1 LU-50 ldiskfs: pdirops patch for ldiskfs
2
3 Single directory performance is critical for HPC workloads. In a
4 typical use case an application creates a separate output file for
5 each node and task in a job. As nodes and tasks increase, hundreds
6 of thousands of files may be created in a single directory within
7 a short window of time.
8 Today, both filename lookup and file system modifying operations
9 (such as create and unlink) are protected with a single lock for
10 an entire ldiskfs directory. The PDO project will remove this
11 bottleneck by introducing a parallel locking mechanism for entire
12 ldiskfs directories. This work will enable multiple application
13 threads to look up, create, and unlink in parallel.
14
15 This patch contains:
16   - pdirops support for ldiskfs
17   - N-level htree directory
18   - integration with osd-ldiskfs
19
20 Signed-off-by: Liang Zhen <liang@whamcloud.com>
21 Change-Id: I269c0e3112e68f3acd79e860dab052a68c7d7aaa
22 Reviewed-on: http://review.whamcloud.com/375
23 Reviewed-by: Andreas Dilger <adilger@whamcloud.com>
24 ---
25  fs/ext4/Makefile |   1 +
26  fs/ext4/ext4.h   |  78 ++++++++
27  fs/ext4/namei.c  | 465 ++++++++++++++++++++++++++++++++++++++++++-----
28  fs/ext4/super.c  |   1 +
29  4 files changed, 504 insertions(+), 41 deletions(-)
30
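
Usage note (editorial; not part of the patch content that gets applied):
below is a minimal sketch, under stated assumptions, of how a caller such
as osd-ldiskfs might drive the pdirops API exported by this patch
(ext4_htree_lock_head_alloc/free, ext4_htree_lock_alloc/free,
ext4_htree_lock/unlock, ext4_find_entry_locked). The per-directory lock
head "lhead", the helper name, and the error handling are hypothetical;
only the ext4_htree_* calls and the EXT4_HLOCK_LOOKUP opcode come from
the patch itself.

    /* illustrative sketch only -- see note above */
    static struct buffer_head *
    pdo_lookup_example(struct inode *dir, const struct qstr *name,
                       struct htree_lock_head *lhead)
    {
            struct ext4_dir_entry_2 *de;
            struct buffer_head *bh;
            struct htree_lock *lck;

            lck = ext4_htree_lock_alloc();
            if (lck == NULL)
                    return ERR_PTR(-ENOMEM);

            /* shared lock for lookup; a create or unlink caller would
             * pass EXT4_HLOCK_ADD or EXT4_HLOCK_DEL instead */
            ext4_htree_lock(lck, lhead, dir, EXT4_HLOCK_LOOKUP);
            bh = ext4_find_entry_locked(dir, name, &de, NULL, lck);
            ext4_htree_unlock(lck);
            ext4_htree_lock_free(lck);
            return bh;
    }

The lock head itself would be created once per directory object with
ext4_htree_lock_head_alloc() and released with ext4_htree_lock_head_free()
when that object is destroyed.
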
31 diff --git a/fs/ext4/Makefile b/fs/ext4/Makefile
32 index 7d89142..e0dea46 100644
33 --- a/fs/ext4/Makefile
34 +++ b/fs/ext4/Makefile
35 @@ -7,6 +7,7 @@ obj-$(CONFIG_EXT4_FS) += ext4.o
36  
37  ext4-y := balloc.o bitmap.o block_validity.o dir.o ext4_jbd2.o extents.o \
38                 extents_status.o file.o fsmap.o fsync.o hash.o ialloc.o \
39 +               htree_lock.o \
40                 indirect.o inline.o inode.o ioctl.o mballoc.o migrate.o \
41                 mmp.o move_extent.o namei.o page-io.o readpage.o resize.o \
42                 super.o symlink.o sysfs.o xattr.o xattr_hurd.o xattr_trusted.o \
43 diff --git a/fs/ext4/ext4.h b/fs/ext4/ext4.h
44 index 73f594d..febe183 100644
45 --- a/fs/ext4/ext4.h
46 +++ b/fs/ext4/ext4.h
47 @@ -29,6 +29,7 @@
48  #include <linux/mutex.h>
49  #include <linux/timer.h>
50  #include <linux/wait.h>
51 +#include <linux/htree_lock.h>
52  #include <linux/sched/signal.h>
53  #include <linux/blockgroup_lock.h>
54  #include <linux/percpu_counter.h>
55 @@ -1019,6 +1020,9 @@ struct ext4_inode_info {
56         __u32   i_dtime;
57         ext4_fsblk_t    i_file_acl;
58  
59 +       /* following fields for parallel directory operations -bzzz */
60 +       struct semaphore i_append_sem;
61 +
62         /*
63          * i_block_group is the number of the block group which contains
64          * this file's inode.  Constant across the lifetime of the inode,
65 @@ -2554,6 +2558,72 @@ struct dx_hash_info
66   */
67  #define HASH_NB_ALWAYS         1
68  
69 +/* assume name-hash is protected by upper layer */
70 +#define EXT4_HTREE_LOCK_HASH   0
71 +
72 +enum ext4_pdo_lk_types {
73 +#if EXT4_HTREE_LOCK_HASH
74 +       EXT4_LK_HASH,
75 +#endif
76 +       EXT4_LK_DX,             /* index block */
77 +       EXT4_LK_DE,             /* directory entry block */
78 +       EXT4_LK_SPIN,           /* spinlock */
79 +       EXT4_LK_MAX,
80 +};
81 +
82 +/* read-only bit */
83 +#define EXT4_LB_RO(b)          (1 << (b))
84 +/* read + write, high bits for writer */
85 +#define EXT4_LB_RW(b)          ((1 << (b)) | (1 << (EXT4_LK_MAX + (b))))
86 +
87 +enum ext4_pdo_lock_bits {
88 +       /* DX lock bits */
89 +       EXT4_LB_DX_RO           = EXT4_LB_RO(EXT4_LK_DX),
90 +       EXT4_LB_DX              = EXT4_LB_RW(EXT4_LK_DX),
91 +       /* DE lock bits */
92 +       EXT4_LB_DE_RO           = EXT4_LB_RO(EXT4_LK_DE),
93 +       EXT4_LB_DE              = EXT4_LB_RW(EXT4_LK_DE),
94 +       /* DX spinlock bits */
95 +       EXT4_LB_SPIN_RO         = EXT4_LB_RO(EXT4_LK_SPIN),
96 +       EXT4_LB_SPIN            = EXT4_LB_RW(EXT4_LK_SPIN),
97 +       /* exact-match searching */
98 +       EXT4_LB_EXACT           = EXT4_LB_RO(EXT4_LK_MAX << 1),
99 +};
100 +
101 +enum ext4_pdo_lock_opc {
102 +       /* external */
103 +       EXT4_HLOCK_READDIR      = (EXT4_LB_DE_RO | EXT4_LB_DX_RO),
104 +       EXT4_HLOCK_LOOKUP       = (EXT4_LB_DE_RO | EXT4_LB_SPIN_RO |
105 +                                  EXT4_LB_EXACT),
106 +       EXT4_HLOCK_DEL          = (EXT4_LB_DE | EXT4_LB_SPIN_RO |
107 +                                  EXT4_LB_EXACT),
108 +       EXT4_HLOCK_ADD          = (EXT4_LB_DE | EXT4_LB_SPIN_RO),
109 +
110 +       /* internal */
111 +       EXT4_HLOCK_LOOKUP_SAFE  = (EXT4_LB_DE_RO | EXT4_LB_DX_RO |
112 +                                  EXT4_LB_EXACT),
113 +       EXT4_HLOCK_DEL_SAFE     = (EXT4_LB_DE | EXT4_LB_DX_RO | EXT4_LB_EXACT),
114 +       EXT4_HLOCK_SPLIT        = (EXT4_LB_DE | EXT4_LB_DX | EXT4_LB_SPIN),
115 +};
116 +
117 +extern struct htree_lock_head *ext4_htree_lock_head_alloc(unsigned hbits);
118 +#define ext4_htree_lock_head_free(lhead)       htree_lock_head_free(lhead)
119 +
120 +extern struct htree_lock *ext4_htree_lock_alloc(void);
121 +#define ext4_htree_lock_free(lck)              htree_lock_free(lck)
122 +
123 +extern void ext4_htree_lock(struct htree_lock *lck,
124 +                           struct htree_lock_head *lhead,
125 +                           struct inode *dir, unsigned flags);
126 +#define ext4_htree_unlock(lck)                  htree_unlock(lck)
127 +
128 +extern struct buffer_head *ext4_find_entry_locked(struct inode *dir,
129 +                                       const struct qstr *d_name,
130 +                                       struct ext4_dir_entry_2 **res_dir,
131 +                                       int *inlined, struct htree_lock *lck);
132 +extern int ext4_add_entry_locked(handle_t *handle, struct dentry *dentry,
133 +                     struct inode *inode, struct htree_lock *lck);
134 +
135  struct ext4_filename {
136         const struct qstr *usr_fname;
137         struct fscrypt_str disk_name;
138 @@ -2932,12 +3002,20 @@ void ext4_insert_dentry(struct inode *dir, struct inode *inode,
139                         void *data);
140  static inline void ext4_update_dx_flag(struct inode *inode)
141  {
142 +       /* Disable it for ldiskfs, because going from a DX directory to
143 +        * a non-DX directory while it is in use will completely break
144 +        * the htree-locking.
145 +        * If we really want to support this operation in the future,
146 +        * we need to exclusively lock the directory here, which would
147 +        * increase the complexity of the code. */
148 +#if 0
149         if (!ext4_has_feature_dir_index(inode->i_sb) &&
150             ext4_test_inode_flag(inode, EXT4_INODE_INDEX)) {
151                 /* ext4_iget() should have caught this... */
152                 WARN_ON_ONCE(ext4_has_feature_metadata_csum(inode->i_sb));
153                 ext4_clear_inode_flag(inode, EXT4_INODE_INDEX);
154         }
155 +#endif
156  }
157  static const unsigned char ext4_filetype_table[] = {
158         DT_UNKNOWN, DT_REG, DT_DIR, DT_CHR, DT_BLK, DT_FIFO, DT_SOCK, DT_LNK
159 diff --git a/fs/ext4/namei.c b/fs/ext4/namei.c
160 index 2760dc6..2d14bd2 100644
161 --- a/fs/ext4/namei.c
162 +++ b/fs/ext4/namei.c
163 @@ -56,6 +56,7 @@ struct buffer_head *ext4_append(handle_t *handle,
164  {
165         struct ext4_map_blocks map;
166         struct buffer_head *bh;
167 +       struct ext4_inode_info *ei = EXT4_I(inode);
168         int err;
169  
170         if (unlikely(EXT4_SB(inode->i_sb)->s_max_dir_size_kb &&
171 @@ -63,6 +64,10 @@ struct buffer_head *ext4_append(handle_t *handle,
172                       EXT4_SB(inode->i_sb)->s_max_dir_size_kb)))
173                 return ERR_PTR(-ENOSPC);
174  
175 +       /* with parallel dir operations all appends
176 +        * have to be serialized -bzzz */
177 +       down(&ei->i_append_sem);
178 +
179         *block = inode->i_size >> inode->i_sb->s_blocksize_bits;
180         map.m_lblk = *block;
181         map.m_len = 1;
182 @@ -73,21 +78,27 @@ struct buffer_head *ext4_append(handle_t *handle,
183          * directory.
184          */
185         err = ext4_map_blocks(NULL, inode, &map, 0);
186 -       if (err < 0)
187 +       if (err < 0) {
188 +               up(&ei->i_append_sem);
189                 return ERR_PTR(err);
190 +       }
191         if (err) {
192 +               up(&ei->i_append_sem);
193                 EXT4_ERROR_INODE(inode, "Logical block already allocated");
194                 return ERR_PTR(-EFSCORRUPTED);
195         }
196  
197         bh = ext4_bread(handle, inode, *block, EXT4_GET_BLOCKS_CREATE);
198 -       if (IS_ERR(bh))
199 +       if (IS_ERR(bh)) {
200 +               up(&ei->i_append_sem);
201                 return bh;
202 +       }
203         inode->i_size += inode->i_sb->s_blocksize;
204         EXT4_I(inode)->i_disksize = inode->i_size;
205         BUFFER_TRACE(bh, "get_write_access");
206         err = ext4_journal_get_write_access(handle, inode->i_sb, bh,
207                                             EXT4_JTR_NONE);
208 +       up(&ei->i_append_sem);
209         if (err) {
210                 brelse(bh);
211                 ext4_std_error(inode->i_sb, err);
212 @@ -291,7 +302,8 @@ static unsigned dx_node_limit(struct inode *dir);
213  static struct dx_frame *dx_probe(struct ext4_filename *fname,
214                                  struct inode *dir,
215                                  struct dx_hash_info *hinfo,
216 -                                struct dx_frame *frame);
217 +                                struct dx_frame *frame,
218 +                                struct htree_lock *lck);
219  static void dx_release(struct dx_frame *frames, struct inode *dir);
220  static int dx_make_map(struct inode *dir, struct buffer_head *bh,
221                        struct dx_hash_info *hinfo,
222 @@ -307,12 +319,13 @@ static void dx_insert_block(struct dx_frame *frame,
223  static int ext4_htree_next_block(struct inode *dir, __u32 hash,
224                                  struct dx_frame *frame,
225                                  struct dx_frame *frames,
226 -                                __u32 *start_hash);
227 +                                __u32 *start_hash, struct htree_lock *lck);
228  static struct buffer_head * ext4_dx_find_entry(struct inode *dir,
229                 struct ext4_filename *fname,
230 -               struct ext4_dir_entry_2 **res_dir);
231 +               struct ext4_dir_entry_2 **res_dir, struct htree_lock *lck);
232  static int ext4_dx_add_entry(handle_t *handle, struct ext4_filename *fname,
233 -                            struct inode *dir, struct inode *inode);
234 +                            struct inode *dir, struct inode *inode,
235 +                            struct htree_lock *lck);
236  
237  /* checksumming functions */
238  void ext4_initialize_dirent_tail(struct buffer_head *bh,
239 @@ -797,6 +810,227 @@ static inline void htree_rep_invariant_check(struct dx_entry *at,
240  }
241  #endif /* DX_DEBUG */
242  
243 +/* private data for htree_lock */
244 +struct ext4_dir_lock_data {
245 +       unsigned                ld_flags;  /* bits-map for lock types */
246 +       unsigned                ld_count;  /* # entries of the last DX block */
247 +       struct dx_entry         ld_at_entry; /* copy of leaf dx_entry */
248 +       struct dx_entry         *ld_at;    /* position of leaf dx_entry */
249 +};
250 +
251 +#define ext4_htree_lock_data(l)        ((struct ext4_dir_lock_data *)(l)->lk_private)
252 +#define ext4_find_entry(dir, name, dirent, inline) \
253 +                       ext4_find_entry_locked(dir, name, dirent, inline, NULL)
254 +#define ext4_add_entry(handle, dentry, inode) \
255 +                       ext4_add_entry_locked(handle, dentry, inode, NULL)
256 +
257 +/* NB: ext4_lblk_t is 32 bits so we use high bits to identify invalid blk */
258 +#define EXT4_HTREE_NODE_CHANGED        (0xcafeULL << 32)
259 +
260 +static void ext4_htree_event_cb(void *target, void *event)
261 +{
262 +       u64 *block = (u64 *)target;
263 +
264 +       if (*block == dx_get_block((struct dx_entry *)event))
265 +               *block = EXT4_HTREE_NODE_CHANGED;
266 +}
267 +
268 +struct htree_lock_head *ext4_htree_lock_head_alloc(unsigned hbits)
269 +{
270 +       struct htree_lock_head *lhead;
271 +
272 +       lhead = htree_lock_head_alloc(EXT4_LK_MAX, hbits, 0);
273 +       if (lhead != NULL) {
274 +               htree_lock_event_attach(lhead, EXT4_LK_SPIN, HTREE_EVENT_WR,
275 +                                       ext4_htree_event_cb);
276 +       }
277 +       return lhead;
278 +}
279 +EXPORT_SYMBOL(ext4_htree_lock_head_alloc);
280 +
281 +struct htree_lock *ext4_htree_lock_alloc(void)
282 +{
283 +       return htree_lock_alloc(EXT4_LK_MAX,
284 +                               sizeof(struct ext4_dir_lock_data));
285 +}
286 +EXPORT_SYMBOL(ext4_htree_lock_alloc);
287 +
288 +static htree_lock_mode_t ext4_htree_mode(unsigned flags)
289 +{
290 +       switch (flags) {
291 +       default: /* 0 or unknown flags require EX lock */
292 +               return HTREE_LOCK_EX;
293 +       case EXT4_HLOCK_READDIR:
294 +               return HTREE_LOCK_PR;
295 +       case EXT4_HLOCK_LOOKUP:
296 +               return HTREE_LOCK_CR;
297 +       case EXT4_HLOCK_DEL:
298 +       case EXT4_HLOCK_ADD:
299 +               return HTREE_LOCK_CW;
300 +       }
301 +}
302 +
303 +/* return PR for read-only operations, otherwise return EX */
304 +static inline htree_lock_mode_t ext4_htree_safe_mode(unsigned flags)
305 +{
306 +       int writer = (flags & EXT4_LB_DE) == EXT4_LB_DE;
307 +
308 +       /* 0 requires EX lock */
309 +       return (flags == 0 || writer) ? HTREE_LOCK_EX : HTREE_LOCK_PR;
310 +}
311 +
312 +static int ext4_htree_safe_locked(struct htree_lock *lck)
313 +{
314 +       int writer;
315 +
316 +       if (lck == NULL || lck->lk_mode == HTREE_LOCK_EX)
317 +               return 1;
318 +
319 +       writer = (ext4_htree_lock_data(lck)->ld_flags & EXT4_LB_DE) ==
320 +                EXT4_LB_DE;
321 +       if (writer) /* all readers & writers are excluded? */
322 +               return lck->lk_mode == HTREE_LOCK_EX;
323 +
324 +       /* all writers are excluded? */
325 +       return lck->lk_mode == HTREE_LOCK_PR ||
326 +              lck->lk_mode == HTREE_LOCK_PW ||
327 +              lck->lk_mode == HTREE_LOCK_EX;
328 +}
329 +
330 +/* relock htree_lock with EX mode if it's a change operation, otherwise
331 + * relock it with PR mode. It's a no-op if PDO is disabled. */
332 +static void ext4_htree_safe_relock(struct htree_lock *lck)
333 +{
334 +       if (!ext4_htree_safe_locked(lck)) {
335 +               unsigned flags = ext4_htree_lock_data(lck)->ld_flags;
336 +
337 +               htree_change_lock(lck, ext4_htree_safe_mode(flags));
338 +       }
339 +}
340 +
341 +void ext4_htree_lock(struct htree_lock *lck, struct htree_lock_head *lhead,
342 +                    struct inode *dir, unsigned flags)
343 +{
344 +       htree_lock_mode_t mode = is_dx(dir) ? ext4_htree_mode(flags) :
345 +                                             ext4_htree_safe_mode(flags);
346 +
347 +       ext4_htree_lock_data(lck)->ld_flags = flags;
348 +       htree_lock(lck, lhead, mode);
349 +       if (!is_dx(dir))
350 +               ext4_htree_safe_relock(lck); /* make sure it's safe-locked */
351 +}
352 +EXPORT_SYMBOL(ext4_htree_lock);
353 +
354 +static int ext4_htree_node_lock(struct htree_lock *lck, struct dx_entry *at,
355 +                               unsigned lmask, int wait, void *ev)
356 +{
357 +       u32     key = (at == NULL) ? 0 : dx_get_block(at);
358 +       u32     mode;
359 +
360 +       /* NOOP if htree is well protected or caller doesn't require the lock */
361 +       if (ext4_htree_safe_locked(lck) ||
362 +          !(ext4_htree_lock_data(lck)->ld_flags & lmask))
363 +               return 1;
364 +
365 +       mode = (ext4_htree_lock_data(lck)->ld_flags & lmask) == lmask ?
366 +               HTREE_LOCK_PW : HTREE_LOCK_PR;
367 +       while (1) {
368 +               if (htree_node_lock_try(lck, mode, key, ffz(~lmask), wait, ev))
369 +                       return 1;
370 +               if (!(lmask & EXT4_LB_SPIN)) /* not a spinlock */
371 +                       return 0;
372 +               cpu_relax(); /* spin until granted */
373 +       }
374 +}
375 +
376 +static int ext4_htree_node_locked(struct htree_lock *lck, unsigned lmask)
377 +{
378 +       return ext4_htree_safe_locked(lck) ||
379 +              htree_node_is_granted(lck, ffz(~lmask));
380 +}
381 +
382 +static void ext4_htree_node_unlock(struct htree_lock *lck,
383 +                                  unsigned lmask, void *buf)
384 +{
385 +       /* NB: it's safe to call multiple times, even if it's not locked */
386 +       if (!ext4_htree_safe_locked(lck) &&
387 +            htree_node_is_granted(lck, ffz(~lmask)))
388 +               htree_node_unlock(lck, ffz(~lmask), buf);
389 +}
390 +
391 +#define ext4_htree_dx_lock(lck, key)           \
392 +       ext4_htree_node_lock(lck, key, EXT4_LB_DX, 1, NULL)
393 +#define ext4_htree_dx_lock_try(lck, key)       \
394 +       ext4_htree_node_lock(lck, key, EXT4_LB_DX, 0, NULL)
395 +#define ext4_htree_dx_unlock(lck)              \
396 +       ext4_htree_node_unlock(lck, EXT4_LB_DX, NULL)
397 +#define ext4_htree_dx_locked(lck)              \
398 +       ext4_htree_node_locked(lck, EXT4_LB_DX)
399 +
400 +static void ext4_htree_dx_need_lock(struct htree_lock *lck)
401 +{
402 +       struct ext4_dir_lock_data *ld;
403 +
404 +       if (ext4_htree_safe_locked(lck))
405 +               return;
406 +
407 +       ld = ext4_htree_lock_data(lck);
408 +       switch (ld->ld_flags) {
409 +       default:
410 +               return;
411 +       case EXT4_HLOCK_LOOKUP:
412 +               ld->ld_flags = EXT4_HLOCK_LOOKUP_SAFE;
413 +               return;
414 +       case EXT4_HLOCK_DEL:
415 +               ld->ld_flags = EXT4_HLOCK_DEL_SAFE;
416 +               return;
417 +       case EXT4_HLOCK_ADD:
418 +               ld->ld_flags = EXT4_HLOCK_SPLIT;
419 +               return;
420 +       }
421 +}
422 +
423 +#define ext4_htree_de_lock(lck, key)           \
424 +       ext4_htree_node_lock(lck, key, EXT4_LB_DE, 1, NULL)
425 +#define ext4_htree_de_unlock(lck)              \
426 +       ext4_htree_node_unlock(lck, EXT4_LB_DE, NULL)
427 +
428 +#define ext4_htree_spin_lock(lck, key, event)  \
429 +       ext4_htree_node_lock(lck, key, EXT4_LB_SPIN, 0, event)
430 +#define ext4_htree_spin_unlock(lck)            \
431 +       ext4_htree_node_unlock(lck, EXT4_LB_SPIN, NULL)
432 +#define ext4_htree_spin_unlock_listen(lck, p)  \
433 +       ext4_htree_node_unlock(lck, EXT4_LB_SPIN, p)
434 +
435 +static void ext4_htree_spin_stop_listen(struct htree_lock *lck)
436 +{
437 +       if (!ext4_htree_safe_locked(lck) &&
438 +           htree_node_is_listening(lck, ffz(~EXT4_LB_SPIN)))
439 +               htree_node_stop_listen(lck, ffz(~EXT4_LB_SPIN));
440 +}
441 +
442 +enum {
443 +       DX_HASH_COL_IGNORE,     /* ignore collision while probing frames */
444 +       DX_HASH_COL_YES,        /* there is collision and it does matter */
445 +       DX_HASH_COL_NO,         /* there is no collision */
446 +};
447 +
448 +static int dx_probe_hash_collision(struct htree_lock *lck,
449 +                                  struct dx_entry *entries,
450 +                                  struct dx_entry *at, u32 hash)
451 +{
452 +       if (!(lck && ext4_htree_lock_data(lck)->ld_flags & EXT4_LB_EXACT)) {
453 +               return DX_HASH_COL_IGNORE; /* don't care about collision */
454 +
455 +       } else if (at == entries + dx_get_count(entries) - 1) {
456 +               return DX_HASH_COL_IGNORE; /* not in any leaf of this DX */
457 +
458 +       } else { /* hash collision? */
459 +               return ((dx_get_hash(at + 1) & ~1) == hash) ?
460 +                       DX_HASH_COL_YES : DX_HASH_COL_NO;
461 +       }
462 +}
463 +
464  /*
465   * Probe for a directory leaf block to search.
466   *
467 @@ -808,10 +1042,11 @@ static inline void htree_rep_invariant_check(struct dx_entry *at,
468   */
469  static struct dx_frame *
470  dx_probe(struct ext4_filename *fname, struct inode *dir,
471 -        struct dx_hash_info *hinfo, struct dx_frame *frame_in)
472 +        struct dx_hash_info *hinfo, struct dx_frame *frame_in,
473 +        struct htree_lock *lck)
474  {
475         unsigned count, indirect, level, i;
476 -       struct dx_entry *at, *entries, *p, *q, *m;
477 +       struct dx_entry *at, *entries, *p, *q, *m, *dx = NULL;
478         struct dx_root_info *info;
479         struct dx_frame *frame = frame_in;
480         struct dx_frame *ret_err = ERR_PTR(ERR_BAD_DX_DIR);
481 @@ -895,8 +1130,16 @@ dx_probe(struct ext4_filename *fname, struct inode *dir,
482         level = 0;
483         blocks[0] = 0;
484         while (1) {
485 +               if (indirect == level) { /* the last index level */
486 +                       /* NB: ext4_htree_dx_lock() could be a no-op if
487 +                        * the DX-lock flag is not set for the current operation
488 +                        */
489 +                       ext4_htree_dx_lock(lck, dx);
490 +                       ext4_htree_spin_lock(lck, dx, NULL);
491 +               }
492                 count = dx_get_count(entries);
493                 if (!count || count > dx_get_limit(entries)) {
494 +                       ext4_htree_spin_unlock(lck); /* release spin */
495                         ext4_warning_inode(dir,
496                                            "dx entry: count %u beyond limit %u",
497                                            count, dx_get_limit(entries));
498 @@ -923,6 +1166,74 @@ dx_probe(struct ext4_filename *fname, struct inode *dir,
499                 frame->entries = entries;
500                 frame->at = at;
501  
502 +               if (indirect == level) { /* the last index level */
503 +                       struct ext4_dir_lock_data *ld;
504 +                       u64 myblock;
505 +
506 +                       /* By default we only lock the DE-block; however, we will
507 +                        * also lock the last-level DX-block if:
508 +                        * a) there is a hash collision:
509 +                        *    we will set the DX-lock flag (a few lines below)
510 +                        *    and retry to lock the DX-block,
511 +                        *    see details in dx_probe_hash_collision()
512 +                        * b) it's a retry from splitting:
513 +                        *    we need to lock the last-level DX-block so nobody
514 +                        *    else can split any leaf blocks under the same
515 +                        *    DX-block, see details in ext4_dx_add_entry()
516 +                        */
517 +                       if (ext4_htree_dx_locked(lck)) {
518 +                               /* DX-block is locked, just lock DE-block
519 +                                * and return
520 +                                */
521 +                               ext4_htree_spin_unlock(lck);
522 +                               if (!ext4_htree_safe_locked(lck))
523 +                                       ext4_htree_de_lock(lck, frame->at);
524 +                               return frame;
525 +                       }
526 +                       /* it's pdirop and no DX lock */
527 +                       if (dx_probe_hash_collision(lck, entries, at, hash) ==
528 +                           DX_HASH_COL_YES) {
529 +                               /* found a hash collision, set the DX-lock flag
530 +                                * and retry to obtain the DX-lock
531 +                                */
532 +                               ext4_htree_spin_unlock(lck);
533 +                               ext4_htree_dx_need_lock(lck);
534 +                               continue;
535 +                       }
536 +                       ld = ext4_htree_lock_data(lck);
537 +                       /* because I don't lock DX, @at can't be trusted
538 +                        * after I release the spinlock, so I have to save it
539 +                        */
540 +                       ld->ld_at = at;
541 +                       ld->ld_at_entry = *at;
542 +                       ld->ld_count = dx_get_count(entries);
543 +
544 +                       frame->at = &ld->ld_at_entry;
545 +                       myblock = dx_get_block(at);
546 +
547 +                       /* NB: lock ordering */
548 +                       ext4_htree_spin_unlock_listen(lck, &myblock);
549 +                       /* another thread can split this DE-block because:
550 +                        * a) I don't have a lock on the DE-block yet
551 +                        * b) I released the spinlock on the DX-block
552 +                        * if that happens I can detect it by listening for
553 +                        * the splitting event on this DE-block
554 +                        */
555 +                       ext4_htree_de_lock(lck, frame->at);
556 +                       ext4_htree_spin_stop_listen(lck);
557 +
558 +                       if (myblock == EXT4_HTREE_NODE_CHANGED) {
559 +                               /* someone split this DE-block before
560 +                                * I locked it; I need to retry and lock a
561 +                                * valid DE-block
562 +                                */
563 +                               ext4_htree_de_unlock(lck);
564 +                               continue;
565 +                       }
566 +                       return frame;
567 +               }
568 +               dx = at;
569 +
570                 block = dx_get_block(at);
571                 for (i = 0; i <= level; i++) {
572                         if (blocks[i] == block) {
573 @@ -932,8 +1243,7 @@ dx_probe(struct ext4_filename *fname, struct inode *dir,
574                                 goto fail;
575                         }
576                 }
577 -               if (++level > indirect)
578 -                       return frame;
579 +               ++level;
580                 blocks[level] = block;
581                 frame++;
582                 frame->bh = ext4_read_dirblock(dir, block, INDEX);
583 @@ -1004,7 +1314,7 @@ static void dx_release(struct dx_frame *frames, struct inode *dir)
584  static int ext4_htree_next_block(struct inode *dir, __u32 hash,
585                                  struct dx_frame *frame,
586                                  struct dx_frame *frames,
587 -                                __u32 *start_hash)
588 +                                __u32 *start_hash, struct htree_lock *lck)
589  {
590         struct dx_frame *p;
591         struct buffer_head *bh;
592 @@ -1019,12 +1329,22 @@ static int ext4_htree_next_block(struct inode *dir, __u32 hash,
593          * this loop, num_frames indicates the number of interior
594          * nodes need to be read.
595          */
596 +       ext4_htree_de_unlock(lck);
597         while (1) {
598 -               if (++(p->at) < p->entries + dx_get_count(p->entries))
599 -                       break;
600 +               if (num_frames > 0 || ext4_htree_dx_locked(lck)) {
601 +                       /* num_frames > 0 :
602 +                        *   DX block
603 +                        * ext4_htree_dx_locked:
604 +                        *   frame->at is a reliable pointer returned by dx_probe,
605 +                        *   otherwise dx_probe already knew there was no collision */
606 +                       if (++(p->at) < p->entries + dx_get_count(p->entries))
607 +                               break;
608 +               }
609                 if (p == frames)
610                         return 0;
611                 num_frames++;
612 +               if (num_frames == 1)
613 +                       ext4_htree_dx_unlock(lck);
614                 p--;
615         }
616  
617 @@ -1047,6 +1367,13 @@ static int ext4_htree_next_block(struct inode *dir, __u32 hash,
618          * block so no check is necessary
619          */
620         while (num_frames--) {
621 +               if (num_frames == 0) {
622 +                       /* it's not always necessary; we just don't want to
623 +                        * detect a hash collision again */
624 +                       ext4_htree_dx_need_lock(lck);
625 +                       ext4_htree_dx_lock(lck, p->at);
626 +               }
627 +
628                 bh = ext4_read_dirblock(dir, dx_get_block(p->at), INDEX);
629                 if (IS_ERR(bh))
630                         return PTR_ERR(bh);
631 @@ -1055,6 +1382,7 @@ static int ext4_htree_next_block(struct inode *dir, __u32 hash,
632                 p->bh = bh;
633                 p->at = p->entries = ((struct dx_node *) bh->b_data)->entries;
634         }
635 +       ext4_htree_de_lock(lck, p->at);
636         return 1;
637  }
638  
639 @@ -1216,10 +1544,10 @@ int ext4_htree_fill_tree(struct file *dir_file, __u32 start_hash,
640         }
641         hinfo.hash = start_hash;
642         hinfo.minor_hash = 0;
643 -       frame = dx_probe(NULL, dir, &hinfo, frames);
644 +       /* assume it's PR locked */
645 +       frame = dx_probe(NULL, dir, &hinfo, frames, NULL);
646         if (IS_ERR(frame))
647                 return PTR_ERR(frame);
648 -
649         /* Add '.' and '..' from the htree header */
650         if (!start_hash && !start_minor_hash) {
651                 de = (struct ext4_dir_entry_2 *) frames[0].bh->b_data;
652 @@ -1259,7 +1587,7 @@ int ext4_htree_fill_tree(struct file *dir_file, __u32 start_hash,
653                 count += ret;
654                 hashval = ~0;
655                 ret = ext4_htree_next_block(dir, HASH_NB_ALWAYS,
656 -                                           frame, frames, &hashval);
657 +                                           frame, frames, &hashval, NULL);
658                 *next_hash = hashval;
659                 if (ret < 0) {
660                         err = ret;
661 @@ -1579,7 +1907,7 @@ static int is_dx_internal_node(struct inode *dir, ext4_lblk_t block,
662  static struct buffer_head *__ext4_find_entry(struct inode *dir,
663                                              struct ext4_filename *fname,
664                                              struct ext4_dir_entry_2 **res_dir,
665 -                                            int *inlined)
666 +                                            int *inlined, struct htree_lock *lck)
667  {
668         struct super_block *sb;
669         struct buffer_head *bh_use[NAMEI_RA_SIZE];
670 @@ -1621,7 +1949,7 @@ static struct buffer_head *__ext4_find_entry(struct inode *dir,
671                 goto restart;
672         }
673         if (is_dx(dir)) {
674 -               ret = ext4_dx_find_entry(dir, fname, res_dir);
675 +               ret = ext4_dx_find_entry(dir, fname, res_dir, lck);
676                 /*
677                  * On success, or if the error was file not found,
678                  * return.  Otherwise, fall back to doing a search the
679 @@ -1631,6 +1959,7 @@ static struct buffer_head *__ext4_find_entry(struct inode *dir,
680                         goto cleanup_and_exit;
681                 dxtrace(printk(KERN_DEBUG "ext4_find_entry: dx failed, "
682                                "falling back\n"));
683 +               ext4_htree_safe_relock(lck);
684                 ret = NULL;
685         }
686         nblocks = dir->i_size >> EXT4_BLOCK_SIZE_BITS(sb);
687 @@ -1721,10 +2050,10 @@ cleanup_and_exit:
688         return ret;
689  }
690  
691 -static struct buffer_head *ext4_find_entry(struct inode *dir,
692 +struct buffer_head *ext4_find_entry_locked(struct inode *dir,
693                                            const struct qstr *d_name,
694                                            struct ext4_dir_entry_2 **res_dir,
695 -                                          int *inlined)
696 +                                          int *inlined, struct htree_lock *lck)
697  {
698         int err;
699         struct ext4_filename fname;
700 @@ -1736,12 +2065,14 @@ static struct buffer_head *ext4_find_entry(struct inode *dir,
701         if (err)
702                 return ERR_PTR(err);
703  
704 -       bh = __ext4_find_entry(dir, &fname, res_dir, inlined);
705 +       bh = __ext4_find_entry(dir, &fname, res_dir, inlined, lck);
706  
707         ext4_fname_free_filename(&fname);
708         return bh;
709  }
710  
711 +EXPORT_SYMBOL(ext4_find_entry_locked);
712 +
713  static struct buffer_head *ext4_lookup_entry(struct inode *dir,
714                                              struct dentry *dentry,
715                                              struct ext4_dir_entry_2 **res_dir)
716 @@ -1757,7 +2088,7 @@ static struct buffer_head *ext4_lookup_entry(struct inode *dir,
717         if (err)
718                 return ERR_PTR(err);
719  
720 -       bh = __ext4_find_entry(dir, &fname, res_dir, NULL);
721 +       bh = __ext4_find_entry(dir, &fname, res_dir, NULL, NULL);
722  
723         ext4_fname_free_filename(&fname);
724         return bh;
725 @@ -1765,7 +2096,8 @@ static struct buffer_head *ext4_lookup_entry(struct inode *dir,
726  
727  static struct buffer_head * ext4_dx_find_entry(struct inode *dir,
728                         struct ext4_filename *fname,
729 -                       struct ext4_dir_entry_2 **res_dir)
730 +                       struct ext4_dir_entry_2 **res_dir,
731 +                       struct htree_lock *lck)
732  {
733         struct super_block * sb = dir->i_sb;
734         struct dx_frame frames[EXT4_HTREE_LEVEL], *frame;
735 @@ -1776,7 +2108,7 @@ static struct buffer_head * ext4_dx_find_entry(struct inode *dir,
736  #ifdef CONFIG_FS_ENCRYPTION
737         *res_dir = NULL;
738  #endif
739 -       frame = dx_probe(fname, dir, NULL, frames);
740 +       frame = dx_probe(fname, dir, NULL, frames, lck);
741         if (IS_ERR(frame))
742                 return (struct buffer_head *) frame;
743         do {
744 @@ -1798,7 +2130,7 @@ static struct buffer_head * ext4_dx_find_entry(struct inode *dir,
745  
746                 /* Check to see if we should continue to search */
747                 retval = ext4_htree_next_block(dir, fname->hinfo.hash, frame,
748 -                                              frames, NULL);
749 +                                              frames, NULL, lck);
750                 if (retval < 0) {
751                         ext4_warning_inode(dir,
752                                 "error %d reading directory index block",
753 @@ -1987,8 +2319,9 @@ static struct ext4_dir_entry_2 *dx_pack_dirents(struct inode *dir, char *base,
754   * Returns pointer to de in block into which the new entry will be inserted.
755   */
756  static struct ext4_dir_entry_2 *do_split(handle_t *handle, struct inode *dir,
757 -                       struct buffer_head **bh,struct dx_frame *frame,
758 -                       struct dx_hash_info *hinfo)
759 +                       struct buffer_head **bh, struct dx_frame *frames,
760 +                       struct dx_frame *frame, struct dx_hash_info *hinfo,
761 +                       struct htree_lock *lck)
762  {
763         unsigned blocksize = dir->i_sb->s_blocksize;
764         unsigned continued;
765 @@ -2065,8 +2398,14 @@ static struct ext4_dir_entry_2 *do_split(handle_t *handle, struct inode *dir,
766                                         hash2, split, count-split));
767  
768         /* Fancy dance to stay within two buffers */
769 -       de2 = dx_move_dirents(dir, data1, data2, map + split, count - split,
770 -                             blocksize);
771 +       if (hinfo->hash < hash2) {
772 +               de2 = dx_move_dirents(dir, data1, data2, map + split,
773 +                                     count - split, blocksize);
774 +       } else {
775 +               /* make sure we will add the entry to the same block that
776 +                * we have already locked */
777 +               de2 = dx_move_dirents(dir, data1, data2, map, split, blocksize);
778 +       }
779         de = dx_pack_dirents(dir, data1, blocksize);
780         de->rec_len = ext4_rec_len_to_disk(data1 + (blocksize - csum_size) -
781                                            (char *) de,
782 @@ -2084,12 +2423,21 @@ static struct ext4_dir_entry_2 *do_split(handle_t *handle, struct inode *dir,
783         dxtrace(dx_show_leaf(dir, hinfo, (struct ext4_dir_entry_2 *) data2,
784                         blocksize, 1));
785  
786 -       /* Which block gets the new entry? */
787 -       if (hinfo->hash >= hash2) {
788 -               swap(*bh, bh2);
789 -               de = de2;
790 +       ext4_htree_spin_lock(lck, frame > frames ? (frame - 1)->at : NULL,
791 +                            frame->at); /* notify that the block is being split */
792 +       if (hinfo->hash < hash2) {
793 +               dx_insert_block(frame, hash2 + continued, newblock);
794 +
795 +       } else {
796 +               /* switch block number */
797 +               dx_insert_block(frame, hash2 + continued,
798 +                               dx_get_block(frame->at));
799 +               dx_set_block(frame->at, newblock);
800 +               (frame->at)++;
801         }
802 -       dx_insert_block(frame, hash2 + continued, newblock);
803 +       ext4_htree_spin_unlock(lck);
804 +       ext4_htree_dx_unlock(lck);
805 +
806         err = ext4_handle_dirty_dirblock(handle, dir, bh2);
807         if (err)
808                 goto journal_error;
809 @@ -2388,7 +2736,7 @@ static int make_indexed_dir(handle_t *handle, struct ext4_filename *fname,
810         if (retval)
811                 goto out_frames;
812  
813 -       de = do_split(handle,dir, &bh2, frame, &fname->hinfo);
814 +       de = do_split(handle, dir, &bh2, frames, frame, &fname->hinfo, NULL);
815         if (IS_ERR(de)) {
816                 retval = PTR_ERR(de);
817                 goto out_frames;
818 @@ -2498,8 +2846,8 @@ out:
819   * may not sleep between calling this and putting something into
820   * the entry, as someone else might have used it while you slept.
821   */
822 -static int ext4_add_entry(handle_t *handle, struct dentry *dentry,
823 -                         struct inode *inode)
824 +int ext4_add_entry_locked(handle_t *handle, struct dentry *dentry,
825 +                         struct inode *inode, struct htree_lock *lck)
826  {
827         struct inode *dir = d_inode(dentry->d_parent);
828         struct buffer_head *bh = NULL;
829 @@ -2548,9 +2896,10 @@ static int ext4_add_entry(handle_t *handle, struct dentry *dentry,
830                 if (dentry->d_name.len == 2 &&
831                     memcmp(dentry->d_name.name, "..", 2) == 0)
832                         return ext4_update_dotdot(handle, dentry, inode);
833 -               retval = ext4_dx_add_entry(handle, &fname, dir, inode);
834 +               retval = ext4_dx_add_entry(handle, &fname, dir, inode, lck);
835                 if (!retval || (retval != ERR_BAD_DX_DIR))
836                         goto out;
837 +               ext4_htree_safe_relock(lck);
838                 /* Can we just ignore htree data? */
839                 if (ext4_has_metadata_csum(sb)) {
840                         EXT4_ERROR_INODE(dir,
841 @@ -2613,12 +2962,14 @@ out:
842                 ext4_set_inode_state(inode, EXT4_STATE_NEWENTRY);
843         return retval;
844  }
845 +EXPORT_SYMBOL(ext4_add_entry_locked);
846  
847  /*
848   * Returns 0 for success, or a negative error value
849   */
850  static int ext4_dx_add_entry(handle_t *handle, struct ext4_filename *fname,
851 -                            struct inode *dir, struct inode *inode)
852 +                            struct inode *dir, struct inode *inode,
853 +                            struct htree_lock *lck)
854  {
855         struct dx_frame frames[EXT4_HTREE_LEVEL], *frame;
856         struct dx_entry *entries, *at;
857 @@ -2630,7 +2981,7 @@ static int ext4_dx_add_entry(handle_t *handle, struct ext4_filename *fname,
858  
859  again:
860         restart = 0;
861 -       frame = dx_probe(fname, dir, NULL, frames);
862 +       frame = dx_probe(fname, dir, NULL, frames, lck);
863         if (IS_ERR(frame))
864                 return PTR_ERR(frame);
865         entries = frame->entries;
866 @@ -2665,6 +3016,12 @@ again:
867                 struct dx_node *node2;
868                 struct buffer_head *bh2;
869  
870 +               if (!ext4_htree_safe_locked(lck)) { /* retry with EX lock */
871 +                       ext4_htree_safe_relock(lck);
872 +                       restart = 1;
873 +                       goto cleanup;
874 +               }
875 +
876                 while (frame > frames) {
877                         if (dx_get_count((frame - 1)->entries) <
878                             dx_get_limit((frame - 1)->entries)) {
879 @@ -2768,8 +3125,32 @@ again:
880                         restart = 1;
881                         goto journal_error;
882                 }
883 +       } else if (!ext4_htree_dx_locked(lck)) {
884 +               struct ext4_dir_lock_data *ld = ext4_htree_lock_data(lck);
885 +
886 +               /* not well protected, require DX lock */
887 +               ext4_htree_dx_need_lock(lck);
888 +               at = frame > frames ? (frame - 1)->at : NULL;
889 +
890 +               /* NB: no risk of deadlock because it's just a try.
891 +                *
892 +                * NB: we check ld_count twice, the first time before
893 +                * taking the DX lock, the second time after holding it.
894 +                *
895 +                * NB: we never free directory blocks so far, which
896 +                * means the value returned by dx_get_count() should equal
897 +                * ld->ld_count if nobody split any DE-block under @at,
898 +                * and ld->ld_at still points to a valid dx_entry. */
899 +               if ((ld->ld_count != dx_get_count(entries)) ||
900 +                   !ext4_htree_dx_lock_try(lck, at) ||
901 +                   (ld->ld_count != dx_get_count(entries))) {
902 +                       restart = 1;
903 +                       goto cleanup;
904 +               }
905 +               /* OK, I've got DX lock and nothing changed */
906 +               frame->at = ld->ld_at;
907         }
908 -       de = do_split(handle, dir, &bh, frame, &fname->hinfo);
909 +       de = do_split(handle, dir, &bh, frames, frame, &fname->hinfo, lck);
910         if (IS_ERR(de)) {
911                 err = PTR_ERR(de);
912                 goto cleanup;
913 @@ -2780,6 +3161,8 @@ again:
914  journal_error:
915         ext4_std_error(dir->i_sb, err); /* this is a no-op if err == 0 */
916  cleanup:
917 +       ext4_htree_dx_unlock(lck);
918 +       ext4_htree_de_unlock(lck);
919         brelse(bh);
920         dx_release(frames, dir);
921         /* @restart is true means htree-path has been changed, we need to
922 diff --git a/fs/ext4/super.c b/fs/ext4/super.c
923 index b271e1f..cda88f8 100644
924 --- a/fs/ext4/super.c
925 +++ b/fs/ext4/super.c
926 @@ -1320,6 +1320,7 @@ static struct inode *ext4_alloc_inode(struct super_block *sb)
927  
928         inode_set_iversion(&ei->vfs_inode, 1);
929         spin_lock_init(&ei->i_raw_lock);
930 +       sema_init(&ei->i_append_sem, 1);
931         INIT_LIST_HEAD(&ei->i_prealloc_list);
932         atomic_set(&ei->i_prealloc_active, 0);
933         spin_lock_init(&ei->i_prealloc_lock);
934 -- 
935 2.34.1
936