fs/lustre-release.git: ldiskfs/kernel_patches/patches/ubuntu18/ext4-pdirop.patch
1 Single directory performance is critical for HPC workloads. In a
2 typical use case an application creates a separate output file for
3 each node and task in a job. As nodes and tasks increase, hundreds
4 of thousands of files may be created in a single directory within
5 a short window of time.
6 Today, both filename lookup and filesystem-modifying operations
7 (such as create and unlink) are protected by a single lock for
8 an entire ldiskfs directory. The PDO project removes this
9 bottleneck by introducing a parallel locking mechanism for
10 entire ldiskfs directories, enabling multiple application
11 threads to look up, create and unlink entries in parallel.
12
13 This patch contains:
14  - pdirops support for ldiskfs
15  - integration with osd-ldiskfs (usage sketched below)
16
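For context (not part of the patch): a minimal, hypothetical sketch of how a caller such as
osd-ldiskfs is expected to drive the pdirops API declared in ext4.h below. The function name,
the 16 hash bits and the error handling are illustrative assumptions; only the ext4_htree_*
helpers and __ext4_find_entry() come from the patch itself.

	/* Hypothetical lookup under the PDO lock; bh is NULL or ERR_PTR on failure. */
	struct buffer_head *pdirop_lookup_example(struct inode *dir,
						  const struct qstr *name)
	{
		struct htree_lock_head *lhead;	/* per directory; normally cached */
		struct htree_lock *lck;		/* per thread */
		struct ext4_dir_entry_2 *de;
		struct buffer_head *bh = NULL;

		lhead = ext4_htree_lock_head_alloc(16);
		lck = ext4_htree_lock_alloc();
		if (lhead == NULL || lck == NULL)
			goto out;

		/* take the PDO lock in lookup mode, then do the protected lookup */
		ext4_htree_lock(lck, lhead, dir, EXT4_HLOCK_LOOKUP);
		bh = __ext4_find_entry(dir, name, &de, NULL, lck);
		ext4_htree_unlock(lck);
	out:
		if (lck != NULL)
			ext4_htree_lock_free(lck);
		if (lhead != NULL)
			ext4_htree_lock_head_free(lhead);
		return bh;
	}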
17 Index: linux-4.15.0/fs/ext4/Makefile
18 ===================================================================
19 --- linux-4.15.0.orig/fs/ext4/Makefile
20 +++ linux-4.15.0/fs/ext4/Makefile
21 @@ -7,6 +7,7 @@ obj-$(CONFIG_EXT4_FS) += ext4.o
22  
23  ext4-y := balloc.o bitmap.o block_validity.o dir.o ext4_jbd2.o extents.o \
24                 extents_status.o file.o fsmap.o fsync.o hash.o ialloc.o \
25 +               htree_lock.o \
26                 indirect.o inline.o inode.o ioctl.o mballoc.o migrate.o \
27                 mmp.o move_extent.o namei.o page-io.o readpage.o resize.o \
28                 super.o symlink.o sysfs.o xattr.o xattr_trusted.o xattr_user.o
29 Index: linux-4.15.0/fs/ext4/ext4.h
30 ===================================================================
31 --- linux-4.15.0.orig/fs/ext4/ext4.h
32 +++ linux-4.15.0/fs/ext4/ext4.h
33 @@ -29,6 +29,7 @@
34  #include <linux/timer.h>
35  #include <linux/version.h>
36  #include <linux/wait.h>
37 +#include <linux/htree_lock.h>
38  #include <linux/sched/signal.h>
39  #include <linux/blockgroup_lock.h>
40  #include <linux/percpu_counter.h>
41 @@ -932,6 +933,9 @@ struct ext4_inode_info {
42         __u32   i_dtime;
43         ext4_fsblk_t    i_file_acl;
44  
45 +       /* following fields for parallel directory operations -bzzz */
46 +       struct semaphore i_append_sem;
47 +
48         /*
49          * i_block_group is the number of the block group which contains
50          * this file's inode.  Constant across the lifetime of the inode,
51 @@ -2114,6 +2118,72 @@ struct dx_hash_info
52   */
53  #define HASH_NB_ALWAYS         1
54  
55 +/* assume name-hash is protected by upper layer */
56 +#define EXT4_HTREE_LOCK_HASH   0
57 +
58 +enum ext4_pdo_lk_types {
59 +#if EXT4_HTREE_LOCK_HASH
60 +       EXT4_LK_HASH,
61 +#endif
62 +       EXT4_LK_DX,             /* index block */
63 +       EXT4_LK_DE,             /* directory entry block */
64 +       EXT4_LK_SPIN,           /* spinlock */
65 +       EXT4_LK_MAX,
66 +};
67 +
68 +/* read-only bit */
69 +#define EXT4_LB_RO(b)          (1 << (b))
70 +/* read + write, high bits for writer */
71 +#define EXT4_LB_RW(b)          ((1 << (b)) | (1 << (EXT4_LK_MAX + (b))))
72 +
73 +enum ext4_pdo_lock_bits {
74 +       /* DX lock bits */
75 +       EXT4_LB_DX_RO           = EXT4_LB_RO(EXT4_LK_DX),
76 +       EXT4_LB_DX              = EXT4_LB_RW(EXT4_LK_DX),
77 +       /* DE lock bits */
78 +       EXT4_LB_DE_RO           = EXT4_LB_RO(EXT4_LK_DE),
79 +       EXT4_LB_DE              = EXT4_LB_RW(EXT4_LK_DE),
80 +       /* DX spinlock bits */
81 +       EXT4_LB_SPIN_RO         = EXT4_LB_RO(EXT4_LK_SPIN),
82 +       EXT4_LB_SPIN            = EXT4_LB_RW(EXT4_LK_SPIN),
83 +       /* accurate searching */
84 +       EXT4_LB_EXACT           = EXT4_LB_RO(EXT4_LK_MAX << 1),
85 +};
86 +
87 +enum ext4_pdo_lock_opc {
88 +       /* external */
89 +       EXT4_HLOCK_READDIR      = (EXT4_LB_DE_RO | EXT4_LB_DX_RO),
90 +       EXT4_HLOCK_LOOKUP       = (EXT4_LB_DE_RO | EXT4_LB_SPIN_RO |
91 +                                  EXT4_LB_EXACT),
92 +       EXT4_HLOCK_DEL          = (EXT4_LB_DE | EXT4_LB_SPIN_RO |
93 +                                  EXT4_LB_EXACT),
94 +       EXT4_HLOCK_ADD          = (EXT4_LB_DE | EXT4_LB_SPIN_RO),
95 +
96 +       /* internal */
97 +       EXT4_HLOCK_LOOKUP_SAFE  = (EXT4_LB_DE_RO | EXT4_LB_DX_RO |
98 +                                  EXT4_LB_EXACT),
99 +       EXT4_HLOCK_DEL_SAFE     = (EXT4_LB_DE | EXT4_LB_DX_RO | EXT4_LB_EXACT),
100 +       EXT4_HLOCK_SPLIT        = (EXT4_LB_DE | EXT4_LB_DX | EXT4_LB_SPIN),
101 +};
102 +
103 +extern struct htree_lock_head *ext4_htree_lock_head_alloc(unsigned hbits);
104 +#define ext4_htree_lock_head_free(lhead)       htree_lock_head_free(lhead)
105 +
106 +extern struct htree_lock *ext4_htree_lock_alloc(void);
107 +#define ext4_htree_lock_free(lck)              htree_lock_free(lck)
108 +
109 +extern void ext4_htree_lock(struct htree_lock *lck,
110 +                           struct htree_lock_head *lhead,
111 +                           struct inode *dir, unsigned flags);
112 +#define ext4_htree_unlock(lck)                  htree_unlock(lck)
113 +
114 +extern struct buffer_head *__ext4_find_entry(struct inode *dir,
115 +                                       const struct qstr *d_name,
116 +                                       struct ext4_dir_entry_2 **res_dir,
117 +                                       int *inlined, struct htree_lock *lck);
118 +extern int __ext4_add_entry(handle_t *handle, struct dentry *dentry,
119 +                     struct inode *inode, struct htree_lock *lck);
120 +
121  struct ext4_filename {
122         const struct qstr *usr_fname;
123         struct fscrypt_str disk_name;
124 @@ -2421,8 +2491,16 @@ void ext4_insert_dentry(struct inode *in
125                         struct ext4_filename *fname, void *data);
126  static inline void ext4_update_dx_flag(struct inode *inode)
127  {
128 +       /* Disable it for ldiskfs, because going from a DX directory to
129 +        * a non-DX directory while it is in use will completely break
130 +        * the htree-locking.
131 +        * If we really want to support this operation in the future,
132 +        * we would need to exclusively lock the directory here, which
133 +        * would increase code complexity */
134 +#if 0
135         if (!ext4_has_feature_dir_index(inode->i_sb))
136                 ext4_clear_inode_flag(inode, EXT4_INODE_INDEX);
137 +#endif
138  }
139  static const unsigned char ext4_filetype_table[] = {
140         DT_UNKNOWN, DT_REG, DT_DIR, DT_CHR, DT_BLK, DT_FIFO, DT_SOCK, DT_LNK
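For reference (not part of the patch): with EXT4_HTREE_LOCK_HASH defined to 0 above, the lock
types enumerate as EXT4_LK_DX=0, EXT4_LK_DE=1, EXT4_LK_SPIN=2 and EXT4_LK_MAX=3, so the lock-bit
masks above evaluate to the values below; namei.c later recovers a lock type from its mask with
ffz(~lmask), i.e. the index of the lowest set bit.

	EXT4_LB_DX_RO   = 0x01    EXT4_LB_DX   = 0x09   (read bit 0, write bit 3)
	EXT4_LB_DE_RO   = 0x02    EXT4_LB_DE   = 0x12   (read bit 1, write bit 4)
	EXT4_LB_SPIN_RO = 0x04    EXT4_LB_SPIN = 0x24   (read bit 2, write bit 5)
	EXT4_LB_EXACT   = 0x40                          (bit 6, exact-search flag)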
141 Index: linux-4.15.0/fs/ext4/htree_lock.c
142 ===================================================================
143 --- /dev/null
144 +++ linux-4.15.0/fs/ext4/htree_lock.c
145 @@ -0,0 +1,891 @@
146 +/*
147 + * fs/ext4/htree_lock.c
148 + *
149 + * Copyright (c) 2011, 2012, Intel Corporation.
150 + *
151 + * Author: Liang Zhen <liang@whamcloud.com>
152 + */
153 +#include <linux/jbd2.h>
154 +#include <linux/hash.h>
155 +#include <linux/module.h>
156 +#include <linux/htree_lock.h>
157 +
158 +enum {
159 +       HTREE_LOCK_BIT_EX       = (1 << HTREE_LOCK_EX),
160 +       HTREE_LOCK_BIT_PW       = (1 << HTREE_LOCK_PW),
161 +       HTREE_LOCK_BIT_PR       = (1 << HTREE_LOCK_PR),
162 +       HTREE_LOCK_BIT_CW       = (1 << HTREE_LOCK_CW),
163 +       HTREE_LOCK_BIT_CR       = (1 << HTREE_LOCK_CR),
164 +};
165 +
166 +enum {
167 +       HTREE_LOCK_COMPAT_EX    = 0,
168 +       HTREE_LOCK_COMPAT_PW    = HTREE_LOCK_COMPAT_EX | HTREE_LOCK_BIT_CR,
169 +       HTREE_LOCK_COMPAT_PR    = HTREE_LOCK_COMPAT_PW | HTREE_LOCK_BIT_PR,
170 +       HTREE_LOCK_COMPAT_CW    = HTREE_LOCK_COMPAT_PW | HTREE_LOCK_BIT_CW,
171 +       HTREE_LOCK_COMPAT_CR    = HTREE_LOCK_COMPAT_CW | HTREE_LOCK_BIT_PR |
172 +                                 HTREE_LOCK_BIT_PW,
173 +};
174 +
175 +static int htree_lock_compat[] = {
176 +       [HTREE_LOCK_EX]         = HTREE_LOCK_COMPAT_EX,
177 +       [HTREE_LOCK_PW]         = HTREE_LOCK_COMPAT_PW,
178 +       [HTREE_LOCK_PR]         = HTREE_LOCK_COMPAT_PR,
179 +       [HTREE_LOCK_CW]         = HTREE_LOCK_COMPAT_CW,
180 +       [HTREE_LOCK_CR]         = HTREE_LOCK_COMPAT_CR,
181 +};
182 +
183 +/* max allowed htree-lock depth.
184 + * We only need depth=3 for ext4, although users may specify a larger value. */
185 +#define HTREE_LOCK_DEP_MAX     16
186 +
187 +#ifdef HTREE_LOCK_DEBUG
188 +
189 +static char *hl_name[] = {
190 +       [HTREE_LOCK_EX]         = "EX",
191 +       [HTREE_LOCK_PW]         = "PW",
192 +       [HTREE_LOCK_PR]         = "PR",
193 +       [HTREE_LOCK_CW]         = "CW",
194 +       [HTREE_LOCK_CR]         = "CR",
195 +};
196 +
197 +/* lock stats */
198 +struct htree_lock_node_stats {
199 +       unsigned long long      blocked[HTREE_LOCK_MAX];
200 +       unsigned long long      granted[HTREE_LOCK_MAX];
201 +       unsigned long long      retried[HTREE_LOCK_MAX];
202 +       unsigned long long      events;
203 +};
204 +
205 +struct htree_lock_stats {
206 +       struct htree_lock_node_stats    nodes[HTREE_LOCK_DEP_MAX];
207 +       unsigned long long      granted[HTREE_LOCK_MAX];
208 +       unsigned long long      blocked[HTREE_LOCK_MAX];
209 +};
210 +
211 +static struct htree_lock_stats hl_stats;
212 +
213 +void htree_lock_stat_reset(void)
214 +{
215 +       memset(&hl_stats, 0, sizeof(hl_stats));
216 +}
217 +
218 +void htree_lock_stat_print(int depth)
219 +{
220 +       int     i;
221 +       int     j;
222 +
223 +       printk(KERN_DEBUG "HTREE LOCK STATS:\n");
224 +       for (i = 0; i < HTREE_LOCK_MAX; i++) {
225 +               printk(KERN_DEBUG "[%s]: G [%10llu], B [%10llu]\n",
226 +                      hl_name[i], hl_stats.granted[i], hl_stats.blocked[i]);
227 +       }
228 +       for (i = 0; i < depth; i++) {
229 +               printk(KERN_DEBUG "HTREE CHILD [%d] STATS:\n", i);
230 +               for (j = 0; j < HTREE_LOCK_MAX; j++) {
231 +                       printk(KERN_DEBUG
232 +                               "[%s]: G [%10llu], B [%10llu], R [%10llu]\n",
233 +                               hl_name[j], hl_stats.nodes[i].granted[j],
234 +                               hl_stats.nodes[i].blocked[j],
235 +                               hl_stats.nodes[i].retried[j]);
236 +               }
237 +       }
238 +}
239 +
240 +#define lk_grant_inc(m)       do { hl_stats.granted[m]++; } while (0)
241 +#define lk_block_inc(m)       do { hl_stats.blocked[m]++; } while (0)
242 +#define ln_grant_inc(d, m)    do { hl_stats.nodes[d].granted[m]++; } while (0)
243 +#define ln_block_inc(d, m)    do { hl_stats.nodes[d].blocked[m]++; } while (0)
244 +#define ln_retry_inc(d, m)    do { hl_stats.nodes[d].retried[m]++; } while (0)
245 +#define ln_event_inc(d)       do { hl_stats.nodes[d].events++; } while (0)
246 +
247 +#else /* !DEBUG */
248 +
249 +void htree_lock_stat_reset(void) {}
250 +void htree_lock_stat_print(int depth) {}
251 +
252 +#define lk_grant_inc(m)              do {} while (0)
253 +#define lk_block_inc(m)              do {} while (0)
254 +#define ln_grant_inc(d, m)    do {} while (0)
255 +#define ln_block_inc(d, m)    do {} while (0)
256 +#define ln_retry_inc(d, m)    do {} while (0)
257 +#define ln_event_inc(d)              do {} while (0)
258 +
259 +#endif /* DEBUG */
260 +
261 +EXPORT_SYMBOL(htree_lock_stat_reset);
262 +EXPORT_SYMBOL(htree_lock_stat_print);
263 +
264 +#define HTREE_DEP_ROOT           (-1)
265 +
266 +#define htree_spin_lock(lhead, dep)                            \
267 +       bit_spin_lock((dep) + 1, &(lhead)->lh_lock)
268 +#define htree_spin_unlock(lhead, dep)                          \
269 +       bit_spin_unlock((dep) + 1, &(lhead)->lh_lock)
270 +
271 +#define htree_key_event_ignore(child, ln)                      \
272 +       (!((child)->lc_events & (1 << (ln)->ln_mode)))
273 +
274 +static int
275 +htree_key_list_empty(struct htree_lock_node *ln)
276 +{
277 +       return list_empty(&ln->ln_major_list) && list_empty(&ln->ln_minor_list);
278 +}
279 +
280 +static void
281 +htree_key_list_del_init(struct htree_lock_node *ln)
282 +{
283 +       struct htree_lock_node *tmp = NULL;
284 +
285 +       if (!list_empty(&ln->ln_minor_list)) {
286 +               tmp = list_entry(ln->ln_minor_list.next,
287 +                                struct htree_lock_node, ln_minor_list);
288 +               list_del_init(&ln->ln_minor_list);
289 +       }
290 +
291 +       if (list_empty(&ln->ln_major_list))
292 +               return;
293 +
294 +       if (tmp == NULL) { /* not on minor key list */
295 +               list_del_init(&ln->ln_major_list);
296 +       } else {
297 +               BUG_ON(!list_empty(&tmp->ln_major_list));
298 +               list_replace_init(&ln->ln_major_list, &tmp->ln_major_list);
299 +       }
300 +}
301 +
302 +static void
303 +htree_key_list_replace_init(struct htree_lock_node *old,
304 +                           struct htree_lock_node *new)
305 +{
306 +       if (!list_empty(&old->ln_major_list))
307 +               list_replace_init(&old->ln_major_list, &new->ln_major_list);
308 +
309 +       if (!list_empty(&old->ln_minor_list))
310 +               list_replace_init(&old->ln_minor_list, &new->ln_minor_list);
311 +}
312 +
313 +static void
314 +htree_key_event_enqueue(struct htree_lock_child *child,
315 +                       struct htree_lock_node *ln, int dep, void *event)
316 +{
317 +       struct htree_lock_node *tmp;
318 +
319 +       /* NB: ALWAYS called holding lhead::lh_lock(dep) */
320 +       BUG_ON(ln->ln_mode == HTREE_LOCK_NL);
321 +       if (event == NULL || htree_key_event_ignore(child, ln))
322 +               return;
323 +
324 +       /* shouldn't be a very long list */
325 +       list_for_each_entry(tmp, &ln->ln_alive_list, ln_alive_list) {
326 +               if (tmp->ln_mode == HTREE_LOCK_NL) {
327 +                       ln_event_inc(dep);
328 +                       if (child->lc_callback != NULL)
329 +                               child->lc_callback(tmp->ln_ev_target, event);
330 +               }
331 +       }
332 +}
333 +
334 +static int
335 +htree_node_lock_enqueue(struct htree_lock *newlk, struct htree_lock *curlk,
336 +                       unsigned dep, int wait, void *event)
337 +{
338 +       struct htree_lock_child *child = &newlk->lk_head->lh_children[dep];
339 +       struct htree_lock_node *newln = &newlk->lk_nodes[dep];
340 +       struct htree_lock_node *curln = &curlk->lk_nodes[dep];
341 +
342 +       /* NB: ALWAYS called holding lhead::lh_lock(dep) */
343 +       /* NB: we only expect PR/PW lock modes here; only these two modes are
344 +        * allowed for htree_node_lock (asserted in htree_node_lock_internal),
345 +        * NL is only used for listeners, a user can't directly request NL mode */
346 +       if ((curln->ln_mode == HTREE_LOCK_NL) ||
347 +           (curln->ln_mode != HTREE_LOCK_PW &&
348 +            newln->ln_mode != HTREE_LOCK_PW)) {
349 +               /* no conflict, attach it on granted list of @curlk */
350 +               if (curln->ln_mode != HTREE_LOCK_NL) {
351 +                       list_add(&newln->ln_granted_list,
352 +                                &curln->ln_granted_list);
353 +               } else {
354 +                       /* replace key owner */
355 +                       htree_key_list_replace_init(curln, newln);
356 +               }
357 +
358 +               list_add(&newln->ln_alive_list, &curln->ln_alive_list);
359 +               htree_key_event_enqueue(child, newln, dep, event);
360 +               ln_grant_inc(dep, newln->ln_mode);
361 +               return 1; /* still hold lh_lock */
362 +       }
363 +
364 +       if (!wait) { /* can't grant and don't want to wait */
365 +               ln_retry_inc(dep, newln->ln_mode);
366 +               newln->ln_mode = HTREE_LOCK_INVAL;
367 +               return -1; /* don't wait and just return -1 */
368 +       }
369 +
370 +       newlk->lk_task = current;
371 +       set_current_state(TASK_UNINTERRUPTIBLE);
372 +       /* conflict, attach it on blocked list of curlk */
373 +       list_add_tail(&newln->ln_blocked_list, &curln->ln_blocked_list);
374 +       list_add(&newln->ln_alive_list, &curln->ln_alive_list);
375 +       ln_block_inc(dep, newln->ln_mode);
376 +
377 +       htree_spin_unlock(newlk->lk_head, dep);
378 +       /* wait to be given the lock */
379 +       if (newlk->lk_task != NULL)
380 +               schedule();
381 +       /* granted, no doubt, wake up will set me RUNNING */
382 +       if (event == NULL || htree_key_event_ignore(child, newln))
383 +               return 0; /* granted without lh_lock */
384 +
385 +       htree_spin_lock(newlk->lk_head, dep);
386 +       htree_key_event_enqueue(child, newln, dep, event);
387 +       return 1; /* still hold lh_lock */
388 +}
389 +
390 +/*
391 + * get PR/PW access to a particular tree node according to @dep and @key,
392 + * it will return -1 if @wait is false and the lock can't be granted immediately.
393 + * All listeners (HTREE_LOCK_NL) on @dep with the same @key will get
394 + * @event if it's not NULL.
395 + * NB: ALWAYS called holding lhead::lh_lock
396 + */
397 +static int
398 +htree_node_lock_internal(struct htree_lock_head *lhead, struct htree_lock *lck,
399 +                        htree_lock_mode_t mode, u32 key, unsigned dep,
400 +                        int wait, void *event)
401 +{
402 +       LIST_HEAD(list);
403 +       struct htree_lock       *tmp;
404 +       struct htree_lock       *tmp2;
405 +       u16                     major;
406 +       u16                     minor;
407 +       u8                      reverse;
408 +       u8                      ma_bits;
409 +       u8                      mi_bits;
410 +
411 +       BUG_ON(mode != HTREE_LOCK_PW && mode != HTREE_LOCK_PR);
412 +       BUG_ON(htree_node_is_granted(lck, dep));
413 +
414 +       key = hash_long(key, lhead->lh_hbits);
415 +
416 +       mi_bits = lhead->lh_hbits >> 1;
417 +       ma_bits = lhead->lh_hbits - mi_bits;
418 +
419 +       lck->lk_nodes[dep].ln_major_key = major = key & ((1U << ma_bits) - 1);
420 +       lck->lk_nodes[dep].ln_minor_key = minor = key >> ma_bits;
421 +       lck->lk_nodes[dep].ln_mode = mode;
422 +
423 +       /*
424 +        * The major key list is an ordered list, so searches are started
425 +        * at the end of the list that is numerically closer to major_key,
426 +        * so at most half of the list will be walked (for well-distributed
427 +        * keys). The list traversal aborts early if the expected key
428 +        * location is passed.
429 +        */
430 +       reverse = (major >= (1 << (ma_bits - 1)));
431 +
432 +       if (reverse) {
433 +               list_for_each_entry_reverse(tmp,
434 +                                       &lhead->lh_children[dep].lc_list,
435 +                                       lk_nodes[dep].ln_major_list) {
436 +                       if (tmp->lk_nodes[dep].ln_major_key == major) {
437 +                               goto search_minor;
438 +
439 +                       } else if (tmp->lk_nodes[dep].ln_major_key < major) {
440 +                               /* attach _after_ @tmp */
441 +                               list_add(&lck->lk_nodes[dep].ln_major_list,
442 +                                        &tmp->lk_nodes[dep].ln_major_list);
443 +                               goto out_grant_major;
444 +                       }
445 +               }
446 +
447 +               list_add(&lck->lk_nodes[dep].ln_major_list,
448 +                        &lhead->lh_children[dep].lc_list);
449 +               goto out_grant_major;
450 +
451 +       } else {
452 +               list_for_each_entry(tmp, &lhead->lh_children[dep].lc_list,
453 +                                   lk_nodes[dep].ln_major_list) {
454 +                       if (tmp->lk_nodes[dep].ln_major_key == major) {
455 +                               goto search_minor;
456 +
457 +                       } else if (tmp->lk_nodes[dep].ln_major_key > major) {
458 +                               /* insert _before_ @tmp */
459 +                               list_add_tail(&lck->lk_nodes[dep].ln_major_list,
460 +                                       &tmp->lk_nodes[dep].ln_major_list);
461 +                               goto out_grant_major;
462 +                       }
463 +               }
464 +
465 +               list_add_tail(&lck->lk_nodes[dep].ln_major_list,
466 +                             &lhead->lh_children[dep].lc_list);
467 +               goto out_grant_major;
468 +       }
469 +
470 + search_minor:
471 +       /*
472 +        * NB: minor_key list doesn't have a "head", @list is just a
473 +        * temporary stub to help the list search; make sure it's removed
474 +        * after searching.
475 +        * minor_key list is an ordered list too.
476 +        */
477 +       list_add_tail(&list, &tmp->lk_nodes[dep].ln_minor_list);
478 +
479 +       reverse = (minor >= (1 << (mi_bits - 1)));
480 +
481 +       if (reverse) {
482 +               list_for_each_entry_reverse(tmp2, &list,
483 +                                           lk_nodes[dep].ln_minor_list) {
484 +                       if (tmp2->lk_nodes[dep].ln_minor_key == minor) {
485 +                               goto out_enqueue;
486 +
487 +                       } else if (tmp2->lk_nodes[dep].ln_minor_key < minor) {
488 +                               /* attach _after_ @tmp2 */
489 +                               list_add(&lck->lk_nodes[dep].ln_minor_list,
490 +                                        &tmp2->lk_nodes[dep].ln_minor_list);
491 +                               goto out_grant_minor;
492 +                       }
493 +               }
494 +
495 +               list_add(&lck->lk_nodes[dep].ln_minor_list, &list);
496 +
497 +       } else {
498 +               list_for_each_entry(tmp2, &list,
499 +                                   lk_nodes[dep].ln_minor_list) {
500 +                       if (tmp2->lk_nodes[dep].ln_minor_key == minor) {
501 +                               goto out_enqueue;
502 +
503 +                       } else if (tmp2->lk_nodes[dep].ln_minor_key > minor) {
504 +                               /* insert _before_ @tmp2 */
505 +                               list_add_tail(&lck->lk_nodes[dep].ln_minor_list,
506 +                                       &tmp2->lk_nodes[dep].ln_minor_list);
507 +                               goto out_grant_minor;
508 +                       }
509 +               }
510 +
511 +               list_add_tail(&lck->lk_nodes[dep].ln_minor_list, &list);
512 +       }
513 +
514 + out_grant_minor:
515 +       if (list.next == &lck->lk_nodes[dep].ln_minor_list) {
516 +               /* new lock @lck is the first one on minor_key list, which
517 +                * means it has the smallest minor_key and it should
518 +                * replace @tmp as minor_key owner */
519 +               list_replace_init(&tmp->lk_nodes[dep].ln_major_list,
520 +                                 &lck->lk_nodes[dep].ln_major_list);
521 +       }
522 +       /* remove the temporary head */
523 +       list_del(&list);
524 +
525 + out_grant_major:
526 +       ln_grant_inc(dep, lck->lk_nodes[dep].ln_mode);
527 +       return 1; /* granted with holding lh_lock */
528 +
529 + out_enqueue:
530 +       list_del(&list); /* remove temporary head */
531 +       return htree_node_lock_enqueue(lck, tmp2, dep, wait, event);
532 +}
533 +
534 +/*
535 + * release the key of @lck at level @dep, and grant any blocked locks.
536 + * the caller will still listen on @key if @event is not NULL, which means
537 + * the caller can see an event (via event_cb) whenever a lock with
538 + * the same key at level @dep is granted.
539 + * NB: ALWAYS called holding lhead::lh_lock
540 + * NB: listener will not block anyone because listening mode is HTREE_LOCK_NL
541 + */
542 +static void
543 +htree_node_unlock_internal(struct htree_lock_head *lhead,
544 +                          struct htree_lock *curlk, unsigned dep, void *event)
545 +{
546 +       struct htree_lock_node  *curln = &curlk->lk_nodes[dep];
547 +       struct htree_lock       *grtlk = NULL;
548 +       struct htree_lock_node  *grtln;
549 +       struct htree_lock       *poslk;
550 +       struct htree_lock       *tmplk;
551 +
552 +       if (!htree_node_is_granted(curlk, dep))
553 +               return;
554 +
555 +       if (!list_empty(&curln->ln_granted_list)) {
556 +               /* there is another granted lock */
557 +               grtlk = list_entry(curln->ln_granted_list.next,
558 +                                  struct htree_lock,
559 +                                  lk_nodes[dep].ln_granted_list);
560 +               list_del_init(&curln->ln_granted_list);
561 +       }
562 +
563 +       if (grtlk == NULL && !list_empty(&curln->ln_blocked_list)) {
564 +               /*
565 +                * @curlk is the only granted lock, so we confirmed:
566 +                * a) curln is key owner (attached on major/minor_list),
567 +                *    so if there is any blocked lock, it should be attached
568 +                *    on curln->ln_blocked_list
569 +                * b) we always can grant the first blocked lock
570 +                */
571 +               grtlk = list_entry(curln->ln_blocked_list.next,
572 +                                  struct htree_lock,
573 +                                  lk_nodes[dep].ln_blocked_list);
574 +               BUG_ON(grtlk->lk_task == NULL);
575 +               wake_up_process(grtlk->lk_task);
576 +       }
577 +
578 +       if (event != NULL &&
579 +           lhead->lh_children[dep].lc_events != HTREE_EVENT_DISABLE) {
580 +               curln->ln_ev_target = event;
581 +               curln->ln_mode = HTREE_LOCK_NL; /* listen! */
582 +       } else {
583 +               curln->ln_mode = HTREE_LOCK_INVAL;
584 +       }
585 +
586 +       if (grtlk == NULL) { /* I must be the only one locking this key */
587 +               struct htree_lock_node *tmpln;
588 +
589 +               BUG_ON(htree_key_list_empty(curln));
590 +
591 +               if (curln->ln_mode == HTREE_LOCK_NL) /* listening */
592 +                       return;
593 +
594 +               /* not listening */
595 +               if (list_empty(&curln->ln_alive_list)) { /* no more listener */
596 +                       htree_key_list_del_init(curln);
597 +                       return;
598 +               }
599 +
600 +               tmpln = list_entry(curln->ln_alive_list.next,
601 +                                  struct htree_lock_node, ln_alive_list);
602 +
603 +               BUG_ON(tmpln->ln_mode != HTREE_LOCK_NL);
604 +
605 +               htree_key_list_replace_init(curln, tmpln);
606 +               list_del_init(&curln->ln_alive_list);
607 +
608 +               return;
609 +       }
610 +
611 +       /* have a granted lock */
612 +       grtln = &grtlk->lk_nodes[dep];
613 +       if (!list_empty(&curln->ln_blocked_list)) {
614 +               /* only key owner can be on both lists */
615 +               BUG_ON(htree_key_list_empty(curln));
616 +
617 +               if (list_empty(&grtln->ln_blocked_list)) {
618 +                       list_add(&grtln->ln_blocked_list,
619 +                                &curln->ln_blocked_list);
620 +               }
621 +               list_del_init(&curln->ln_blocked_list);
622 +       }
623 +       /*
624 +        * NB: this is the tricky part:
625 +        * We have only two modes for child-lock (PR and PW), also,
626 +        * only owner of the key (attached on major/minor_list) can be on
627 +        * both blocked_list and granted_list, so @grtlk must be one
628 +        * of these two cases:
629 +        *
630 +        * a) @grtlk is taken from granted_list, which means we've granted
631 +        *    more than one lock so @grtlk has to be PR, the first blocked
632 +        *    lock must be PW and we can't grant it at all.
633 +        *    So even @grtlk is not owner of the key (empty blocked_list),
634 +        *    we don't care because we can't grant any lock.
635 +        * b) we just grant a new lock which is taken from head of blocked
636 +        *    list, and it should be the first granted lock, and it should
637 +        *    be the first one linked on blocked_list.
638 +        *
639 +        * Either way, we can get the correct result by iterating the
640 +        * blocked_list of @grtlk, and don't have to bother finding out
641 +        * the owner of the current key.
642 +        */
643 +       list_for_each_entry_safe(poslk, tmplk, &grtln->ln_blocked_list,
644 +                                lk_nodes[dep].ln_blocked_list) {
645 +               if (grtlk->lk_nodes[dep].ln_mode == HTREE_LOCK_PW ||
646 +                   poslk->lk_nodes[dep].ln_mode == HTREE_LOCK_PW)
647 +                       break;
648 +               /* grant all readers */
649 +               list_del_init(&poslk->lk_nodes[dep].ln_blocked_list);
650 +               list_add(&poslk->lk_nodes[dep].ln_granted_list,
651 +                        &grtln->ln_granted_list);
652 +
653 +               BUG_ON(poslk->lk_task == NULL);
654 +               wake_up_process(poslk->lk_task);
655 +       }
656 +
657 +       /* if @curln is the owner of this key, replace it with @grtln */
658 +       if (!htree_key_list_empty(curln))
659 +               htree_key_list_replace_init(curln, grtln);
660 +
661 +       if (curln->ln_mode == HTREE_LOCK_INVAL)
662 +               list_del_init(&curln->ln_alive_list);
663 +}
664 +
665 +/*
666 + * it's just a wrapper of htree_node_lock_internal; it returns 1 when granted
667 + * and 0 only if @wait is false and the lock can't be granted immediately
668 + */
669 +int
670 +htree_node_lock_try(struct htree_lock *lck, htree_lock_mode_t mode,
671 +                   u32 key, unsigned dep, int wait, void *event)
672 +{
673 +       struct htree_lock_head *lhead = lck->lk_head;
674 +       int rc;
675 +
676 +       BUG_ON(dep >= lck->lk_depth);
677 +       BUG_ON(lck->lk_mode == HTREE_LOCK_INVAL);
678 +
679 +       htree_spin_lock(lhead, dep);
680 +       rc = htree_node_lock_internal(lhead, lck, mode, key, dep, wait, event);
681 +       if (rc != 0)
682 +               htree_spin_unlock(lhead, dep);
683 +       return rc >= 0;
684 +}
685 +EXPORT_SYMBOL(htree_node_lock_try);
686 +
687 +/* it's a wrapper of htree_node_unlock_internal */
688 +void
689 +htree_node_unlock(struct htree_lock *lck, unsigned dep, void *event)
690 +{
691 +       struct htree_lock_head *lhead = lck->lk_head;
692 +
693 +       BUG_ON(dep >= lck->lk_depth);
694 +       BUG_ON(lck->lk_mode == HTREE_LOCK_INVAL);
695 +
696 +       htree_spin_lock(lhead, dep);
697 +       htree_node_unlock_internal(lhead, lck, dep, event);
698 +       htree_spin_unlock(lhead, dep);
699 +}
700 +EXPORT_SYMBOL(htree_node_unlock);
701 +
702 +/* stop listening on child-lock level @dep */
703 +void
704 +htree_node_stop_listen(struct htree_lock *lck, unsigned dep)
705 +{
706 +       struct htree_lock_node *ln = &lck->lk_nodes[dep];
707 +       struct htree_lock_node *tmp;
708 +
709 +       BUG_ON(htree_node_is_granted(lck, dep));
710 +       BUG_ON(!list_empty(&ln->ln_blocked_list));
711 +       BUG_ON(!list_empty(&ln->ln_granted_list));
712 +
713 +       if (!htree_node_is_listening(lck, dep))
714 +               return;
715 +
716 +       htree_spin_lock(lck->lk_head, dep);
717 +       ln->ln_mode = HTREE_LOCK_INVAL;
718 +       ln->ln_ev_target = NULL;
719 +
720 +       if (htree_key_list_empty(ln)) { /* not owner */
721 +               list_del_init(&ln->ln_alive_list);
722 +               goto out;
723 +       }
724 +
725 +       /* I'm the owner... */
726 +       if (list_empty(&ln->ln_alive_list)) { /* no more listener */
727 +               htree_key_list_del_init(ln);
728 +               goto out;
729 +       }
730 +
731 +       tmp = list_entry(ln->ln_alive_list.next,
732 +                        struct htree_lock_node, ln_alive_list);
733 +
734 +       BUG_ON(tmp->ln_mode != HTREE_LOCK_NL);
735 +       htree_key_list_replace_init(ln, tmp);
736 +       list_del_init(&ln->ln_alive_list);
737 + out:
738 +       htree_spin_unlock(lck->lk_head, dep);
739 +}
740 +EXPORT_SYMBOL(htree_node_stop_listen);
741 +
742 +/* release all child-locks if we have any */
743 +static void
744 +htree_node_release_all(struct htree_lock *lck)
745 +{
746 +       int     i;
747 +
748 +       for (i = 0; i < lck->lk_depth; i++) {
749 +               if (htree_node_is_granted(lck, i))
750 +                       htree_node_unlock(lck, i, NULL);
751 +               else if (htree_node_is_listening(lck, i))
752 +                       htree_node_stop_listen(lck, i);
753 +       }
754 +}
755 +
756 +/*
757 + * obtain htree lock, it could be blocked inside if there's conflict
758 + * with any granted or blocked lock and @wait is true.
759 + * NB: ALWAYS called holding lhead::lh_lock
760 + */
761 +static int
762 +htree_lock_internal(struct htree_lock *lck, int wait)
763 +{
764 +       struct htree_lock_head *lhead = lck->lk_head;
765 +       int     granted = 0;
766 +       int     blocked = 0;
767 +       int     i;
768 +
769 +       for (i = 0; i < HTREE_LOCK_MAX; i++) {
770 +               if (lhead->lh_ngranted[i] != 0)
771 +                       granted |= 1 << i;
772 +               if (lhead->lh_nblocked[i] != 0)
773 +                       blocked |= 1 << i;
774 +       }
775 +       if ((htree_lock_compat[lck->lk_mode] & granted) != granted ||
776 +           (htree_lock_compat[lck->lk_mode] & blocked) != blocked) {
777 +               /* will block the current lock even if it only conflicts with
778 +                * another blocked lock, so a lock like EX wouldn't starve */
779 +               if (!wait)
780 +                       return -1;
781 +               lhead->lh_nblocked[lck->lk_mode]++;
782 +               lk_block_inc(lck->lk_mode);
783 +
784 +               lck->lk_task = current;
785 +               list_add_tail(&lck->lk_blocked_list, &lhead->lh_blocked_list);
786 +
787 +retry:
788 +               set_current_state(TASK_UNINTERRUPTIBLE);
789 +               htree_spin_unlock(lhead, HTREE_DEP_ROOT);
790 +               /* wait to be given the lock */
791 +               if (lck->lk_task != NULL)
792 +                       schedule();
793 +               /* granted, no doubt. wake up will set me RUNNING.
794 +                * Since the thread could be woken up spuriously,
795 +                * we need to check again whether the lock was granted. */
796 +               if (!list_empty(&lck->lk_blocked_list)) {
797 +                       htree_spin_lock(lhead, HTREE_DEP_ROOT);
798 +                       if (list_empty(&lck->lk_blocked_list)) {
799 +                               htree_spin_unlock(lhead, HTREE_DEP_ROOT);
800 +                               return 0;
801 +                       }
802 +                       goto retry;
803 +               }
804 +               return 0; /* without lh_lock */
805 +       }
806 +       lhead->lh_ngranted[lck->lk_mode]++;
807 +       lk_grant_inc(lck->lk_mode);
808 +       return 1;
809 +}
810 +
811 +/* release htree lock. NB: ALWAYS called holding lhead::lh_lock */
812 +static void
813 +htree_unlock_internal(struct htree_lock *lck)
814 +{
815 +       struct htree_lock_head *lhead = lck->lk_head;
816 +       struct htree_lock *tmp;
817 +       struct htree_lock *tmp2;
818 +       int granted = 0;
819 +       int i;
820 +
821 +       BUG_ON(lhead->lh_ngranted[lck->lk_mode] == 0);
822 +
823 +       lhead->lh_ngranted[lck->lk_mode]--;
824 +       lck->lk_mode = HTREE_LOCK_INVAL;
825 +
826 +       for (i = 0; i < HTREE_LOCK_MAX; i++) {
827 +               if (lhead->lh_ngranted[i] != 0)
828 +                       granted |= 1 << i;
829 +       }
830 +       list_for_each_entry_safe(tmp, tmp2,
831 +                                &lhead->lh_blocked_list, lk_blocked_list) {
832 +               /* conflict with any granted lock? */
833 +               if ((htree_lock_compat[tmp->lk_mode] & granted) != granted)
834 +                       break;
835 +
836 +               list_del_init(&tmp->lk_blocked_list);
837 +
838 +               BUG_ON(lhead->lh_nblocked[tmp->lk_mode] == 0);
839 +
840 +               lhead->lh_nblocked[tmp->lk_mode]--;
841 +               lhead->lh_ngranted[tmp->lk_mode]++;
842 +               granted |= 1 << tmp->lk_mode;
843 +
844 +               BUG_ON(tmp->lk_task == NULL);
845 +               wake_up_process(tmp->lk_task);
846 +       }
847 +}
848 +
849 +/* it's a wrapper of htree_lock_internal and the exported interface.
850 + * It always returns 1 with the lock granted if @wait is true; it can return 0
851 + * if @wait is false and the locking request can't be granted immediately */
852 +int
853 +htree_lock_try(struct htree_lock *lck, struct htree_lock_head *lhead,
854 +              htree_lock_mode_t mode, int wait)
855 +{
856 +       int     rc;
857 +
858 +       BUG_ON(lck->lk_depth > lhead->lh_depth);
859 +       BUG_ON(lck->lk_head != NULL);
860 +       BUG_ON(lck->lk_task != NULL);
861 +
862 +       lck->lk_head = lhead;
863 +       lck->lk_mode = mode;
864 +
865 +       htree_spin_lock(lhead, HTREE_DEP_ROOT);
866 +       rc = htree_lock_internal(lck, wait);
867 +       if (rc != 0)
868 +               htree_spin_unlock(lhead, HTREE_DEP_ROOT);
869 +       return rc >= 0;
870 +}
871 +EXPORT_SYMBOL(htree_lock_try);
872 +
873 +/* it's a wrapper of htree_unlock_internal and the exported interface.
874 + * It releases all htree_node_locks and the htree_lock */
875 +void
876 +htree_unlock(struct htree_lock *lck)
877 +{
878 +       BUG_ON(lck->lk_head == NULL);
879 +       BUG_ON(lck->lk_mode == HTREE_LOCK_INVAL);
880 +
881 +       htree_node_release_all(lck);
882 +
883 +       htree_spin_lock(lck->lk_head, HTREE_DEP_ROOT);
884 +       htree_unlock_internal(lck);
885 +       htree_spin_unlock(lck->lk_head, HTREE_DEP_ROOT);
886 +       lck->lk_head = NULL;
887 +       lck->lk_task = NULL;
888 +}
889 +EXPORT_SYMBOL(htree_unlock);
890 +
891 +/* change lock mode */
892 +void
893 +htree_change_mode(struct htree_lock *lck, htree_lock_mode_t mode)
894 +{
895 +       BUG_ON(lck->lk_mode == HTREE_LOCK_INVAL);
896 +       lck->lk_mode = mode;
897 +}
898 +EXPORT_SYMBOL(htree_change_mode);
899 +
900 +/* release the htree lock, and lock it again with a new mode.
901 + * This function will first release all htree_node_locks and the htree_lock,
902 + * then try to regain the htree_lock with the new @mode.
903 + * It always returns 1 with the lock granted if @wait is true; it can return 0
904 + * if @wait is false and the locking request can't be granted immediately */
905 +int
906 +htree_change_lock_try(struct htree_lock *lck, htree_lock_mode_t mode, int wait)
907 +{
908 +       struct htree_lock_head *lhead = lck->lk_head;
909 +       int rc;
910 +
911 +       BUG_ON(lhead == NULL);
912 +       BUG_ON(lck->lk_mode == mode);
913 +       BUG_ON(lck->lk_mode == HTREE_LOCK_INVAL || mode == HTREE_LOCK_INVAL);
914 +
915 +       htree_node_release_all(lck);
916 +
917 +       htree_spin_lock(lhead, HTREE_DEP_ROOT);
918 +       htree_unlock_internal(lck);
919 +       lck->lk_mode = mode;
920 +       rc = htree_lock_internal(lck, wait);
921 +       if (rc != 0)
922 +               htree_spin_unlock(lhead, HTREE_DEP_ROOT);
923 +       return rc >= 0;
924 +}
925 +EXPORT_SYMBOL(htree_change_lock_try);
926 +
927 +/* create a htree_lock head with @depth levels (number of child-locks),
928 + * it is a per-resource structure */
929 +struct htree_lock_head *
930 +htree_lock_head_alloc(unsigned depth, unsigned hbits, unsigned priv)
931 +{
932 +       struct htree_lock_head *lhead;
933 +       int  i;
934 +
935 +       if (depth > HTREE_LOCK_DEP_MAX) {
936 +               printk(KERN_ERR "%d is larger than max htree_lock depth %d\n",
937 +                       depth, HTREE_LOCK_DEP_MAX);
938 +               return NULL;
939 +       }
940 +
941 +       lhead = kzalloc(offsetof(struct htree_lock_head,
942 +                                lh_children[depth]) + priv, GFP_NOFS);
943 +       if (lhead == NULL)
944 +               return NULL;
945 +
946 +       if (hbits < HTREE_HBITS_MIN)
947 +               lhead->lh_hbits = HTREE_HBITS_MIN;
948 +       else /* keep the requested hbits, clamped to the maximum */
949 +               lhead->lh_hbits = min_t(unsigned, hbits, HTREE_HBITS_MAX);
950 +
951 +       lhead->lh_lock = 0;
952 +       lhead->lh_depth = depth;
953 +       INIT_LIST_HEAD(&lhead->lh_blocked_list);
954 +       if (priv > 0) {
955 +               lhead->lh_private = (void *)lhead +
956 +                       offsetof(struct htree_lock_head, lh_children[depth]);
957 +       }
958 +
959 +       for (i = 0; i < depth; i++) {
960 +               INIT_LIST_HEAD(&lhead->lh_children[i].lc_list);
961 +               lhead->lh_children[i].lc_events = HTREE_EVENT_DISABLE;
962 +       }
963 +       return lhead;
964 +}
965 +EXPORT_SYMBOL(htree_lock_head_alloc);
966 +
967 +/* free the htree_lock head */
968 +void
969 +htree_lock_head_free(struct htree_lock_head *lhead)
970 +{
971 +       int     i;
972 +
973 +       BUG_ON(!list_empty(&lhead->lh_blocked_list));
974 +       for (i = 0; i < lhead->lh_depth; i++)
975 +               BUG_ON(!list_empty(&lhead->lh_children[i].lc_list));
976 +       kfree(lhead);
977 +}
978 +EXPORT_SYMBOL(htree_lock_head_free);
979 +
980 +/* register event callback for @events of child-lock at level @dep */
981 +void
982 +htree_lock_event_attach(struct htree_lock_head *lhead, unsigned dep,
983 +                       unsigned events, htree_event_cb_t callback)
984 +{
985 +       BUG_ON(lhead->lh_depth <= dep);
986 +       lhead->lh_children[dep].lc_events = events;
987 +       lhead->lh_children[dep].lc_callback = callback;
988 +}
989 +EXPORT_SYMBOL(htree_lock_event_attach);
990 +
991 +/* allocate a htree_lock, which is a per-thread structure; @pbytes is the
992 + * number of extra bytes reserved as private data for the caller */
993 +struct htree_lock *
994 +htree_lock_alloc(unsigned depth, unsigned pbytes)
995 +{
996 +       struct htree_lock *lck;
997 +       int i = offsetof(struct htree_lock, lk_nodes[depth]);
998 +
999 +       if (depth > HTREE_LOCK_DEP_MAX) {
1000 +               printk(KERN_ERR "%d is larger than max htree_lock depth %d\n",
1001 +                       depth, HTREE_LOCK_DEP_MAX);
1002 +               return NULL;
1003 +       }
1004 +       lck = kzalloc(i + pbytes, GFP_NOFS);
1005 +       if (lck == NULL)
1006 +               return NULL;
1007 +
1008 +       if (pbytes != 0)
1009 +               lck->lk_private = (void *)lck + i;
1010 +       lck->lk_mode = HTREE_LOCK_INVAL;
1011 +       lck->lk_depth = depth;
1012 +       INIT_LIST_HEAD(&lck->lk_blocked_list);
1013 +
1014 +       for (i = 0; i < depth; i++) {
1015 +               struct htree_lock_node *node = &lck->lk_nodes[i];
1016 +
1017 +               node->ln_mode = HTREE_LOCK_INVAL;
1018 +               INIT_LIST_HEAD(&node->ln_major_list);
1019 +               INIT_LIST_HEAD(&node->ln_minor_list);
1020 +               INIT_LIST_HEAD(&node->ln_alive_list);
1021 +               INIT_LIST_HEAD(&node->ln_blocked_list);
1022 +               INIT_LIST_HEAD(&node->ln_granted_list);
1023 +       }
1024 +
1025 +       return lck;
1026 +}
1027 +EXPORT_SYMBOL(htree_lock_alloc);
1028 +
1029 +/* free htree_lock node */
1030 +void
1031 +htree_lock_free(struct htree_lock *lck)
1032 +{
1033 +       BUG_ON(lck->lk_mode != HTREE_LOCK_INVAL);
1034 +       kfree(lck);
1035 +}
1036 +EXPORT_SYMBOL(htree_lock_free);
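For context (not part of the patch): a minimal sketch of the htree_lock life cycle implemented
above, assuming the declarations in linux/htree_lock.h (added elsewhere in this patch series)
match the definitions in this file. The depth (1), hash bits (16), node key (123) and the chosen
lock modes are illustrative.

	static void htree_lock_example(void)
	{
		struct htree_lock_head *lhead;	/* one per shared resource */
		struct htree_lock *lck;		/* one per thread */

		lhead = htree_lock_head_alloc(1, 16, 0);  /* 1 child level, no priv bytes */
		lck = htree_lock_alloc(1, 0);             /* matching depth, no priv bytes */
		if (lhead == NULL || lck == NULL)
			goto out;

		/* shared access to the whole tree; wait != 0, so this always grants */
		htree_lock_try(lck, lhead, HTREE_LOCK_CR, 1);
		/* then a PR lock on one node, identified by key 123, at child level 0 */
		htree_node_lock_try(lck, HTREE_LOCK_PR, 123, 0, 1, NULL);

		/* ... access the sub-resource named by key 123 ... */

		htree_node_unlock(lck, 0, NULL);
		htree_unlock(lck);	/* would also release any remaining node locks */
	out:
		if (lck != NULL)
			htree_lock_free(lck);
		if (lhead != NULL)
			htree_lock_head_free(lhead);
	}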
1037 Index: linux-4.15.0/fs/ext4/namei.c
1038 ===================================================================
1039 --- linux-4.15.0.orig/fs/ext4/namei.c
1040 +++ linux-4.15.0/fs/ext4/namei.c
1041 @@ -53,6 +53,7 @@ struct buffer_head *ext4_append(handle_t
1042                                         ext4_lblk_t *block)
1043  {
1044         struct buffer_head *bh;
1045 +       struct ext4_inode_info *ei = EXT4_I(inode);
1046         int err;
1047  
1048         if (unlikely(EXT4_SB(inode->i_sb)->s_max_dir_size_kb &&
1049 @@ -60,15 +61,22 @@ struct buffer_head *ext4_append(handle_t
1050                       EXT4_SB(inode->i_sb)->s_max_dir_size_kb)))
1051                 return ERR_PTR(-ENOSPC);
1052  
1053 +       /* with parallel dir operations all appends
1054 +        * have to be serialized -bzzz */
1055 +       down(&ei->i_append_sem);
1056 +
1057         *block = inode->i_size >> inode->i_sb->s_blocksize_bits;
1058  
1059         bh = ext4_bread(handle, inode, *block, EXT4_GET_BLOCKS_CREATE);
1060 -       if (IS_ERR(bh))
1061 +       if (IS_ERR(bh)) {
1062 +               up(&ei->i_append_sem);
1063                 return bh;
1064 +       }
1065         inode->i_size += inode->i_sb->s_blocksize;
1066         EXT4_I(inode)->i_disksize = inode->i_size;
1067         BUFFER_TRACE(bh, "get_write_access");
1068         err = ext4_journal_get_write_access(handle, bh);
1069 +       up(&ei->i_append_sem);
1070         if (err) {
1071                 brelse(bh);
1072                 ext4_std_error(inode->i_sb, err);
1073 @@ -248,7 +256,8 @@ static unsigned dx_node_limit(struct ino
1074  static struct dx_frame *dx_probe(struct ext4_filename *fname,
1075                                  struct inode *dir,
1076                                  struct dx_hash_info *hinfo,
1077 -                                struct dx_frame *frame);
1078 +                                struct dx_frame *frame,
1079 +                                struct htree_lock *lck);
1080  static void dx_release(struct dx_frame *frames);
1081  static int dx_make_map(struct inode *dir, struct ext4_dir_entry_2 *de,
1082                        unsigned blocksize, struct dx_hash_info *hinfo,
1083 @@ -262,12 +271,13 @@ static void dx_insert_block(struct dx_fr
1084  static int ext4_htree_next_block(struct inode *dir, __u32 hash,
1085                                  struct dx_frame *frame,
1086                                  struct dx_frame *frames,
1087 -                                __u32 *start_hash);
1088 +                                __u32 *start_hash, struct htree_lock *lck);
1089  static struct buffer_head * ext4_dx_find_entry(struct inode *dir,
1090                 struct ext4_filename *fname,
1091 -               struct ext4_dir_entry_2 **res_dir);
1092 +               struct ext4_dir_entry_2 **res_dir, struct htree_lock *lck);
1093  static int ext4_dx_add_entry(handle_t *handle, struct ext4_filename *fname,
1094 -                            struct inode *dir, struct inode *inode);
1095 +                            struct inode *dir, struct inode *inode,
1096 +                            struct htree_lock *lck);
1097  
1098  /* checksumming functions */
1099  void initialize_dirent_tail(struct ext4_dir_entry_tail *t,
1100 @@ -730,6 +740,227 @@ struct stats dx_show_entries(struct dx_h
1101  }
1102  #endif /* DX_DEBUG */
1103  
1104 +/* private data for htree_lock */
1105 +struct ext4_dir_lock_data {
1106 +       unsigned                ld_flags;  /* bits-map for lock types */
1107 +       unsigned                ld_count;  /* # entries of the last DX block */
1108 +       struct dx_entry         ld_at_entry; /* copy of leaf dx_entry */
1109 +       struct dx_entry         *ld_at;    /* position of leaf dx_entry */
1110 +};
1111 +
1112 +#define ext4_htree_lock_data(l)        ((struct ext4_dir_lock_data *)(l)->lk_private)
1113 +#define ext4_find_entry(dir, name, dirent, inline) \
1114 +                       __ext4_find_entry(dir, name, dirent, inline, NULL)
1115 +#define ext4_add_entry(handle, dentry, inode) \
1116 +                       __ext4_add_entry(handle, dentry, inode, NULL)
1117 +
1118 +/* NB: ext4_lblk_t is 32 bits so we use high bits to identify invalid blk */
1119 +#define EXT4_HTREE_NODE_CHANGED        (0xcafeULL << 32)
1120 +
1121 +static void ext4_htree_event_cb(void *target, void *event)
1122 +{
1123 +       u64 *block = (u64 *)target;
1124 +
1125 +       if (*block == dx_get_block((struct dx_entry *)event))
1126 +               *block = EXT4_HTREE_NODE_CHANGED;
1127 +}
1128 +
1129 +struct htree_lock_head *ext4_htree_lock_head_alloc(unsigned hbits)
1130 +{
1131 +       struct htree_lock_head *lhead;
1132 +
1133 +       lhead = htree_lock_head_alloc(EXT4_LK_MAX, hbits, 0);
1134 +       if (lhead != NULL) {
1135 +               htree_lock_event_attach(lhead, EXT4_LK_SPIN, HTREE_EVENT_WR,
1136 +                                       ext4_htree_event_cb);
1137 +       }
1138 +       return lhead;
1139 +}
1140 +EXPORT_SYMBOL(ext4_htree_lock_head_alloc);
1141 +
1142 +struct htree_lock *ext4_htree_lock_alloc(void)
1143 +{
1144 +       return htree_lock_alloc(EXT4_LK_MAX,
1145 +                               sizeof(struct ext4_dir_lock_data));
1146 +}
1147 +EXPORT_SYMBOL(ext4_htree_lock_alloc);
1148 +
1149 +static htree_lock_mode_t ext4_htree_mode(unsigned flags)
1150 +{
1151 +       switch (flags) {
1152 +       default: /* 0 or unknown flags require EX lock */
1153 +               return HTREE_LOCK_EX;
1154 +       case EXT4_HLOCK_READDIR:
1155 +               return HTREE_LOCK_PR;
1156 +       case EXT4_HLOCK_LOOKUP:
1157 +               return HTREE_LOCK_CR;
1158 +       case EXT4_HLOCK_DEL:
1159 +       case EXT4_HLOCK_ADD:
1160 +               return HTREE_LOCK_CW;
1161 +       }
1162 +}
1163 +
1164 +/* return PR for read-only operations, otherwise return EX */
1165 +static inline htree_lock_mode_t ext4_htree_safe_mode(unsigned flags)
1166 +{
1167 +       int writer = (flags & EXT4_LB_DE) == EXT4_LB_DE;
1168 +
1169 +       /* 0 requires EX lock */
1170 +       return (flags == 0 || writer) ? HTREE_LOCK_EX : HTREE_LOCK_PR;
1171 +}
1172 +
1173 +static int ext4_htree_safe_locked(struct htree_lock *lck)
1174 +{
1175 +       int writer;
1176 +
1177 +       if (lck == NULL || lck->lk_mode == HTREE_LOCK_EX)
1178 +               return 1;
1179 +
1180 +       writer = (ext4_htree_lock_data(lck)->ld_flags & EXT4_LB_DE) ==
1181 +                EXT4_LB_DE;
1182 +       if (writer) /* all readers & writers are excluded? */
1183 +               return lck->lk_mode == HTREE_LOCK_EX;
1184 +
1185 +       /* all writers are excluded? */
1186 +       return lck->lk_mode == HTREE_LOCK_PR ||
1187 +              lck->lk_mode == HTREE_LOCK_PW ||
1188 +              lck->lk_mode == HTREE_LOCK_EX;
1189 +}
1190 +
1191 +/* relock htree_lock with EX mode if it's a change operation, otherwise
1192 + * relock it with PR mode. It's a noop if PDO is disabled. */
1193 +static void ext4_htree_safe_relock(struct htree_lock *lck)
1194 +{
1195 +       if (!ext4_htree_safe_locked(lck)) {
1196 +               unsigned flags = ext4_htree_lock_data(lck)->ld_flags;
1197 +
1198 +               htree_change_lock(lck, ext4_htree_safe_mode(flags));
1199 +       }
1200 +}
1201 +
1202 +void ext4_htree_lock(struct htree_lock *lck, struct htree_lock_head *lhead,
1203 +                    struct inode *dir, unsigned flags)
1204 +{
1205 +       htree_lock_mode_t mode = is_dx(dir) ? ext4_htree_mode(flags) :
1206 +                                             ext4_htree_safe_mode(flags);
1207 +
1208 +       ext4_htree_lock_data(lck)->ld_flags = flags;
1209 +       htree_lock(lck, lhead, mode);
1210 +       if (!is_dx(dir))
1211 +               ext4_htree_safe_relock(lck); /* make sure it's safe locked */
1212 +}
1213 +EXPORT_SYMBOL(ext4_htree_lock);
1214 +
1215 +static int ext4_htree_node_lock(struct htree_lock *lck, struct dx_entry *at,
1216 +                               unsigned lmask, int wait, void *ev)
1217 +{
1218 +       u32     key = (at == NULL) ? 0 : dx_get_block(at);
1219 +       u32     mode;
1220 +
1221 +       /* NOOP if htree is well protected or caller doesn't require the lock */
1222 +       if (ext4_htree_safe_locked(lck) ||
1223 +          !(ext4_htree_lock_data(lck)->ld_flags & lmask))
1224 +               return 1;
1225 +
1226 +       mode = (ext4_htree_lock_data(lck)->ld_flags & lmask) == lmask ?
1227 +               HTREE_LOCK_PW : HTREE_LOCK_PR;
1228 +       while (1) {
1229 +               if (htree_node_lock_try(lck, mode, key, ffz(~lmask), wait, ev))
1230 +                       return 1;
1231 +               if (!(lmask & EXT4_LB_SPIN)) /* not a spinlock */
1232 +                       return 0;
1233 +               cpu_relax(); /* spin until granted */
1234 +       }
1235 +}
1236 +
1237 +static int ext4_htree_node_locked(struct htree_lock *lck, unsigned lmask)
1238 +{
1239 +       return ext4_htree_safe_locked(lck) ||
1240 +              htree_node_is_granted(lck, ffz(~lmask));
1241 +}
1242 +
1243 +static void ext4_htree_node_unlock(struct htree_lock *lck,
1244 +                                  unsigned lmask, void *buf)
1245 +{
1246 +       /* NB: it's safe to call multiple times, even if it's not locked */
1247 +       if (!ext4_htree_safe_locked(lck) &&
1248 +            htree_node_is_granted(lck, ffz(~lmask)))
1249 +               htree_node_unlock(lck, ffz(~lmask), buf);
1250 +}
1251 +
1252 +#define ext4_htree_dx_lock(lck, key)           \
1253 +       ext4_htree_node_lock(lck, key, EXT4_LB_DX, 1, NULL)
1254 +#define ext4_htree_dx_lock_try(lck, key)       \
1255 +       ext4_htree_node_lock(lck, key, EXT4_LB_DX, 0, NULL)
1256 +#define ext4_htree_dx_unlock(lck)              \
1257 +       ext4_htree_node_unlock(lck, EXT4_LB_DX, NULL)
1258 +#define ext4_htree_dx_locked(lck)              \
1259 +       ext4_htree_node_locked(lck, EXT4_LB_DX)
1260 +
1261 +static void ext4_htree_dx_need_lock(struct htree_lock *lck)
1262 +{
1263 +       struct ext4_dir_lock_data *ld;
1264 +
1265 +       if (ext4_htree_safe_locked(lck))
1266 +               return;
1267 +
1268 +       ld = ext4_htree_lock_data(lck);
1269 +       switch (ld->ld_flags) {
1270 +       default:
1271 +               return;
1272 +       case EXT4_HLOCK_LOOKUP:
1273 +               ld->ld_flags = EXT4_HLOCK_LOOKUP_SAFE;
1274 +               return;
1275 +       case EXT4_HLOCK_DEL:
1276 +               ld->ld_flags = EXT4_HLOCK_DEL_SAFE;
1277 +               return;
1278 +       case EXT4_HLOCK_ADD:
1279 +               ld->ld_flags = EXT4_HLOCK_SPLIT;
1280 +               return;
1281 +       }
1282 +}
1283 +
1284 +#define ext4_htree_de_lock(lck, key)           \
1285 +       ext4_htree_node_lock(lck, key, EXT4_LB_DE, 1, NULL)
1286 +#define ext4_htree_de_unlock(lck)              \
1287 +       ext4_htree_node_unlock(lck, EXT4_LB_DE, NULL)
1288 +
1289 +#define ext4_htree_spin_lock(lck, key, event)  \
1290 +       ext4_htree_node_lock(lck, key, EXT4_LB_SPIN, 0, event)
1291 +#define ext4_htree_spin_unlock(lck)            \
1292 +       ext4_htree_node_unlock(lck, EXT4_LB_SPIN, NULL)
1293 +#define ext4_htree_spin_unlock_listen(lck, p)  \
1294 +       ext4_htree_node_unlock(lck, EXT4_LB_SPIN, p)
1295 +
1296 +static void ext4_htree_spin_stop_listen(struct htree_lock *lck)
1297 +{
1298 +       if (!ext4_htree_safe_locked(lck) &&
1299 +           htree_node_is_listening(lck, ffz(~EXT4_LB_SPIN)))
1300 +               htree_node_stop_listen(lck, ffz(~EXT4_LB_SPIN));
1301 +}
1302 +
1303 +enum {
1304 +       DX_HASH_COL_IGNORE,     /* ignore collision while probing frames */
1305 +       DX_HASH_COL_YES,        /* there is collision and it does matter */
1306 +       DX_HASH_COL_NO,         /* there is no collision */
1307 +};
1308 +
1309 +static int dx_probe_hash_collision(struct htree_lock *lck,
1310 +                                  struct dx_entry *entries,
1311 +                                  struct dx_entry *at, u32 hash)
1312 +{
1313 +       if (!(lck && ext4_htree_lock_data(lck)->ld_flags & EXT4_LB_EXACT)) {
1314 +               return DX_HASH_COL_IGNORE; /* don't care about collision */
1315 +
1316 +       } else if (at == entries + dx_get_count(entries) - 1) {
1317 +               return DX_HASH_COL_IGNORE; /* not in any leaf of this DX */
1318 +
1319 +       } else { /* hash collision? */
1320 +               return ((dx_get_hash(at + 1) & ~1) == hash) ?
1321 +                       DX_HASH_COL_YES : DX_HASH_COL_NO;
1322 +       }
1323 +}
1324 +
1325  /*
1326   * Probe for a directory leaf block to search.
1327   *
1328 @@ -741,10 +972,11 @@ struct stats dx_show_entries(struct dx_h
1329   */
1330  static struct dx_frame *
1331  dx_probe(struct ext4_filename *fname, struct inode *dir,
1332 -        struct dx_hash_info *hinfo, struct dx_frame *frame_in)
1333 +        struct dx_hash_info *hinfo, struct dx_frame *frame_in,
1334 +        struct htree_lock *lck)
1335  {
1336         unsigned count, indirect;
1337 -       struct dx_entry *at, *entries, *p, *q, *m;
1338 +       struct dx_entry *at, *entries, *p, *q, *m, *dx = NULL;
1339         struct dx_root_info *info;
1340         struct dx_frame *frame = frame_in;
1341         struct dx_frame *ret_err = ERR_PTR(ERR_BAD_DX_DIR);
1342 @@ -806,8 +1038,15 @@ dx_probe(struct ext4_filename *fname, st
1343  
1344         dxtrace(printk("Look up %x", hash));
1345         while (1) {
1346 +               if (indirect == 0) { /* the last index level */
1347 +                       /* NB: ext4_htree_dx_lock() could be a noop if
1348 +                        * the DX-lock flag is not set for the current operation */
1349 +                       ext4_htree_dx_lock(lck, dx);
1350 +                       ext4_htree_spin_lock(lck, dx, NULL);
1351 +               }
1352                 count = dx_get_count(entries);
1353 -               if (!count || count > dx_get_limit(entries)) {
1354 +               if (count == 0 || count > dx_get_limit(entries)) {
1355 +                       ext4_htree_spin_unlock(lck); /* release spin */
1356                         ext4_warning_inode(dir,
1357                                            "dx entry: count %u beyond limit %u",
1358                                            count, dx_get_limit(entries));
1359 @@ -846,8 +1085,70 @@ dx_probe(struct ext4_filename *fname, st
1360                                dx_get_block(at)));
1361                 frame->entries = entries;
1362                 frame->at = at;
1363 -               if (!indirect--)
1364 +
1365 +               if (indirect == 0) { /* the last index level */
1366 +                       struct ext4_dir_lock_data *ld;
1367 +                       u64 myblock;
1368 +
1369 +                       /* By default we only lock the DE-block. However, we
1370 +                        * also lock the last-level DX-block if:
1371 +                        * a) there is a hash collision:
1372 +                        *    we set the DX-lock flag (a few lines below)
1373 +                        *    and retry to take the DX-block lock;
1374 +                        *    see dx_probe_hash_collision() for details
1375 +                        * b) this is a retry from splitting:
1376 +                        *    we need to lock the last-level DX-block so nobody
1377 +                        *    else can split any leaf block under the same
1378 +                        *    DX-block; see ext4_dx_add_entry() for details
1379 +                        */
1380 +                       if (ext4_htree_dx_locked(lck)) {
1381 +                               /* DX-block is locked, just lock DE-block
1382 +                                * and return */
1383 +                               ext4_htree_spin_unlock(lck);
1384 +                               if (!ext4_htree_safe_locked(lck))
1385 +                                       ext4_htree_de_lock(lck, frame->at);
1386 +                               return frame;
1387 +                       }
1388 +                       /* this is a pdirop and the DX lock is not held */
1389 +                       if (dx_probe_hash_collision(lck, entries, at, hash) ==
1390 +                           DX_HASH_COL_YES) {
1391 +                               /* found a hash collision, set the DX-lock flag
1392 +                                * and retry to obtain the DX-lock */
1393 +                               ext4_htree_spin_unlock(lck);
1394 +                               ext4_htree_dx_need_lock(lck);
1395 +                               continue;
1396 +                       }
1397 +                       ld = ext4_htree_lock_data(lck);
1398 +                       /* because I don't lock DX, @at can't be trusted
1399 +                        * after I release the spinlock, so I have to save it */
1400 +                       ld->ld_at = at;
1401 +                       ld->ld_at_entry = *at;
1402 +                       ld->ld_count = dx_get_count(entries);
1403 +
1404 +                       frame->at = &ld->ld_at_entry;
1405 +                       myblock = dx_get_block(at);
1406 +
1407 +                       /* NB: ordering of locking matters here */
1408 +                       ext4_htree_spin_unlock_listen(lck, &myblock);
1409 +                       /* another thread can split this DE-block because:
1410 +                        * a) I don't hold the lock for the DE-block yet
1411 +                        * b) I released the spinlock on the DX-block
1412 +                        * if that happened I can detect it by listening
1413 +                        * for the split event on this DE-block */
1414 +                       ext4_htree_de_lock(lck, frame->at);
1415 +                       ext4_htree_spin_stop_listen(lck);
1416 +
1417 +                       if (myblock == EXT4_HTREE_NODE_CHANGED) {
1418 +                               /* someone split this DE-block before
1419 +                                * I locked it; I need to retry and lock
1420 +                                * the valid DE-block */
1421 +                               ext4_htree_de_unlock(lck);
1422 +                               continue;
1423 +                       }
1424                         return frame;
1425 +               }
1426 +               dx = at;
1427 +               indirect--;
1428                 frame++;
1429                 frame->bh = ext4_read_dirblock(dir, dx_get_block(at), INDEX);
1430                 if (IS_ERR(frame->bh)) {
1431 @@ -913,7 +1214,7 @@ static void dx_release(struct dx_frame *
1432  static int ext4_htree_next_block(struct inode *dir, __u32 hash,
1433                                  struct dx_frame *frame,
1434                                  struct dx_frame *frames,
1435 -                                __u32 *start_hash)
1436 +                                __u32 *start_hash, struct htree_lock *lck)
1437  {
1438         struct dx_frame *p;
1439         struct buffer_head *bh;
1440 @@ -928,12 +1229,22 @@ static int ext4_htree_next_block(struct
1441          * this loop, num_frames indicates the number of interior
1442          * nodes need to be read.
1443          */
1444 +       ext4_htree_de_unlock(lck);
1445         while (1) {
1446 -               if (++(p->at) < p->entries + dx_get_count(p->entries))
1447 -                       break;
1448 +               if (num_frames > 0 || ext4_htree_dx_locked(lck)) {
1449 +                       /* num_frames > 0:
1450 +                        *   p points at a DX (index) block
1451 +                        * ext4_htree_dx_locked:
1452 +                        *   frame->at is a reliable pointer returned by dx_probe,
1453 +                        *   otherwise dx_probe already knew there is no collision */
1454 +                       if (++(p->at) < p->entries + dx_get_count(p->entries))
1455 +                               break;
1456 +               }
1457                 if (p == frames)
1458                         return 0;
1459                 num_frames++;
1460 +               if (num_frames == 1)
1461 +                       ext4_htree_dx_unlock(lck);
1462                 p--;
1463         }
1464  
1465 @@ -956,6 +1267,13 @@ static int ext4_htree_next_block(struct
1466          * block so no check is necessary
1467          */
1468         while (num_frames--) {
1469 +               if (num_frames == 0) {
1470 +                       /* this is not always necessary, we just don't want to
1471 +                        * detect the hash collision again */
1472 +                       ext4_htree_dx_need_lock(lck);
1473 +                       ext4_htree_dx_lock(lck, p->at);
1474 +               }
1475 +
1476                 bh = ext4_read_dirblock(dir, dx_get_block(p->at), INDEX);
1477                 if (IS_ERR(bh))
1478                         return PTR_ERR(bh);
1479 @@ -964,6 +1282,7 @@ static int ext4_htree_next_block(struct
1480                 p->bh = bh;
1481                 p->at = p->entries = ((struct dx_node *) bh->b_data)->entries;
1482         }
1483 +       ext4_htree_de_lock(lck, p->at);
1484         return 1;
1485  }
1486  
1487 @@ -1111,10 +1430,10 @@ int ext4_htree_fill_tree(struct file *di
1488         }
1489         hinfo.hash = start_hash;
1490         hinfo.minor_hash = 0;
1491 -       frame = dx_probe(NULL, dir, &hinfo, frames);
1492 +       /* assume the directory is PR-locked */
1493 +       frame = dx_probe(NULL, dir, &hinfo, frames, NULL);
1494         if (IS_ERR(frame))
1495                 return PTR_ERR(frame);
1496 -
1497         /* Add '.' and '..' from the htree header */
1498         if (!start_hash && !start_minor_hash) {
1499                 de = (struct ext4_dir_entry_2 *) frames[0].bh->b_data;
1500 @@ -1154,7 +1473,7 @@ int ext4_htree_fill_tree(struct file *di
1501                 count += ret;
1502                 hashval = ~0;
1503                 ret = ext4_htree_next_block(dir, HASH_NB_ALWAYS,
1504 -                                           frame, frames, &hashval);
1505 +                                           frame, frames, &hashval, NULL);
1506                 *next_hash = hashval;
1507                 if (ret < 0) {
1508                         err = ret;
1509 @@ -1346,10 +1665,10 @@ static int is_dx_internal_node(struct in
1510   * The returned buffer_head has ->b_count elevated.  The caller is expected
1511   * to brelse() it when appropriate.
1512   */
1513 -static struct buffer_head * ext4_find_entry (struct inode *dir,
1514 +struct buffer_head *__ext4_find_entry(struct inode *dir,
1515                                         const struct qstr *d_name,
1516                                         struct ext4_dir_entry_2 **res_dir,
1517 -                                       int *inlined)
1518 +                                       int *inlined, struct htree_lock *lck)
1519  {
1520         struct super_block *sb;
1521         struct buffer_head *bh_use[NAMEI_RA_SIZE];
1522 @@ -1398,7 +1717,7 @@ static struct buffer_head * ext4_find_en
1523                 goto restart;
1524         }
1525         if (is_dx(dir)) {
1526 -               ret = ext4_dx_find_entry(dir, &fname, res_dir);
1527 +               ret = ext4_dx_find_entry(dir, &fname, res_dir, lck);
1528                 /*
1529                  * On success, or if the error was file not found,
1530                  * return.  Otherwise, fall back to doing a search the
1531 @@ -1408,6 +1727,7 @@ static struct buffer_head * ext4_find_en
1532                         goto cleanup_and_exit;
1533                 dxtrace(printk(KERN_DEBUG "ext4_find_entry: dx failed, "
1534                                "falling back\n"));
1535 +               ext4_htree_safe_relock(lck);
1536         }
1537         nblocks = dir->i_size >> EXT4_BLOCK_SIZE_BITS(sb);
1538         if (!nblocks) {
1539 @@ -1495,10 +1815,12 @@ cleanup_and_exit:
1540         ext4_fname_free_filename(&fname);
1541         return ret;
1542  }
1543 +EXPORT_SYMBOL(__ext4_find_entry);
1544  
1545  static struct buffer_head * ext4_dx_find_entry(struct inode *dir,
1546                         struct ext4_filename *fname,
1547 -                       struct ext4_dir_entry_2 **res_dir)
1548 +                       struct ext4_dir_entry_2 **res_dir,
1549 +                       struct htree_lock *lck)
1550  {
1551         struct super_block * sb = dir->i_sb;
1552         struct dx_frame frames[EXT4_HTREE_LEVEL], *frame;
1553 @@ -1509,7 +1831,7 @@ static struct buffer_head * ext4_dx_find
1554  #ifdef CONFIG_EXT4_FS_ENCRYPTION
1555         *res_dir = NULL;
1556  #endif
1557 -       frame = dx_probe(fname, dir, NULL, frames);
1558 +       frame = dx_probe(fname, dir, NULL, frames, lck);
1559         if (IS_ERR(frame))
1560                 return (struct buffer_head *) frame;
1561         do {
1562 @@ -1531,7 +1853,7 @@ static struct buffer_head * ext4_dx_find
1563  
1564                 /* Check to see if we should continue to search */
1565                 retval = ext4_htree_next_block(dir, fname->hinfo.hash, frame,
1566 -                                              frames, NULL);
1567 +                                              frames, NULL, lck);
1568                 if (retval < 0) {
1569                         ext4_warning_inode(dir,
1570                                 "error %d reading directory index block",
1571 @@ -1706,8 +2028,9 @@ static struct ext4_dir_entry_2* dx_pack_
1572   * Returns pointer to de in block into which the new entry will be inserted.
1573   */
1574  static struct ext4_dir_entry_2 *do_split(handle_t *handle, struct inode *dir,
1575 -                       struct buffer_head **bh,struct dx_frame *frame,
1576 -                       struct dx_hash_info *hinfo)
1577 +                       struct buffer_head **bh, struct dx_frame *frames,
1578 +                       struct dx_frame *frame, struct dx_hash_info *hinfo,
1579 +                       struct htree_lock *lck)
1580  {
1581         unsigned blocksize = dir->i_sb->s_blocksize;
1582         unsigned count, continued;
1583 @@ -1769,8 +2092,14 @@ static struct ext4_dir_entry_2 *do_split
1584                                         hash2, split, count-split));
1585  
1586         /* Fancy dance to stay within two buffers */
1587 -       de2 = dx_move_dirents(data1, data2, map + split, count - split,
1588 -                             blocksize);
1589 +       if (hinfo->hash < hash2) {
1590 +               de2 = dx_move_dirents(data1, data2, map + split,
1591 +                                     count - split, blocksize);
1592 +       } else {
1593 +               /* make sure we will add the entry to the same block
1594 +                * that we have already locked */
1595 +               de2 = dx_move_dirents(data1, data2, map, split, blocksize);
1596 +       }
1597         de = dx_pack_dirents(data1, blocksize);
1598         de->rec_len = ext4_rec_len_to_disk(data1 + (blocksize - csum_size) -
1599                                            (char *) de,
1600 @@ -1791,12 +2120,21 @@ static struct ext4_dir_entry_2 *do_split
1601         dxtrace(dx_show_leaf(dir, hinfo, (struct ext4_dir_entry_2 *) data2,
1602                         blocksize, 1));
1603  
1604 -       /* Which block gets the new entry? */
1605 -       if (hinfo->hash >= hash2) {
1606 -               swap(*bh, bh2);
1607 -               de = de2;
1608 +       ext4_htree_spin_lock(lck, frame > frames ? (frame - 1)->at : NULL,
1609 +                            frame->at); /* notify that this block is being split */
1610 +       if (hinfo->hash < hash2) {
1611 +               dx_insert_block(frame, hash2 + continued, newblock);
1612 +
1613 +       } else {
1614 +               /* switch block number */
1615 +               dx_insert_block(frame, hash2 + continued,
1616 +                               dx_get_block(frame->at));
1617 +               dx_set_block(frame->at, newblock);
1618 +               (frame->at)++;
1619         }
1620 -       dx_insert_block(frame, hash2 + continued, newblock);
1621 +       ext4_htree_spin_unlock(lck);
1622 +       ext4_htree_dx_unlock(lck);
1623 +
1624         err = ext4_handle_dirty_dirent_node(handle, dir, bh2);
1625         if (err)
1626                 goto journal_error;
1627 @@ -2070,7 +2408,7 @@ static int make_indexed_dir(handle_t *ha
1628         if (retval)
1629                 goto out_frames;        
1630  
1631 -       de = do_split(handle,dir, &bh2, frame, &fname->hinfo);
1632 +       de = do_split(handle, dir, &bh2, frames, frame, &fname->hinfo, NULL);
1633         if (IS_ERR(de)) {
1634                 retval = PTR_ERR(de);
1635                 goto out_frames;
1636 @@ -2180,8 +2518,8 @@ out:
1637   * may not sleep between calling this and putting something into
1638   * the entry, as someone else might have used it while you slept.
1639   */
1640 -static int ext4_add_entry(handle_t *handle, struct dentry *dentry,
1641 -                         struct inode *inode)
1642 +int __ext4_add_entry(handle_t *handle, struct dentry *dentry,
1643 +                     struct inode *inode, struct htree_lock *lck)
1644  {
1645         struct inode *dir = d_inode(dentry->d_parent);
1646         struct buffer_head *bh = NULL;
1647 @@ -2222,9 +2560,10 @@ static int ext4_add_entry(handle_t *hand
1648                 if (dentry->d_name.len == 2 &&
1649                     memcmp(dentry->d_name.name, "..", 2) == 0)
1650                         return ext4_update_dotdot(handle, dentry, inode);
1651 -               retval = ext4_dx_add_entry(handle, &fname, dir, inode);
1652 +               retval = ext4_dx_add_entry(handle, &fname, dir, inode, lck);
1653                 if (!retval || (retval != ERR_BAD_DX_DIR))
1654                         goto out;
1655 +               ext4_htree_safe_relock(lck);
1656                 ext4_clear_inode_flag(dir, EXT4_INODE_INDEX);
1657                 dx_fallback++;
1658                 ext4_mark_inode_dirty(handle, dir);
1659 @@ -2274,12 +2613,14 @@ out:
1660                 ext4_set_inode_state(inode, EXT4_STATE_NEWENTRY);
1661         return retval;
1662  }
1663 +EXPORT_SYMBOL(__ext4_add_entry);
1664  
1665  /*
1666   * Returns 0 for success, or a negative error value
1667   */
1668  static int ext4_dx_add_entry(handle_t *handle, struct ext4_filename *fname,
1669 -                            struct inode *dir, struct inode *inode)
1670 +                            struct inode *dir, struct inode *inode,
1671 +                            struct htree_lock *lck)
1672  {
1673         struct dx_frame frames[EXT4_HTREE_LEVEL], *frame;
1674         struct dx_entry *entries, *at;
1675 @@ -2291,7 +2632,7 @@ static int ext4_dx_add_entry(handle_t *h
1676  
1677  again:
1678         restart = 0;
1679 -       frame = dx_probe(fname, dir, NULL, frames);
1680 +       frame = dx_probe(fname, dir, NULL, frames, lck);
1681         if (IS_ERR(frame))
1682                 return PTR_ERR(frame);
1683         entries = frame->entries;
1684 @@ -2326,6 +2667,11 @@ again:
1685                 struct dx_node *node2;
1686                 struct buffer_head *bh2;
1687  
1688 +               if (!ext4_htree_safe_locked(lck)) { /* retry with EX lock */
1689 +                       ext4_htree_safe_relock(lck);
1690 +                       restart = 1;
1691 +                       goto cleanup;
1692 +               }
1693                 while (frame > frames) {
1694                         if (dx_get_count((frame - 1)->entries) <
1695                             dx_get_limit((frame - 1)->entries)) {
1696 @@ -2428,8 +2774,32 @@ again:
1697                         restart = 1;
1698                         goto journal_error;
1699                 }
1700 +       } else if (!ext4_htree_dx_locked(lck)) {
1701 +               struct ext4_dir_lock_data *ld = ext4_htree_lock_data(lck);
1702 +
1703 +               /* not well protected, so take the DX lock */
1704 +               ext4_htree_dx_need_lock(lck);
1705 +               at = frame > frames ? (frame - 1)->at : NULL;
1706 +
1707 +               /* NB: no risk of deadlock because it's just a try.
1708 +                *
1709 +                * NB: we check ld_count twice, the first time before
1710 +                * taking the DX lock, the second time after holding it.
1711 +                *
1712 +                * NB: we never free directory blocks so far, which means
1713 +                * the value returned by dx_get_count() should equal
1714 +                * ld->ld_count if nobody has split any DE-block under @at,
1715 +                * and ld->ld_at still points to a valid dx_entry. */
1716 +               if ((ld->ld_count != dx_get_count(entries)) ||
1717 +                   !ext4_htree_dx_lock_try(lck, at) ||
1718 +                   (ld->ld_count != dx_get_count(entries))) {
1719 +                       restart = 1;
1720 +                       goto cleanup;
1721 +               }
1722 +               /* OK, I've got DX lock and nothing changed */
1723 +               frame->at = ld->ld_at;
1724         }
1725 -       de = do_split(handle, dir, &bh, frame, &fname->hinfo);
1726 +       de = do_split(handle, dir, &bh, frames, frame, &fname->hinfo, lck);
1727         if (IS_ERR(de)) {
1728                 err = PTR_ERR(de);
1729                 goto cleanup;
1730 @@ -2440,6 +2810,8 @@ again:
1731  journal_error:
1732         ext4_std_error(dir->i_sb, err); /* this is a no-op if err == 0 */
1733  cleanup:
1734 +       ext4_htree_dx_unlock(lck);
1735 +       ext4_htree_de_unlock(lck);
1736         brelse(bh);
1737         dx_release(frames);
1738         /* @restart is true means htree-path has been changed, we need to
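The two EXPORT_SYMBOL()ed entry points above, __ext4_find_entry() and __ext4_add_entry(), take an optional struct htree_lock so that an external caller can drive directory operations in parallel. As a rough, hypothetical sketch of the expected calling convention for the add path (the "my_" wrapper name is illustrative only; the real caller sets up the lock head and per-operation flags through its own wrappers, and the htree_lock primitives used here are declared in the include/linux/htree_lock.h hunk below):

	/* Hypothetical caller sketch, assuming ext4 internal headers are
	 * available: one htree_lock_head per directory, one htree_lock
	 * handle per thread/operation. */
	static int my_pdo_add_entry(handle_t *handle, struct dentry *dentry,
				    struct inode *inode,
				    struct htree_lock_head *lhead)
	{
		struct htree_lock *lck;
		int rc;

		/* per-thread lock handle: EXT4_LK_MAX key levels, with room
		 * for the per-operation ext4_dir_lock_data as private data */
		lck = htree_lock_alloc(EXT4_LK_MAX,
				       sizeof(struct ext4_dir_lock_data));
		if (lck == NULL)
			return -ENOMEM;

		/* tell dx_probe()/do_split() which blocks to lock for an add */
		ext4_htree_lock_data(lck)->ld_flags = EXT4_HLOCK_ADD;

		/* shared (CW) lock on the whole directory; the DX/DE/spin
		 * node locks are taken internally by __ext4_add_entry() */
		htree_lock(lck, lhead, HTREE_LOCK_CW);
		rc = __ext4_add_entry(handle, dentry, inode, lck);
		htree_unlock(lck);

		htree_lock_free(lck);
		return rc;
	}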
1739 Index: linux-4.15.0/fs/ext4/super.c
1740 ===================================================================
1741 --- linux-4.15.0.orig/fs/ext4/super.c
1742 +++ linux-4.15.0/fs/ext4/super.c
1743 @@ -975,6 +975,7 @@ static struct inode *ext4_alloc_inode(st
1744  
1745         ei->vfs_inode.i_version = 1;
1746         spin_lock_init(&ei->i_raw_lock);
1747 +       sema_init(&ei->i_append_sem, 1);
1748         INIT_LIST_HEAD(&ei->i_prealloc_list);
1749         spin_lock_init(&ei->i_prealloc_lock);
1750         ext4_es_init_tree(&ei->i_es_tree);
1751 Index: linux-4.15.0/include/linux/htree_lock.h
1752 ===================================================================
1753 --- /dev/null
1754 +++ linux-4.15.0/include/linux/htree_lock.h
1755 @@ -0,0 +1,187 @@
1756 +/*
1757 + * include/linux/htree_lock.h
1758 + *
1759 + * Copyright (c) 2011, 2012, Intel Corporation.
1760 + *
1761 + * Author: Liang Zhen <liang@whamcloud.com>
1762 + */
1763 +
1764 +/*
1765 + * htree lock
1766 + *
1767 + * htree_lock is an advanced lock; it supports five lock modes (a concept
1768 + * taken from the DLM) and it is a sleeping lock.
1769 + *
1770 + * The most common use case is:
1771 + * - create a htree_lock_head for the data
1772 + * - each thread (contender) creates its own htree_lock
1773 + * - a contender calls htree_lock(lock_node, mode) to protect the data and
1774 + *   htree_unlock() to release the lock
1775 + *
1776 + * Also, there is a more complex, advanced use case: the user can take a
1777 + * PW/PR lock on a particular key; it is mostly used while the user is
1778 + * holding a shared lock on the htree (CW, CR)
1779 + *
1780 + * htree_lock(lock_node, HTREE_LOCK_CR); lock the htree with CR
1781 + * htree_node_lock(lock_node, HTREE_LOCK_PR, key...); lock @key with PR
1782 + * ...
1783 + * htree_node_unlock(lock_node); unlock the key
1784 + *
1785 + * Another tip: we can have N levels of such keys; all we need to do is
1786 + * specify N levels when creating the htree_lock_head, and then we can
1787 + * lock/unlock a specific level with:
1788 + * htree_node_lock(lock_node, mode1, key1, level1...);
1789 + * do something;
1790 + * htree_node_lock(lock_node, mode1, key2, level2...);
1791 + * do something;
1792 + * htree_node_unlock(lock_node, level2);
1793 + * htree_node_unlock(lock_node, level1);
1794 + *
1795 + * NB: with multiple levels, be careful about the locking order to avoid deadlocks
1796 + */
1797 +
1798 +#ifndef _LINUX_HTREE_LOCK_H
1799 +#define _LINUX_HTREE_LOCK_H
1800 +
1801 +#include <linux/list.h>
1802 +#include <linux/spinlock.h>
1803 +#include <linux/sched.h>
1804 +
1805 +/*
1806 + * Lock Modes
1807 + * more details can be found here:
1808 + * http://en.wikipedia.org/wiki/Distributed_lock_manager
1809 + */
1810 +typedef enum {
1811 +       HTREE_LOCK_EX   = 0, /* exclusive lock: incompatible with all others */
1812 +       HTREE_LOCK_PW,       /* protected write: allows only CR users */
1813 +       HTREE_LOCK_PR,       /* protected read: allows PR, CR users */
1814 +       HTREE_LOCK_CW,       /* concurrent write: allows CR, CW users */
1815 +       HTREE_LOCK_CR,       /* concurrent read: allows all but EX users */
1816 +       HTREE_LOCK_MAX,      /* number of lock modes */
1817 +} htree_lock_mode_t;
1818 +
1819 +#define HTREE_LOCK_NL          HTREE_LOCK_MAX
1820 +#define HTREE_LOCK_INVAL       0xdead10c
1821 +
1822 +enum {
1823 +       HTREE_HBITS_MIN         = 2,
1824 +       HTREE_HBITS_DEF         = 14,
1825 +       HTREE_HBITS_MAX         = 32,
1826 +};
1827 +
1828 +enum {
1829 +       HTREE_EVENT_DISABLE     = (0),
1830 +       HTREE_EVENT_RD          = (1 << HTREE_LOCK_PR),
1831 +       HTREE_EVENT_WR          = (1 << HTREE_LOCK_PW),
1832 +       HTREE_EVENT_RDWR        = (HTREE_EVENT_RD | HTREE_EVENT_WR),
1833 +};
1834 +
1835 +struct htree_lock;
1836 +
1837 +typedef void (*htree_event_cb_t)(void *target, void *event);
1838 +
1839 +struct htree_lock_child {
1840 +       struct list_head        lc_list;        /* granted list */
1841 +       htree_event_cb_t        lc_callback;    /* event callback */
1842 +       unsigned                lc_events;      /* event types */
1843 +};
1844 +
1845 +struct htree_lock_head {
1846 +       unsigned long           lh_lock;        /* bits lock */
1847 +       /* blocked lock list (htree_lock) */
1848 +       struct list_head        lh_blocked_list;
1849 +       /* # key levels */
1850 +       u16                     lh_depth;
1851 +       /* hash bits for key and limit number of locks */
1852 +       u16                     lh_hbits;
1853 +       /* counters for blocked locks */
1854 +       u16                     lh_nblocked[HTREE_LOCK_MAX];
1855 +       /* counters for granted locks */
1856 +       u16                     lh_ngranted[HTREE_LOCK_MAX];
1857 +       /* private data */
1858 +       void                    *lh_private;
1859 +       /* array of children locks */
1860 +       struct htree_lock_child lh_children[0];
1861 +};
1862 +
1863 +/* htree_lock_node_t is child-lock for a specific key (ln_value) */
1864 +struct htree_lock_node {
1865 +       htree_lock_mode_t       ln_mode;
1866 +       /* major hash key */
1867 +       u16                     ln_major_key;
1868 +       /* minor hash key */
1869 +       u16                     ln_minor_key;
1870 +       struct list_head        ln_major_list;
1871 +       struct list_head        ln_minor_list;
1872 +       /* alive list, all locks (granted, blocked, listening) are on it */
1873 +       struct list_head        ln_alive_list;
1874 +       /* blocked list */
1875 +       struct list_head        ln_blocked_list;
1876 +       /* granted list */
1877 +       struct list_head        ln_granted_list;
1878 +       void                    *ln_ev_target;
1879 +};
1880 +
1881 +struct htree_lock {
1882 +       struct task_struct      *lk_task;
1883 +       struct htree_lock_head  *lk_head;
1884 +       void                    *lk_private;
1885 +       unsigned                lk_depth;
1886 +       htree_lock_mode_t       lk_mode;
1887 +       struct list_head        lk_blocked_list;
1888 +       struct htree_lock_node  lk_nodes[0];
1889 +};
1890 +
1891 +/* create a lock head, which stands for a resource */
1892 +struct htree_lock_head *htree_lock_head_alloc(unsigned depth,
1893 +                                             unsigned hbits, unsigned priv);
1894 +/* free a lock head */
1895 +void htree_lock_head_free(struct htree_lock_head *lhead);
1896 +/* register event callback for child lock at level @depth */
1897 +void htree_lock_event_attach(struct htree_lock_head *lhead, unsigned depth,
1898 +                            unsigned events, htree_event_cb_t callback);
1899 +/* create a lock handle, which stands for a thread */
1900 +struct htree_lock *htree_lock_alloc(unsigned depth, unsigned pbytes);
1901 +/* free a lock handle */
1902 +void htree_lock_free(struct htree_lock *lck);
1903 +/* lock the htree; when @wait is false, 0 is returned if the lock can't
1904 + * be granted immediately */
1905 +int htree_lock_try(struct htree_lock *lck, struct htree_lock_head *lhead,
1906 +                  htree_lock_mode_t mode, int wait);
1907 +/* unlock htree */
1908 +void htree_unlock(struct htree_lock *lck);
1909 +/* unlock and relock htree with @new_mode */
1910 +int htree_change_lock_try(struct htree_lock *lck,
1911 +                         htree_lock_mode_t new_mode, int wait);
1912 +void htree_change_mode(struct htree_lock *lck, htree_lock_mode_t mode);
1913 +/* acquire a child lock (key) of the htree at level @dep; @event will be sent
1914 + * to all listeners on this @key when the lock is granted */
1915 +int htree_node_lock_try(struct htree_lock *lck, htree_lock_mode_t mode,
1916 +                       u32 key, unsigned dep, int wait, void *event);
1917 +/* release the child lock at level @dep; if @event isn't NULL, this lock will
1918 + * keep listening on its key, and event_cb will be called against @lck when
1919 + * any other lock at level @dep with the same key is granted */
1920 +void htree_node_unlock(struct htree_lock *lck, unsigned dep, void *event);
1921 +/* stop listening on child lock at level @dep */
1922 +void htree_node_stop_listen(struct htree_lock *lck, unsigned dep);
1923 +/* for debug */
1924 +void htree_lock_stat_print(int depth);
1925 +void htree_lock_stat_reset(void);
1926 +
1927 +#define htree_lock(lck, lh, mode)      htree_lock_try(lck, lh, mode, 1)
1928 +#define htree_change_lock(lck, mode)   htree_change_lock_try(lck, mode, 1)
1929 +
1930 +#define htree_lock_mode(lck)           ((lck)->lk_mode)
1931 +
1932 +#define htree_node_lock(lck, mode, key, dep)   \
1933 +       htree_node_lock_try(lck, mode, key, dep, 1, NULL)
1934 +/* this is only safe in thread context of lock owner */
1935 +#define htree_node_is_granted(lck, dep)                \
1936 +       ((lck)->lk_nodes[dep].ln_mode != HTREE_LOCK_INVAL && \
1937 +        (lck)->lk_nodes[dep].ln_mode != HTREE_LOCK_NL)
1938 +/* this is only safe in thread context of lock owner */
1939 +#define htree_node_is_listening(lck, dep)      \
1940 +       ((lck)->lk_nodes[dep].ln_mode == HTREE_LOCK_NL)
1941 +
1942 +#endif
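Tying the "most common use case" from the header comment above to the declared signatures, a minimal usage sketch might look like the following. The function name is illustrative; in practice the lock head would be created once per shared resource rather than per call, and allocation failures would need handling:

	#include <linux/htree_lock.h>

	/* Hypothetical sketch: protect one bucket of a shared structure. */
	static void my_update_bucket(u32 key)
	{
		/* one head per shared resource: one key level, default hash bits */
		struct htree_lock_head *lhead =
			htree_lock_head_alloc(1, HTREE_HBITS_DEF, 0);
		/* each contending thread allocates its own handle, same depth */
		struct htree_lock *lck = htree_lock_alloc(1, 0);

		htree_lock(lck, lhead, HTREE_LOCK_CW);		/* shared lock on the tree */
		htree_node_lock(lck, HTREE_LOCK_PW, key, 0);	/* writer lock on @key, level 0 */

		/* ... modify the data indexed by @key ... */

		htree_node_unlock(lck, 0, NULL);	/* release the key, no event */
		htree_unlock(lck);			/* release the tree */

		htree_lock_free(lck);
		htree_lock_head_free(lhead);
	}

The CW tree lock lets other threads work on different keys concurrently, while the PW node lock serializes writers of the same key, which is the pattern the ext4 pdirop code applies to directory index and entry blocks.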