LU-12904 ldiskfs: Add ldiskfs support for linux 5.4
fs/lustre-release.git: ldiskfs/kernel_patches/patches/linux-5.4/ext4-pdirop.patch
1 From 1a0f7f0b9c13ef0aa86e125f350b6733bff8db3c Mon Sep 17 00:00:00 2001
2 From: Shaun Tancheff <stancheff@cray.com>
3 Date: Wed, 15 Jan 2020 07:35:13 -0600
4 Subject: [PATCH] Single directory performance is critical for HPC workloads.
5  In a typical use case an application creates a separate output file for each
6  node and task in a job. As nodes and tasks increase, hundreds of thousands of
7  files may be created in a single directory within a short window of time.
8  Today, both filename lookup and file system modifying operations (such as
9  create and unlink) are protected with a single lock for an entire ldiskfs
10  directory. The PDO project removes this bottleneck by introducing a parallel
11  locking mechanism for entire ldiskfs directories. This work enables
12  multiple application threads to look up, create and unlink entries in the
13  same directory in parallel.
14
15 This patch contains:
16  - pdirops support for ldiskfs
17  - integration with osd-ldiskfs (a usage sketch of the new locking API follows)
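
For orientation, a minimal sketch of how a caller such as osd-ldiskfs is
expected to drive the locking API added below. The hbits value, the surrounding
variables (dir, dentry) and the error handling are illustrative assumptions,
not part of this patch:

    /* one lock head per directory, shared by all threads (illustrative) */
    struct htree_lock_head *lhead = ext4_htree_lock_head_alloc(16 /* hbits */);
    /* one lock handle per thread */
    struct htree_lock *lck = ext4_htree_lock_alloc();
    struct ext4_dir_entry_2 *de;
    struct buffer_head *bh;

    if (lhead != NULL && lck != NULL) {
            /* lookup mode: shared access, writers to the same blocks excluded */
            ext4_htree_lock(lck, lhead, dir, EXT4_HLOCK_LOOKUP);
            bh = ext4_find_entry_locked(dir, &dentry->d_name, &de, NULL, lck);
            if (!IS_ERR_OR_NULL(bh))
                    brelse(bh);
            ext4_htree_unlock(lck);
    }
    if (lck != NULL)
            ext4_htree_lock_free(lck);
    if (lhead != NULL)
            ext4_htree_lock_head_free(lhead);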
18 ---
19  fs/ext4/Makefile           |   1 +
20  fs/ext4/ext4.h             |  78 ++++
21  fs/ext4/htree_lock.c       | 891 +++++++++++++++++++++++++++++++++++++
22  fs/ext4/namei.c            | 454 +++++++++++++++++--
23  fs/ext4/super.c            |   1 +
24  include/linux/htree_lock.h | 187 ++++++++
25  6 files changed, 1572 insertions(+), 40 deletions(-)
26  create mode 100644 fs/ext4/htree_lock.c
27  create mode 100644 include/linux/htree_lock.h
28
29 diff --git a/fs/ext4/Makefile b/fs/ext4/Makefile
30 index b17ddc2..45a68cb 100644
31 --- a/fs/ext4/Makefile
32 +++ b/fs/ext4/Makefile
33 @@ -7,6 +7,7 @@ obj-$(CONFIG_EXT4_FS) += ext4.o
34  
35  ext4-y := balloc.o bitmap.o block_validity.o dir.o ext4_jbd2.o extents.o \
36                 extents_status.o file.o fsmap.o fsync.o hash.o ialloc.o \
37 +               htree_lock.o \
38                 indirect.o inline.o inode.o ioctl.o mballoc.o migrate.o \
39                 mmp.o move_extent.o namei.o page-io.o readpage.o resize.o \
40                 super.o symlink.o sysfs.o xattr.o xattr_trusted.o xattr_user.o
41 diff --git a/fs/ext4/ext4.h b/fs/ext4/ext4.h
42 index 78893a6..72c355d 100644
43 --- a/fs/ext4/ext4.h
44 +++ b/fs/ext4/ext4.h
45 @@ -29,6 +29,7 @@
46  #include <linux/timer.h>
47  #include <linux/version.h>
48  #include <linux/wait.h>
49 +#include <linux/htree_lock.h>
50  #include <linux/sched/signal.h>
51  #include <linux/blockgroup_lock.h>
52  #include <linux/percpu_counter.h>
53 @@ -961,6 +962,9 @@ struct ext4_inode_info {
54         __u32   i_dtime;
55         ext4_fsblk_t    i_file_acl;
56  
57 +       /* following fields for parallel directory operations -bzzz */
58 +       struct semaphore i_append_sem;
59 +
60         /*
61          * i_block_group is the number of the block group which contains
62          * this file's inode.  Constant across the lifetime of the inode,
63 @@ -2181,6 +2185,72 @@ struct dx_hash_info
64   */
65  #define HASH_NB_ALWAYS         1
66  
67 +/* assume name-hash is protected by upper layer */
68 +#define EXT4_HTREE_LOCK_HASH   0
69 +
70 +enum ext4_pdo_lk_types {
71 +#if EXT4_HTREE_LOCK_HASH
72 +       EXT4_LK_HASH,
73 +#endif
74 +       EXT4_LK_DX,             /* index block */
75 +       EXT4_LK_DE,             /* directory entry block */
76 +       EXT4_LK_SPIN,           /* spinlock */
77 +       EXT4_LK_MAX,
78 +};
79 +
80 +/* read-only bit */
81 +#define EXT4_LB_RO(b)          (1 << (b))
82 +/* read + write, high bits for writer */
83 +#define EXT4_LB_RW(b)          ((1 << (b)) | (1 << (EXT4_LK_MAX + (b))))
84 +
85 +enum ext4_pdo_lock_bits {
86 +       /* DX lock bits */
87 +       EXT4_LB_DX_RO           = EXT4_LB_RO(EXT4_LK_DX),
88 +       EXT4_LB_DX              = EXT4_LB_RW(EXT4_LK_DX),
89 +       /* DE lock bits */
90 +       EXT4_LB_DE_RO           = EXT4_LB_RO(EXT4_LK_DE),
91 +       EXT4_LB_DE              = EXT4_LB_RW(EXT4_LK_DE),
92 +       /* DX spinlock bits */
93 +       EXT4_LB_SPIN_RO         = EXT4_LB_RO(EXT4_LK_SPIN),
94 +       EXT4_LB_SPIN            = EXT4_LB_RW(EXT4_LK_SPIN),
95 +       /* accurate searching */
96 +       EXT4_LB_EXACT           = EXT4_LB_RO(EXT4_LK_MAX << 1),
97 +};
98 +
99 +enum ext4_pdo_lock_opc {
100 +       /* external */
101 +       EXT4_HLOCK_READDIR      = (EXT4_LB_DE_RO | EXT4_LB_DX_RO),
102 +       EXT4_HLOCK_LOOKUP       = (EXT4_LB_DE_RO | EXT4_LB_SPIN_RO |
103 +                                  EXT4_LB_EXACT),
104 +       EXT4_HLOCK_DEL          = (EXT4_LB_DE | EXT4_LB_SPIN_RO |
105 +                                  EXT4_LB_EXACT),
106 +       EXT4_HLOCK_ADD          = (EXT4_LB_DE | EXT4_LB_SPIN_RO),
107 +
108 +       /* internal */
109 +       EXT4_HLOCK_LOOKUP_SAFE  = (EXT4_LB_DE_RO | EXT4_LB_DX_RO |
110 +                                  EXT4_LB_EXACT),
111 +       EXT4_HLOCK_DEL_SAFE     = (EXT4_LB_DE | EXT4_LB_DX_RO | EXT4_LB_EXACT),
112 +       EXT4_HLOCK_SPLIT        = (EXT4_LB_DE | EXT4_LB_DX | EXT4_LB_SPIN),
113 +};
114 +
115 +extern struct htree_lock_head *ext4_htree_lock_head_alloc(unsigned hbits);
116 +#define ext4_htree_lock_head_free(lhead)       htree_lock_head_free(lhead)
117 +
118 +extern struct htree_lock *ext4_htree_lock_alloc(void);
119 +#define ext4_htree_lock_free(lck)              htree_lock_free(lck)
120 +
121 +extern void ext4_htree_lock(struct htree_lock *lck,
122 +                           struct htree_lock_head *lhead,
123 +                           struct inode *dir, unsigned flags);
124 +#define ext4_htree_unlock(lck)                  htree_unlock(lck)
125 +
126 +extern struct buffer_head *ext4_find_entry_locked(struct inode *dir,
127 +                                       const struct qstr *d_name,
128 +                                       struct ext4_dir_entry_2 **res_dir,
129 +                                       int *inlined, struct htree_lock *lck);
130 +extern int ext4_add_entry_locked(handle_t *handle, struct dentry *dentry,
131 +                     struct inode *inode, struct htree_lock *lck);
132 +
133  struct ext4_filename {
134         const struct qstr *usr_fname;
135         struct fscrypt_str disk_name;
136 @@ -2548,8 +2618,16 @@ void ext4_insert_dentry(struct inode *inode,
137                         struct ext4_filename *fname, void *data);
138  static inline void ext4_update_dx_flag(struct inode *inode)
139  {
140 +       /* Disable it for ldiskfs, because going from a DX directory to
141 +        * a non-DX directory while it is in use will completely break
142 +        * the htree-locking.
143 +        * If we really want to support this operation in the future,
144 +        * we would need to exclusively lock the directory here, which
145 +        * would increase code complexity. */
146 +#if 0
147         if (!ext4_has_feature_dir_index(inode->i_sb))
148                 ext4_clear_inode_flag(inode, EXT4_INODE_INDEX);
149 +#endif
150  }
151  static const unsigned char ext4_filetype_table[] = {
152         DT_UNKNOWN, DT_REG, DT_DIR, DT_CHR, DT_BLK, DT_FIFO, DT_SOCK, DT_LNK
153 diff --git a/fs/ext4/htree_lock.c b/fs/ext4/htree_lock.c
154 new file mode 100644
155 index 0000000..ee407ed
156 --- /dev/null
157 +++ b/fs/ext4/htree_lock.c
158 @@ -0,0 +1,891 @@
159 +/*
160 + * fs/ext4/htree_lock.c
161 + *
162 + * Copyright (c) 2011, 2012, Intel Corporation.
163 + *
164 + * Author: Liang Zhen <liang@whamcloud.com>
165 + */
166 +#include <linux/jbd2.h>
167 +#include <linux/hash.h>
168 +#include <linux/module.h>
169 +#include <linux/htree_lock.h>
170 +
171 +enum {
172 +       HTREE_LOCK_BIT_EX       = (1 << HTREE_LOCK_EX),
173 +       HTREE_LOCK_BIT_PW       = (1 << HTREE_LOCK_PW),
174 +       HTREE_LOCK_BIT_PR       = (1 << HTREE_LOCK_PR),
175 +       HTREE_LOCK_BIT_CW       = (1 << HTREE_LOCK_CW),
176 +       HTREE_LOCK_BIT_CR       = (1 << HTREE_LOCK_CR),
177 +};
178 +
179 +enum {
180 +       HTREE_LOCK_COMPAT_EX    = 0,
181 +       HTREE_LOCK_COMPAT_PW    = HTREE_LOCK_COMPAT_EX | HTREE_LOCK_BIT_CR,
182 +       HTREE_LOCK_COMPAT_PR    = HTREE_LOCK_COMPAT_PW | HTREE_LOCK_BIT_PR,
183 +       HTREE_LOCK_COMPAT_CW    = HTREE_LOCK_COMPAT_PW | HTREE_LOCK_BIT_CW,
184 +       HTREE_LOCK_COMPAT_CR    = HTREE_LOCK_COMPAT_CW | HTREE_LOCK_BIT_PR |
185 +                                 HTREE_LOCK_BIT_PW,
186 +};
187 +
188 +static int htree_lock_compat[] = {
189 +       [HTREE_LOCK_EX]         HTREE_LOCK_COMPAT_EX,
190 +       [HTREE_LOCK_PW]         HTREE_LOCK_COMPAT_PW,
191 +       [HTREE_LOCK_PR]         HTREE_LOCK_COMPAT_PR,
192 +       [HTREE_LOCK_CW]         HTREE_LOCK_COMPAT_CW,
193 +       [HTREE_LOCK_CR]         HTREE_LOCK_COMPAT_CR,
194 +};
195 +
196 +/* max allowed htree-lock depth.
197 + * We only need depth=3 for ext4, although callers may use a higher value. */
198 +#define HTREE_LOCK_DEP_MAX     16
199 +
200 +#ifdef HTREE_LOCK_DEBUG
201 +
202 +static char *hl_name[] = {
203 +       [HTREE_LOCK_EX]         "EX",
204 +       [HTREE_LOCK_PW]         "PW",
205 +       [HTREE_LOCK_PR]         "PR",
206 +       [HTREE_LOCK_CW]         "CW",
207 +       [HTREE_LOCK_CR]         "CR",
208 +};
209 +
210 +/* lock stats */
211 +struct htree_lock_node_stats {
212 +       unsigned long long      blocked[HTREE_LOCK_MAX];
213 +       unsigned long long      granted[HTREE_LOCK_MAX];
214 +       unsigned long long      retried[HTREE_LOCK_MAX];
215 +       unsigned long long      events;
216 +};
217 +
218 +struct htree_lock_stats {
219 +       struct htree_lock_node_stats    nodes[HTREE_LOCK_DEP_MAX];
220 +       unsigned long long      granted[HTREE_LOCK_MAX];
221 +       unsigned long long      blocked[HTREE_LOCK_MAX];
222 +};
223 +
224 +static struct htree_lock_stats hl_stats;
225 +
226 +void htree_lock_stat_reset(void)
227 +{
228 +       memset(&hl_stats, 0, sizeof(hl_stats));
229 +}
230 +
231 +void htree_lock_stat_print(int depth)
232 +{
233 +       int     i;
234 +       int     j;
235 +
236 +       printk(KERN_DEBUG "HTREE LOCK STATS:\n");
237 +       for (i = 0; i < HTREE_LOCK_MAX; i++) {
238 +               printk(KERN_DEBUG "[%s]: G [%10llu], B [%10llu]\n",
239 +                      hl_name[i], hl_stats.granted[i], hl_stats.blocked[i]);
240 +       }
241 +       for (i = 0; i < depth; i++) {
242 +               printk(KERN_DEBUG "HTREE CHILD [%d] STATS:\n", i);
243 +               for (j = 0; j < HTREE_LOCK_MAX; j++) {
244 +                       printk(KERN_DEBUG
245 +                               "[%s]: G [%10llu], B [%10llu], R [%10llu]\n",
246 +                               hl_name[j], hl_stats.nodes[i].granted[j],
247 +                               hl_stats.nodes[i].blocked[j],
248 +                               hl_stats.nodes[i].retried[j]);
249 +               }
250 +       }
251 +}
252 +
253 +#define lk_grant_inc(m)       do { hl_stats.granted[m]++; } while (0)
254 +#define lk_block_inc(m)       do { hl_stats.blocked[m]++; } while (0)
255 +#define ln_grant_inc(d, m)    do { hl_stats.nodes[d].granted[m]++; } while (0)
256 +#define ln_block_inc(d, m)    do { hl_stats.nodes[d].blocked[m]++; } while (0)
257 +#define ln_retry_inc(d, m)    do { hl_stats.nodes[d].retried[m]++; } while (0)
258 +#define ln_event_inc(d)       do { hl_stats.nodes[d].events++; } while (0)
259 +
260 +#else /* !DEBUG */
261 +
262 +void htree_lock_stat_reset(void) {}
263 +void htree_lock_stat_print(int depth) {}
264 +
265 +#define lk_grant_inc(m)              do {} while (0)
266 +#define lk_block_inc(m)              do {} while (0)
267 +#define ln_grant_inc(d, m)    do {} while (0)
268 +#define ln_block_inc(d, m)    do {} while (0)
269 +#define ln_retry_inc(d, m)    do {} while (0)
270 +#define ln_event_inc(d)              do {} while (0)
271 +
272 +#endif /* DEBUG */
273 +
274 +EXPORT_SYMBOL(htree_lock_stat_reset);
275 +EXPORT_SYMBOL(htree_lock_stat_print);
276 +
277 +#define HTREE_DEP_ROOT           (-1)
278 +
279 +#define htree_spin_lock(lhead, dep)                            \
280 +       bit_spin_lock((dep) + 1, &(lhead)->lh_lock)
281 +#define htree_spin_unlock(lhead, dep)                          \
282 +       bit_spin_unlock((dep) + 1, &(lhead)->lh_lock)
283 +
284 +#define htree_key_event_ignore(child, ln)                      \
285 +       (!((child)->lc_events & (1 << (ln)->ln_mode)))
286 +
287 +static int
288 +htree_key_list_empty(struct htree_lock_node *ln)
289 +{
290 +       return list_empty(&ln->ln_major_list) && list_empty(&ln->ln_minor_list);
291 +}
292 +
293 +static void
294 +htree_key_list_del_init(struct htree_lock_node *ln)
295 +{
296 +       struct htree_lock_node *tmp = NULL;
297 +
298 +       if (!list_empty(&ln->ln_minor_list)) {
299 +               tmp = list_entry(ln->ln_minor_list.next,
300 +                                struct htree_lock_node, ln_minor_list);
301 +               list_del_init(&ln->ln_minor_list);
302 +       }
303 +
304 +       if (list_empty(&ln->ln_major_list))
305 +               return;
306 +
307 +       if (tmp == NULL) { /* not on minor key list */
308 +               list_del_init(&ln->ln_major_list);
309 +       } else {
310 +               BUG_ON(!list_empty(&tmp->ln_major_list));
311 +               list_replace_init(&ln->ln_major_list, &tmp->ln_major_list);
312 +       }
313 +}
314 +
315 +static void
316 +htree_key_list_replace_init(struct htree_lock_node *old,
317 +                           struct htree_lock_node *new)
318 +{
319 +       if (!list_empty(&old->ln_major_list))
320 +               list_replace_init(&old->ln_major_list, &new->ln_major_list);
321 +
322 +       if (!list_empty(&old->ln_minor_list))
323 +               list_replace_init(&old->ln_minor_list, &new->ln_minor_list);
324 +}
325 +
326 +static void
327 +htree_key_event_enqueue(struct htree_lock_child *child,
328 +                       struct htree_lock_node *ln, int dep, void *event)
329 +{
330 +       struct htree_lock_node *tmp;
331 +
332 +       /* NB: ALWAYS called holding lhead::lh_lock(dep) */
333 +       BUG_ON(ln->ln_mode == HTREE_LOCK_NL);
334 +       if (event == NULL || htree_key_event_ignore(child, ln))
335 +               return;
336 +
337 +       /* shouldn't be a very long list */
338 +       list_for_each_entry(tmp, &ln->ln_alive_list, ln_alive_list) {
339 +               if (tmp->ln_mode == HTREE_LOCK_NL) {
340 +                       ln_event_inc(dep);
341 +                       if (child->lc_callback != NULL)
342 +                               child->lc_callback(tmp->ln_ev_target, event);
343 +               }
344 +       }
345 +}
346 +
347 +static int
348 +htree_node_lock_enqueue(struct htree_lock *newlk, struct htree_lock *curlk,
349 +                       unsigned dep, int wait, void *event)
350 +{
351 +       struct htree_lock_child *child = &newlk->lk_head->lh_children[dep];
352 +       struct htree_lock_node *newln = &newlk->lk_nodes[dep];
353 +       struct htree_lock_node *curln = &curlk->lk_nodes[dep];
354 +
355 +       /* NB: ALWAYS called holding lhead::lh_lock(dep) */
356 +       /* NB: we only expect PR/PW lock modes here; only these two modes are
357 +        * allowed for htree_node_lock (asserted in htree_node_lock_internal).
358 +        * NL is only used for listeners; a user can't directly request NL mode. */
359 +       if ((curln->ln_mode == HTREE_LOCK_NL) ||
360 +           (curln->ln_mode != HTREE_LOCK_PW &&
361 +            newln->ln_mode != HTREE_LOCK_PW)) {
362 +               /* no conflict, attach it on granted list of @curlk */
363 +               if (curln->ln_mode != HTREE_LOCK_NL) {
364 +                       list_add(&newln->ln_granted_list,
365 +                                &curln->ln_granted_list);
366 +               } else {
367 +                       /* replace key owner */
368 +                       htree_key_list_replace_init(curln, newln);
369 +               }
370 +
371 +               list_add(&newln->ln_alive_list, &curln->ln_alive_list);
372 +               htree_key_event_enqueue(child, newln, dep, event);
373 +               ln_grant_inc(dep, newln->ln_mode);
374 +               return 1; /* still hold lh_lock */
375 +       }
376 +
377 +       if (!wait) { /* can't grant and don't want to wait */
378 +               ln_retry_inc(dep, newln->ln_mode);
379 +               newln->ln_mode = HTREE_LOCK_INVAL;
380 +               return -1; /* don't wait and just return -1 */
381 +       }
382 +
383 +       newlk->lk_task = current;
384 +       set_current_state(TASK_UNINTERRUPTIBLE);
385 +       /* conflict, attach it on blocked list of curlk */
386 +       list_add_tail(&newln->ln_blocked_list, &curln->ln_blocked_list);
387 +       list_add(&newln->ln_alive_list, &curln->ln_alive_list);
388 +       ln_block_inc(dep, newln->ln_mode);
389 +
390 +       htree_spin_unlock(newlk->lk_head, dep);
391 +       /* wait to be given the lock */
392 +       if (newlk->lk_task != NULL)
393 +               schedule();
394 +       /* granted, no doubt, wake up will set me RUNNING */
395 +       if (event == NULL || htree_key_event_ignore(child, newln))
396 +               return 0; /* granted without lh_lock */
397 +
398 +       htree_spin_lock(newlk->lk_head, dep);
399 +       htree_key_event_enqueue(child, newln, dep, event);
400 +       return 1; /* still hold lh_lock */
401 +}
402 +
403 +/*
404 + * get PR/PW access to a particular tree-node according to @dep and @key;
405 + * it will return -1 if @wait is false and it can't immediately grant this lock.
406 + * All listeners (HTREE_LOCK_NL) on @dep and with the same @key will get
407 + * @event if it's not NULL.
408 + * NB: ALWAYS called holding lhead::lh_lock
409 + */
410 +static int
411 +htree_node_lock_internal(struct htree_lock_head *lhead, struct htree_lock *lck,
412 +                        htree_lock_mode_t mode, u32 key, unsigned dep,
413 +                        int wait, void *event)
414 +{
415 +       LIST_HEAD(list);
416 +       struct htree_lock       *tmp;
417 +       struct htree_lock       *tmp2;
418 +       u16                     major;
419 +       u16                     minor;
420 +       u8                      reverse;
421 +       u8                      ma_bits;
422 +       u8                      mi_bits;
423 +
424 +       BUG_ON(mode != HTREE_LOCK_PW && mode != HTREE_LOCK_PR);
425 +       BUG_ON(htree_node_is_granted(lck, dep));
426 +
427 +       key = hash_long(key, lhead->lh_hbits);
428 +
429 +       mi_bits = lhead->lh_hbits >> 1;
430 +       ma_bits = lhead->lh_hbits - mi_bits;
431 +
432 +       lck->lk_nodes[dep].ln_major_key = major = key & ((1U << ma_bits) - 1);
433 +       lck->lk_nodes[dep].ln_minor_key = minor = key >> ma_bits;
434 +       lck->lk_nodes[dep].ln_mode = mode;
435 +
436 +       /*
437 +        * The major key list is an ordered list, so searches are started
438 +        * at the end of the list that is numerically closer to major_key,
439 +        * so at most half of the list will be walked (for well-distributed
440 +        * keys). The list traversal aborts early if the expected key
441 +        * location is passed.
442 +        */
443 +       reverse = (major >= (1 << (ma_bits - 1)));
444 +
445 +       if (reverse) {
446 +               list_for_each_entry_reverse(tmp,
447 +                                       &lhead->lh_children[dep].lc_list,
448 +                                       lk_nodes[dep].ln_major_list) {
449 +                       if (tmp->lk_nodes[dep].ln_major_key == major) {
450 +                               goto search_minor;
451 +
452 +                       } else if (tmp->lk_nodes[dep].ln_major_key < major) {
453 +                               /* attach _after_ @tmp */
454 +                               list_add(&lck->lk_nodes[dep].ln_major_list,
455 +                                        &tmp->lk_nodes[dep].ln_major_list);
456 +                               goto out_grant_major;
457 +                       }
458 +               }
459 +
460 +               list_add(&lck->lk_nodes[dep].ln_major_list,
461 +                        &lhead->lh_children[dep].lc_list);
462 +               goto out_grant_major;
463 +
464 +       } else {
465 +               list_for_each_entry(tmp, &lhead->lh_children[dep].lc_list,
466 +                                   lk_nodes[dep].ln_major_list) {
467 +                       if (tmp->lk_nodes[dep].ln_major_key == major) {
468 +                               goto search_minor;
469 +
470 +                       } else if (tmp->lk_nodes[dep].ln_major_key > major) {
471 +                               /* insert _before_ @tmp */
472 +                               list_add_tail(&lck->lk_nodes[dep].ln_major_list,
473 +                                       &tmp->lk_nodes[dep].ln_major_list);
474 +                               goto out_grant_major;
475 +                       }
476 +               }
477 +
478 +               list_add_tail(&lck->lk_nodes[dep].ln_major_list,
479 +                             &lhead->lh_children[dep].lc_list);
480 +               goto out_grant_major;
481 +       }
482 +
483 + search_minor:
484 +       /*
485 +        * NB: minor_key list doesn't have a "head", @list is just a
486 +        * temporary stub to help the list search; make sure it's removed
487 +        * after searching.
488 +        * minor_key list is an ordered list too.
489 +        */
490 +       list_add_tail(&list, &tmp->lk_nodes[dep].ln_minor_list);
491 +
492 +       reverse = (minor >= (1 << (mi_bits - 1)));
493 +
494 +       if (reverse) {
495 +               list_for_each_entry_reverse(tmp2, &list,
496 +                                           lk_nodes[dep].ln_minor_list) {
497 +                       if (tmp2->lk_nodes[dep].ln_minor_key == minor) {
498 +                               goto out_enqueue;
499 +
500 +                       } else if (tmp2->lk_nodes[dep].ln_minor_key < minor) {
501 +                               /* attach _after_ @tmp2 */
502 +                               list_add(&lck->lk_nodes[dep].ln_minor_list,
503 +                                        &tmp2->lk_nodes[dep].ln_minor_list);
504 +                               goto out_grant_minor;
505 +                       }
506 +               }
507 +
508 +               list_add(&lck->lk_nodes[dep].ln_minor_list, &list);
509 +
510 +       } else {
511 +               list_for_each_entry(tmp2, &list,
512 +                                   lk_nodes[dep].ln_minor_list) {
513 +                       if (tmp2->lk_nodes[dep].ln_minor_key == minor) {
514 +                               goto out_enqueue;
515 +
516 +                       } else if (tmp2->lk_nodes[dep].ln_minor_key > minor) {
517 +                               /* insert _before_ @tmp2 */
518 +                               list_add_tail(&lck->lk_nodes[dep].ln_minor_list,
519 +                                       &tmp2->lk_nodes[dep].ln_minor_list);
520 +                               goto out_grant_minor;
521 +                       }
522 +               }
523 +
524 +               list_add_tail(&lck->lk_nodes[dep].ln_minor_list, &list);
525 +       }
526 +
527 + out_grant_minor:
528 +       if (list.next == &lck->lk_nodes[dep].ln_minor_list) {
529 +               /* new lock @lck is the first one on minor_key list, which
530 +                * means it has the smallest minor_key and it should
531 +                * replace @tmp as minor_key owner */
532 +               list_replace_init(&tmp->lk_nodes[dep].ln_major_list,
533 +                                 &lck->lk_nodes[dep].ln_major_list);
534 +       }
535 +       /* remove the temporary head */
536 +       list_del(&list);
537 +
538 + out_grant_major:
539 +       ln_grant_inc(dep, lck->lk_nodes[dep].ln_mode);
540 +       return 1; /* granted with holding lh_lock */
541 +
542 + out_enqueue:
543 +       list_del(&list); /* remove temporary head */
544 +       return htree_node_lock_enqueue(lck, tmp2, dep, wait, event);
545 +}
546 +
547 +/*
548 + * release the key of @lck at level @dep, and grant any blocked locks.
549 + * caller will still listen on @key if @event is not NULL, which means
550 + * caller can see an event (by event_cb) while granting any lock with
551 + * the same key at level @dep.
552 + * NB: ALWAYS called holding lhead::lh_lock
553 + * NB: listener will not block anyone because listening mode is HTREE_LOCK_NL
554 + */
555 +static void
556 +htree_node_unlock_internal(struct htree_lock_head *lhead,
557 +                          struct htree_lock *curlk, unsigned dep, void *event)
558 +{
559 +       struct htree_lock_node  *curln = &curlk->lk_nodes[dep];
560 +       struct htree_lock       *grtlk = NULL;
561 +       struct htree_lock_node  *grtln;
562 +       struct htree_lock       *poslk;
563 +       struct htree_lock       *tmplk;
564 +
565 +       if (!htree_node_is_granted(curlk, dep))
566 +               return;
567 +
568 +       if (!list_empty(&curln->ln_granted_list)) {
569 +               /* there is another granted lock */
570 +               grtlk = list_entry(curln->ln_granted_list.next,
571 +                                  struct htree_lock,
572 +                                  lk_nodes[dep].ln_granted_list);
573 +               list_del_init(&curln->ln_granted_list);
574 +       }
575 +
576 +       if (grtlk == NULL && !list_empty(&curln->ln_blocked_list)) {
577 +               /*
578 +                * @curlk is the only granted lock, so we confirmed:
579 +                * a) curln is key owner (attached on major/minor_list),
580 +                *    so if there is any blocked lock, it should be attached
581 +                *    on curln->ln_blocked_list
582 +                * b) we always can grant the first blocked lock
583 +                */
584 +               grtlk = list_entry(curln->ln_blocked_list.next,
585 +                                  struct htree_lock,
586 +                                  lk_nodes[dep].ln_blocked_list);
587 +               BUG_ON(grtlk->lk_task == NULL);
588 +               wake_up_process(grtlk->lk_task);
589 +       }
590 +
591 +       if (event != NULL &&
592 +           lhead->lh_children[dep].lc_events != HTREE_EVENT_DISABLE) {
593 +               curln->ln_ev_target = event;
594 +               curln->ln_mode = HTREE_LOCK_NL; /* listen! */
595 +       } else {
596 +               curln->ln_mode = HTREE_LOCK_INVAL;
597 +       }
598 +
599 +       if (grtlk == NULL) { /* I must be the only one locking this key */
600 +               struct htree_lock_node *tmpln;
601 +
602 +               BUG_ON(htree_key_list_empty(curln));
603 +
604 +               if (curln->ln_mode == HTREE_LOCK_NL) /* listening */
605 +                       return;
606 +
607 +               /* not listening */
608 +               if (list_empty(&curln->ln_alive_list)) { /* no more listener */
609 +                       htree_key_list_del_init(curln);
610 +                       return;
611 +               }
612 +
613 +               tmpln = list_entry(curln->ln_alive_list.next,
614 +                                  struct htree_lock_node, ln_alive_list);
615 +
616 +               BUG_ON(tmpln->ln_mode != HTREE_LOCK_NL);
617 +
618 +               htree_key_list_replace_init(curln, tmpln);
619 +               list_del_init(&curln->ln_alive_list);
620 +
621 +               return;
622 +       }
623 +
624 +       /* have a granted lock */
625 +       grtln = &grtlk->lk_nodes[dep];
626 +       if (!list_empty(&curln->ln_blocked_list)) {
627 +               /* only key owner can be on both lists */
628 +               BUG_ON(htree_key_list_empty(curln));
629 +
630 +               if (list_empty(&grtln->ln_blocked_list)) {
631 +                       list_add(&grtln->ln_blocked_list,
632 +                                &curln->ln_blocked_list);
633 +               }
634 +               list_del_init(&curln->ln_blocked_list);
635 +       }
636 +       /*
637 +        * NB: this is the tricky part:
638 +        * We have only two modes for child-lock (PR and PW), also,
639 +        * only owner of the key (attached on major/minor_list) can be on
640 +        * both blocked_list and granted_list, so @grtlk must be one
641 +        * of these two cases:
642 +        *
643 +        * a) @grtlk is taken from granted_list, which means we've granted
644 +        *    more than one lock so @grtlk has to be PR, the first blocked
645 +        *    lock must be PW and we can't grant it at all.
646 +        *    So even if @grtlk is not the owner of the key (empty
647 +        *    blocked_list), we don't care because we can't grant any lock.
648 +        * b) we just grant a new lock which is taken from head of blocked
649 +        *    list, and it should be the first granted lock, and it should
650 +        *    be the first one linked on blocked_list.
651 +        *
652 +        * Either way, we can get correct result by iterating blocked_list
653 +        * of @grtlk, and don't have to bother on how to find out
654 +        * owner of current key.
655 +        */
656 +       list_for_each_entry_safe(poslk, tmplk, &grtln->ln_blocked_list,
657 +                                lk_nodes[dep].ln_blocked_list) {
658 +               if (grtlk->lk_nodes[dep].ln_mode == HTREE_LOCK_PW ||
659 +                   poslk->lk_nodes[dep].ln_mode == HTREE_LOCK_PW)
660 +                       break;
661 +               /* grant all readers */
662 +               list_del_init(&poslk->lk_nodes[dep].ln_blocked_list);
663 +               list_add(&poslk->lk_nodes[dep].ln_granted_list,
664 +                        &grtln->ln_granted_list);
665 +
666 +               BUG_ON(poslk->lk_task == NULL);
667 +               wake_up_process(poslk->lk_task);
668 +       }
669 +
670 +       /* if @curln is the owner of this key, replace it with @grtln */
671 +       if (!htree_key_list_empty(curln))
672 +               htree_key_list_replace_init(curln, grtln);
673 +
674 +       if (curln->ln_mode == HTREE_LOCK_INVAL)
675 +               list_del_init(&curln->ln_alive_list);
676 +}
677 +
678 +/*
679 + * it's just wrapper of htree_node_lock_internal, it returns 1 on granted
680 + * and 0 only if @wait is false and can't grant it immediately
681 + */
682 +int
683 +htree_node_lock_try(struct htree_lock *lck, htree_lock_mode_t mode,
684 +                   u32 key, unsigned dep, int wait, void *event)
685 +{
686 +       struct htree_lock_head *lhead = lck->lk_head;
687 +       int rc;
688 +
689 +       BUG_ON(dep >= lck->lk_depth);
690 +       BUG_ON(lck->lk_mode == HTREE_LOCK_INVAL);
691 +
692 +       htree_spin_lock(lhead, dep);
693 +       rc = htree_node_lock_internal(lhead, lck, mode, key, dep, wait, event);
694 +       if (rc != 0)
695 +               htree_spin_unlock(lhead, dep);
696 +       return rc >= 0;
697 +}
698 +EXPORT_SYMBOL(htree_node_lock_try);
699 +
700 +/* it's a wrapper of htree_node_unlock_internal */
701 +void
702 +htree_node_unlock(struct htree_lock *lck, unsigned dep, void *event)
703 +{
704 +       struct htree_lock_head *lhead = lck->lk_head;
705 +
706 +       BUG_ON(dep >= lck->lk_depth);
707 +       BUG_ON(lck->lk_mode == HTREE_LOCK_INVAL);
708 +
709 +       htree_spin_lock(lhead, dep);
710 +       htree_node_unlock_internal(lhead, lck, dep, event);
711 +       htree_spin_unlock(lhead, dep);
712 +}
713 +EXPORT_SYMBOL(htree_node_unlock);
714 +
715 +/* stop listening on child-lock level @dep */
716 +void
717 +htree_node_stop_listen(struct htree_lock *lck, unsigned dep)
718 +{
719 +       struct htree_lock_node *ln = &lck->lk_nodes[dep];
720 +       struct htree_lock_node *tmp;
721 +
722 +       BUG_ON(htree_node_is_granted(lck, dep));
723 +       BUG_ON(!list_empty(&ln->ln_blocked_list));
724 +       BUG_ON(!list_empty(&ln->ln_granted_list));
725 +
726 +       if (!htree_node_is_listening(lck, dep))
727 +               return;
728 +
729 +       htree_spin_lock(lck->lk_head, dep);
730 +       ln->ln_mode = HTREE_LOCK_INVAL;
731 +       ln->ln_ev_target = NULL;
732 +
733 +       if (htree_key_list_empty(ln)) { /* not owner */
734 +               list_del_init(&ln->ln_alive_list);
735 +               goto out;
736 +       }
737 +
738 +       /* I'm the owner... */
739 +       if (list_empty(&ln->ln_alive_list)) { /* no more listener */
740 +               htree_key_list_del_init(ln);
741 +               goto out;
742 +       }
743 +
744 +       tmp = list_entry(ln->ln_alive_list.next,
745 +                        struct htree_lock_node, ln_alive_list);
746 +
747 +       BUG_ON(tmp->ln_mode != HTREE_LOCK_NL);
748 +       htree_key_list_replace_init(ln, tmp);
749 +       list_del_init(&ln->ln_alive_list);
750 + out:
751 +       htree_spin_unlock(lck->lk_head, dep);
752 +}
753 +EXPORT_SYMBOL(htree_node_stop_listen);
754 +
755 +/* release all child-locks if we have any */
756 +static void
757 +htree_node_release_all(struct htree_lock *lck)
758 +{
759 +       int     i;
760 +
761 +       for (i = 0; i < lck->lk_depth; i++) {
762 +               if (htree_node_is_granted(lck, i))
763 +                       htree_node_unlock(lck, i, NULL);
764 +               else if (htree_node_is_listening(lck, i))
765 +                       htree_node_stop_listen(lck, i);
766 +       }
767 +}
768 +
769 +/*
770 + * obtain htree lock, it could be blocked inside if there's conflict
771 + * with any granted or blocked lock and @wait is true.
772 + * NB: ALWAYS called holding lhead::lh_lock
773 + */
774 +static int
775 +htree_lock_internal(struct htree_lock *lck, int wait)
776 +{
777 +       struct htree_lock_head *lhead = lck->lk_head;
778 +       int     granted = 0;
779 +       int     blocked = 0;
780 +       int     i;
781 +
782 +       for (i = 0; i < HTREE_LOCK_MAX; i++) {
783 +               if (lhead->lh_ngranted[i] != 0)
784 +                       granted |= 1 << i;
785 +               if (lhead->lh_nblocked[i] != 0)
786 +                       blocked |= 1 << i;
787 +       }
788 +       if ((htree_lock_compat[lck->lk_mode] & granted) != granted ||
789 +           (htree_lock_compat[lck->lk_mode] & blocked) != blocked) {
790 +               /* will block the current lock even if it only conflicts with
791 +                * another blocked lock, so a lock like EX won't starve */
792 +               if (!wait)
793 +                       return -1;
794 +               lhead->lh_nblocked[lck->lk_mode]++;
795 +               lk_block_inc(lck->lk_mode);
796 +
797 +               lck->lk_task = current;
798 +               list_add_tail(&lck->lk_blocked_list, &lhead->lh_blocked_list);
799 +
800 +retry:
801 +               set_current_state(TASK_UNINTERRUPTIBLE);
802 +               htree_spin_unlock(lhead, HTREE_DEP_ROOT);
803 +               /* wait to be given the lock */
804 +               if (lck->lk_task != NULL)
805 +                       schedule();
806 +               /* granted, no doubt. wake up will set me RUNNING.
807 +                * Since the thread could be woken up spuriously,
808 +                * we need to check again whether the lock was granted. */
809 +               if (!list_empty(&lck->lk_blocked_list)) {
810 +                       htree_spin_lock(lhead, HTREE_DEP_ROOT);
811 +                       if (list_empty(&lck->lk_blocked_list)) {
812 +                               htree_spin_unlock(lhead, HTREE_DEP_ROOT);
813 +                               return 0;
814 +                       }
815 +                       goto retry;
816 +               }
817 +               return 0; /* without lh_lock */
818 +       }
819 +       lhead->lh_ngranted[lck->lk_mode]++;
820 +       lk_grant_inc(lck->lk_mode);
821 +       return 1;
822 +}
823 +
824 +/* release htree lock. NB: ALWAYS called holding lhead::lh_lock */
825 +static void
826 +htree_unlock_internal(struct htree_lock *lck)
827 +{
828 +       struct htree_lock_head *lhead = lck->lk_head;
829 +       struct htree_lock *tmp;
830 +       struct htree_lock *tmp2;
831 +       int granted = 0;
832 +       int i;
833 +
834 +       BUG_ON(lhead->lh_ngranted[lck->lk_mode] == 0);
835 +
836 +       lhead->lh_ngranted[lck->lk_mode]--;
837 +       lck->lk_mode = HTREE_LOCK_INVAL;
838 +
839 +       for (i = 0; i < HTREE_LOCK_MAX; i++) {
840 +               if (lhead->lh_ngranted[i] != 0)
841 +                       granted |= 1 << i;
842 +       }
843 +       list_for_each_entry_safe(tmp, tmp2,
844 +                                &lhead->lh_blocked_list, lk_blocked_list) {
845 +               /* conflict with any granted lock? */
846 +               if ((htree_lock_compat[tmp->lk_mode] & granted) != granted)
847 +                       break;
848 +
849 +               list_del_init(&tmp->lk_blocked_list);
850 +
851 +               BUG_ON(lhead->lh_nblocked[tmp->lk_mode] == 0);
852 +
853 +               lhead->lh_nblocked[tmp->lk_mode]--;
854 +               lhead->lh_ngranted[tmp->lk_mode]++;
855 +               granted |= 1 << tmp->lk_mode;
856 +
857 +               BUG_ON(tmp->lk_task == NULL);
858 +               wake_up_process(tmp->lk_task);
859 +       }
860 +}
861 +
862 +/* it's a wrapper of htree_lock_internal and the exported interface.
863 + * It always returns 1 with the lock granted if @wait is true; it can return 0
864 + * if @wait is false and the locking request can't be granted immediately */
865 +int
866 +htree_lock_try(struct htree_lock *lck, struct htree_lock_head *lhead,
867 +              htree_lock_mode_t mode, int wait)
868 +{
869 +       int     rc;
870 +
871 +       BUG_ON(lck->lk_depth > lhead->lh_depth);
872 +       BUG_ON(lck->lk_head != NULL);
873 +       BUG_ON(lck->lk_task != NULL);
874 +
875 +       lck->lk_head = lhead;
876 +       lck->lk_mode = mode;
877 +
878 +       htree_spin_lock(lhead, HTREE_DEP_ROOT);
879 +       rc = htree_lock_internal(lck, wait);
880 +       if (rc != 0)
881 +               htree_spin_unlock(lhead, HTREE_DEP_ROOT);
882 +       return rc >= 0;
883 +}
884 +EXPORT_SYMBOL(htree_lock_try);
885 +
886 +/* it's a wrapper of htree_unlock_internal and the exported interface.
887 + * It releases all htree_node_locks and the htree_lock */
888 +void
889 +htree_unlock(struct htree_lock *lck)
890 +{
891 +       BUG_ON(lck->lk_head == NULL);
892 +       BUG_ON(lck->lk_mode == HTREE_LOCK_INVAL);
893 +
894 +       htree_node_release_all(lck);
895 +
896 +       htree_spin_lock(lck->lk_head, HTREE_DEP_ROOT);
897 +       htree_unlock_internal(lck);
898 +       htree_spin_unlock(lck->lk_head, HTREE_DEP_ROOT);
899 +       lck->lk_head = NULL;
900 +       lck->lk_task = NULL;
901 +}
902 +EXPORT_SYMBOL(htree_unlock);
903 +
904 +/* change lock mode */
905 +void
906 +htree_change_mode(struct htree_lock *lck, htree_lock_mode_t mode)
907 +{
908 +       BUG_ON(lck->lk_mode == HTREE_LOCK_INVAL);
909 +       lck->lk_mode = mode;
910 +}
911 +EXPORT_SYMBOL(htree_change_mode);
912 +
913 +/* release the htree lock, and lock it again with a new mode.
914 + * This function first releases all htree_node_locks and the htree_lock,
915 + * then tries to regain the htree_lock with the new @mode.
916 + * It always returns 1 with the lock granted if @wait is true; it can return 0
917 + * if @wait is false and the locking request can't be granted immediately */
918 +int
919 +htree_change_lock_try(struct htree_lock *lck, htree_lock_mode_t mode, int wait)
920 +{
921 +       struct htree_lock_head *lhead = lck->lk_head;
922 +       int rc;
923 +
924 +       BUG_ON(lhead == NULL);
925 +       BUG_ON(lck->lk_mode == mode);
926 +       BUG_ON(lck->lk_mode == HTREE_LOCK_INVAL || mode == HTREE_LOCK_INVAL);
927 +
928 +       htree_node_release_all(lck);
929 +
930 +       htree_spin_lock(lhead, HTREE_DEP_ROOT);
931 +       htree_unlock_internal(lck);
932 +       lck->lk_mode = mode;
933 +       rc = htree_lock_internal(lck, wait);
934 +       if (rc != 0)
935 +               htree_spin_unlock(lhead, HTREE_DEP_ROOT);
936 +       return rc >= 0;
937 +}
938 +EXPORT_SYMBOL(htree_change_lock_try);
939 +
940 +/* create a htree_lock head with @depth levels (number of child-locks),
941 + * it is a per-resource structure */
942 +struct htree_lock_head *
943 +htree_lock_head_alloc(unsigned depth, unsigned hbits, unsigned priv)
944 +{
945 +       struct htree_lock_head *lhead;
946 +       int  i;
947 +
948 +       if (depth > HTREE_LOCK_DEP_MAX) {
949 +               printk(KERN_ERR "%d is larger than max htree_lock depth %d\n",
950 +                       depth, HTREE_LOCK_DEP_MAX);
951 +               return NULL;
952 +       }
953 +
954 +       lhead = kzalloc(offsetof(struct htree_lock_head,
955 +                                lh_children[depth]) + priv, GFP_NOFS);
956 +       if (lhead == NULL)
957 +               return NULL;
958 +
959 +       if (hbits < HTREE_HBITS_MIN)
960 +               hbits = HTREE_HBITS_MIN;
961 +       else if (hbits > HTREE_HBITS_MAX)
962 +               hbits = HTREE_HBITS_MAX;
963 +       lhead->lh_hbits = hbits;
964 +       lhead->lh_lock = 0;
965 +       lhead->lh_depth = depth;
966 +       INIT_LIST_HEAD(&lhead->lh_blocked_list);
967 +       if (priv > 0) {
968 +               lhead->lh_private = (void *)lhead +
969 +                       offsetof(struct htree_lock_head, lh_children[depth]);
970 +       }
971 +
972 +       for (i = 0; i < depth; i++) {
973 +               INIT_LIST_HEAD(&lhead->lh_children[i].lc_list);
974 +               lhead->lh_children[i].lc_events = HTREE_EVENT_DISABLE;
975 +       }
976 +       return lhead;
977 +}
978 +EXPORT_SYMBOL(htree_lock_head_alloc);
979 +
980 +/* free the htree_lock head */
981 +void
982 +htree_lock_head_free(struct htree_lock_head *lhead)
983 +{
984 +       int     i;
985 +
986 +       BUG_ON(!list_empty(&lhead->lh_blocked_list));
987 +       for (i = 0; i < lhead->lh_depth; i++)
988 +               BUG_ON(!list_empty(&lhead->lh_children[i].lc_list));
989 +       kfree(lhead);
990 +}
991 +EXPORT_SYMBOL(htree_lock_head_free);
992 +
993 +/* register event callback for @events of child-lock at level @dep */
994 +void
995 +htree_lock_event_attach(struct htree_lock_head *lhead, unsigned dep,
996 +                       unsigned events, htree_event_cb_t callback)
997 +{
998 +       BUG_ON(lhead->lh_depth <= dep);
999 +       lhead->lh_children[dep].lc_events = events;
1000 +       lhead->lh_children[dep].lc_callback = callback;
1001 +}
1002 +EXPORT_SYMBOL(htree_lock_event_attach);
1003 +
1004 +/* allocate a htree_lock, which is a per-thread structure; @pbytes is the
1005 + * number of extra bytes reserved as private data for the caller */
1006 +struct htree_lock *
1007 +htree_lock_alloc(unsigned depth, unsigned pbytes)
1008 +{
1009 +       struct htree_lock *lck;
1010 +       int i = offsetof(struct htree_lock, lk_nodes[depth]);
1011 +
1012 +       if (depth > HTREE_LOCK_DEP_MAX) {
1013 +               printk(KERN_ERR "%d is larger than max htree_lock depth %d\n",
1014 +                       depth, HTREE_LOCK_DEP_MAX);
1015 +               return NULL;
1016 +       }
1017 +       lck = kzalloc(i + pbytes, GFP_NOFS);
1018 +       if (lck == NULL)
1019 +               return NULL;
1020 +
1021 +       if (pbytes != 0)
1022 +               lck->lk_private = (void *)lck + i;
1023 +       lck->lk_mode = HTREE_LOCK_INVAL;
1024 +       lck->lk_depth = depth;
1025 +       INIT_LIST_HEAD(&lck->lk_blocked_list);
1026 +
1027 +       for (i = 0; i < depth; i++) {
1028 +               struct htree_lock_node *node = &lck->lk_nodes[i];
1029 +
1030 +               node->ln_mode = HTREE_LOCK_INVAL;
1031 +               INIT_LIST_HEAD(&node->ln_major_list);
1032 +               INIT_LIST_HEAD(&node->ln_minor_list);
1033 +               INIT_LIST_HEAD(&node->ln_alive_list);
1034 +               INIT_LIST_HEAD(&node->ln_blocked_list);
1035 +               INIT_LIST_HEAD(&node->ln_granted_list);
1036 +       }
1037 +
1038 +       return lck;
1039 +}
1040 +EXPORT_SYMBOL(htree_lock_alloc);
1041 +
1042 +/* free htree_lock node */
1043 +void
1044 +htree_lock_free(struct htree_lock *lck)
1045 +{
1046 +       BUG_ON(lck->lk_mode != HTREE_LOCK_INVAL);
1047 +       kfree(lck);
1048 +}
1049 +EXPORT_SYMBOL(htree_lock_free);
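
For readers porting this file, a minimal sketch (hypothetical constants, kernel
context assumed) of how the exported primitives above compose: a tree-wide lock
taken with htree_lock_try(), then a per-node child lock keyed by block number
with htree_node_lock_try(); htree_unlock() also drops any child locks still held:

    struct htree_lock_head *lhead = htree_lock_head_alloc(1 /* depth */,
                                                          16 /* hbits */, 0);
    struct htree_lock *lck = htree_lock_alloc(1 /* depth */, 0 /* pbytes */);

    if (lhead != NULL && lck != NULL) {
            /* CW on the whole tree: other CW/CR holders are compatible,
             * PR/PW/EX holders are not (see htree_lock_compat[] above) */
            htree_lock_try(lck, lhead, HTREE_LOCK_CW, 1 /* wait */);

            /* PW on one tree node, keyed by e.g. its block number;
             * blocks until granted because wait != 0 */
            if (htree_node_lock_try(lck, HTREE_LOCK_PW, 123 /* key */,
                                    0 /* dep */, 1 /* wait */, NULL))
                    htree_node_unlock(lck, 0, NULL);

            htree_unlock(lck);
    }
    if (lck != NULL)
            htree_lock_free(lck);
    if (lhead != NULL)
            htree_lock_head_free(lhead);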
1050 diff --git a/fs/ext4/namei.c b/fs/ext4/namei.c
1051 index 91525f7..9c57749 100644
1052 --- a/fs/ext4/namei.c
1053 +++ b/fs/ext4/namei.c
1054 @@ -55,6 +55,7 @@ struct buffer_head *ext4_append(handle_t *handle,
1055                                         ext4_lblk_t *block)
1056  {
1057         struct buffer_head *bh;
1058 +       struct ext4_inode_info *ei = EXT4_I(inode);
1059         int err;
1060  
1061         if (unlikely(EXT4_SB(inode->i_sb)->s_max_dir_size_kb &&
1062 @@ -62,15 +63,22 @@ struct buffer_head *ext4_append(handle_t *handle,
1063                       EXT4_SB(inode->i_sb)->s_max_dir_size_kb)))
1064                 return ERR_PTR(-ENOSPC);
1065  
1066 +       /* with parallel dir operations all appends
1067 +       * have to be serialized -bzzz */
1068 +       down(&ei->i_append_sem);
1069 +
1070         *block = inode->i_size >> inode->i_sb->s_blocksize_bits;
1071  
1072         bh = ext4_bread(handle, inode, *block, EXT4_GET_BLOCKS_CREATE);
1073 -       if (IS_ERR(bh))
1074 +       if (IS_ERR(bh)) {
1075 +               up(&ei->i_append_sem);
1076                 return bh;
1077 +       }
1078         inode->i_size += inode->i_sb->s_blocksize;
1079         EXT4_I(inode)->i_disksize = inode->i_size;
1080         BUFFER_TRACE(bh, "get_write_access");
1081         err = ext4_journal_get_write_access(handle, bh);
1082 +       up(&ei->i_append_sem);
1083         if (err) {
1084                 brelse(bh);
1085                 ext4_std_error(inode->i_sb, err);
1086 @@ -264,7 +272,8 @@ static unsigned dx_node_limit(struct inode *dir);
1087  static struct dx_frame *dx_probe(struct ext4_filename *fname,
1088                                  struct inode *dir,
1089                                  struct dx_hash_info *hinfo,
1090 -                                struct dx_frame *frame);
1091 +                                struct dx_frame *frame,
1092 +                                struct htree_lock *lck);
1093  static void dx_release(struct dx_frame *frames);
1094  static int dx_make_map(struct inode *dir, struct ext4_dir_entry_2 *de,
1095                        unsigned blocksize, struct dx_hash_info *hinfo,
1096 @@ -278,12 +287,13 @@ static void dx_insert_block(struct dx_frame *frame,
1097  static int ext4_htree_next_block(struct inode *dir, __u32 hash,
1098                                  struct dx_frame *frame,
1099                                  struct dx_frame *frames,
1100 -                                __u32 *start_hash);
1101 +                                __u32 *start_hash, struct htree_lock *lck);
1102  static struct buffer_head * ext4_dx_find_entry(struct inode *dir,
1103                 struct ext4_filename *fname,
1104 -               struct ext4_dir_entry_2 **res_dir);
1105 +               struct ext4_dir_entry_2 **res_dir, struct htree_lock *lck);
1106  static int ext4_dx_add_entry(handle_t *handle, struct ext4_filename *fname,
1107 -                            struct inode *dir, struct inode *inode);
1108 +                            struct inode *dir, struct inode *inode,
1109 +                            struct htree_lock *lck);
1110  
1111  /* checksumming functions */
1112  void ext4_initialize_dirent_tail(struct buffer_head *bh,
1113 @@ -748,6 +758,227 @@ struct stats dx_show_entries(struct dx_hash_info *hinfo, struct inode *dir,
1114  }
1115  #endif /* DX_DEBUG */
1116  
1117 +/* private data for htree_lock */
1118 +struct ext4_dir_lock_data {
1119 +       unsigned                ld_flags;  /* bits-map for lock types */
1120 +       unsigned                ld_count;  /* # entries of the last DX block */
1121 +       struct dx_entry         ld_at_entry; /* copy of leaf dx_entry */
1122 +       struct dx_entry         *ld_at;    /* position of leaf dx_entry */
1123 +};
1124 +
1125 +#define ext4_htree_lock_data(l)        ((struct ext4_dir_lock_data *)(l)->lk_private)
1126 +#define ext4_find_entry(dir, name, dirent, inline) \
1127 +                       ext4_find_entry_locked(dir, name, dirent, inline, NULL)
1128 +#define ext4_add_entry(handle, dentry, inode) \
1129 +                       ext4_add_entry_locked(handle, dentry, inode, NULL)
1130 +
1131 +/* NB: ext4_lblk_t is 32 bits so we use high bits to identify invalid blk */
1132 +#define EXT4_HTREE_NODE_CHANGED        (0xcafeULL << 32)
1133 +
1134 +static void ext4_htree_event_cb(void *target, void *event)
1135 +{
1136 +       u64 *block = (u64 *)target;
1137 +
1138 +       if (*block == dx_get_block((struct dx_entry *)event))
1139 +               *block = EXT4_HTREE_NODE_CHANGED;
1140 +}
1141 +
1142 +struct htree_lock_head *ext4_htree_lock_head_alloc(unsigned hbits)
1143 +{
1144 +       struct htree_lock_head *lhead;
1145 +
1146 +       lhead = htree_lock_head_alloc(EXT4_LK_MAX, hbits, 0);
1147 +       if (lhead != NULL) {
1148 +               htree_lock_event_attach(lhead, EXT4_LK_SPIN, HTREE_EVENT_WR,
1149 +                                       ext4_htree_event_cb);
1150 +       }
1151 +       return lhead;
1152 +}
1153 +EXPORT_SYMBOL(ext4_htree_lock_head_alloc);
1154 +
1155 +struct htree_lock *ext4_htree_lock_alloc(void)
1156 +{
1157 +       return htree_lock_alloc(EXT4_LK_MAX,
1158 +                               sizeof(struct ext4_dir_lock_data));
1159 +}
1160 +EXPORT_SYMBOL(ext4_htree_lock_alloc);
1161 +
1162 +static htree_lock_mode_t ext4_htree_mode(unsigned flags)
1163 +{
1164 +       switch (flags) {
1165 +       default: /* 0 or unknown flags require EX lock */
1166 +               return HTREE_LOCK_EX;
1167 +       case EXT4_HLOCK_READDIR:
1168 +               return HTREE_LOCK_PR;
1169 +       case EXT4_HLOCK_LOOKUP:
1170 +               return HTREE_LOCK_CR;
1171 +       case EXT4_HLOCK_DEL:
1172 +       case EXT4_HLOCK_ADD:
1173 +               return HTREE_LOCK_CW;
1174 +       }
1175 +}
1176 +
1177 +/* return PR for read-only operations, otherwise return EX */
1178 +static inline htree_lock_mode_t ext4_htree_safe_mode(unsigned flags)
1179 +{
1180 +       int writer = (flags & EXT4_LB_DE) == EXT4_LB_DE;
1181 +
1182 +       /* 0 requires EX lock */
1183 +       return (flags == 0 || writer) ? HTREE_LOCK_EX : HTREE_LOCK_PR;
1184 +}
1185 +
1186 +static int ext4_htree_safe_locked(struct htree_lock *lck)
1187 +{
1188 +       int writer;
1189 +
1190 +       if (lck == NULL || lck->lk_mode == HTREE_LOCK_EX)
1191 +               return 1;
1192 +
1193 +       writer = (ext4_htree_lock_data(lck)->ld_flags & EXT4_LB_DE) ==
1194 +                EXT4_LB_DE;
1195 +       if (writer) /* all readers & writers are excluded? */
1196 +               return lck->lk_mode == HTREE_LOCK_EX;
1197 +
1198 +       /* all writers are excluded? */
1199 +       return lck->lk_mode == HTREE_LOCK_PR ||
1200 +              lck->lk_mode == HTREE_LOCK_PW ||
1201 +              lck->lk_mode == HTREE_LOCK_EX;
1202 +}
1203 +
1204 +/* relock htree_lock with EX mode if it's a change operation, otherwise
1205 + * relock it with PR mode. It's a noop if PDO is disabled. */
1206 +static void ext4_htree_safe_relock(struct htree_lock *lck)
1207 +{
1208 +       if (!ext4_htree_safe_locked(lck)) {
1209 +               unsigned flags = ext4_htree_lock_data(lck)->ld_flags;
1210 +
1211 +               htree_change_lock(lck, ext4_htree_safe_mode(flags));
1212 +       }
1213 +}
1214 +
1215 +void ext4_htree_lock(struct htree_lock *lck, struct htree_lock_head *lhead,
1216 +                    struct inode *dir, unsigned flags)
1217 +{
1218 +       htree_lock_mode_t mode = is_dx(dir) ? ext4_htree_mode(flags) :
1219 +                                             ext4_htree_safe_mode(flags);
1220 +
1221 +       ext4_htree_lock_data(lck)->ld_flags = flags;
1222 +       htree_lock(lck, lhead, mode);
1223 +       if (!is_dx(dir))
1224 +               ext4_htree_safe_relock(lck); /* make sure it's safe locked */
1225 +}
1226 +EXPORT_SYMBOL(ext4_htree_lock);
1227 +
1228 +static int ext4_htree_node_lock(struct htree_lock *lck, struct dx_entry *at,
1229 +                               unsigned lmask, int wait, void *ev)
1230 +{
1231 +       u32     key = (at == NULL) ? 0 : dx_get_block(at);
1232 +       u32     mode;
1233 +
1234 +       /* NOOP if htree is well protected or caller doesn't require the lock */
1235 +       if (ext4_htree_safe_locked(lck) ||
1236 +          !(ext4_htree_lock_data(lck)->ld_flags & lmask))
1237 +               return 1;
1238 +
1239 +       mode = (ext4_htree_lock_data(lck)->ld_flags & lmask) == lmask ?
1240 +               HTREE_LOCK_PW : HTREE_LOCK_PR;
1241 +       while (1) {
1242 +               if (htree_node_lock_try(lck, mode, key, ffz(~lmask), wait, ev))
1243 +                       return 1;
1244 +               if (!(lmask & EXT4_LB_SPIN)) /* not a spinlock */
1245 +                       return 0;
1246 +               cpu_relax(); /* spin until granted */
1247 +       }
1248 +}
1249 +
1250 +static int ext4_htree_node_locked(struct htree_lock *lck, unsigned lmask)
1251 +{
1252 +       return ext4_htree_safe_locked(lck) ||
1253 +              htree_node_is_granted(lck, ffz(~lmask));
1254 +}
1255 +
1256 +static void ext4_htree_node_unlock(struct htree_lock *lck,
1257 +                                  unsigned lmask, void *buf)
1258 +{
1259 +       /* NB: it's safe to call multiple times, even if it's not locked */
1260 +       if (!ext4_htree_safe_locked(lck) &&
1261 +            htree_node_is_granted(lck, ffz(~lmask)))
1262 +               htree_node_unlock(lck, ffz(~lmask), buf);
1263 +}
1264 +
1265 +#define ext4_htree_dx_lock(lck, key)           \
1266 +       ext4_htree_node_lock(lck, key, EXT4_LB_DX, 1, NULL)
1267 +#define ext4_htree_dx_lock_try(lck, key)       \
1268 +       ext4_htree_node_lock(lck, key, EXT4_LB_DX, 0, NULL)
1269 +#define ext4_htree_dx_unlock(lck)              \
1270 +       ext4_htree_node_unlock(lck, EXT4_LB_DX, NULL)
1271 +#define ext4_htree_dx_locked(lck)              \
1272 +       ext4_htree_node_locked(lck, EXT4_LB_DX)
1273 +
1274 +static void ext4_htree_dx_need_lock(struct htree_lock *lck)
1275 +{
1276 +       struct ext4_dir_lock_data *ld;
1277 +
1278 +       if (ext4_htree_safe_locked(lck))
1279 +               return;
1280 +
1281 +       ld = ext4_htree_lock_data(lck);
1282 +       switch (ld->ld_flags) {
1283 +       default:
1284 +               return;
1285 +       case EXT4_HLOCK_LOOKUP:
1286 +               ld->ld_flags = EXT4_HLOCK_LOOKUP_SAFE;
1287 +               return;
1288 +       case EXT4_HLOCK_DEL:
1289 +               ld->ld_flags = EXT4_HLOCK_DEL_SAFE;
1290 +               return;
1291 +       case EXT4_HLOCK_ADD:
1292 +               ld->ld_flags = EXT4_HLOCK_SPLIT;
1293 +               return;
1294 +       }
1295 +}
1296 +
1297 +#define ext4_htree_de_lock(lck, key)           \
1298 +       ext4_htree_node_lock(lck, key, EXT4_LB_DE, 1, NULL)
1299 +#define ext4_htree_de_unlock(lck)              \
1300 +       ext4_htree_node_unlock(lck, EXT4_LB_DE, NULL)
1301 +
1302 +#define ext4_htree_spin_lock(lck, key, event)  \
1303 +       ext4_htree_node_lock(lck, key, EXT4_LB_SPIN, 0, event)
1304 +#define ext4_htree_spin_unlock(lck)            \
1305 +       ext4_htree_node_unlock(lck, EXT4_LB_SPIN, NULL)
1306 +#define ext4_htree_spin_unlock_listen(lck, p)  \
1307 +       ext4_htree_node_unlock(lck, EXT4_LB_SPIN, p)
1308 +
1309 +static void ext4_htree_spin_stop_listen(struct htree_lock *lck)
1310 +{
1311 +       if (!ext4_htree_safe_locked(lck) &&
1312 +           htree_node_is_listening(lck, ffz(~EXT4_LB_SPIN)))
1313 +               htree_node_stop_listen(lck, ffz(~EXT4_LB_SPIN));
1314 +}
1315 +
1316 +enum {
1317 +       DX_HASH_COL_IGNORE,     /* ignore collision while probing frames */
1318 +       DX_HASH_COL_YES,        /* there is collision and it does matter */
1319 +       DX_HASH_COL_NO,         /* there is no collision */
1320 +};
1321 +
1322 +static int dx_probe_hash_collision(struct htree_lock *lck,
1323 +                                  struct dx_entry *entries,
1324 +                                  struct dx_entry *at, u32 hash)
1325 +{
1326 +       if (!(lck && ext4_htree_lock_data(lck)->ld_flags & EXT4_LB_EXACT)) {
1327 +               return DX_HASH_COL_IGNORE; /* don't care about collision */
1328 +
1329 +       } else if (at == entries + dx_get_count(entries) - 1) {
1330 +               return DX_HASH_COL_IGNORE; /* not in any leaf of this DX */
1331 +
1332 +       } else { /* hash collision? */
1333 +               return ((dx_get_hash(at + 1) & ~1) == hash) ?
1334 +                       DX_HASH_COL_YES : DX_HASH_COL_NO;
1335 +       }
1336 +}
1337 +
1338  /*
1339   * Probe for a directory leaf block to search.
1340   *
1341 @@ -759,10 +990,11 @@ struct stats dx_show_entries(struct dx_hash_info *hinfo, struct inode *dir,
1342   */
1343  static struct dx_frame *
1344  dx_probe(struct ext4_filename *fname, struct inode *dir,
1345 -        struct dx_hash_info *hinfo, struct dx_frame *frame_in)
1346 +        struct dx_hash_info *hinfo, struct dx_frame *frame_in,
1347 +        struct htree_lock *lck)
1348  {
1349         unsigned count, indirect;
1350 -       struct dx_entry *at, *entries, *p, *q, *m;
1351 +       struct dx_entry *at, *entries, *p, *q, *m, *dx = NULL;
1352         struct dx_root_info *info;
1353         struct dx_frame *frame = frame_in;
1354         struct dx_frame *ret_err = ERR_PTR(ERR_BAD_DX_DIR);
1355 @@ -824,8 +1056,15 @@ dx_probe(struct ext4_filename *fname, struct inode *dir,
1356  
1357         dxtrace(printk("Look up %x", hash));
1358         while (1) {
1359 +               if (indirect == 0) { /* the last index level */
1360 +                       /* NB: ext4_htree_dx_lock() could be a no-op if the
1361 +                        * DX-lock flag is not set for the current operation */
1362 +                       ext4_htree_dx_lock(lck, dx);
1363 +                       ext4_htree_spin_lock(lck, dx, NULL);
1364 +               }
1365                 count = dx_get_count(entries);
1366 -               if (!count || count > dx_get_limit(entries)) {
1367 +               if (count == 0 || count > dx_get_limit(entries)) {
1368 +                       ext4_htree_spin_unlock(lck); /* release spin */
1369                         ext4_warning_inode(dir,
1370                                            "dx entry: count %u beyond limit %u",
1371                                            count, dx_get_limit(entries));
1372 @@ -864,8 +1103,70 @@ dx_probe(struct ext4_filename *fname, struct inode *dir,
1373                                dx_get_block(at)));
1374                 frame->entries = entries;
1375                 frame->at = at;
1376 -               if (!indirect--)
1377 +
1378 +               if (indirect == 0) { /* the last index level */
1379 +                       struct ext4_dir_lock_data *ld;
1380 +                       u64 myblock;
1381 +
1382 +                       /* By default we only lock DE-block, however, we will
1383 +                        * also lock the last level DX-block if:
1384 +                        * a) there is hash collision
1385 +                        *    we will set DX-lock flag (a few lines below)
1386 +                        *    and retry to lock the DX-block
1387 +                        *    see detail in dx_probe_hash_collision()
1388 +                        * b) it's a retry from splitting
1389 +                        *    we need to lock the last level DX-block so nobody
1390 +                        *    else can split any leaf blocks under the same
1391 +                        *    DX-block, see detail in ext4_dx_add_entry()
1392 +                        */
1393 +                       if (ext4_htree_dx_locked(lck)) {
1394 +                               /* DX-block is locked, just lock DE-block
1395 +                                * and return */
1396 +                               ext4_htree_spin_unlock(lck);
1397 +                               if (!ext4_htree_safe_locked(lck))
1398 +                                       ext4_htree_de_lock(lck, frame->at);
1399 +                               return frame;
1400 +                       }
1401 +                       /* it's pdirop and no DX lock */
1402 +                       if (dx_probe_hash_collision(lck, entries, at, hash) ==
1403 +                           DX_HASH_COL_YES) {
1404 +                               /* found hash collision, set DX-lock flag
1405 +                                * and retry to obtain DX-lock */
1406 +                               ext4_htree_spin_unlock(lck);
1407 +                               ext4_htree_dx_need_lock(lck);
1408 +                               continue;
1409 +                       }
1410 +                       ld = ext4_htree_lock_data(lck);
1411 +                       /* because I don't lock DX, @at can't be trusted
1412 +                        * after I release the spinlock, so I have to save it */
1413 +                       ld->ld_at = at;
1414 +                       ld->ld_at_entry = *at;
1415 +                       ld->ld_count = dx_get_count(entries);
1416 +
1417 +                       frame->at = &ld->ld_at_entry;
1418 +                       myblock = dx_get_block(at);
1419 +
1420 +                       /* NB: mind the locking order below */
1421 +                       ext4_htree_spin_unlock_listen(lck, &myblock);
1422 +                       /* another thread can split this DE-block because:
1423 +                        * a) I don't hold the lock for the DE-block yet
1424 +                        * b) I released the spinlock on the DX-block
1425 +                        * if that happens I can detect it by listening for a
1426 +                        * split event on this DE-block */
1427 +                       ext4_htree_de_lock(lck, frame->at);
1428 +                       ext4_htree_spin_stop_listen(lck);
1429 +
1430 +                       if (myblock == EXT4_HTREE_NODE_CHANGED) {
1431 +                               /* someone split this DE-block before
1432 +                                * I locked it, I need to retry and lock the
1433 +                                * valid DE-block */
1434 +                               ext4_htree_de_unlock(lck);
1435 +                               continue;
1436 +                       }
1437                         return frame;
1438 +               }
1439 +               dx = at;
1440 +               indirect--;
1441                 frame++;
1442                 frame->bh = ext4_read_dirblock(dir, dx_get_block(at), INDEX);
1443                 if (IS_ERR(frame->bh)) {
1444 @@ -934,7 +1235,7 @@ static void dx_release(struct dx_frame *frames)
1445  static int ext4_htree_next_block(struct inode *dir, __u32 hash,
1446                                  struct dx_frame *frame,
1447                                  struct dx_frame *frames,
1448 -                                __u32 *start_hash)
1449 +                                __u32 *start_hash, struct htree_lock *lck)
1450  {
1451         struct dx_frame *p;
1452         struct buffer_head *bh;
1453 @@ -949,12 +1250,22 @@ static int ext4_htree_next_block(struct inode *dir, __u32 hash,
1454          * this loop, num_frames indicates the number of interior
1455          * nodes need to be read.
1456          */
1457 +       ext4_htree_de_unlock(lck);
1458         while (1) {
1459 -               if (++(p->at) < p->entries + dx_get_count(p->entries))
1460 -                       break;
1461 +               if (num_frames > 0 || ext4_htree_dx_locked(lck)) {
1462 +                       /* num_frames > 0:
1463 +                        *   this is a DX block
1464 +                        * ext4_htree_dx_locked:
1465 +                        *   frame->at is a reliable pointer returned by dx_probe,
1466 +                        *   otherwise dx_probe already knew there was no collision */
1467 +                       if (++(p->at) < p->entries + dx_get_count(p->entries))
1468 +                               break;
1469 +               }
1470                 if (p == frames)
1471                         return 0;
1472                 num_frames++;
1473 +               if (num_frames == 1)
1474 +                       ext4_htree_dx_unlock(lck);
1475                 p--;
1476         }
1477  
1478 @@ -977,6 +1288,13 @@ static int ext4_htree_next_block(struct inode *dir, __u32 hash,
1479          * block so no check is necessary
1480          */
1481         while (num_frames--) {
1482 +               if (num_frames == 0) {
1483 +                       /* this is not always necessary, we just don't want to
1484 +                        * detect a hash collision again */
1485 +                       ext4_htree_dx_need_lock(lck);
1486 +                       ext4_htree_dx_lock(lck, p->at);
1487 +               }
1488 +
1489                 bh = ext4_read_dirblock(dir, dx_get_block(p->at), INDEX);
1490                 if (IS_ERR(bh))
1491                         return PTR_ERR(bh);
1492 @@ -985,6 +1303,7 @@ static int ext4_htree_next_block(struct inode *dir, __u32 hash,
1493                 p->bh = bh;
1494                 p->at = p->entries = ((struct dx_node *) bh->b_data)->entries;
1495         }
1496 +       ext4_htree_de_lock(lck, p->at);
1497         return 1;
1498  }
1499  
1500 @@ -1132,10 +1451,10 @@ int ext4_htree_fill_tree(struct file *dir_file, __u32 start_hash,
1501         }
1502         hinfo.hash = start_hash;
1503         hinfo.minor_hash = 0;
1504 -       frame = dx_probe(NULL, dir, &hinfo, frames);
1505 +       /* assume it's PR locked */
1506 +       frame = dx_probe(NULL, dir, &hinfo, frames, NULL);
1507         if (IS_ERR(frame))
1508                 return PTR_ERR(frame);
1509 -
1510         /* Add '.' and '..' from the htree header */
1511         if (!start_hash && !start_minor_hash) {
1512                 de = (struct ext4_dir_entry_2 *) frames[0].bh->b_data;
1513 @@ -1175,7 +1494,7 @@ int ext4_htree_fill_tree(struct file *dir_file, __u32 start_hash,
1514                 count += ret;
1515                 hashval = ~0;
1516                 ret = ext4_htree_next_block(dir, HASH_NB_ALWAYS,
1517 -                                           frame, frames, &hashval);
1518 +                                           frame, frames, &hashval, NULL);
1519                 *next_hash = hashval;
1520                 if (ret < 0) {
1521                         err = ret;
1522 @@ -1451,7 +1770,7 @@ static int is_dx_internal_node(struct inode *dir, ext4_lblk_t block,
1523  static struct buffer_head *__ext4_find_entry(struct inode *dir,
1524                                              struct ext4_filename *fname,
1525                                              struct ext4_dir_entry_2 **res_dir,
1526 -                                            int *inlined)
1527 +                                            int *inlined, struct htree_lock *lck)
1528  {
1529         struct super_block *sb;
1530         struct buffer_head *bh_use[NAMEI_RA_SIZE];
1531 @@ -1493,7 +1812,7 @@ static struct buffer_head *__ext4_find_entry(struct inode *dir,
1532                 goto restart;
1533         }
1534         if (is_dx(dir)) {
1535 -               ret = ext4_dx_find_entry(dir, fname, res_dir);
1536 +               ret = ext4_dx_find_entry(dir, fname, res_dir, lck);
1537                 /*
1538                  * On success, or if the error was file not found,
1539                  * return.  Otherwise, fall back to doing a search the
1540 @@ -1503,6 +1822,7 @@ static struct buffer_head *__ext4_find_entry(struct inode *dir,
1541                         goto cleanup_and_exit;
1542                 dxtrace(printk(KERN_DEBUG "ext4_find_entry: dx failed, "
1543                                "falling back\n"));
1544 +               ext4_htree_safe_relock(lck);
1545                 ret = NULL;
1546         }
1547         nblocks = dir->i_size >> EXT4_BLOCK_SIZE_BITS(sb);
1548 @@ -1590,10 +1910,10 @@ cleanup_and_exit:
1549         return ret;
1550  }
1551  
1552 -static struct buffer_head *ext4_find_entry(struct inode *dir,
1553 +struct buffer_head *ext4_find_entry_locked(struct inode *dir,
1554                                            const struct qstr *d_name,
1555                                            struct ext4_dir_entry_2 **res_dir,
1556 -                                          int *inlined)
1557 +                                          int *inlined, struct htree_lock *lck)
1558  {
1559         int err;
1560         struct ext4_filename fname;
1561 @@ -1605,12 +1925,14 @@ static struct buffer_head *ext4_find_entry(struct inode *dir,
1562         if (err)
1563                 return ERR_PTR(err);
1564  
1565 -       bh = __ext4_find_entry(dir, &fname, res_dir, inlined);
1566 +       bh = __ext4_find_entry(dir, &fname, res_dir, inlined, lck);
1567  
1568         ext4_fname_free_filename(&fname);
1569         return bh;
1570  }
1571  
1572 +EXPORT_SYMBOL(ext4_find_entry_locked);
1573 +
1574  static struct buffer_head *ext4_lookup_entry(struct inode *dir,
1575                                              struct dentry *dentry,
1576                                              struct ext4_dir_entry_2 **res_dir)
1577 @@ -1625,7 +1947,7 @@ static struct buffer_head *ext4_lookup_entry(struct inode *dir,
1578         if (err)
1579                 return ERR_PTR(err);
1580  
1581 -       bh = __ext4_find_entry(dir, &fname, res_dir, NULL);
1582 +       bh = __ext4_find_entry(dir, &fname, res_dir, NULL, NULL);
1583  
1584         ext4_fname_free_filename(&fname);
1585         return bh;
1586 @@ -1633,7 +1955,8 @@ static struct buffer_head *ext4_lookup_entry(struct inode *dir,
1587  
1588  static struct buffer_head * ext4_dx_find_entry(struct inode *dir,
1589                         struct ext4_filename *fname,
1590 -                       struct ext4_dir_entry_2 **res_dir)
1591 +                       struct ext4_dir_entry_2 **res_dir,
1592 +                       struct htree_lock *lck)
1593  {
1594         struct super_block * sb = dir->i_sb;
1595         struct dx_frame frames[EXT4_HTREE_LEVEL], *frame;
1596 @@ -1644,7 +1967,7 @@ static struct buffer_head * ext4_dx_find_entry(struct inode *dir,
1597  #ifdef CONFIG_FS_ENCRYPTION
1598         *res_dir = NULL;
1599  #endif
1600 -       frame = dx_probe(fname, dir, NULL, frames);
1601 +       frame = dx_probe(fname, dir, NULL, frames, lck);
1602         if (IS_ERR(frame))
1603                 return (struct buffer_head *) frame;
1604         do {
1605 @@ -1666,7 +1989,7 @@ static struct buffer_head * ext4_dx_find_entry(struct inode *dir,
1606  
1607                 /* Check to see if we should continue to search */
1608                 retval = ext4_htree_next_block(dir, fname->hinfo.hash, frame,
1609 -                                              frames, NULL);
1610 +                                              frames, NULL, lck);
1611                 if (retval < 0) {
1612                         ext4_warning_inode(dir,
1613                                 "error %d reading directory index block",
1614 @@ -1846,8 +2169,9 @@ static struct ext4_dir_entry_2* dx_pack_dirents(char *base, unsigned blocksize)
1615   * Returns pointer to de in block into which the new entry will be inserted.
1616   */
1617  static struct ext4_dir_entry_2 *do_split(handle_t *handle, struct inode *dir,
1618 -                       struct buffer_head **bh,struct dx_frame *frame,
1619 -                       struct dx_hash_info *hinfo)
1620 +                       struct buffer_head **bh, struct dx_frame *frames,
1621 +                       struct dx_frame *frame, struct dx_hash_info *hinfo,
1622 +                       struct htree_lock *lck)
1623  {
1624         unsigned blocksize = dir->i_sb->s_blocksize;
1625         unsigned count, continued;
1626 @@ -1908,8 +2232,14 @@ static struct ext4_dir_entry_2 *do_split(handle_t *handle, struct inode *dir,
1627                                         hash2, split, count-split));
1628  
1629         /* Fancy dance to stay within two buffers */
1630 -       de2 = dx_move_dirents(data1, data2, map + split, count - split,
1631 -                             blocksize);
1632 +       if (hinfo->hash < hash2) {
1633 +               de2 = dx_move_dirents(data1, data2, map + split,
1634 +                                     count - split, blocksize);
1635 +       } else {
1636 +               /* make sure we will add the entry to the same block that
1637 +                * we have already locked */
1638 +               de2 = dx_move_dirents(data1, data2, map, split, blocksize);
1639 +       }
1640         de = dx_pack_dirents(data1, blocksize);
1641         de->rec_len = ext4_rec_len_to_disk(data1 + (blocksize - csum_size) -
1642                                            (char *) de,
1643 @@ -1927,12 +2257,21 @@ static struct ext4_dir_entry_2 *do_split(handle_t *handle, struct inode *dir,
1644         dxtrace(dx_show_leaf(dir, hinfo, (struct ext4_dir_entry_2 *) data2,
1645                         blocksize, 1));
1646  
1647 -       /* Which block gets the new entry? */
1648 -       if (hinfo->hash >= hash2) {
1649 -               swap(*bh, bh2);
1650 -               de = de2;
1651 +       ext4_htree_spin_lock(lck, frame > frames ? (frame - 1)->at : NULL,
1652 +                            frame->at); /* notify block is being split */
1653 +       if (hinfo->hash < hash2) {
1654 +               dx_insert_block(frame, hash2 + continued, newblock);
1655 +
1656 +       } else {
1657 +               /* switch block number */
1658 +               dx_insert_block(frame, hash2 + continued,
1659 +                               dx_get_block(frame->at));
1660 +               dx_set_block(frame->at, newblock);
1661 +               (frame->at)++;
1662         }
1663 -       dx_insert_block(frame, hash2 + continued, newblock);
1664 +       ext4_htree_spin_unlock(lck);
1665 +       ext4_htree_dx_unlock(lck);
1666 +
1667         err = ext4_handle_dirty_dirblock(handle, dir, bh2);
1668         if (err)
1669                 goto journal_error;
1670 @@ -2202,7 +2541,7 @@ static int make_indexed_dir(handle_t *handle, struct ext4_filename *fname,
1671         if (retval)
1672                 goto out_frames;        
1673  
1674 -       de = do_split(handle,dir, &bh2, frame, &fname->hinfo);
1675 +       de = do_split(handle, dir, &bh2, frames, frame, &fname->hinfo, NULL);
1676         if (IS_ERR(de)) {
1677                 retval = PTR_ERR(de);
1678                 goto out_frames;
1679 @@ -2312,8 +2651,8 @@ out:
1680   * may not sleep between calling this and putting something into
1681   * the entry, as someone else might have used it while you slept.
1682   */
1683 -static int ext4_add_entry(handle_t *handle, struct dentry *dentry,
1684 -                         struct inode *inode)
1685 +int ext4_add_entry_locked(handle_t *handle, struct dentry *dentry,
1686 +                         struct inode *inode, struct htree_lock *lck)
1687  {
1688         struct inode *dir = d_inode(dentry->d_parent);
1689         struct buffer_head *bh = NULL;
1690 @@ -2361,9 +2700,10 @@ static int ext4_add_entry(handle_t *handle, struct dentry *dentry,
1691                 if (dentry->d_name.len == 2 &&
1692                     memcmp(dentry->d_name.name, "..", 2) == 0)
1693                         return ext4_update_dotdot(handle, dentry, inode);
1694 -               retval = ext4_dx_add_entry(handle, &fname, dir, inode);
1695 +               retval = ext4_dx_add_entry(handle, &fname, dir, inode, lck);
1696                 if (!retval || (retval != ERR_BAD_DX_DIR))
1697                         goto out;
1698 +               ext4_htree_safe_relock(lck);
1699                 ext4_clear_inode_flag(dir, EXT4_INODE_INDEX);
1700                 dx_fallback++;
1701                 ext4_mark_inode_dirty(handle, dir);
1702 @@ -2417,12 +2757,14 @@ out:
1703                 ext4_set_inode_state(inode, EXT4_STATE_NEWENTRY);
1704         return retval;
1705  }
1706 +EXPORT_SYMBOL(ext4_add_entry_locked);
1707  
1708  /*
1709   * Returns 0 for success, or a negative error value
1710   */
1711  static int ext4_dx_add_entry(handle_t *handle, struct ext4_filename *fname,
1712 -                            struct inode *dir, struct inode *inode)
1713 +                            struct inode *dir, struct inode *inode,
1714 +                            struct htree_lock *lck)
1715  {
1716         struct dx_frame frames[EXT4_HTREE_LEVEL], *frame;
1717         struct dx_entry *entries, *at;
1718 @@ -2434,7 +2776,7 @@ static int ext4_dx_add_entry(handle_t *handle, struct ext4_filename *fname,
1719  
1720  again:
1721         restart = 0;
1722 -       frame = dx_probe(fname, dir, NULL, frames);
1723 +       frame = dx_probe(fname, dir, NULL, frames, lck);
1724         if (IS_ERR(frame))
1725                 return PTR_ERR(frame);
1726         entries = frame->entries;
1727 @@ -2469,6 +2811,12 @@ again:
1728                 struct dx_node *node2;
1729                 struct buffer_head *bh2;
1730  
1731 +               if (!ext4_htree_safe_locked(lck)) { /* retry with EX lock */
1732 +                       ext4_htree_safe_relock(lck);
1733 +                       restart = 1;
1734 +                       goto cleanup;
1735 +               }
1736 +
1737                 while (frame > frames) {
1738                         if (dx_get_count((frame - 1)->entries) <
1739                             dx_get_limit((frame - 1)->entries)) {
1740 @@ -2571,8 +2919,32 @@ again:
1741                         restart = 1;
1742                         goto journal_error;
1743                 }
1744 +       } else if (!ext4_htree_dx_locked(lck)) {
1745 +               struct ext4_dir_lock_data *ld = ext4_htree_lock_data(lck);
1746 +
1747 +               /* not well protected, we need the DX lock */
1748 +               ext4_htree_dx_need_lock(lck);
1749 +               at = frame > frames ? (frame - 1)->at : NULL;
1750 +
1751 +               /* NB: no risk of deadlock because it's just a try.
1752 +                *
1753 +                * NB: we check ld_count twice, the first time before
1754 +                * taking the DX lock, the second time after holding it.
1755 +                *
1756 +                * NB: we never free directory blocks so far, which
1757 +                * means the value returned by dx_get_count() should equal
1758 +                * ld->ld_count if nobody split any DE-block under @at,
1759 +                * and ld->ld_at still points to a valid dx_entry. */
1760 +               if ((ld->ld_count != dx_get_count(entries)) ||
1761 +                   !ext4_htree_dx_lock_try(lck, at) ||
1762 +                   (ld->ld_count != dx_get_count(entries))) {
1763 +                       restart = 1;
1764 +                       goto cleanup;
1765 +               }
1766 +               /* OK, I've got DX lock and nothing changed */
1767 +               frame->at = ld->ld_at;
1768         }
1769 -       de = do_split(handle, dir, &bh, frame, &fname->hinfo);
1770 +       de = do_split(handle, dir, &bh, frames, frame, &fname->hinfo, lck);
1771         if (IS_ERR(de)) {
1772                 err = PTR_ERR(de);
1773                 goto cleanup;
1774 @@ -2583,6 +2955,8 @@ again:
1775  journal_error:
1776         ext4_std_error(dir->i_sb, err); /* this is a no-op if err == 0 */
1777  cleanup:
1778 +       ext4_htree_dx_unlock(lck);
1779 +       ext4_htree_de_unlock(lck);
1780         brelse(bh);
1781         dx_release(frames);
1782         /* @restart is true means htree-path has been changed, we need to
1783 diff --git a/fs/ext4/super.c b/fs/ext4/super.c
1784 index 0fcc33b..3cc0306 100644
1785 --- a/fs/ext4/super.c
1786 +++ b/fs/ext4/super.c
1787 @@ -1076,6 +1076,7 @@ static struct inode *ext4_alloc_inode(struct super_block *sb)
1788  
1789         inode_set_iversion(&ei->vfs_inode, 1);
1790         spin_lock_init(&ei->i_raw_lock);
1791 +       sema_init(&ei->i_append_sem, 1);
1792         INIT_LIST_HEAD(&ei->i_prealloc_list);
1793         spin_lock_init(&ei->i_prealloc_lock);
1794         ext4_es_init_tree(&ei->i_es_tree);
1795 diff --git a/include/linux/htree_lock.h b/include/linux/htree_lock.h
1796 new file mode 100644
1797 index 0000000..9dc7788
1798 --- /dev/null
1799 +++ b/include/linux/htree_lock.h
1800 @@ -0,0 +1,187 @@
1801 +/*
1802 + * include/linux/htree_lock.h
1803 + *
1804 + * Copyright (c) 2011, 2012, Intel Corporation.
1805 + *
1806 + * Author: Liang Zhen <liang@whamcloud.com>
1807 + */
1808 +
1809 +/*
1810 + * htree lock
1811 + *
1812 + * htree_lock is an advanced lock; it supports five lock modes (the concept is
1813 + * taken from DLM) and it is a sleeping lock.
1814 + *
1815 + * The most common use case (see the sketch below this comment) is:
1816 + * - create a htree_lock_head for the data
1817 + * - each thread (contender) creates its own htree_lock
1818 + * - a contender calls htree_lock(lock_node, mode) to protect the data and
1819 + *   calls htree_unlock to release the lock
1820 + *
1821 + * There is also a more complex, advanced use-case: a user can take a
1822 + * PW/PR lock on a particular key, which is mostly used while the user holds a
1823 + * shared lock on the htree (CW, CR):
1824 + *
1825 + * htree_lock(lock_node, HTREE_LOCK_CR); lock the htree with CR
1826 + * htree_node_lock(lock_node, HTREE_LOCK_PR, key...); lock @key with PR
1827 + * ...
1828 + * htree_node_unlock(lock_node); unlock the key
1829 + *
1830 + * Another tip: we can have N levels of this kind of key, all we need to
1831 + * do is specify N levels while creating the htree_lock_head, then we can
1832 + * lock/unlock a specific level by:
1833 + * htree_node_lock(lock_node, mode1, key1, level1...);
1834 + * do something;
1835 + * htree_node_lock(lock_node, mode1, key2, level2...);
1836 + * do something;
1837 + * htree_node_unlock(lock_node, level2);
1838 + * htree_node_unlock(lock_node, level1);
1839 + *
1840 + * NB: for multi-level locking, be careful about the locking order to avoid deadlocks
1841 + */
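As a minimal sketch of the common use case described in the comment above, using only the API declared in this header (the example_* names, the key value, and the choice of CR/PR modes are illustrative, not part of the patch's callers):

#include <linux/errno.h>
#include <linux/htree_lock.h>

/* one lock head per shared resource (e.g. one per directory) */
static struct htree_lock_head *example_lhead;

static int example_init(void)
{
	/* one key level, default hash bits, no private bytes */
	example_lhead = htree_lock_head_alloc(1, HTREE_HBITS_DEF, 0);
	return example_lhead ? 0 : -ENOMEM;
}

/* each contending thread uses its own htree_lock handle */
static void example_read(u32 key)
{
	struct htree_lock *lck = htree_lock_alloc(1, 0);

	if (!lck)
		return;

	htree_lock(lck, example_lhead, HTREE_LOCK_CR);	/* shared lock on the whole tree */
	htree_node_lock(lck, HTREE_LOCK_PR, key, 0);	/* PR lock on @key at level 0 */
	/* ... read the data identified by @key ... */
	htree_node_unlock(lck, 0, NULL);		/* unlock the key */
	htree_unlock(lck);				/* unlock the tree */

	htree_lock_free(lck);
}

/* a writer would do the same with HTREE_LOCK_CW on the tree and
 * HTREE_LOCK_PW on the key it modifies */

static void example_fini(void)
{
	htree_lock_head_free(example_lhead);
}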
1842 +
1843 +#ifndef _LINUX_HTREE_LOCK_H
1844 +#define _LINUX_HTREE_LOCK_H
1845 +
1846 +#include <linux/list.h>
1847 +#include <linux/spinlock.h>
1848 +#include <linux/sched.h>
1849 +
1850 +/*
1851 + * Lock Modes
1852 + * more details can be found here:
1853 + * http://en.wikipedia.org/wiki/Distributed_lock_manager
1854 + */
1855 +typedef enum {
1856 +       HTREE_LOCK_EX   = 0, /* exclusive lock: incompatible with all others */
1857 +       HTREE_LOCK_PW,       /* protected write: allows only CR users */
1858 +       HTREE_LOCK_PR,       /* protected read: allows PR, CR users */
1859 +       HTREE_LOCK_CW,       /* concurrent write: allows CR, CW users */
1860 +       HTREE_LOCK_CR,       /* concurrent read: allows all but EX users */
1861 +       HTREE_LOCK_MAX,      /* number of lock modes */
1862 +} htree_lock_mode_t;
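Read as a compatibility relation, the per-mode comments above imply roughly the following symmetric table; this is only a sketch of the semantics, not necessarily the table used by the htree_lock implementation:

/* 1 = the two modes can be granted at the same time, 0 = they conflict */
static const int htree_mode_compat[HTREE_LOCK_MAX][HTREE_LOCK_MAX] = {
	/*          EX  PW  PR  CW  CR */
	/* EX */ {   0,  0,  0,  0,  0 },	/* exclusive: conflicts with everything */
	/* PW */ {   0,  0,  0,  0,  1 },	/* protected write: only CR may share */
	/* PR */ {   0,  0,  1,  0,  1 },	/* protected read: PR and CR may share */
	/* CW */ {   0,  0,  0,  1,  1 },	/* concurrent write: CW and CR may share */
	/* CR */ {   0,  1,  1,  1,  1 },	/* concurrent read: everything but EX */
};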
1863 +
1864 +#define HTREE_LOCK_NL          HTREE_LOCK_MAX
1865 +#define HTREE_LOCK_INVAL       0xdead10c
1866 +
1867 +enum {
1868 +       HTREE_HBITS_MIN         = 2,
1869 +       HTREE_HBITS_DEF         = 14,
1870 +       HTREE_HBITS_MAX         = 32,
1871 +};
1872 +
1873 +enum {
1874 +       HTREE_EVENT_DISABLE     = (0),
1875 +       HTREE_EVENT_RD          = (1 << HTREE_LOCK_PR),
1876 +       HTREE_EVENT_WR          = (1 << HTREE_LOCK_PW),
1877 +       HTREE_EVENT_RDWR        = (HTREE_EVENT_RD | HTREE_EVENT_WR),
1878 +};
1879 +
1880 +struct htree_lock;
1881 +
1882 +typedef void (*htree_event_cb_t)(void *target, void *event);
1883 +
1884 +struct htree_lock_child {
1885 +       struct list_head        lc_list;        /* granted list */
1886 +       htree_event_cb_t        lc_callback;    /* event callback */
1887 +       unsigned                lc_events;      /* event types */
1888 +};
1889 +
1890 +struct htree_lock_head {
1891 +       unsigned long           lh_lock;        /* bits lock */
1892 +       /* blocked lock list (htree_lock) */
1893 +       struct list_head        lh_blocked_list;
1894 +       /* # key levels */
1895 +       u16                     lh_depth;
1896 +       /* hash bits for key and limit number of locks */
1897 +       u16                     lh_hbits;
1898 +       /* counters for blocked locks */
1899 +       u16                     lh_nblocked[HTREE_LOCK_MAX];
1900 +       /* counters for granted locks */
1901 +       u16                     lh_ngranted[HTREE_LOCK_MAX];
1902 +       /* private data */
1903 +       void                    *lh_private;
1904 +       /* array of children locks */
1905 +       struct htree_lock_child lh_children[0];
1906 +};
1907 +
1908 +/* htree_lock_node is the child lock for a specific key (ln_major_key/ln_minor_key) */
1909 +struct htree_lock_node {
1910 +       htree_lock_mode_t       ln_mode;
1911 +       /* major hash key */
1912 +       u16                     ln_major_key;
1913 +       /* minor hash key */
1914 +       u16                     ln_minor_key;
1915 +       struct list_head        ln_major_list;
1916 +       struct list_head        ln_minor_list;
1917 +       /* alive list, all locks (granted, blocked, listening) are on it */
1918 +       struct list_head        ln_alive_list;
1919 +       /* blocked list */
1920 +       struct list_head        ln_blocked_list;
1921 +       /* granted list */
1922 +       struct list_head        ln_granted_list;
1923 +       void                    *ln_ev_target;
1924 +};
1925 +
1926 +struct htree_lock {
1927 +       struct task_struct      *lk_task;
1928 +       struct htree_lock_head  *lk_head;
1929 +       void                    *lk_private;
1930 +       unsigned                lk_depth;
1931 +       htree_lock_mode_t       lk_mode;
1932 +       struct list_head        lk_blocked_list;
1933 +       struct htree_lock_node  lk_nodes[0];
1934 +};
1935 +
1936 +/* create a lock head, which stands for a resource */
1937 +struct htree_lock_head *htree_lock_head_alloc(unsigned depth,
1938 +                                             unsigned hbits, unsigned priv);
1939 +/* free a lock head */
1940 +void htree_lock_head_free(struct htree_lock_head *lhead);
1941 +/* register event callback for child lock at level @depth */
1942 +void htree_lock_event_attach(struct htree_lock_head *lhead, unsigned depth,
1943 +                            unsigned events, htree_event_cb_t callback);
1944 +/* create a lock handle, which stands for a thread */
1945 +struct htree_lock *htree_lock_alloc(unsigned depth, unsigned pbytes);
1946 +/* free a lock handle */
1947 +void htree_lock_free(struct htree_lock *lck);
1948 +/* lock htree; when @wait is false, 0 is returned if the lock can't
1949 + * be granted immediately */
1950 +int htree_lock_try(struct htree_lock *lck, struct htree_lock_head *lhead,
1951 +                  htree_lock_mode_t mode, int wait);
1952 +/* unlock htree */
1953 +void htree_unlock(struct htree_lock *lck);
1954 +/* unlock and relock htree with @new_mode */
1955 +int htree_change_lock_try(struct htree_lock *lck,
1956 +                         htree_lock_mode_t new_mode, int wait);
1957 +void htree_change_mode(struct htree_lock *lck, htree_lock_mode_t mode);
1958 +/* acquire the child lock (key) of the htree at level @dep, @event will be sent
1959 + * to all listeners on this @key while the lock is being granted */
1960 +int htree_node_lock_try(struct htree_lock *lck, htree_lock_mode_t mode,
1961 +                       u32 key, unsigned dep, int wait, void *event);
1962 +/* release the child lock at level @dep, this lock will listen on its key
1963 + * if @event isn't NULL, event_cb will be called against @lck while granting
1964 + * any other lock at level @dep with the same key */
1965 +void htree_node_unlock(struct htree_lock *lck, unsigned dep, void *event);
1966 +/* stop listening on child lock at level @dep */
1967 +void htree_node_stop_listen(struct htree_lock *lck, unsigned dep);
1968 +/* for debug */
1969 +void htree_lock_stat_print(int depth);
1970 +void htree_lock_stat_reset(void);
1971 +
1972 +#define htree_lock(lck, lh, mode)      htree_lock_try(lck, lh, mode, 1)
1973 +#define htree_change_lock(lck, mode)   htree_change_lock_try(lck, mode, 1)
1974 +
1975 +#define htree_lock_mode(lck)           ((lck)->lk_mode)
1976 +
1977 +#define htree_node_lock(lck, mode, key, dep)   \
1978 +       htree_node_lock_try(lck, mode, key, dep, 1, NULL)
1979 +/* this is only safe in thread context of lock owner */
1980 +#define htree_node_is_granted(lck, dep)                \
1981 +       ((lck)->lk_nodes[dep].ln_mode != HTREE_LOCK_INVAL && \
1982 +        (lck)->lk_nodes[dep].ln_mode != HTREE_LOCK_NL)
1983 +/* this is only safe in thread context of lock owner */
1984 +#define htree_node_is_listening(lck, dep)      \
1985 +       ((lck)->lk_nodes[dep].ln_mode == HTREE_LOCK_NL)
1986 +
1987 +#endif
1988 -- 
1989 2.20.1
1990