ldiskfs/kernel_patches/patches/sles15sp1/ext4-pdirop.patch
1 Single directory performance is critical for HPC workloads. In a
2 typical use case an application creates a separate output file for
3 each node and task in a job. As nodes and tasks increase, hundreds
4 of thousands of files may be created in a single directory within
5 a short window of time.
6 Today, both filename lookups and file-system-modifying operations
7 (such as create and unlink) are protected by a single lock for
8 an entire ldiskfs directory. The PDO project removes this
9 bottleneck by introducing a parallel locking mechanism for entire
10 ldiskfs directories. This work enables multiple application
11 threads to perform lookups, creates and unlinks in parallel.
12
13 This patch contains:
14  - pdirops support for ldiskfs
15  - integration with osd-ldiskfs
16
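As a rough illustration of how a caller such as Lustre's osd-ldiskfs is expected
to drive the interfaces added below, here is a minimal lookup sketch. It is not
taken from osd-ldiskfs: the wrapper name pdo_lookup_example() and its error
handling are hypothetical, while the ext4_htree_* calls, the EXT4_HLOCK_LOOKUP
flag and __ext4_find_entry() are the ones introduced by this patch.

/*
 * Illustrative only: a parallel lookup through the pdirops API below.
 * The per-directory lock head (lhead) is assumed to have been created
 * once with ext4_htree_lock_head_alloc(hbits) and kept with the
 * directory's in-memory state.
 */
#include <linux/err.h>
#include "ext4.h"

static struct buffer_head *pdo_lookup_example(struct inode *dir,
					      struct htree_lock_head *lhead,
					      const struct qstr *name,
					      struct ext4_dir_entry_2 **de)
{
	struct htree_lock *lck;
	struct buffer_head *bh;

	/* per-thread lock handle */
	lck = ext4_htree_lock_alloc();
	if (lck == NULL)
		return ERR_PTR(-ENOMEM);

	/* take the directory lock in shared "lookup" mode */
	ext4_htree_lock(lck, lhead, dir, EXT4_HLOCK_LOOKUP);

	/* pass the htree_lock down so namei.c can lock individual
	 * DX/DE blocks instead of serializing the whole directory */
	bh = __ext4_find_entry(dir, name, de, NULL, lck);

	ext4_htree_unlock(lck);
	ext4_htree_lock_free(lck);
	return bh;
}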
17 ---
18  fs/ext4/Makefile           |    1 
19  fs/ext4/ext4.h             |   78 +++
20  fs/ext4/htree_lock.c       |  891 +++++++++++++++++++++++++++++++++++++++++++++
21  fs/ext4/namei.c            |  446 ++++++++++++++++++++--
22  fs/ext4/super.c            |    1 
23  include/linux/htree_lock.h |  187 +++++++++
24  6 files changed, 1567 insertions(+), 37 deletions(-)
25
26 --- a/fs/ext4/Makefile
27 +++ b/fs/ext4/Makefile
28 @@ -6,6 +6,7 @@ obj-$(CONFIG_EXT4_FS) += ext4.o
29  
30  ext4-y := balloc.o bitmap.o block_validity.o dir.o ext4_jbd2.o extents.o \
31                 extents_status.o file.o fsmap.o fsync.o hash.o ialloc.o \
32 +               htree_lock.o \
33                 indirect.o inline.o inode.o ioctl.o mballoc.o migrate.o \
34                 mmp.o move_extent.o namei.o page-io.o readpage.o resize.o \
35                 super.o symlink.o sysfs.o xattr.o xattr_trusted.o xattr_user.o
36 --- a/fs/ext4/ext4.h
37 +++ b/fs/ext4/ext4.h
38 @@ -28,6 +28,7 @@
39  #include <linux/timer.h>
40  #include <linux/version.h>
41  #include <linux/wait.h>
42 +#include <linux/htree_lock.h>
43  #include <linux/sched/signal.h>
44  #include <linux/blockgroup_lock.h>
45  #include <linux/percpu_counter.h>
46 @@ -980,6 +981,9 @@ struct ext4_inode_info {
47         __u32   i_dtime;
48         ext4_fsblk_t    i_file_acl;
49  
50 +       /* following fields for parallel directory operations -bzzz */
51 +       struct semaphore i_append_sem;
52 +
53         /*
54          * i_block_group is the number of the block group which contains
55          * this file's inode.  Constant across the lifetime of the inode,
56 @@ -2162,6 +2166,72 @@ struct dx_hash_info
57   */
58  #define HASH_NB_ALWAYS         1
59  
60 +/* assume name-hash is protected by upper layer */
61 +#define EXT4_HTREE_LOCK_HASH   0
62 +
63 +enum ext4_pdo_lk_types {
64 +#if EXT4_HTREE_LOCK_HASH
65 +       EXT4_LK_HASH,
66 +#endif
67 +       EXT4_LK_DX,             /* index block */
68 +       EXT4_LK_DE,             /* directory entry block */
69 +       EXT4_LK_SPIN,           /* spinlock */
70 +       EXT4_LK_MAX,
71 +};
72 +
73 +/* read-only bit */
74 +#define EXT4_LB_RO(b)          (1 << (b))
75 +/* read + write, high bits for writer */
76 +#define EXT4_LB_RW(b)          ((1 << (b)) | (1 << (EXT4_LK_MAX + (b))))
77 +
78 +enum ext4_pdo_lock_bits {
79 +       /* DX lock bits */
80 +       EXT4_LB_DX_RO           = EXT4_LB_RO(EXT4_LK_DX),
81 +       EXT4_LB_DX              = EXT4_LB_RW(EXT4_LK_DX),
82 +       /* DE lock bits */
83 +       EXT4_LB_DE_RO           = EXT4_LB_RO(EXT4_LK_DE),
84 +       EXT4_LB_DE              = EXT4_LB_RW(EXT4_LK_DE),
85 +       /* DX spinlock bits */
86 +       EXT4_LB_SPIN_RO         = EXT4_LB_RO(EXT4_LK_SPIN),
87 +       EXT4_LB_SPIN            = EXT4_LB_RW(EXT4_LK_SPIN),
88 +       /* accurate searching */
89 +       EXT4_LB_EXACT           = EXT4_LB_RO(EXT4_LK_MAX << 1),
90 +};
91 +
92 +enum ext4_pdo_lock_opc {
93 +       /* external */
94 +       EXT4_HLOCK_READDIR      = (EXT4_LB_DE_RO | EXT4_LB_DX_RO),
95 +       EXT4_HLOCK_LOOKUP       = (EXT4_LB_DE_RO | EXT4_LB_SPIN_RO |
96 +                                  EXT4_LB_EXACT),
97 +       EXT4_HLOCK_DEL          = (EXT4_LB_DE | EXT4_LB_SPIN_RO |
98 +                                  EXT4_LB_EXACT),
99 +       EXT4_HLOCK_ADD          = (EXT4_LB_DE | EXT4_LB_SPIN_RO),
100 +
101 +       /* internal */
102 +       EXT4_HLOCK_LOOKUP_SAFE  = (EXT4_LB_DE_RO | EXT4_LB_DX_RO |
103 +                                  EXT4_LB_EXACT),
104 +       EXT4_HLOCK_DEL_SAFE     = (EXT4_LB_DE | EXT4_LB_DX_RO | EXT4_LB_EXACT),
105 +       EXT4_HLOCK_SPLIT        = (EXT4_LB_DE | EXT4_LB_DX | EXT4_LB_SPIN),
106 +};
107 +
108 +extern struct htree_lock_head *ext4_htree_lock_head_alloc(unsigned hbits);
109 +#define ext4_htree_lock_head_free(lhead)       htree_lock_head_free(lhead)
110 +
111 +extern struct htree_lock *ext4_htree_lock_alloc(void);
112 +#define ext4_htree_lock_free(lck)              htree_lock_free(lck)
113 +
114 +extern void ext4_htree_lock(struct htree_lock *lck,
115 +                           struct htree_lock_head *lhead,
116 +                           struct inode *dir, unsigned flags);
117 +#define ext4_htree_unlock(lck)                  htree_unlock(lck)
118 +
119 +extern struct buffer_head *__ext4_find_entry(struct inode *dir,
120 +                                       const struct qstr *d_name,
121 +                                       struct ext4_dir_entry_2 **res_dir,
122 +                                       int *inlined, struct htree_lock *lck);
123 +extern int __ext4_add_entry(handle_t *handle, struct dentry *dentry,
124 +                     struct inode *inode, struct htree_lock *lck);
125 +
126  struct ext4_filename {
127         const struct qstr *usr_fname;
128         struct fscrypt_str disk_name;
129 @@ -2473,11 +2543,19 @@ void ext4_insert_dentry(struct inode *in
130                         struct ext4_filename *fname, void *data);
131  static inline void ext4_update_dx_flag(struct inode *inode)
132  {
133 +       /* Disable it for ldiskfs, because going from a DX directory to
134 +        * a non-DX directory while it is in use will completely break
135 +        * the htree-locking.
136 +        * If we really want to support this operation in the future,
137 +        * we would need to exclusively lock the directory here, which
138 +        * will increase the complexity of the code. */
139 +#if 0
140         if (!ext4_has_feature_dir_index(inode->i_sb)) {
141                 /* ext4_iget() should have caught this... */
142                 WARN_ON_ONCE(ext4_has_feature_metadata_csum(inode->i_sb));
143                 ext4_clear_inode_flag(inode, EXT4_INODE_INDEX);
144         }
145 +#endif
146  }
147  static const unsigned char ext4_filetype_table[] = {
148         DT_UNKNOWN, DT_REG, DT_DIR, DT_CHR, DT_BLK, DT_FIFO, DT_SOCK, DT_LNK
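To make the bit layout of the EXT4_LB_* macros in the ext4.h hunk above
concrete, here is a small userspace sketch (not part of the patch). It assumes
EXT4_HTREE_LOCK_HASH stays 0, as in this patch, so EXT4_LK_DX/DE/SPIN are
0/1/2 and EXT4_LK_MAX is 3; __builtin_ctz stands in for the kernel's
ffz(~mask), which namei.c later uses to turn a lock mask back into a
child-lock level.

/* Illustrative only: spells out the lock-bit arithmetic used above. */
#include <assert.h>

#define EXT4_LK_DX	0
#define EXT4_LK_DE	1
#define EXT4_LK_SPIN	2
#define EXT4_LK_MAX	3

#define EXT4_LB_RO(b)	(1 << (b))
#define EXT4_LB_RW(b)	((1 << (b)) | (1 << (EXT4_LK_MAX + (b))))

int main(void)
{
	/* low bits select the child-lock level, high bits mark a writer */
	assert(EXT4_LB_RO(EXT4_LK_DE) == 0x02);	/* EXT4_LB_DE_RO */
	assert(EXT4_LB_RW(EXT4_LK_DE) == 0x12);	/* EXT4_LB_DE    */

	/* EXT4_HLOCK_ADD == EXT4_LB_DE | EXT4_LB_SPIN_RO */
	assert((EXT4_LB_RW(EXT4_LK_DE) | EXT4_LB_RO(EXT4_LK_SPIN)) == 0x16);

	/* the lowest set bit of a mask recovers its child-lock level,
	 * e.g. EXT4_LB_DE maps back to level EXT4_LK_DE */
	assert(__builtin_ctz(EXT4_LB_RW(EXT4_LK_DE)) == EXT4_LK_DE);
	return 0;
}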
149 --- /dev/null
150 +++ b/fs/ext4/htree_lock.c
151 @@ -0,0 +1,891 @@
152 +/*
153 + * fs/ext4/htree_lock.c
154 + *
155 + * Copyright (c) 2011, 2012, Intel Corporation.
156 + *
157 + * Author: Liang Zhen <liang@whamcloud.com>
158 + */
159 +#include <linux/jbd2.h>
160 +#include <linux/hash.h>
161 +#include <linux/module.h>
162 +#include <linux/htree_lock.h>
163 +
164 +enum {
165 +       HTREE_LOCK_BIT_EX       = (1 << HTREE_LOCK_EX),
166 +       HTREE_LOCK_BIT_PW       = (1 << HTREE_LOCK_PW),
167 +       HTREE_LOCK_BIT_PR       = (1 << HTREE_LOCK_PR),
168 +       HTREE_LOCK_BIT_CW       = (1 << HTREE_LOCK_CW),
169 +       HTREE_LOCK_BIT_CR       = (1 << HTREE_LOCK_CR),
170 +};
171 +
172 +enum {
173 +       HTREE_LOCK_COMPAT_EX    = 0,
174 +       HTREE_LOCK_COMPAT_PW    = HTREE_LOCK_COMPAT_EX | HTREE_LOCK_BIT_CR,
175 +       HTREE_LOCK_COMPAT_PR    = HTREE_LOCK_COMPAT_PW | HTREE_LOCK_BIT_PR,
176 +       HTREE_LOCK_COMPAT_CW    = HTREE_LOCK_COMPAT_PW | HTREE_LOCK_BIT_CW,
177 +       HTREE_LOCK_COMPAT_CR    = HTREE_LOCK_COMPAT_CW | HTREE_LOCK_BIT_PR |
178 +                                 HTREE_LOCK_BIT_PW,
179 +};
180 +
181 +static int htree_lock_compat[] = {
182 +       [HTREE_LOCK_EX]         HTREE_LOCK_COMPAT_EX,
183 +       [HTREE_LOCK_PW]         HTREE_LOCK_COMPAT_PW,
184 +       [HTREE_LOCK_PR]         HTREE_LOCK_COMPAT_PR,
185 +       [HTREE_LOCK_CW]         HTREE_LOCK_COMPAT_CW,
186 +       [HTREE_LOCK_CR]         HTREE_LOCK_COMPAT_CR,
187 +};
188 +
189 +/* max allowed htree-lock depth.
190 + * We only need depth=3 for ext4 although the user can specify a higher value. */
191 +#define HTREE_LOCK_DEP_MAX     16
192 +
193 +#ifdef HTREE_LOCK_DEBUG
194 +
195 +static char *hl_name[] = {
196 +       [HTREE_LOCK_EX]         "EX",
197 +       [HTREE_LOCK_PW]         "PW",
198 +       [HTREE_LOCK_PR]         "PR",
199 +       [HTREE_LOCK_CW]         "CW",
200 +       [HTREE_LOCK_CR]         "CR",
201 +};
202 +
203 +/* lock stats */
204 +struct htree_lock_node_stats {
205 +       unsigned long long      blocked[HTREE_LOCK_MAX];
206 +       unsigned long long      granted[HTREE_LOCK_MAX];
207 +       unsigned long long      retried[HTREE_LOCK_MAX];
208 +       unsigned long long      events;
209 +};
210 +
211 +struct htree_lock_stats {
212 +       struct htree_lock_node_stats    nodes[HTREE_LOCK_DEP_MAX];
213 +       unsigned long long      granted[HTREE_LOCK_MAX];
214 +       unsigned long long      blocked[HTREE_LOCK_MAX];
215 +};
216 +
217 +static struct htree_lock_stats hl_stats;
218 +
219 +void htree_lock_stat_reset(void)
220 +{
221 +       memset(&hl_stats, 0, sizeof(hl_stats));
222 +}
223 +
224 +void htree_lock_stat_print(int depth)
225 +{
226 +       int     i;
227 +       int     j;
228 +
229 +       printk(KERN_DEBUG "HTREE LOCK STATS:\n");
230 +       for (i = 0; i < HTREE_LOCK_MAX; i++) {
231 +               printk(KERN_DEBUG "[%s]: G [%10llu], B [%10llu]\n",
232 +                      hl_name[i], hl_stats.granted[i], hl_stats.blocked[i]);
233 +       }
234 +       for (i = 0; i < depth; i++) {
235 +               printk(KERN_DEBUG "HTREE CHILD [%d] STATS:\n", i);
236 +               for (j = 0; j < HTREE_LOCK_MAX; j++) {
237 +                       printk(KERN_DEBUG
238 +                               "[%s]: G [%10llu], B [%10llu], R [%10llu]\n",
239 +                               hl_name[j], hl_stats.nodes[i].granted[j],
240 +                               hl_stats.nodes[i].blocked[j],
241 +                               hl_stats.nodes[i].retried[j]);
242 +               }
243 +       }
244 +}
245 +
246 +#define lk_grant_inc(m)       do { hl_stats.granted[m]++; } while (0)
247 +#define lk_block_inc(m)       do { hl_stats.blocked[m]++; } while (0)
248 +#define ln_grant_inc(d, m)    do { hl_stats.nodes[d].granted[m]++; } while (0)
249 +#define ln_block_inc(d, m)    do { hl_stats.nodes[d].blocked[m]++; } while (0)
250 +#define ln_retry_inc(d, m)    do { hl_stats.nodes[d].retried[m]++; } while (0)
251 +#define ln_event_inc(d)       do { hl_stats.nodes[d].events++; } while (0)
252 +
253 +#else /* !DEBUG */
254 +
255 +void htree_lock_stat_reset(void) {}
256 +void htree_lock_stat_print(int depth) {}
257 +
258 +#define lk_grant_inc(m)              do {} while (0)
259 +#define lk_block_inc(m)              do {} while (0)
260 +#define ln_grant_inc(d, m)    do {} while (0)
261 +#define ln_block_inc(d, m)    do {} while (0)
262 +#define ln_retry_inc(d, m)    do {} while (0)
263 +#define ln_event_inc(d)              do {} while (0)
264 +
265 +#endif /* DEBUG */
266 +
267 +EXPORT_SYMBOL(htree_lock_stat_reset);
268 +EXPORT_SYMBOL(htree_lock_stat_print);
269 +
270 +#define HTREE_DEP_ROOT           (-1)
271 +
272 +#define htree_spin_lock(lhead, dep)                            \
273 +       bit_spin_lock((dep) + 1, &(lhead)->lh_lock)
274 +#define htree_spin_unlock(lhead, dep)                          \
275 +       bit_spin_unlock((dep) + 1, &(lhead)->lh_lock)
276 +
277 +#define htree_key_event_ignore(child, ln)                      \
278 +       (!((child)->lc_events & (1 << (ln)->ln_mode)))
279 +
280 +static int
281 +htree_key_list_empty(struct htree_lock_node *ln)
282 +{
283 +       return list_empty(&ln->ln_major_list) && list_empty(&ln->ln_minor_list);
284 +}
285 +
286 +static void
287 +htree_key_list_del_init(struct htree_lock_node *ln)
288 +{
289 +       struct htree_lock_node *tmp = NULL;
290 +
291 +       if (!list_empty(&ln->ln_minor_list)) {
292 +               tmp = list_entry(ln->ln_minor_list.next,
293 +                                struct htree_lock_node, ln_minor_list);
294 +               list_del_init(&ln->ln_minor_list);
295 +       }
296 +
297 +       if (list_empty(&ln->ln_major_list))
298 +               return;
299 +
300 +       if (tmp == NULL) { /* not on minor key list */
301 +               list_del_init(&ln->ln_major_list);
302 +       } else {
303 +               BUG_ON(!list_empty(&tmp->ln_major_list));
304 +               list_replace_init(&ln->ln_major_list, &tmp->ln_major_list);
305 +       }
306 +}
307 +
308 +static void
309 +htree_key_list_replace_init(struct htree_lock_node *old,
310 +                           struct htree_lock_node *new)
311 +{
312 +       if (!list_empty(&old->ln_major_list))
313 +               list_replace_init(&old->ln_major_list, &new->ln_major_list);
314 +
315 +       if (!list_empty(&old->ln_minor_list))
316 +               list_replace_init(&old->ln_minor_list, &new->ln_minor_list);
317 +}
318 +
319 +static void
320 +htree_key_event_enqueue(struct htree_lock_child *child,
321 +                       struct htree_lock_node *ln, int dep, void *event)
322 +{
323 +       struct htree_lock_node *tmp;
324 +
325 +       /* NB: ALWAYS called holding lhead::lh_lock(dep) */
326 +       BUG_ON(ln->ln_mode == HTREE_LOCK_NL);
327 +       if (event == NULL || htree_key_event_ignore(child, ln))
328 +               return;
329 +
330 +       /* shouldn't be a very long list */
331 +       list_for_each_entry(tmp, &ln->ln_alive_list, ln_alive_list) {
332 +               if (tmp->ln_mode == HTREE_LOCK_NL) {
333 +                       ln_event_inc(dep);
334 +                       if (child->lc_callback != NULL)
335 +                               child->lc_callback(tmp->ln_ev_target, event);
336 +               }
337 +       }
338 +}
339 +
340 +static int
341 +htree_node_lock_enqueue(struct htree_lock *newlk, struct htree_lock *curlk,
342 +                       unsigned dep, int wait, void *event)
343 +{
344 +       struct htree_lock_child *child = &newlk->lk_head->lh_children[dep];
345 +       struct htree_lock_node *newln = &newlk->lk_nodes[dep];
346 +       struct htree_lock_node *curln = &curlk->lk_nodes[dep];
347 +
348 +       /* NB: ALWAYS called holding lhead::lh_lock(dep) */
349 +       /* NB: we only expect PR/PW lock modes here; only these two modes are
350 +        * allowed for htree_node_lock (asserted in htree_node_lock_internal).
351 +        * NL is only used for listeners; users can't directly request NL mode. */
352 +       if ((curln->ln_mode == HTREE_LOCK_NL) ||
353 +           (curln->ln_mode != HTREE_LOCK_PW &&
354 +            newln->ln_mode != HTREE_LOCK_PW)) {
355 +               /* no conflict, attach it on granted list of @curlk */
356 +               if (curln->ln_mode != HTREE_LOCK_NL) {
357 +                       list_add(&newln->ln_granted_list,
358 +                                &curln->ln_granted_list);
359 +               } else {
360 +                       /* replace key owner */
361 +                       htree_key_list_replace_init(curln, newln);
362 +               }
363 +
364 +               list_add(&newln->ln_alive_list, &curln->ln_alive_list);
365 +               htree_key_event_enqueue(child, newln, dep, event);
366 +               ln_grant_inc(dep, newln->ln_mode);
367 +               return 1; /* still hold lh_lock */
368 +       }
369 +
370 +       if (!wait) { /* can't grant and don't want to wait */
371 +               ln_retry_inc(dep, newln->ln_mode);
372 +               newln->ln_mode = HTREE_LOCK_INVAL;
373 +               return -1; /* don't wait and just return -1 */
374 +       }
375 +
376 +       newlk->lk_task = current;
377 +       set_current_state(TASK_UNINTERRUPTIBLE);
378 +       /* conflict, attach it on blocked list of curlk */
379 +       list_add_tail(&newln->ln_blocked_list, &curln->ln_blocked_list);
380 +       list_add(&newln->ln_alive_list, &curln->ln_alive_list);
381 +       ln_block_inc(dep, newln->ln_mode);
382 +
383 +       htree_spin_unlock(newlk->lk_head, dep);
384 +       /* wait to be given the lock */
385 +       if (newlk->lk_task != NULL)
386 +               schedule();
387 +       /* granted, no doubt, wake up will set me RUNNING */
388 +       if (event == NULL || htree_key_event_ignore(child, newln))
389 +               return 0; /* granted without lh_lock */
390 +
391 +       htree_spin_lock(newlk->lk_head, dep);
392 +       htree_key_event_enqueue(child, newln, dep, event);
393 +       return 1; /* still hold lh_lock */
394 +}
395 +
396 +/*
397 + * get PR/PW access to particular tree-node according to @dep and @key,
398 + * it will return -1 if @wait is false and can't immediately grant this lock.
399 + * All listeners(HTREE_LOCK_NL) on @dep and with the same @key will get
400 + * @event if it's not NULL.
401 + * NB: ALWAYS called holding lhead::lh_lock
402 + */
403 +static int
404 +htree_node_lock_internal(struct htree_lock_head *lhead, struct htree_lock *lck,
405 +                        htree_lock_mode_t mode, u32 key, unsigned dep,
406 +                        int wait, void *event)
407 +{
408 +       LIST_HEAD(list);
409 +       struct htree_lock       *tmp;
410 +       struct htree_lock       *tmp2;
411 +       u16                     major;
412 +       u16                     minor;
413 +       u8                      reverse;
414 +       u8                      ma_bits;
415 +       u8                      mi_bits;
416 +
417 +       BUG_ON(mode != HTREE_LOCK_PW && mode != HTREE_LOCK_PR);
418 +       BUG_ON(htree_node_is_granted(lck, dep));
419 +
420 +       key = hash_long(key, lhead->lh_hbits);
421 +
422 +       mi_bits = lhead->lh_hbits >> 1;
423 +       ma_bits = lhead->lh_hbits - mi_bits;
424 +
425 +       lck->lk_nodes[dep].ln_major_key = major = key & ((1U << ma_bits) - 1);
426 +       lck->lk_nodes[dep].ln_minor_key = minor = key >> ma_bits;
427 +       lck->lk_nodes[dep].ln_mode = mode;
428 +
429 +       /*
430 +        * The major key list is an ordered list, so searches are started
431 +        * at the end of the list that is numerically closer to major_key,
432 +        * so at most half of the list will be walked (for well-distributed
433 +        * keys). The list traversal aborts early if the expected key
434 +        * location is passed.
435 +        */
436 +       reverse = (major >= (1 << (ma_bits - 1)));
437 +
438 +       if (reverse) {
439 +               list_for_each_entry_reverse(tmp,
440 +                                       &lhead->lh_children[dep].lc_list,
441 +                                       lk_nodes[dep].ln_major_list) {
442 +                       if (tmp->lk_nodes[dep].ln_major_key == major) {
443 +                               goto search_minor;
444 +
445 +                       } else if (tmp->lk_nodes[dep].ln_major_key < major) {
446 +                               /* attach _after_ @tmp */
447 +                               list_add(&lck->lk_nodes[dep].ln_major_list,
448 +                                        &tmp->lk_nodes[dep].ln_major_list);
449 +                               goto out_grant_major;
450 +                       }
451 +               }
452 +
453 +               list_add(&lck->lk_nodes[dep].ln_major_list,
454 +                        &lhead->lh_children[dep].lc_list);
455 +               goto out_grant_major;
456 +
457 +       } else {
458 +               list_for_each_entry(tmp, &lhead->lh_children[dep].lc_list,
459 +                                   lk_nodes[dep].ln_major_list) {
460 +                       if (tmp->lk_nodes[dep].ln_major_key == major) {
461 +                               goto search_minor;
462 +
463 +                       } else if (tmp->lk_nodes[dep].ln_major_key > major) {
464 +                               /* insert _before_ @tmp */
465 +                               list_add_tail(&lck->lk_nodes[dep].ln_major_list,
466 +                                       &tmp->lk_nodes[dep].ln_major_list);
467 +                               goto out_grant_major;
468 +                       }
469 +               }
470 +
471 +               list_add_tail(&lck->lk_nodes[dep].ln_major_list,
472 +                             &lhead->lh_children[dep].lc_list);
473 +               goto out_grant_major;
474 +       }
475 +
476 + search_minor:
477 +       /*
478 +        * NB: minor_key list doesn't have a "head", @list is just a
479 +        * temporary stub for helping list searching, make sure it's removed
480 +        * after searching.
481 +        * minor_key list is an ordered list too.
482 +        */
483 +       list_add_tail(&list, &tmp->lk_nodes[dep].ln_minor_list);
484 +
485 +       reverse = (minor >= (1 << (mi_bits - 1)));
486 +
487 +       if (reverse) {
488 +               list_for_each_entry_reverse(tmp2, &list,
489 +                                           lk_nodes[dep].ln_minor_list) {
490 +                       if (tmp2->lk_nodes[dep].ln_minor_key == minor) {
491 +                               goto out_enqueue;
492 +
493 +                       } else if (tmp2->lk_nodes[dep].ln_minor_key < minor) {
494 +                               /* attach _after_ @tmp2 */
495 +                               list_add(&lck->lk_nodes[dep].ln_minor_list,
496 +                                        &tmp2->lk_nodes[dep].ln_minor_list);
497 +                               goto out_grant_minor;
498 +                       }
499 +               }
500 +
501 +               list_add(&lck->lk_nodes[dep].ln_minor_list, &list);
502 +
503 +       } else {
504 +               list_for_each_entry(tmp2, &list,
505 +                                   lk_nodes[dep].ln_minor_list) {
506 +                       if (tmp2->lk_nodes[dep].ln_minor_key == minor) {
507 +                               goto out_enqueue;
508 +
509 +                       } else if (tmp2->lk_nodes[dep].ln_minor_key > minor) {
510 +                               /* insert _before_ @tmp2 */
511 +                               list_add_tail(&lck->lk_nodes[dep].ln_minor_list,
512 +                                       &tmp2->lk_nodes[dep].ln_minor_list);
513 +                               goto out_grant_minor;
514 +                       }
515 +               }
516 +
517 +               list_add_tail(&lck->lk_nodes[dep].ln_minor_list, &list);
518 +       }
519 +
520 + out_grant_minor:
521 +       if (list.next == &lck->lk_nodes[dep].ln_minor_list) {
522 +               /* new lock @lck is the first one on minor_key list, which
523 +                * means it has the smallest minor_key and it should
524 +                * replace @tmp as minor_key owner */
525 +               list_replace_init(&tmp->lk_nodes[dep].ln_major_list,
526 +                                 &lck->lk_nodes[dep].ln_major_list);
527 +       }
528 +       /* remove the temporary head */
529 +       list_del(&list);
530 +
531 + out_grant_major:
532 +       ln_grant_inc(dep, lck->lk_nodes[dep].ln_mode);
533 +       return 1; /* granted with holding lh_lock */
534 +
535 + out_enqueue:
536 +       list_del(&list); /* remove temporary head */
537 +       return htree_node_lock_enqueue(lck, tmp2, dep, wait, event);
538 +}
539 +
540 +/*
541 + * release the key of @lck at level @dep, and grant any blocked locks.
542 + * caller will still listen on @key if @event is not NULL, which means
543 + * caller can see an event (by event_cb) while granting any lock with
544 + * the same key at level @dep.
545 + * NB: ALWAYS called holding lhead::lh_lock
546 + * NB: listener will not block anyone because listening mode is HTREE_LOCK_NL
547 + */
548 +static void
549 +htree_node_unlock_internal(struct htree_lock_head *lhead,
550 +                          struct htree_lock *curlk, unsigned dep, void *event)
551 +{
552 +       struct htree_lock_node  *curln = &curlk->lk_nodes[dep];
553 +       struct htree_lock       *grtlk = NULL;
554 +       struct htree_lock_node  *grtln;
555 +       struct htree_lock       *poslk;
556 +       struct htree_lock       *tmplk;
557 +
558 +       if (!htree_node_is_granted(curlk, dep))
559 +               return;
560 +
561 +       if (!list_empty(&curln->ln_granted_list)) {
562 +               /* there is another granted lock */
563 +               grtlk = list_entry(curln->ln_granted_list.next,
564 +                                  struct htree_lock,
565 +                                  lk_nodes[dep].ln_granted_list);
566 +               list_del_init(&curln->ln_granted_list);
567 +       }
568 +
569 +       if (grtlk == NULL && !list_empty(&curln->ln_blocked_list)) {
570 +               /*
571 +                * @curlk is the only granted lock, so we confirmed:
572 +                * a) curln is key owner (attached on major/minor_list),
573 +                *    so if there is any blocked lock, it should be attached
574 +                *    on curln->ln_blocked_list
575 +                * b) we always can grant the first blocked lock
576 +                */
577 +               grtlk = list_entry(curln->ln_blocked_list.next,
578 +                                  struct htree_lock,
579 +                                  lk_nodes[dep].ln_blocked_list);
580 +               BUG_ON(grtlk->lk_task == NULL);
581 +               wake_up_process(grtlk->lk_task);
582 +       }
583 +
584 +       if (event != NULL &&
585 +           lhead->lh_children[dep].lc_events != HTREE_EVENT_DISABLE) {
586 +               curln->ln_ev_target = event;
587 +               curln->ln_mode = HTREE_LOCK_NL; /* listen! */
588 +       } else {
589 +               curln->ln_mode = HTREE_LOCK_INVAL;
590 +       }
591 +
592 +       if (grtlk == NULL) { /* I must be the only one locking this key */
593 +               struct htree_lock_node *tmpln;
594 +
595 +               BUG_ON(htree_key_list_empty(curln));
596 +
597 +               if (curln->ln_mode == HTREE_LOCK_NL) /* listening */
598 +                       return;
599 +
600 +               /* not listening */
601 +               if (list_empty(&curln->ln_alive_list)) { /* no more listener */
602 +                       htree_key_list_del_init(curln);
603 +                       return;
604 +               }
605 +
606 +               tmpln = list_entry(curln->ln_alive_list.next,
607 +                                  struct htree_lock_node, ln_alive_list);
608 +
609 +               BUG_ON(tmpln->ln_mode != HTREE_LOCK_NL);
610 +
611 +               htree_key_list_replace_init(curln, tmpln);
612 +               list_del_init(&curln->ln_alive_list);
613 +
614 +               return;
615 +       }
616 +
617 +       /* have a granted lock */
618 +       grtln = &grtlk->lk_nodes[dep];
619 +       if (!list_empty(&curln->ln_blocked_list)) {
620 +               /* only key owner can be on both lists */
621 +               BUG_ON(htree_key_list_empty(curln));
622 +
623 +               if (list_empty(&grtln->ln_blocked_list)) {
624 +                       list_add(&grtln->ln_blocked_list,
625 +                                &curln->ln_blocked_list);
626 +               }
627 +               list_del_init(&curln->ln_blocked_list);
628 +       }
629 +       /*
630 +        * NB: this is the tricky part:
631 +        * We have only two modes for child-lock (PR and PW), also,
632 +        * only owner of the key (attached on major/minor_list) can be on
633 +        * both blocked_list and granted_list, so @grtlk must be one
634 +        * of these two cases:
635 +        *
636 +        * a) @grtlk is taken from granted_list, which means we've granted
637 +        *    more than one lock so @grtlk has to be PR, the first blocked
638 +        *    lock must be PW and we can't grant it at all.
639 +        *    So even if @grtlk is not the owner of the key (empty blocked_list),
640 +        *    we don't care because we can't grant any lock.
641 +        * b) we just grant a new lock which is taken from head of blocked
642 +        *    list, and it should be the first granted lock, and it should
643 +        *    be the first one linked on blocked_list.
644 +        *
645 +        * Either way, we can get correct result by iterating blocked_list
646 +        * of @grtlk, and don't have to bother with finding out the
647 +        * owner of the current key.
648 +        */
649 +       list_for_each_entry_safe(poslk, tmplk, &grtln->ln_blocked_list,
650 +                                lk_nodes[dep].ln_blocked_list) {
651 +               if (grtlk->lk_nodes[dep].ln_mode == HTREE_LOCK_PW ||
652 +                   poslk->lk_nodes[dep].ln_mode == HTREE_LOCK_PW)
653 +                       break;
654 +               /* grant all readers */
655 +               list_del_init(&poslk->lk_nodes[dep].ln_blocked_list);
656 +               list_add(&poslk->lk_nodes[dep].ln_granted_list,
657 +                        &grtln->ln_granted_list);
658 +
659 +               BUG_ON(poslk->lk_task == NULL);
660 +               wake_up_process(poslk->lk_task);
661 +       }
662 +
663 +       /* if @curln is the owner of this key, replace it with @grtln */
664 +       if (!htree_key_list_empty(curln))
665 +               htree_key_list_replace_init(curln, grtln);
666 +
667 +       if (curln->ln_mode == HTREE_LOCK_INVAL)
668 +               list_del_init(&curln->ln_alive_list);
669 +}
670 +
671 +/*
672 + * it's just a wrapper of htree_node_lock_internal; it returns 1 when granted
673 + * and 0 only if @wait is false and the lock can't be granted immediately
674 + */
675 +int
676 +htree_node_lock_try(struct htree_lock *lck, htree_lock_mode_t mode,
677 +                   u32 key, unsigned dep, int wait, void *event)
678 +{
679 +       struct htree_lock_head *lhead = lck->lk_head;
680 +       int rc;
681 +
682 +       BUG_ON(dep >= lck->lk_depth);
683 +       BUG_ON(lck->lk_mode == HTREE_LOCK_INVAL);
684 +
685 +       htree_spin_lock(lhead, dep);
686 +       rc = htree_node_lock_internal(lhead, lck, mode, key, dep, wait, event);
687 +       if (rc != 0)
688 +               htree_spin_unlock(lhead, dep);
689 +       return rc >= 0;
690 +}
691 +EXPORT_SYMBOL(htree_node_lock_try);
692 +
693 +/* it's a wrapper of htree_node_unlock_internal */
694 +void
695 +htree_node_unlock(struct htree_lock *lck, unsigned dep, void *event)
696 +{
697 +       struct htree_lock_head *lhead = lck->lk_head;
698 +
699 +       BUG_ON(dep >= lck->lk_depth);
700 +       BUG_ON(lck->lk_mode == HTREE_LOCK_INVAL);
701 +
702 +       htree_spin_lock(lhead, dep);
703 +       htree_node_unlock_internal(lhead, lck, dep, event);
704 +       htree_spin_unlock(lhead, dep);
705 +}
706 +EXPORT_SYMBOL(htree_node_unlock);
707 +
708 +/* stop listening on child-lock level @dep */
709 +void
710 +htree_node_stop_listen(struct htree_lock *lck, unsigned dep)
711 +{
712 +       struct htree_lock_node *ln = &lck->lk_nodes[dep];
713 +       struct htree_lock_node *tmp;
714 +
715 +       BUG_ON(htree_node_is_granted(lck, dep));
716 +       BUG_ON(!list_empty(&ln->ln_blocked_list));
717 +       BUG_ON(!list_empty(&ln->ln_granted_list));
718 +
719 +       if (!htree_node_is_listening(lck, dep))
720 +               return;
721 +
722 +       htree_spin_lock(lck->lk_head, dep);
723 +       ln->ln_mode = HTREE_LOCK_INVAL;
724 +       ln->ln_ev_target = NULL;
725 +
726 +       if (htree_key_list_empty(ln)) { /* not owner */
727 +               list_del_init(&ln->ln_alive_list);
728 +               goto out;
729 +       }
730 +
731 +       /* I'm the owner... */
732 +       if (list_empty(&ln->ln_alive_list)) { /* no more listener */
733 +               htree_key_list_del_init(ln);
734 +               goto out;
735 +       }
736 +
737 +       tmp = list_entry(ln->ln_alive_list.next,
738 +                        struct htree_lock_node, ln_alive_list);
739 +
740 +       BUG_ON(tmp->ln_mode != HTREE_LOCK_NL);
741 +       htree_key_list_replace_init(ln, tmp);
742 +       list_del_init(&ln->ln_alive_list);
743 + out:
744 +       htree_spin_unlock(lck->lk_head, dep);
745 +}
746 +EXPORT_SYMBOL(htree_node_stop_listen);
747 +
748 +/* release all child-locks if we have any */
749 +static void
750 +htree_node_release_all(struct htree_lock *lck)
751 +{
752 +       int     i;
753 +
754 +       for (i = 0; i < lck->lk_depth; i++) {
755 +               if (htree_node_is_granted(lck, i))
756 +                       htree_node_unlock(lck, i, NULL);
757 +               else if (htree_node_is_listening(lck, i))
758 +                       htree_node_stop_listen(lck, i);
759 +       }
760 +}
761 +
762 +/*
763 + * obtain htree lock, it could be blocked inside if there's conflict
764 + * with any granted or blocked lock and @wait is true.
765 + * NB: ALWAYS called holding lhead::lh_lock
766 + */
767 +static int
768 +htree_lock_internal(struct htree_lock *lck, int wait)
769 +{
770 +       struct htree_lock_head *lhead = lck->lk_head;
771 +       int     granted = 0;
772 +       int     blocked = 0;
773 +       int     i;
774 +
775 +       for (i = 0; i < HTREE_LOCK_MAX; i++) {
776 +               if (lhead->lh_ngranted[i] != 0)
777 +                       granted |= 1 << i;
778 +               if (lhead->lh_nblocked[i] != 0)
779 +                       blocked |= 1 << i;
780 +       }
781 +       if ((htree_lock_compat[lck->lk_mode] & granted) != granted ||
782 +           (htree_lock_compat[lck->lk_mode] & blocked) != blocked) {
783 +               /* will block the current lock even if it only conflicts with
784 +                * another blocked lock, so locks like EX won't starve */
785 +               if (!wait)
786 +                       return -1;
787 +               lhead->lh_nblocked[lck->lk_mode]++;
788 +               lk_block_inc(lck->lk_mode);
789 +
790 +               lck->lk_task = current;
791 +               list_add_tail(&lck->lk_blocked_list, &lhead->lh_blocked_list);
792 +
793 +retry:
794 +               set_current_state(TASK_UNINTERRUPTIBLE);
795 +               htree_spin_unlock(lhead, HTREE_DEP_ROOT);
796 +               /* wait to be given the lock */
797 +               if (lck->lk_task != NULL)
798 +                       schedule();
799 +               /* granted, no doubt. wake up will set me RUNNING.
800 +                * Since the thread could be woken up spuriously,
801 +                * we need to check again whether the lock was granted. */
802 +               if (!list_empty(&lck->lk_blocked_list)) {
803 +                       htree_spin_lock(lhead, HTREE_DEP_ROOT);
804 +                       if (list_empty(&lck->lk_blocked_list)) {
805 +                               htree_spin_unlock(lhead, HTREE_DEP_ROOT);
806 +                               return 0;
807 +                       }
808 +                       goto retry;
809 +               }
810 +               return 0; /* without lh_lock */
811 +       }
812 +       lhead->lh_ngranted[lck->lk_mode]++;
813 +       lk_grant_inc(lck->lk_mode);
814 +       return 1;
815 +}
816 +
817 +/* release htree lock. NB: ALWAYS called holding lhead::lh_lock */
818 +static void
819 +htree_unlock_internal(struct htree_lock *lck)
820 +{
821 +       struct htree_lock_head *lhead = lck->lk_head;
822 +       struct htree_lock *tmp;
823 +       struct htree_lock *tmp2;
824 +       int granted = 0;
825 +       int i;
826 +
827 +       BUG_ON(lhead->lh_ngranted[lck->lk_mode] == 0);
828 +
829 +       lhead->lh_ngranted[lck->lk_mode]--;
830 +       lck->lk_mode = HTREE_LOCK_INVAL;
831 +
832 +       for (i = 0; i < HTREE_LOCK_MAX; i++) {
833 +               if (lhead->lh_ngranted[i] != 0)
834 +                       granted |= 1 << i;
835 +       }
836 +       list_for_each_entry_safe(tmp, tmp2,
837 +                                &lhead->lh_blocked_list, lk_blocked_list) {
838 +               /* conflict with any granted lock? */
839 +               if ((htree_lock_compat[tmp->lk_mode] & granted) != granted)
840 +                       break;
841 +
842 +               list_del_init(&tmp->lk_blocked_list);
843 +
844 +               BUG_ON(lhead->lh_nblocked[tmp->lk_mode] == 0);
845 +
846 +               lhead->lh_nblocked[tmp->lk_mode]--;
847 +               lhead->lh_ngranted[tmp->lk_mode]++;
848 +               granted |= 1 << tmp->lk_mode;
849 +
850 +               BUG_ON(tmp->lk_task == NULL);
851 +               wake_up_process(tmp->lk_task);
852 +       }
853 +}
854 +
855 +/* it's a wrapper of htree_lock_internal and the exported interface.
856 + * It always returns 1 with the lock granted if @wait is true; it can return 0
857 + * if @wait is false and the locking request can't be granted immediately */
858 +int
859 +htree_lock_try(struct htree_lock *lck, struct htree_lock_head *lhead,
860 +              htree_lock_mode_t mode, int wait)
861 +{
862 +       int     rc;
863 +
864 +       BUG_ON(lck->lk_depth > lhead->lh_depth);
865 +       BUG_ON(lck->lk_head != NULL);
866 +       BUG_ON(lck->lk_task != NULL);
867 +
868 +       lck->lk_head = lhead;
869 +       lck->lk_mode = mode;
870 +
871 +       htree_spin_lock(lhead, HTREE_DEP_ROOT);
872 +       rc = htree_lock_internal(lck, wait);
873 +       if (rc != 0)
874 +               htree_spin_unlock(lhead, HTREE_DEP_ROOT);
875 +       return rc >= 0;
876 +}
877 +EXPORT_SYMBOL(htree_lock_try);
878 +
879 +/* it's a wrapper of htree_unlock_internal and the exported interface.
880 + * It will release all htree_node_locks and the htree_lock */
881 +void
882 +htree_unlock(struct htree_lock *lck)
883 +{
884 +       BUG_ON(lck->lk_head == NULL);
885 +       BUG_ON(lck->lk_mode == HTREE_LOCK_INVAL);
886 +
887 +       htree_node_release_all(lck);
888 +
889 +       htree_spin_lock(lck->lk_head, HTREE_DEP_ROOT);
890 +       htree_unlock_internal(lck);
891 +       htree_spin_unlock(lck->lk_head, HTREE_DEP_ROOT);
892 +       lck->lk_head = NULL;
893 +       lck->lk_task = NULL;
894 +}
895 +EXPORT_SYMBOL(htree_unlock);
896 +
897 +/* change lock mode */
898 +void
899 +htree_change_mode(struct htree_lock *lck, htree_lock_mode_t mode)
900 +{
901 +       BUG_ON(lck->lk_mode == HTREE_LOCK_INVAL);
902 +       lck->lk_mode = mode;
903 +}
904 +EXPORT_SYMBOL(htree_change_mode);
905 +
906 +/* release the htree lock, and lock it again with a new mode.
907 + * This function will first release all htree_node_locks and the htree_lock,
908 + * then try to acquire the htree_lock with the new @mode.
909 + * It always returns 1 with the lock granted if @wait is true; it can return 0
910 + * if @wait is false and the locking request can't be granted immediately */
911 +int
912 +htree_change_lock_try(struct htree_lock *lck, htree_lock_mode_t mode, int wait)
913 +{
914 +       struct htree_lock_head *lhead = lck->lk_head;
915 +       int rc;
916 +
917 +       BUG_ON(lhead == NULL);
918 +       BUG_ON(lck->lk_mode == mode);
919 +       BUG_ON(lck->lk_mode == HTREE_LOCK_INVAL || mode == HTREE_LOCK_INVAL);
920 +
921 +       htree_node_release_all(lck);
922 +
923 +       htree_spin_lock(lhead, HTREE_DEP_ROOT);
924 +       htree_unlock_internal(lck);
925 +       lck->lk_mode = mode;
926 +       rc = htree_lock_internal(lck, wait);
927 +       if (rc != 0)
928 +               htree_spin_unlock(lhead, HTREE_DEP_ROOT);
929 +       return rc >= 0;
930 +}
931 +EXPORT_SYMBOL(htree_change_lock_try);
932 +
933 +/* create a htree_lock head with @depth levels (number of child-locks),
934 + * it is a per-resource structure */
935 +struct htree_lock_head *
936 +htree_lock_head_alloc(unsigned depth, unsigned hbits, unsigned priv)
937 +{
938 +       struct htree_lock_head *lhead;
939 +       int  i;
940 +
941 +       if (depth > HTREE_LOCK_DEP_MAX) {
942 +               printk(KERN_ERR "%d is larger than max htree_lock depth %d\n",
943 +                       depth, HTREE_LOCK_DEP_MAX);
944 +               return NULL;
945 +       }
946 +
947 +       lhead = kzalloc(offsetof(struct htree_lock_head,
948 +                                lh_children[depth]) + priv, GFP_NOFS);
949 +       if (lhead == NULL)
950 +               return NULL;
951 +
952 +       if (hbits < HTREE_HBITS_MIN)
953 +               lhead->lh_hbits = HTREE_HBITS_MIN;
954 +       else if (hbits > HTREE_HBITS_MAX)
955 +               lhead->lh_hbits = HTREE_HBITS_MAX;
956 +
957 +       lhead->lh_lock = 0;
958 +       lhead->lh_depth = depth;
959 +       INIT_LIST_HEAD(&lhead->lh_blocked_list);
960 +       if (priv > 0) {
961 +               lhead->lh_private = (void *)lhead +
962 +                       offsetof(struct htree_lock_head, lh_children[depth]);
963 +       }
964 +
965 +       for (i = 0; i < depth; i++) {
966 +               INIT_LIST_HEAD(&lhead->lh_children[i].lc_list);
967 +               lhead->lh_children[i].lc_events = HTREE_EVENT_DISABLE;
968 +       }
969 +       return lhead;
970 +}
971 +EXPORT_SYMBOL(htree_lock_head_alloc);
972 +
973 +/* free the htree_lock head */
974 +void
975 +htree_lock_head_free(struct htree_lock_head *lhead)
976 +{
977 +       int     i;
978 +
979 +       BUG_ON(!list_empty(&lhead->lh_blocked_list));
980 +       for (i = 0; i < lhead->lh_depth; i++)
981 +               BUG_ON(!list_empty(&lhead->lh_children[i].lc_list));
982 +       kfree(lhead);
983 +}
984 +EXPORT_SYMBOL(htree_lock_head_free);
985 +
986 +/* register event callback for @events of child-lock at level @dep */
987 +void
988 +htree_lock_event_attach(struct htree_lock_head *lhead, unsigned dep,
989 +                       unsigned events, htree_event_cb_t callback)
990 +{
991 +       BUG_ON(lhead->lh_depth <= dep);
992 +       lhead->lh_children[dep].lc_events = events;
993 +       lhead->lh_children[dep].lc_callback = callback;
994 +}
995 +EXPORT_SYMBOL(htree_lock_event_attach);
996 +
997 +/* allocate a htree_lock, which is a per-thread structure; @pbytes is some
998 + * extra bytes of private data for the caller */
999 +struct htree_lock *
1000 +htree_lock_alloc(unsigned depth, unsigned pbytes)
1001 +{
1002 +       struct htree_lock *lck;
1003 +       int i = offsetof(struct htree_lock, lk_nodes[depth]);
1004 +
1005 +       if (depth > HTREE_LOCK_DEP_MAX) {
1006 +               printk(KERN_ERR "%d is larger than max htree_lock depth %d\n",
1007 +                       depth, HTREE_LOCK_DEP_MAX);
1008 +               return NULL;
1009 +       }
1010 +       lck = kzalloc(i + pbytes, GFP_NOFS);
1011 +       if (lck == NULL)
1012 +               return NULL;
1013 +
1014 +       if (pbytes != 0)
1015 +               lck->lk_private = (void *)lck + i;
1016 +       lck->lk_mode = HTREE_LOCK_INVAL;
1017 +       lck->lk_depth = depth;
1018 +       INIT_LIST_HEAD(&lck->lk_blocked_list);
1019 +
1020 +       for (i = 0; i < depth; i++) {
1021 +               struct htree_lock_node *node = &lck->lk_nodes[i];
1022 +
1023 +               node->ln_mode = HTREE_LOCK_INVAL;
1024 +               INIT_LIST_HEAD(&node->ln_major_list);
1025 +               INIT_LIST_HEAD(&node->ln_minor_list);
1026 +               INIT_LIST_HEAD(&node->ln_alive_list);
1027 +               INIT_LIST_HEAD(&node->ln_blocked_list);
1028 +               INIT_LIST_HEAD(&node->ln_granted_list);
1029 +       }
1030 +
1031 +       return lck;
1032 +}
1033 +EXPORT_SYMBOL(htree_lock_alloc);
1034 +
1035 +/* free htree_lock node */
1036 +void
1037 +htree_lock_free(struct htree_lock *lck)
1038 +{
1039 +       BUG_ON(lck->lk_mode != HTREE_LOCK_INVAL);
1040 +       kfree(lck);
1041 +}
1042 +EXPORT_SYMBOL(htree_lock_free);
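Before the namei.c changes that wire these primitives into ext4, here is an
illustrative, stand-alone sketch of the intended two-level usage (tree lock
plus per-block child lock). The function pdo_example() is hypothetical; it
only uses calls defined in htree_lock.c above and assumes the lock head was
created earlier with htree_lock_head_alloc().

/*
 * Illustrative only (not from the patch): one lock head is attached to
 * the shared resource; each thread allocates its own struct htree_lock.
 */
#include <linux/errno.h>
#include <linux/htree_lock.h>

static int pdo_example(struct htree_lock_head *lhead, u32 blocknr)
{
	struct htree_lock *lck;

	/* depth must match the lock head; no private bytes needed here */
	lck = htree_lock_alloc(lhead->lh_depth, 0);
	if (lck == NULL)
		return -ENOMEM;

	/* tree-level lock: CW admits concurrent modifiers but excludes
	 * EX/PW holders (see htree_lock_compat[] above); with wait=1
	 * this always grants */
	htree_lock_try(lck, lhead, HTREE_LOCK_CW, 1 /* wait */);

	/* child lock at level 0, keyed by the block being modified, so
	 * only updates to the same block serialize against each other */
	htree_node_lock_try(lck, HTREE_LOCK_PW, blocknr, 0 /* dep */,
			    1 /* wait */, NULL /* no event */);

	/* ... modify the block ... */

	/* drops the node lock(s) and then the tree lock */
	htree_unlock(lck);
	htree_lock_free(lck);
	return 0;
}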
1043 --- a/fs/ext4/namei.c
1044 +++ b/fs/ext4/namei.c
1045 @@ -53,6 +53,7 @@ struct buffer_head *ext4_append(handle_t
1046                                         ext4_lblk_t *block)
1047  {
1048         struct buffer_head *bh;
1049 +       struct ext4_inode_info *ei = EXT4_I(inode);
1050         int err;
1051  
1052         if (unlikely(EXT4_SB(inode->i_sb)->s_max_dir_size_kb &&
1053 @@ -60,15 +61,22 @@ struct buffer_head *ext4_append(handle_t
1054                       EXT4_SB(inode->i_sb)->s_max_dir_size_kb)))
1055                 return ERR_PTR(-ENOSPC);
1056  
1057 +       /* with parallel dir operations all appends
1058 +        * have to be serialized -bzzz */
1059 +       down(&ei->i_append_sem);
1060 +
1061         *block = inode->i_size >> inode->i_sb->s_blocksize_bits;
1062  
1063         bh = ext4_bread(handle, inode, *block, EXT4_GET_BLOCKS_CREATE);
1064 -       if (IS_ERR(bh))
1065 +       if (IS_ERR(bh)) {
1066 +               up(&ei->i_append_sem);
1067                 return bh;
1068 +       }
1069         inode->i_size += inode->i_sb->s_blocksize;
1070         EXT4_I(inode)->i_disksize = inode->i_size;
1071         BUFFER_TRACE(bh, "get_write_access");
1072         err = ext4_journal_get_write_access(handle, bh);
1073 +       up(&ei->i_append_sem);
1074         if (err) {
1075                 brelse(bh);
1076                 ext4_std_error(inode->i_sb, err);
1077 @@ -249,7 +257,8 @@ static unsigned dx_node_limit(struct ino
1078  static struct dx_frame *dx_probe(struct ext4_filename *fname,
1079                                  struct inode *dir,
1080                                  struct dx_hash_info *hinfo,
1081 -                                struct dx_frame *frame);
1082 +                                struct dx_frame *frame,
1083 +                                struct htree_lock *lck);
1084  static void dx_release(struct dx_frame *frames);
1085  static int dx_make_map(struct inode *dir, struct ext4_dir_entry_2 *de,
1086                        unsigned blocksize, struct dx_hash_info *hinfo,
1087 @@ -263,12 +272,13 @@ static void dx_insert_block(struct dx_fr
1088  static int ext4_htree_next_block(struct inode *dir, __u32 hash,
1089                                  struct dx_frame *frame,
1090                                  struct dx_frame *frames,
1091 -                                __u32 *start_hash);
1092 +                                __u32 *start_hash, struct htree_lock *lck);
1093  static struct buffer_head * ext4_dx_find_entry(struct inode *dir,
1094                 struct ext4_filename *fname,
1095 -               struct ext4_dir_entry_2 **res_dir);
1096 +               struct ext4_dir_entry_2 **res_dir, struct htree_lock *lck);
1097  static int ext4_dx_add_entry(handle_t *handle, struct ext4_filename *fname,
1098 -                            struct inode *dir, struct inode *inode);
1099 +                            struct inode *dir, struct inode *inode,
1100 +                            struct htree_lock *lck);
1101  
1102  /* checksumming functions */
1103  void initialize_dirent_tail(struct ext4_dir_entry_tail *t,
1104 @@ -732,6 +742,227 @@ struct stats dx_show_entries(struct dx_h
1105  }
1106  #endif /* DX_DEBUG */
1107  
1108 +/* private data for htree_lock */
1109 +struct ext4_dir_lock_data {
1110 +       unsigned                ld_flags;  /* bits-map for lock types */
1111 +       unsigned                ld_count;  /* # entries of the last DX block */
1112 +       struct dx_entry         ld_at_entry; /* copy of leaf dx_entry */
1113 +       struct dx_entry         *ld_at;    /* position of leaf dx_entry */
1114 +};
1115 +
1116 +#define ext4_htree_lock_data(l)        ((struct ext4_dir_lock_data *)(l)->lk_private)
1117 +#define ext4_find_entry(dir, name, dirent, inline) \
1118 +                       __ext4_find_entry(dir, name, dirent, inline, NULL)
1119 +#define ext4_add_entry(handle, dentry, inode) \
1120 +                       __ext4_add_entry(handle, dentry, inode, NULL)
1121 +
1122 +/* NB: ext4_lblk_t is 32 bits so we use high bits to identify invalid blk */
1123 +#define EXT4_HTREE_NODE_CHANGED        (0xcafeULL << 32)
1124 +
1125 +static void ext4_htree_event_cb(void *target, void *event)
1126 +{
1127 +       u64 *block = (u64 *)target;
1128 +
1129 +       if (*block == dx_get_block((struct dx_entry *)event))
1130 +               *block = EXT4_HTREE_NODE_CHANGED;
1131 +}
1132 +
1133 +struct htree_lock_head *ext4_htree_lock_head_alloc(unsigned hbits)
1134 +{
1135 +       struct htree_lock_head *lhead;
1136 +
1137 +       lhead = htree_lock_head_alloc(EXT4_LK_MAX, hbits, 0);
1138 +       if (lhead != NULL) {
1139 +               htree_lock_event_attach(lhead, EXT4_LK_SPIN, HTREE_EVENT_WR,
1140 +                                       ext4_htree_event_cb);
1141 +       }
1142 +       return lhead;
1143 +}
1144 +EXPORT_SYMBOL(ext4_htree_lock_head_alloc);
1145 +
1146 +struct htree_lock *ext4_htree_lock_alloc(void)
1147 +{
1148 +       return htree_lock_alloc(EXT4_LK_MAX,
1149 +                               sizeof(struct ext4_dir_lock_data));
1150 +}
1151 +EXPORT_SYMBOL(ext4_htree_lock_alloc);
1152 +
1153 +static htree_lock_mode_t ext4_htree_mode(unsigned flags)
1154 +{
1155 +       switch (flags) {
1156 +       default: /* 0 or unknown flags require EX lock */
1157 +               return HTREE_LOCK_EX;
1158 +       case EXT4_HLOCK_READDIR:
1159 +               return HTREE_LOCK_PR;
1160 +       case EXT4_HLOCK_LOOKUP:
1161 +               return HTREE_LOCK_CR;
1162 +       case EXT4_HLOCK_DEL:
1163 +       case EXT4_HLOCK_ADD:
1164 +               return HTREE_LOCK_CW;
1165 +       }
1166 +}
1167 +
1168 +/* return PR for read-only operations, otherwise return EX */
1169 +static inline htree_lock_mode_t ext4_htree_safe_mode(unsigned flags)
1170 +{
1171 +       int writer = (flags & EXT4_LB_DE) == EXT4_LB_DE;
1172 +
1173 +       /* 0 requires EX lock */
1174 +       return (flags == 0 || writer) ? HTREE_LOCK_EX : HTREE_LOCK_PR;
1175 +}
1176 +
1177 +static int ext4_htree_safe_locked(struct htree_lock *lck)
1178 +{
1179 +       int writer;
1180 +
1181 +       if (lck == NULL || lck->lk_mode == HTREE_LOCK_EX)
1182 +               return 1;
1183 +
1184 +       writer = (ext4_htree_lock_data(lck)->ld_flags & EXT4_LB_DE) ==
1185 +                EXT4_LB_DE;
1186 +       if (writer) /* all readers & writers are excluded? */
1187 +               return lck->lk_mode == HTREE_LOCK_EX;
1188 +
1189 +       /* all writers are excluded? */
1190 +       return lck->lk_mode == HTREE_LOCK_PR ||
1191 +              lck->lk_mode == HTREE_LOCK_PW ||
1192 +              lck->lk_mode == HTREE_LOCK_EX;
1193 +}
1194 +
1195 +/* relock htree_lock with EX mode if it's a change operation, otherwise
1196 + * relock it with PR mode. It's a noop if PDO is disabled. */
1197 +static void ext4_htree_safe_relock(struct htree_lock *lck)
1198 +{
1199 +       if (!ext4_htree_safe_locked(lck)) {
1200 +               unsigned flags = ext4_htree_lock_data(lck)->ld_flags;
1201 +
1202 +               htree_change_lock(lck, ext4_htree_safe_mode(flags));
1203 +       }
1204 +}
1205 +
1206 +void ext4_htree_lock(struct htree_lock *lck, struct htree_lock_head *lhead,
1207 +                    struct inode *dir, unsigned flags)
1208 +{
1209 +       htree_lock_mode_t mode = is_dx(dir) ? ext4_htree_mode(flags) :
1210 +                                             ext4_htree_safe_mode(flags);
1211 +
1212 +       ext4_htree_lock_data(lck)->ld_flags = flags;
1213 +       htree_lock(lck, lhead, mode);
1214 +       if (!is_dx(dir))
1215 +               ext4_htree_safe_relock(lck); /* make sure it's safe locked */
1216 +}
1217 +EXPORT_SYMBOL(ext4_htree_lock);
1218 +
1219 +static int ext4_htree_node_lock(struct htree_lock *lck, struct dx_entry *at,
1220 +                               unsigned lmask, int wait, void *ev)
1221 +{
1222 +       u32     key = (at == NULL) ? 0 : dx_get_block(at);
1223 +       u32     mode;
1224 +
1225 +       /* NOOP if htree is well protected or caller doesn't require the lock */
1226 +       if (ext4_htree_safe_locked(lck) ||
1227 +          !(ext4_htree_lock_data(lck)->ld_flags & lmask))
1228 +               return 1;
1229 +
1230 +       mode = (ext4_htree_lock_data(lck)->ld_flags & lmask) == lmask ?
1231 +               HTREE_LOCK_PW : HTREE_LOCK_PR;
1232 +       while (1) {
1233 +               if (htree_node_lock_try(lck, mode, key, ffz(~lmask), wait, ev))
1234 +                       return 1;
1235 +               if (!(lmask & EXT4_LB_SPIN)) /* not a spinlock */
1236 +                       return 0;
1237 +               cpu_relax(); /* spin until granted */
1238 +       }
1239 +}
1240 +
1241 +static int ext4_htree_node_locked(struct htree_lock *lck, unsigned lmask)
1242 +{
1243 +       return ext4_htree_safe_locked(lck) ||
1244 +              htree_node_is_granted(lck, ffz(~lmask));
1245 +}
1246 +
1247 +static void ext4_htree_node_unlock(struct htree_lock *lck,
1248 +                                  unsigned lmask, void *buf)
1249 +{
1250 +       /* NB: it's safe to call multiple times, even if it's not locked */
1251 +       if (!ext4_htree_safe_locked(lck) &&
1252 +            htree_node_is_granted(lck, ffz(~lmask)))
1253 +               htree_node_unlock(lck, ffz(~lmask), buf);
1254 +}
1255 +
1256 +#define ext4_htree_dx_lock(lck, key)           \
1257 +       ext4_htree_node_lock(lck, key, EXT4_LB_DX, 1, NULL)
1258 +#define ext4_htree_dx_lock_try(lck, key)       \
1259 +       ext4_htree_node_lock(lck, key, EXT4_LB_DX, 0, NULL)
1260 +#define ext4_htree_dx_unlock(lck)              \
1261 +       ext4_htree_node_unlock(lck, EXT4_LB_DX, NULL)
1262 +#define ext4_htree_dx_locked(lck)              \
1263 +       ext4_htree_node_locked(lck, EXT4_LB_DX)
1264 +
1265 +static void ext4_htree_dx_need_lock(struct htree_lock *lck)
1266 +{
1267 +       struct ext4_dir_lock_data *ld;
1268 +
1269 +       if (ext4_htree_safe_locked(lck))
1270 +               return;
1271 +
1272 +       ld = ext4_htree_lock_data(lck);
1273 +       switch (ld->ld_flags) {
1274 +       default:
1275 +               return;
1276 +       case EXT4_HLOCK_LOOKUP:
1277 +               ld->ld_flags = EXT4_HLOCK_LOOKUP_SAFE;
1278 +               return;
1279 +       case EXT4_HLOCK_DEL:
1280 +               ld->ld_flags = EXT4_HLOCK_DEL_SAFE;
1281 +               return;
1282 +       case EXT4_HLOCK_ADD:
1283 +               ld->ld_flags = EXT4_HLOCK_SPLIT;
1284 +               return;
1285 +       }
1286 +}
1287 +
1288 +#define ext4_htree_de_lock(lck, key)           \
1289 +       ext4_htree_node_lock(lck, key, EXT4_LB_DE, 1, NULL)
1290 +#define ext4_htree_de_unlock(lck)              \
1291 +       ext4_htree_node_unlock(lck, EXT4_LB_DE, NULL)
1292 +
1293 +#define ext4_htree_spin_lock(lck, key, event)  \
1294 +       ext4_htree_node_lock(lck, key, EXT4_LB_SPIN, 0, event)
1295 +#define ext4_htree_spin_unlock(lck)            \
1296 +       ext4_htree_node_unlock(lck, EXT4_LB_SPIN, NULL)
1297 +#define ext4_htree_spin_unlock_listen(lck, p)  \
1298 +       ext4_htree_node_unlock(lck, EXT4_LB_SPIN, p)
1299 +
1300 +static void ext4_htree_spin_stop_listen(struct htree_lock *lck)
1301 +{
1302 +       if (!ext4_htree_safe_locked(lck) &&
1303 +           htree_node_is_listening(lck, ffz(~EXT4_LB_SPIN)))
1304 +               htree_node_stop_listen(lck, ffz(~EXT4_LB_SPIN));
1305 +}
1306 +
1307 +enum {
1308 +       DX_HASH_COL_IGNORE,     /* ignore collision while probing frames */
1309 +       DX_HASH_COL_YES,        /* there is collision and it does matter */
1310 +       DX_HASH_COL_NO,         /* there is no collision */
1311 +};
1312 +
1313 +static int dx_probe_hash_collision(struct htree_lock *lck,
1314 +                                  struct dx_entry *entries,
1315 +                                  struct dx_entry *at, u32 hash)
1316 +{
1317 +       if (!(lck && ext4_htree_lock_data(lck)->ld_flags & EXT4_LB_EXACT)) {
1318 +               return DX_HASH_COL_IGNORE; /* don't care about collision */
1319 +
1320 +       } else if (at == entries + dx_get_count(entries) - 1) {
1321 +               return DX_HASH_COL_IGNORE; /* not in any leaf of this DX */
1322 +
1323 +       } else { /* hash collision? */
1324 +               return ((dx_get_hash(at + 1) & ~1) == hash) ?
1325 +                       DX_HASH_COL_YES : DX_HASH_COL_NO;
1326 +       }
1327 +}
1328 +
1329  /*
1330   * Probe for a directory leaf block to search.
1331   *
1332 @@ -743,10 +974,11 @@ struct stats dx_show_entries(struct dx_h
1333   */
1334  static struct dx_frame *
1335  dx_probe(struct ext4_filename *fname, struct inode *dir,
1336 -        struct dx_hash_info *hinfo, struct dx_frame *frame_in)
1337 +        struct dx_hash_info *hinfo, struct dx_frame *frame_in,
1338 +        struct htree_lock *lck)
1339  {
1340         unsigned count, indirect;
1341 -       struct dx_entry *at, *entries, *p, *q, *m;
1342 +       struct dx_entry *at, *entries, *p, *q, *m, *dx = NULL;
1343         struct dx_root_info *info;
1344         struct dx_frame *frame = frame_in;
1345         struct dx_frame *ret_err = ERR_PTR(ERR_BAD_DX_DIR);
1346 @@ -809,8 +1041,15 @@ dx_probe(struct ext4_filename *fname, st
1347  
1348         dxtrace(printk("Look up %x", hash));
1349         while (1) {
1350 +               if (indirect == 0) { /* the last index level */
1351 +                       /* NB: ext4_htree_dx_lock() can be a no-op if the
1352 +                        * DX-lock flag is not set for the current operation */
1353 +                       ext4_htree_dx_lock(lck, dx);
1354 +                       ext4_htree_spin_lock(lck, dx, NULL);
1355 +               }
1356                 count = dx_get_count(entries);
1357 -               if (!count || count > dx_get_limit(entries)) {
1358 +               if (count == 0 || count > dx_get_limit(entries)) {
1359 +                       ext4_htree_spin_unlock(lck); /* release spin */
1360                         ext4_warning_inode(dir,
1361                                            "dx entry: count %u beyond limit %u",
1362                                            count, dx_get_limit(entries));
1363 @@ -849,8 +1088,70 @@ dx_probe(struct ext4_filename *fname, st
1364                                dx_get_block(at)));
1365                 frame->entries = entries;
1366                 frame->at = at;
1367 -               if (!indirect--)
1368 +
1369 +               if (indirect == 0) { /* the last index level */
1370 +                       struct ext4_dir_lock_data *ld;
1371 +                       u64 myblock;
1372 +
1373 +                       /* By default we only lock the DE-block; however,
1374 +                        * we also lock the last-level DX-block if:
1375 +                        * a) there is a hash collision:
1376 +                        *    we set the DX-lock flag (a few lines below)
1377 +                        *    and retry to take the DX-block lock,
1378 +                        *    see details in dx_probe_hash_collision()
1379 +                        * b) it is a retry after splitting:
1380 +                        *    we need to lock the last-level DX-block so
1381 +                        *    nobody else can split any leaf blocks under
1382 +                        *    the same DX-block, see ext4_dx_add_entry()
1383 +                        */
1384 +                       if (ext4_htree_dx_locked(lck)) {
1385 +                               /* DX-block is locked, just lock DE-block
1386 +                                * and return */
1387 +                               ext4_htree_spin_unlock(lck);
1388 +                               if (!ext4_htree_safe_locked(lck))
1389 +                                       ext4_htree_de_lock(lck, frame->at);
1390 +                               return frame;
1391 +                       }
1392 +                       /* it's pdirop and no DX lock */
1393 +                       if (dx_probe_hash_collision(lck, entries, at, hash) ==
1394 +                           DX_HASH_COL_YES) {
1395 +                               /* found a hash collision, set the DX-lock
1396 +                                * flag and retry to obtain the DX-lock */
1397 +                               ext4_htree_spin_unlock(lck);
1398 +                               ext4_htree_dx_need_lock(lck);
1399 +                               continue;
1400 +                       }
1401 +                       ld = ext4_htree_lock_data(lck);
1402 +                       /* we do not hold the DX lock, so @at cannot be
1403 +                        * trusted after the spinlock is released; save it */
1404 +                       ld->ld_at = at;
1405 +                       ld->ld_at_entry = *at;
1406 +                       ld->ld_count = dx_get_count(entries);
1407 +
1408 +                       frame->at = &ld->ld_at_entry;
1409 +                       myblock = dx_get_block(at);
1410 +
1411 +                       /* NB: mind the lock ordering here */
1412 +                       ext4_htree_spin_unlock_listen(lck, &myblock);
1413 +                       /* another thread can split this DE-block because:
1414 +                        * a) we do not hold the DE-block lock yet
1415 +                        * b) we released the spinlock on the DX-block
1416 +                        * if that happens, we can detect it by listening
1417 +                        * for the split event on this DE-block */
1418 +                       ext4_htree_de_lock(lck, frame->at);
1419 +                       ext4_htree_spin_stop_listen(lck);
1420 +
1421 +                       if (myblock == EXT4_HTREE_NODE_CHANGED) {
1422 +                               /* someone split this DE-block before we
1423 +                                * locked it; retry and lock the now-valid
1424 +                                * DE-block */
1425 +                               ext4_htree_de_unlock(lck);
1426 +                               continue;
1427 +                       }
1428                         return frame;
1429 +               }
1430 +               dx = at;
1431 +               indirect--;
1432                 frame++;
1433                 frame->bh = ext4_read_dirblock(dir, dx_get_block(at), INDEX);
1434                 if (IS_ERR(frame->bh)) {
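
The locking dance above is easier to follow in condensed form. The sketch
below is illustrative pseudocode only, not part of the patch: the helper name
pdirop_lock_leaf_sketch is invented, and it restates just the common path in
which no hash collision is detected and the last-level DX-block stays unlocked.

static void pdirop_lock_leaf_sketch(struct htree_lock *lck, struct dx_entry *dx,
				    struct dx_entry *at, struct dx_frame *frame)
{
	while (1) {
		u64 myblock;

		ext4_htree_spin_lock(lck, dx, NULL);	/* pin the DX entries */
		/* ... re-read @at and save a copy in ld_at_entry, as above ... */
		myblock = dx_get_block(at);

		/* drop the spinlock, but keep listening for a split event
		 * on the chosen DE-block */
		ext4_htree_spin_unlock_listen(lck, &myblock);
		ext4_htree_de_lock(lck, frame->at);	/* may sleep */
		ext4_htree_spin_stop_listen(lck);

		if (myblock != EXT4_HTREE_NODE_CHANGED)
			return;		/* still the right DE-block: done */

		/* it was split before we took the DE lock: retry */
		ext4_htree_de_unlock(lck);
	}
}
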
1435 @@ -917,7 +1218,7 @@ static void dx_release(struct dx_frame *
1436  static int ext4_htree_next_block(struct inode *dir, __u32 hash,
1437                                  struct dx_frame *frame,
1438                                  struct dx_frame *frames,
1439 -                                __u32 *start_hash)
1440 +                                __u32 *start_hash, struct htree_lock *lck)
1441  {
1442         struct dx_frame *p;
1443         struct buffer_head *bh;
1444 @@ -932,12 +1233,22 @@ static int ext4_htree_next_block(struct
1445          * this loop, num_frames indicates the number of interior
1446          * nodes need to be read.
1447          */
1448 +       ext4_htree_de_unlock(lck);
1449         while (1) {
1450 -               if (++(p->at) < p->entries + dx_get_count(p->entries))
1451 -                       break;
1452 +               if (num_frames > 0 || ext4_htree_dx_locked(lck)) {
1453 +                       /* num_frames > 0:
1454 +                        *   this is a DX block
1455 +                        * ext4_htree_dx_locked:
1456 +                        *   frame->at is a reliable pointer returned by
1457 +                        *   dx_probe; otherwise dx_probe already saw no collision */
1458 +                       if (++(p->at) < p->entries + dx_get_count(p->entries))
1459 +                               break;
1460 +               }
1461                 if (p == frames)
1462                         return 0;
1463                 num_frames++;
1464 +               if (num_frames == 1)
1465 +                       ext4_htree_dx_unlock(lck);
1466                 p--;
1467         }
1468  
1469 @@ -960,6 +1271,13 @@ static int ext4_htree_next_block(struct
1470          * block so no check is necessary
1471          */
1472         while (num_frames--) {
1473 +               if (num_frames == 0) {
1474 +                       /* this is not always necessary; we just do not
1475 +                        * want to detect the hash collision again */
1476 +                       ext4_htree_dx_need_lock(lck);
1477 +                       ext4_htree_dx_lock(lck, p->at);
1478 +               }
1479 +
1480                 bh = ext4_read_dirblock(dir, dx_get_block(p->at), INDEX);
1481                 if (IS_ERR(bh))
1482                         return PTR_ERR(bh);
1483 @@ -968,6 +1286,7 @@ static int ext4_htree_next_block(struct
1484                 p->bh = bh;
1485                 p->at = p->entries = ((struct dx_node *) bh->b_data)->entries;
1486         }
1487 +       ext4_htree_de_lock(lck, p->at);
1488         return 1;
1489  }
1490  
1491 @@ -1115,10 +1434,10 @@ int ext4_htree_fill_tree(struct file *di
1492         }
1493         hinfo.hash = start_hash;
1494         hinfo.minor_hash = 0;
1495 -       frame = dx_probe(NULL, dir, &hinfo, frames);
1496 +       /* assume it's PR locked */
1497 +       frame = dx_probe(NULL, dir, &hinfo, frames, NULL);
1498         if (IS_ERR(frame))
1499                 return PTR_ERR(frame);
1500 -
1501         /* Add '.' and '..' from the htree header */
1502         if (!start_hash && !start_minor_hash) {
1503                 de = (struct ext4_dir_entry_2 *) frames[0].bh->b_data;
1504 @@ -1158,7 +1477,7 @@ int ext4_htree_fill_tree(struct file *di
1505                 count += ret;
1506                 hashval = ~0;
1507                 ret = ext4_htree_next_block(dir, HASH_NB_ALWAYS,
1508 -                                           frame, frames, &hashval);
1509 +                                           frame, frames, &hashval, NULL);
1510                 *next_hash = hashval;
1511                 if (ret < 0) {
1512                         err = ret;
1513 @@ -1350,10 +1669,10 @@ static int is_dx_internal_node(struct in
1514   * The returned buffer_head has ->b_count elevated.  The caller is expected
1515   * to brelse() it when appropriate.
1516   */
1517 -static struct buffer_head * ext4_find_entry (struct inode *dir,
1518 +struct buffer_head *__ext4_find_entry(struct inode *dir,
1519                                         const struct qstr *d_name,
1520                                         struct ext4_dir_entry_2 **res_dir,
1521 -                                       int *inlined)
1522 +                                       int *inlined, struct htree_lock *lck)
1523  {
1524         struct super_block *sb;
1525         struct buffer_head *bh_use[NAMEI_RA_SIZE];
1526 @@ -1403,7 +1722,7 @@ static struct buffer_head * ext4_find_en
1527                 goto restart;
1528         }
1529         if (is_dx(dir)) {
1530 -               ret = ext4_dx_find_entry(dir, &fname, res_dir);
1531 +               ret = ext4_dx_find_entry(dir, &fname, res_dir, lck);
1532                 /*
1533                  * On success, or if the error was file not found,
1534                  * return.  Otherwise, fall back to doing a search the
1535 @@ -1413,6 +1732,7 @@ static struct buffer_head * ext4_find_en
1536                         goto cleanup_and_exit;
1537                 dxtrace(printk(KERN_DEBUG "ext4_find_entry: dx failed, "
1538                                "falling back\n"));
1539 +               ext4_htree_safe_relock(lck);
1540                 ret = NULL;
1541         }
1542         nblocks = dir->i_size >> EXT4_BLOCK_SIZE_BITS(sb);
1543 @@ -1514,10 +1834,12 @@ cleanup_and_exit:
1544         ext4_fname_free_filename(&fname);
1545         return ret;
1546  }
1547 +EXPORT_SYMBOL(__ext4_find_entry);
1548  
1549  static struct buffer_head * ext4_dx_find_entry(struct inode *dir,
1550                         struct ext4_filename *fname,
1551 -                       struct ext4_dir_entry_2 **res_dir)
1552 +                       struct ext4_dir_entry_2 **res_dir,
1553 +                       struct htree_lock *lck)
1554  {
1555         struct super_block * sb = dir->i_sb;
1556         struct dx_frame frames[EXT4_HTREE_LEVEL], *frame;
1557 @@ -1528,7 +1850,7 @@ static struct buffer_head * ext4_dx_find
1558  #ifdef CONFIG_EXT4_FS_ENCRYPTION
1559         *res_dir = NULL;
1560  #endif
1561 -       frame = dx_probe(fname, dir, NULL, frames);
1562 +       frame = dx_probe(fname, dir, NULL, frames, lck);
1563         if (IS_ERR(frame))
1564                 return (struct buffer_head *) frame;
1565         do {
1566 @@ -1550,7 +1872,7 @@ static struct buffer_head * ext4_dx_find
1567  
1568                 /* Check to see if we should continue to search */
1569                 retval = ext4_htree_next_block(dir, fname->hinfo.hash, frame,
1570 -                                              frames, NULL);
1571 +                                              frames, NULL, lck);
1572                 if (retval < 0) {
1573                         ext4_warning_inode(dir,
1574                                 "error %d reading directory index block",
1575 @@ -1735,8 +2057,9 @@ static struct ext4_dir_entry_2* dx_pack_
1576   * Returns pointer to de in block into which the new entry will be inserted.
1577   */
1578  static struct ext4_dir_entry_2 *do_split(handle_t *handle, struct inode *dir,
1579 -                       struct buffer_head **bh,struct dx_frame *frame,
1580 -                       struct dx_hash_info *hinfo)
1581 +                       struct buffer_head **bh, struct dx_frame *frames,
1582 +                       struct dx_frame *frame, struct dx_hash_info *hinfo,
1583 +                       struct htree_lock *lck)
1584  {
1585         unsigned blocksize = dir->i_sb->s_blocksize;
1586         unsigned count, continued;
1587 @@ -1798,8 +2121,14 @@ static struct ext4_dir_entry_2 *do_split
1588                                         hash2, split, count-split));
1589  
1590         /* Fancy dance to stay within two buffers */
1591 -       de2 = dx_move_dirents(data1, data2, map + split, count - split,
1592 -                             blocksize);
1593 +       if (hinfo->hash < hash2) {
1594 +               de2 = dx_move_dirents(data1, data2, map + split,
1595 +                                     count - split, blocksize);
1596 +       } else {
1597 +               /* make sure we add the entry to the same block that
1598 +                * we have already locked */
1599 +               de2 = dx_move_dirents(data1, data2, map, split, blocksize);
1600 +       }
1601         de = dx_pack_dirents(data1, blocksize);
1602         de->rec_len = ext4_rec_len_to_disk(data1 + (blocksize - csum_size) -
1603                                            (char *) de,
1604 @@ -1820,12 +2149,21 @@ static struct ext4_dir_entry_2 *do_split
1605         dxtrace(dx_show_leaf(dir, hinfo, (struct ext4_dir_entry_2 *) data2,
1606                         blocksize, 1));
1607  
1608 -       /* Which block gets the new entry? */
1609 -       if (hinfo->hash >= hash2) {
1610 -               swap(*bh, bh2);
1611 -               de = de2;
1612 +       ext4_htree_spin_lock(lck, frame > frames ? (frame - 1)->at : NULL,
1613 +                            frame->at); /* notify block is being split */
1614 +       if (hinfo->hash < hash2) {
1615 +               dx_insert_block(frame, hash2 + continued, newblock);
1616 +
1617 +       } else {
1618 +               /* switch block number */
1619 +               dx_insert_block(frame, hash2 + continued,
1620 +                               dx_get_block(frame->at));
1621 +               dx_set_block(frame->at, newblock);
1622 +               (frame->at)++;
1623         }
1624 -       dx_insert_block(frame, hash2 + continued, newblock);
1625 +       ext4_htree_spin_unlock(lck);
1626 +       ext4_htree_dx_unlock(lck);
1627 +
1628         err = ext4_handle_dirty_dirent_node(handle, dir, bh2);
1629         if (err)
1630                 goto journal_error;
1631 @@ -2099,7 +2437,7 @@ static int make_indexed_dir(handle_t *ha
1632         if (retval)
1633                 goto out_frames;        
1634  
1635 -       de = do_split(handle,dir, &bh2, frame, &fname->hinfo);
1636 +       de = do_split(handle, dir, &bh2, frames, frame, &fname->hinfo, NULL);
1637         if (IS_ERR(de)) {
1638                 retval = PTR_ERR(de);
1639                 goto out_frames;
1640 @@ -2209,8 +2547,8 @@ out:
1641   * may not sleep between calling this and putting something into
1642   * the entry, as someone else might have used it while you slept.
1643   */
1644 -static int ext4_add_entry(handle_t *handle, struct dentry *dentry,
1645 -                         struct inode *inode)
1646 +int __ext4_add_entry(handle_t *handle, struct dentry *dentry,
1647 +                     struct inode *inode, struct htree_lock *lck)
1648  {
1649         struct inode *dir = d_inode(dentry->d_parent);
1650         struct buffer_head *bh = NULL;
1651 @@ -2251,7 +2589,7 @@ static int ext4_add_entry(handle_t *hand
1652                 if (dentry->d_name.len == 2 &&
1653                     memcmp(dentry->d_name.name, "..", 2) == 0)
1654                         return ext4_update_dotdot(handle, dentry, inode);
1655 -               retval = ext4_dx_add_entry(handle, &fname, dir, inode);
1656 +               retval = ext4_dx_add_entry(handle, &fname, dir, inode, lck);
1657                 if (!retval || (retval != ERR_BAD_DX_DIR))
1658                         goto out;
1659                 /* Can we just ignore htree data? */
1660 @@ -2261,6 +2599,7 @@ static int ext4_add_entry(handle_t *hand
1661                         retval = -EFSCORRUPTED;
1662                         goto out;
1663                 }
1664 +               ext4_htree_safe_relock(lck);
1665                 ext4_clear_inode_flag(dir, EXT4_INODE_INDEX);
1666                 dx_fallback++;
1667                 ext4_mark_inode_dirty(handle, dir);
1668 @@ -2310,12 +2649,14 @@ out:
1669                 ext4_set_inode_state(inode, EXT4_STATE_NEWENTRY);
1670         return retval;
1671  }
1672 +EXPORT_SYMBOL(__ext4_add_entry);
1673  
1674  /*
1675   * Returns 0 for success, or a negative error value
1676   */
1677  static int ext4_dx_add_entry(handle_t *handle, struct ext4_filename *fname,
1678 -                            struct inode *dir, struct inode *inode)
1679 +                            struct inode *dir, struct inode *inode,
1680 +                            struct htree_lock *lck)
1681  {
1682         struct dx_frame frames[EXT4_HTREE_LEVEL], *frame;
1683         struct dx_entry *entries, *at;
1684 @@ -2327,7 +2668,7 @@ static int ext4_dx_add_entry(handle_t *h
1685  
1686  again:
1687         restart = 0;
1688 -       frame = dx_probe(fname, dir, NULL, frames);
1689 +       frame = dx_probe(fname, dir, NULL, frames, lck);
1690         if (IS_ERR(frame))
1691                 return PTR_ERR(frame);
1692         entries = frame->entries;
1693 @@ -2362,6 +2703,11 @@ again:
1694                 struct dx_node *node2;
1695                 struct buffer_head *bh2;
1696  
1697 +               if (!ext4_htree_safe_locked(lck)) { /* retry with EX lock */
1698 +                       ext4_htree_safe_relock(lck);
1699 +                       restart = 1;
1700 +                       goto cleanup;
1701 +               }
1702                 while (frame > frames) {
1703                         if (dx_get_count((frame - 1)->entries) <
1704                             dx_get_limit((frame - 1)->entries)) {
1705 @@ -2465,8 +2811,32 @@ again:
1706                         restart = 1;
1707                         goto journal_error;
1708                 }
1709 +       } else if (!ext4_htree_dx_locked(lck)) {
1710 +               struct ext4_dir_lock_data *ld = ext4_htree_lock_data(lck);
1711 +
1712 +               /* not fully protected; the DX lock is required */
1713 +               ext4_htree_dx_need_lock(lck);
1714 +               at = frame > frames ? (frame - 1)->at : NULL;
1715 +
1716 +               /* NB: no risk of deadlock because this is only a try.
1717 +                *
1718 +                * NB: we check ld_count twice, the first time before
1719 +                * taking the DX lock, the second time while holding it.
1720 +                *
1721 +                * NB: we never free directory blocks so far, which means
1722 +                * the value returned by dx_get_count() should equal
1723 +                * ld->ld_count if nobody split any DE-block under @at,
1724 +                * and ld->ld_at still points to a valid dx_entry. */
1725 +               if ((ld->ld_count != dx_get_count(entries)) ||
1726 +                   !ext4_htree_dx_lock_try(lck, at) ||
1727 +                   (ld->ld_count != dx_get_count(entries))) {
1728 +                       restart = 1;
1729 +                       goto cleanup;
1730 +               }
1731 +               /* OK, I've got DX lock and nothing changed */
1732 +               frame->at = ld->ld_at;
1733         }
1734 -       de = do_split(handle, dir, &bh, frame, &fname->hinfo);
1735 +       de = do_split(handle, dir, &bh, frames, frame, &fname->hinfo, lck);
1736         if (IS_ERR(de)) {
1737                 err = PTR_ERR(de);
1738                 goto cleanup;
1739 @@ -2477,6 +2847,8 @@ again:
1740  journal_error:
1741         ext4_std_error(dir->i_sb, err); /* this is a no-op if err == 0 */
1742  cleanup:
1743 +       ext4_htree_dx_unlock(lck);
1744 +       ext4_htree_de_unlock(lck);
1745         brelse(bh);
1746         dx_release(frames);
1747         /* @restart is true means htree-path has been changed, we need to
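
The namei.c changes export __ext4_find_entry() and __ext4_add_entry() so that
an external caller such as osd-ldiskfs can pass in its own htree_lock (set up
with one of the EXT4_HLOCK_* modes shown earlier), while in-tree callers keep
the old behaviour by passing NULL. Below is a caller-side sketch; the
example_* wrappers are hypothetical, not part of the patch, and assume the
caller serializes the directory itself whenever it passes a NULL lock.

#include "ext4.h"	/* assumes the declarations this patch adds to ext4.h */

/* With lck == NULL the helpers behave like the original single-lock code,
 * so the caller is expected to hold the directory exclusively, e.g. via
 * inode_lock(dir), exactly as before this patch. */
static inline struct buffer_head *
example_find_entry(struct inode *dir, const struct qstr *d_name,
		   struct ext4_dir_entry_2 **res_dir)
{
	return __ext4_find_entry(dir, d_name, res_dir, NULL, NULL);
}

static inline int example_add_entry(handle_t *handle, struct dentry *dentry,
				    struct inode *inode)
{
	return __ext4_add_entry(handle, dentry, inode, NULL);
}
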
1748 --- a/fs/ext4/super.c
1749 +++ b/fs/ext4/super.c
1750 @@ -989,6 +989,7 @@ static struct inode *ext4_alloc_inode(st
1751  
1752         inode_set_iversion(&ei->vfs_inode, 1);
1753         spin_lock_init(&ei->i_raw_lock);
1754 +       sema_init(&ei->i_append_sem, 1);
1755         INIT_LIST_HEAD(&ei->i_prealloc_list);
1756         spin_lock_init(&ei->i_prealloc_lock);
1757         ext4_es_init_tree(&ei->i_es_tree);
1758 --- /dev/null
1759 +++ b/include/linux/htree_lock.h
1760 @@ -0,0 +1,187 @@
1761 +/*
1762 + * include/linux/htree_lock.h
1763 + *
1764 + * Copyright (c) 2011, 2012, Intel Corporation.
1765 + *
1766 + * Author: Liang Zhen <liang@whamcloud.com>
1767 + */
1768 +
1769 +/*
1770 + * htree lock
1771 + *
1772 + * htree_lock is an advanced lock; it supports five lock modes (a concept
1773 + * taken from the DLM) and it is a sleeping lock.
1774 + *
1775 + * The most common use case is:
1776 + * - create a htree_lock_head for data
1777 + * - each thread (contender) creates its own htree_lock
1778 + * - a contender calls htree_lock(lock_node, mode) to protect the data and
1779 + *   htree_unlock to release the lock
1780 + *
1781 + * There is also a more complex, advanced use case: a user can take a PW/PR
1782 + * lock on a particular key; this is mostly used while the user holds a
1783 + * shared lock on the htree (CW or CR):
1784 + *
1785 + * htree_lock(lock_node, HTREE_LOCK_CR); lock the htree with CR
1786 + * htree_node_lock(lock_node, HTREE_LOCK_PR, key...); lock @key with PR
1787 + * ...
1788 + * htree_node_unlock(lock_node); unlock the key
1789 + *
1790 + * Another tip: we can have N levels of this kind of key; all we need to
1791 + * do is specify N levels while creating the htree_lock_head, then we can
1792 + * lock/unlock a specific level with:
1793 + * htree_node_lock(lock_node, mode1, key1, level1...);
1794 + * do something;
1795 + * htree_node_lock(lock_node, mode1, key2, level2...);
1796 + * do something;
1797 + * htree_node_unlock(lock_node, level2);
1798 + * htree_node_unlock(lock_node, level1);
1799 + *
1800 + * NB: with multiple levels, be careful about lock ordering to avoid deadlocks
1801 + */
1802 +
1803 +#ifndef _LINUX_HTREE_LOCK_H
1804 +#define _LINUX_HTREE_LOCK_H
1805 +
1806 +#include <linux/list.h>
1807 +#include <linux/spinlock.h>
1808 +#include <linux/sched.h>
1809 +
1810 +/*
1811 + * Lock Modes
1812 + * more details can be found here:
1813 + * http://en.wikipedia.org/wiki/Distributed_lock_manager
1814 + */
1815 +typedef enum {
1816 +       HTREE_LOCK_EX   = 0, /* exclusive lock: incompatible with all others */
1817 +       HTREE_LOCK_PW,       /* protected write: allows only CR users */
1818 +       HTREE_LOCK_PR,       /* protected read: allow PR, CR users */
1819 +       HTREE_LOCK_CW,       /* concurrent write: allow CR, CW users */
1820 +       HTREE_LOCK_CR,       /* concurrent read: allow all but EX users */
1821 +       HTREE_LOCK_MAX,      /* number of lock modes */
1822 +} htree_lock_mode_t;
1823 +
1824 +#define HTREE_LOCK_NL          HTREE_LOCK_MAX
1825 +#define HTREE_LOCK_INVAL       0xdead10c
1826 +
1827 +enum {
1828 +       HTREE_HBITS_MIN         = 2,
1829 +       HTREE_HBITS_DEF         = 14,
1830 +       HTREE_HBITS_MAX         = 32,
1831 +};
1832 +
1833 +enum {
1834 +       HTREE_EVENT_DISABLE     = (0),
1835 +       HTREE_EVENT_RD          = (1 << HTREE_LOCK_PR),
1836 +       HTREE_EVENT_WR          = (1 << HTREE_LOCK_PW),
1837 +       HTREE_EVENT_RDWR        = (HTREE_EVENT_RD | HTREE_EVENT_WR),
1838 +};
1839 +
1840 +struct htree_lock;
1841 +
1842 +typedef void (*htree_event_cb_t)(void *target, void *event);
1843 +
1844 +struct htree_lock_child {
1845 +       struct list_head        lc_list;        /* granted list */
1846 +       htree_event_cb_t        lc_callback;    /* event callback */
1847 +       unsigned                lc_events;      /* event types */
1848 +};
1849 +
1850 +struct htree_lock_head {
1851 +       unsigned long           lh_lock;        /* bits lock */
1852 +       /* blocked lock list (htree_lock) */
1853 +       struct list_head        lh_blocked_list;
1854 +       /* # key levels */
1855 +       u16                     lh_depth;
1856 +       /* hash bits for key and limit number of locks */
1857 +       u16                     lh_hbits;
1858 +       /* counters for blocked locks */
1859 +       u16                     lh_nblocked[HTREE_LOCK_MAX];
1860 +       /* counters for granted locks */
1861 +       u16                     lh_ngranted[HTREE_LOCK_MAX];
1862 +       /* private data */
1863 +       void                    *lh_private;
1864 +       /* array of children locks */
1865 +       struct htree_lock_child lh_children[0];
1866 +};
1867 +
1868 +/* htree_lock_node_t is child-lock for a specific key (ln_value) */
1869 +struct htree_lock_node {
1870 +       htree_lock_mode_t       ln_mode;
1871 +       /* major hash key */
1872 +       u16                     ln_major_key;
1873 +       /* minor hash key */
1874 +       u16                     ln_minor_key;
1875 +       struct list_head        ln_major_list;
1876 +       struct list_head        ln_minor_list;
1877 +       /* alive list, all locks (granted, blocked, listening) are on it */
1878 +       struct list_head        ln_alive_list;
1879 +       /* blocked list */
1880 +       struct list_head        ln_blocked_list;
1881 +       /* granted list */
1882 +       struct list_head        ln_granted_list;
1883 +       void                    *ln_ev_target;
1884 +};
1885 +
1886 +struct htree_lock {
1887 +       struct task_struct      *lk_task;
1888 +       struct htree_lock_head  *lk_head;
1889 +       void                    *lk_private;
1890 +       unsigned                lk_depth;
1891 +       htree_lock_mode_t       lk_mode;
1892 +       struct list_head        lk_blocked_list;
1893 +       struct htree_lock_node  lk_nodes[0];
1894 +};
1895 +
1896 +/* create a lock head, which stands for a resource */
1897 +struct htree_lock_head *htree_lock_head_alloc(unsigned depth,
1898 +                                             unsigned hbits, unsigned priv);
1899 +/* free a lock head */
1900 +void htree_lock_head_free(struct htree_lock_head *lhead);
1901 +/* register event callback for child lock at level @depth */
1902 +void htree_lock_event_attach(struct htree_lock_head *lhead, unsigned depth,
1903 +                            unsigned events, htree_event_cb_t callback);
1904 +/* create a lock handle, which stands for a thread */
1905 +struct htree_lock *htree_lock_alloc(unsigned depth, unsigned pbytes);
1906 +/* free a lock handle */
1907 +void htree_lock_free(struct htree_lock *lck);
1908 +/* lock the htree; when @wait is false, 0 is returned if the lock can't
1909 + * be granted immediately */
1910 +int htree_lock_try(struct htree_lock *lck, struct htree_lock_head *lhead,
1911 +                  htree_lock_mode_t mode, int wait);
1912 +/* unlock htree */
1913 +void htree_unlock(struct htree_lock *lck);
1914 +/* unlock and relock htree with @new_mode */
1915 +int htree_change_lock_try(struct htree_lock *lck,
1916 +                         htree_lock_mode_t new_mode, int wait);
1917 +void htree_change_mode(struct htree_lock *lck, htree_lock_mode_t mode);
1918 +/* acquire the child lock (key) of the htree at level @dep; @event will be
1919 + * sent to all listeners on this @key while the lock is being granted */
1920 +int htree_node_lock_try(struct htree_lock *lck, htree_lock_mode_t mode,
1921 +                       u32 key, unsigned dep, int wait, void *event);
1922 +/* release the child lock at level @dep; this lock will listen on its key
1923 + * if @event isn't NULL, and event_cb will be called against @lck when any
1924 + * other lock at level @dep with the same key is granted */
1925 +void htree_node_unlock(struct htree_lock *lck, unsigned dep, void *event);
1926 +/* stop listening on child lock at level @dep */
1927 +void htree_node_stop_listen(struct htree_lock *lck, unsigned dep);
1928 +/* for debug */
1929 +void htree_lock_stat_print(int depth);
1930 +void htree_lock_stat_reset(void);
1931 +
1932 +#define htree_lock(lck, lh, mode)      htree_lock_try(lck, lh, mode, 1)
1933 +#define htree_change_lock(lck, mode)   htree_change_lock_try(lck, mode, 1)
1934 +
1935 +#define htree_lock_mode(lck)           ((lck)->lk_mode)
1936 +
1937 +#define htree_node_lock(lck, mode, key, dep)   \
1938 +       htree_node_lock_try(lck, mode, key, dep, 1, NULL)
1939 +/* this is only safe in thread context of lock owner */
1940 +#define htree_node_is_granted(lck, dep)                \
1941 +       ((lck)->lk_nodes[dep].ln_mode != HTREE_LOCK_INVAL && \
1942 +        (lck)->lk_nodes[dep].ln_mode != HTREE_LOCK_NL)
1943 +/* this is only safe in thread context of lock owner */
1944 +#define htree_node_is_listening(lck, dep)      \
1945 +       ((lck)->lk_nodes[dep].ln_mode == HTREE_LOCK_NL)
1946 +
1947 +#endif
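
For reference, here is a minimal usage sketch of the htree_lock API declared
above. It is illustrative only and not part of the patch (the example_* names
are invented). It follows the pattern from the header comment: a shared mode
on the whole tree plus an exclusive child lock on a single key, releasing the
key before the tree lock as the multi-level note advises.

#include <linux/errno.h>
#include <linux/types.h>
#include <linux/htree_lock.h>

static struct htree_lock_head *example_lhead;

static int example_init(void)
{
	/* one key level, default hash bits, no private data */
	example_lhead = htree_lock_head_alloc(1, HTREE_HBITS_DEF, 0);
	return example_lhead ? 0 : -ENOMEM;
}

static void example_update_key(u32 key)
{
	struct htree_lock *lck;

	lck = htree_lock_alloc(1, 0);	/* one level, no private bytes */
	if (!lck)
		return;

	/* CW on the whole tree: other CW/CR holders may run concurrently */
	htree_lock(lck, example_lhead, HTREE_LOCK_CW);
	/* PW on @key at level 0: only CR users of this key may coexist */
	htree_node_lock(lck, HTREE_LOCK_PW, key, 0);

	/* ... modify the data guarded by @key ... */

	htree_node_unlock(lck, 0, NULL);	/* release the key first ... */
	htree_unlock(lck);			/* ... then the tree lock */

	htree_lock_free(lck);
}

static void example_fini(void)
{
	htree_lock_head_free(example_lhead);
}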