ldiskfs/kernel_patches/patches/ubuntu14+16/ext4-pdirop-001.patch (fs/lustre-release.git)
1 diff --git a/fs/ext4/Makefile b/fs/ext4/Makefile
2 index f52cf54..3f16939 100644
3 --- a/fs/ext4/Makefile
4 +++ b/fs/ext4/Makefile
5 @@ -6,6 +6,7 @@ obj-$(CONFIG_EXT4_FS) += ext4.o
6  
7  ext4-y := balloc.o bitmap.o dir.o file.o fsync.o ialloc.o inode.o page-io.o \
8                 ioctl.o namei.o super.o symlink.o hash.o resize.o extents.o \
9 +               htree_lock.o \
10                 ext4_jbd2.o migrate.o mballoc.o block_validity.o move_extent.o \
11                 mmp.o indirect.o extents_status.o xattr.o xattr_user.o \
12                 xattr_trusted.o inline.o readpage.o sysfs.o
13 diff --git a/fs/ext4/ext4.h b/fs/ext4/ext4.h
14 index 2d22f1a..005c9b3 100644
15 --- a/fs/ext4/ext4.h
16 +++ b/fs/ext4/ext4.h
17 @@ -28,6 +28,7 @@
18  #include <linux/timer.h>
19  #include <linux/version.h>
20  #include <linux/wait.h>
21 +#include <linux/htree_lock.h>
22  #include <linux/blockgroup_lock.h>
23  #include <linux/percpu_counter.h>
24  #include <linux/ratelimit.h>
25 @@ -880,6 +881,9 @@ struct ext4_inode_info {
26         __u32   i_dtime;
27         ext4_fsblk_t    i_file_acl;
28  
29 +       /* following fields for parallel directory operations -bzzz */
30 +       struct semaphore i_append_sem;
31 +
32         /*
33          * i_block_group is the number of the block group which contains
34          * this file's inode.  Constant across the lifetime of the inode,
35 @@ -2086,6 +2090,71 @@ struct dx_hash_info
36         u32             *seed;
37  };
38  
39 +/* assume name-hash is protected by upper layer */
40 +#define EXT4_HTREE_LOCK_HASH   0
41 +
42 +enum ext4_pdo_lk_types {
43 +#if EXT4_HTREE_LOCK_HASH
44 +       EXT4_LK_HASH,
45 +#endif
46 +       EXT4_LK_DX,             /* index block */
47 +       EXT4_LK_DE,             /* directory entry block */
48 +       EXT4_LK_SPIN,           /* spinlock */
49 +       EXT4_LK_MAX,
50 +};
51 +
52 +/* read-only bit */
53 +#define EXT4_LB_RO(b)          (1 << (b))
54 +/* read + write, high bits for writer */
55 +#define EXT4_LB_RW(b)          ((1 << (b)) | (1 << (EXT4_LK_MAX + (b))))
56 +
57 +enum ext4_pdo_lock_bits {
58 +       /* DX lock bits */
59 +       EXT4_LB_DX_RO           = EXT4_LB_RO(EXT4_LK_DX),
60 +       EXT4_LB_DX              = EXT4_LB_RW(EXT4_LK_DX),
61 +       /* DE lock bits */
62 +       EXT4_LB_DE_RO           = EXT4_LB_RO(EXT4_LK_DE),
63 +       EXT4_LB_DE              = EXT4_LB_RW(EXT4_LK_DE),
64 +       /* DX spinlock bits */
65 +       EXT4_LB_SPIN_RO         = EXT4_LB_RO(EXT4_LK_SPIN),
66 +       EXT4_LB_SPIN            = EXT4_LB_RW(EXT4_LK_SPIN),
67 +       /* accurate searching */
68 +       EXT4_LB_EXACT           = EXT4_LB_RO(EXT4_LK_MAX << 1),
69 +};
70 +
71 +enum ext4_pdo_lock_opc {
72 +       /* external */
73 +       EXT4_HLOCK_READDIR      = (EXT4_LB_DE_RO | EXT4_LB_DX_RO),
74 +       EXT4_HLOCK_LOOKUP       = (EXT4_LB_DE_RO | EXT4_LB_SPIN_RO |
75 +                                  EXT4_LB_EXACT),
76 +       EXT4_HLOCK_DEL          = (EXT4_LB_DE | EXT4_LB_SPIN_RO |
77 +                                  EXT4_LB_EXACT),
78 +       EXT4_HLOCK_ADD          = (EXT4_LB_DE | EXT4_LB_SPIN_RO),
79 +
80 +       /* internal */
81 +       EXT4_HLOCK_LOOKUP_SAFE  = (EXT4_LB_DE_RO | EXT4_LB_DX_RO |
82 +                                  EXT4_LB_EXACT),
83 +       EXT4_HLOCK_DEL_SAFE     = (EXT4_LB_DE | EXT4_LB_DX_RO | EXT4_LB_EXACT),
84 +       EXT4_HLOCK_SPLIT        = (EXT4_LB_DE | EXT4_LB_DX | EXT4_LB_SPIN),
85 +};
86 +
87 +extern struct htree_lock_head *ext4_htree_lock_head_alloc(unsigned hbits);
88 +#define ext4_htree_lock_head_free(lhead)       htree_lock_head_free(lhead)
89 +
90 +extern struct htree_lock *ext4_htree_lock_alloc(void);
91 +#define ext4_htree_lock_free(lck)              htree_lock_free(lck)
92 +
93 +extern void ext4_htree_lock(struct htree_lock *lck,
94 +                           struct htree_lock_head *lhead,
95 +                           struct inode *dir, unsigned flags);
96 +#define ext4_htree_unlock(lck)                  htree_unlock(lck)
97 +
98 +extern struct buffer_head *__ext4_find_entry(struct inode *dir,
99 +                                       const struct qstr *d_name,
100 +                                       struct ext4_dir_entry_2 **res_dir,
101 +                                       int *inlined, struct htree_lock *lck);
102 +extern int __ext4_add_entry(handle_t *handle, struct dentry *dentry,
103 +                     struct inode *inode, struct htree_lock *lck);
104  
105  /* 32 and 64 bit signed EOF for dx directories */
106  #define EXT4_HTREE_EOF_32BIT   ((1UL  << (32 - 1)) - 1)
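
As a quick sanity check on the bit layout defined in the hunk above, the sketch below (illustrative only, not part of the patch) expands the EXT4_LB_* masks under the patch's default EXT4_HTREE_LOCK_HASH == 0, so that EXT4_LK_DX = 0, EXT4_LK_DE = 1, EXT4_LK_SPIN = 2 and EXT4_LK_MAX = 3; BUILD_BUG_ON() is used here only to document the expected values.

/* illustrative sketch only, not part of the patch */
static inline void ext4_pdo_lock_bits_example(void)
{
	BUILD_BUG_ON(EXT4_LB_DX_RO   != 0x01);	/* 1 << EXT4_LK_DX */
	BUILD_BUG_ON(EXT4_LB_DX      != 0x09);	/* reader bit 0 + writer bit 3 */
	BUILD_BUG_ON(EXT4_LB_DE_RO   != 0x02);
	BUILD_BUG_ON(EXT4_LB_DE      != 0x12);	/* reader bit 1 + writer bit 4 */
	BUILD_BUG_ON(EXT4_LB_SPIN_RO != 0x04);
	BUILD_BUG_ON(EXT4_LB_SPIN    != 0x24);	/* reader bit 2 + writer bit 5 */
	BUILD_BUG_ON(EXT4_LB_EXACT   != 0x40);	/* 1 << (EXT4_LK_MAX << 1) */
	/* e.g. EXT4_HLOCK_LOOKUP == (EXT4_LB_DE_RO | EXT4_LB_SPIN_RO |
	 *			       EXT4_LB_EXACT) == 0x46 */
}
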
107 @@ -2475,8 +2544,16 @@ int ext4_insert_dentry(struct inode *dir,
108                        struct ext4_filename *fname, void *data);
109  static inline void ext4_update_dx_flag(struct inode *inode)
110  {
111 +       /* Disable it for ldiskfs, because going from a DX directory to
112 +        * a non-DX directory while it is in use will completely break
113 +        * the htree-locking.
114 +        * If we really want to support this operation in the future,
115 +        * we need to exclusively lock the directory at here which will
116 +        * increase complexity of code */
117 +#if 0
118         if (!ext4_has_feature_dir_index(inode->i_sb))
119                 ext4_clear_inode_flag(inode, EXT4_INODE_INDEX);
120 +#endif
121  }
122  static unsigned char ext4_filetype_table[] = {
123         DT_UNKNOWN, DT_REG, DT_DIR, DT_CHR, DT_BLK, DT_FIFO, DT_SOCK, DT_LNK
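
The ext4.h hunks above only declare the pdirop entry points; the sketch below (illustrative, not part of the patch) shows how an external caller such as Lustre's osd-ldiskfs layer is expected to drive them for a shared lookup. The helper name and the way the per-directory lock head is obtained are assumptions.

/* hedged sketch: caller-side lookup under pdirop locking; @lhead is the
 * per-directory htree_lock_head the caller allocated earlier with
 * ext4_htree_lock_head_alloc(), error handling is trimmed */
static struct buffer_head *
pdirop_lookup_sketch(struct inode *dir, struct htree_lock_head *lhead,
		     const struct qstr *name)
{
	struct ext4_dir_entry_2 *de;
	struct buffer_head *bh;
	struct htree_lock *lck;

	lck = ext4_htree_lock_alloc();		/* per-thread lock handle */
	if (lck == NULL)
		return ERR_PTR(-ENOMEM);

	/* shared lookup: DE read-lock + spin read-lock + exact hash match */
	ext4_htree_lock(lck, lhead, dir, EXT4_HLOCK_LOOKUP);
	bh = __ext4_find_entry(dir, name, &de, NULL, lck);
	ext4_htree_unlock(lck);			/* also drops any child locks */

	ext4_htree_lock_free(lck);
	return bh;
}
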
124 diff --git a/fs/ext4/htree_lock.c b/fs/ext4/htree_lock.c
125 new file mode 100644
126 index 0000000..99e7375
127 --- /dev/null
128 +++ b/fs/ext4/htree_lock.c
129 @@ -0,0 +1,891 @@
130 +/*
131 + * fs/ext4/htree_lock.c
132 + *
133 + * Copyright (c) 2011, 2012, Intel Corporation.
134 + *
135 + * Author: Liang Zhen <liang@whamcloud.com>
136 + */
137 +#include <linux/jbd2.h>
138 +#include <linux/hash.h>
139 +#include <linux/module.h>
140 +#include <linux/htree_lock.h>
141 +
142 +enum {
143 +       HTREE_LOCK_BIT_EX       = (1 << HTREE_LOCK_EX),
144 +       HTREE_LOCK_BIT_PW       = (1 << HTREE_LOCK_PW),
145 +       HTREE_LOCK_BIT_PR       = (1 << HTREE_LOCK_PR),
146 +       HTREE_LOCK_BIT_CW       = (1 << HTREE_LOCK_CW),
147 +       HTREE_LOCK_BIT_CR       = (1 << HTREE_LOCK_CR),
148 +};
149 +
150 +enum {
151 +       HTREE_LOCK_COMPAT_EX    = 0,
152 +       HTREE_LOCK_COMPAT_PW    = HTREE_LOCK_COMPAT_EX | HTREE_LOCK_BIT_CR,
153 +       HTREE_LOCK_COMPAT_PR    = HTREE_LOCK_COMPAT_PW | HTREE_LOCK_BIT_PR,
154 +       HTREE_LOCK_COMPAT_CW    = HTREE_LOCK_COMPAT_PW | HTREE_LOCK_BIT_CW,
155 +       HTREE_LOCK_COMPAT_CR    = HTREE_LOCK_COMPAT_CW | HTREE_LOCK_BIT_PR |
156 +                                 HTREE_LOCK_BIT_PW,
157 +};
158 +
159 +static int htree_lock_compat[] = {
160 +       [HTREE_LOCK_EX]         HTREE_LOCK_COMPAT_EX,
161 +       [HTREE_LOCK_PW]         HTREE_LOCK_COMPAT_PW,
162 +       [HTREE_LOCK_PR]         HTREE_LOCK_COMPAT_PR,
163 +       [HTREE_LOCK_CW]         HTREE_LOCK_COMPAT_CW,
164 +       [HTREE_LOCK_CR]         HTREE_LOCK_COMPAT_CR,
165 +};
166 +
167 +/* max allowed htree-lock depth.
168 + * We only need depth=3 for ext4, although users may pass a higher value. */
169 +#define HTREE_LOCK_DEP_MAX     16
170 +
171 +#ifdef HTREE_LOCK_DEBUG
172 +
173 +static char *hl_name[] = {
174 +       [HTREE_LOCK_EX]         "EX",
175 +       [HTREE_LOCK_PW]         "PW",
176 +       [HTREE_LOCK_PR]         "PR",
177 +       [HTREE_LOCK_CW]         "CW",
178 +       [HTREE_LOCK_CR]         "CR",
179 +};
180 +
181 +/* lock stats */
182 +struct htree_lock_node_stats {
183 +       unsigned long long      blocked[HTREE_LOCK_MAX];
184 +       unsigned long long      granted[HTREE_LOCK_MAX];
185 +       unsigned long long      retried[HTREE_LOCK_MAX];
186 +       unsigned long long      events;
187 +};
188 +
189 +struct htree_lock_stats {
190 +       struct htree_lock_node_stats    nodes[HTREE_LOCK_DEP_MAX];
191 +       unsigned long long      granted[HTREE_LOCK_MAX];
192 +       unsigned long long      blocked[HTREE_LOCK_MAX];
193 +};
194 +
195 +static struct htree_lock_stats hl_stats;
196 +
197 +void htree_lock_stat_reset(void)
198 +{
199 +       memset(&hl_stats, 0, sizeof(hl_stats));
200 +}
201 +
202 +void htree_lock_stat_print(int depth)
203 +{
204 +       int     i;
205 +       int     j;
206 +
207 +       printk(KERN_DEBUG "HTREE LOCK STATS:\n");
208 +       for (i = 0; i < HTREE_LOCK_MAX; i++) {
209 +               printk(KERN_DEBUG "[%s]: G [%10llu], B [%10llu]\n",
210 +                      hl_name[i], hl_stats.granted[i], hl_stats.blocked[i]);
211 +       }
212 +       for (i = 0; i < depth; i++) {
213 +               printk(KERN_DEBUG "HTREE CHILD [%d] STATS:\n", i);
214 +               for (j = 0; j < HTREE_LOCK_MAX; j++) {
215 +                       printk(KERN_DEBUG
216 +                               "[%s]: G [%10llu], B [%10llu], R [%10llu]\n",
217 +                               hl_name[j], hl_stats.nodes[i].granted[j],
218 +                               hl_stats.nodes[i].blocked[j],
219 +                               hl_stats.nodes[i].retried[j]);
220 +               }
221 +       }
222 +}
223 +
224 +#define lk_grant_inc(m)       do { hl_stats.granted[m]++; } while (0)
225 +#define lk_block_inc(m)       do { hl_stats.blocked[m]++; } while (0)
226 +#define ln_grant_inc(d, m)    do { hl_stats.nodes[d].granted[m]++; } while (0)
227 +#define ln_block_inc(d, m)    do { hl_stats.nodes[d].blocked[m]++; } while (0)
228 +#define ln_retry_inc(d, m)    do { hl_stats.nodes[d].retried[m]++; } while (0)
229 +#define ln_event_inc(d)       do { hl_stats.nodes[d].events++; } while (0)
230 +
231 +#else /* !DEBUG */
232 +
233 +void htree_lock_stat_reset(void) {}
234 +void htree_lock_stat_print(int depth) {}
235 +
236 +#define lk_grant_inc(m)              do {} while (0)
237 +#define lk_block_inc(m)              do {} while (0)
238 +#define ln_grant_inc(d, m)    do {} while (0)
239 +#define ln_block_inc(d, m)    do {} while (0)
240 +#define ln_retry_inc(d, m)    do {} while (0)
241 +#define ln_event_inc(d)              do {} while (0)
242 +
243 +#endif /* DEBUG */
244 +
245 +EXPORT_SYMBOL(htree_lock_stat_reset);
246 +EXPORT_SYMBOL(htree_lock_stat_print);
247 +
248 +#define HTREE_DEP_ROOT           (-1)
249 +
250 +#define htree_spin_lock(lhead, dep)                            \
251 +       bit_spin_lock((dep) + 1, &(lhead)->lh_lock)
252 +#define htree_spin_unlock(lhead, dep)                          \
253 +       bit_spin_unlock((dep) + 1, &(lhead)->lh_lock)
254 +
255 +#define htree_key_event_ignore(child, ln)                      \
256 +       (!((child)->lc_events & (1 << (ln)->ln_mode)))
257 +
258 +static int
259 +htree_key_list_empty(struct htree_lock_node *ln)
260 +{
261 +       return list_empty(&ln->ln_major_list) && list_empty(&ln->ln_minor_list);
262 +}
263 +
264 +static void
265 +htree_key_list_del_init(struct htree_lock_node *ln)
266 +{
267 +       struct htree_lock_node *tmp = NULL;
268 +
269 +       if (!list_empty(&ln->ln_minor_list)) {
270 +               tmp = list_entry(ln->ln_minor_list.next,
271 +                                struct htree_lock_node, ln_minor_list);
272 +               list_del_init(&ln->ln_minor_list);
273 +       }
274 +
275 +       if (list_empty(&ln->ln_major_list))
276 +               return;
277 +
278 +       if (tmp == NULL) { /* not on minor key list */
279 +               list_del_init(&ln->ln_major_list);
280 +       } else {
281 +               BUG_ON(!list_empty(&tmp->ln_major_list));
282 +               list_replace_init(&ln->ln_major_list, &tmp->ln_major_list);
283 +       }
284 +}
285 +
286 +static void
287 +htree_key_list_replace_init(struct htree_lock_node *old,
288 +                           struct htree_lock_node *new)
289 +{
290 +       if (!list_empty(&old->ln_major_list))
291 +               list_replace_init(&old->ln_major_list, &new->ln_major_list);
292 +
293 +       if (!list_empty(&old->ln_minor_list))
294 +               list_replace_init(&old->ln_minor_list, &new->ln_minor_list);
295 +}
296 +
297 +static void
298 +htree_key_event_enqueue(struct htree_lock_child *child,
299 +                       struct htree_lock_node *ln, int dep, void *event)
300 +{
301 +       struct htree_lock_node *tmp;
302 +
303 +       /* NB: ALWAYS called holding lhead::lh_lock(dep) */
304 +       BUG_ON(ln->ln_mode == HTREE_LOCK_NL);
305 +       if (event == NULL || htree_key_event_ignore(child, ln))
306 +               return;
307 +
308 +       /* shouldn't be a very long list */
309 +       list_for_each_entry(tmp, &ln->ln_alive_list, ln_alive_list) {
310 +               if (tmp->ln_mode == HTREE_LOCK_NL) {
311 +                       ln_event_inc(dep);
312 +                       if (child->lc_callback != NULL)
313 +                               child->lc_callback(tmp->ln_ev_target, event);
314 +               }
315 +       }
316 +}
317 +
318 +static int
319 +htree_node_lock_enqueue(struct htree_lock *newlk, struct htree_lock *curlk,
320 +                       unsigned dep, int wait, void *event)
321 +{
322 +       struct htree_lock_child *child = &newlk->lk_head->lh_children[dep];
323 +       struct htree_lock_node *newln = &newlk->lk_nodes[dep];
324 +       struct htree_lock_node *curln = &curlk->lk_nodes[dep];
325 +
326 +       /* NB: ALWAYS called holding lhead::lh_lock(dep) */
327 +       /* NB: we only expect PR/PW lock modes here; only these two modes are
328 +        * allowed for htree_node_lock (asserted in htree_node_lock_internal),
329 +        * NL is only used for listeners, users can't directly request NL mode */
330 +       if ((curln->ln_mode == HTREE_LOCK_NL) ||
331 +           (curln->ln_mode != HTREE_LOCK_PW &&
332 +            newln->ln_mode != HTREE_LOCK_PW)) {
333 +               /* no conflict, attach it on granted list of @curlk */
334 +               if (curln->ln_mode != HTREE_LOCK_NL) {
335 +                       list_add(&newln->ln_granted_list,
336 +                                &curln->ln_granted_list);
337 +               } else {
338 +                       /* replace key owner */
339 +                       htree_key_list_replace_init(curln, newln);
340 +               }
341 +
342 +               list_add(&newln->ln_alive_list, &curln->ln_alive_list);
343 +               htree_key_event_enqueue(child, newln, dep, event);
344 +               ln_grant_inc(dep, newln->ln_mode);
345 +               return 1; /* still hold lh_lock */
346 +       }
347 +
348 +       if (!wait) { /* can't grant and don't want to wait */
349 +               ln_retry_inc(dep, newln->ln_mode);
350 +               newln->ln_mode = HTREE_LOCK_INVAL;
351 +               return -1; /* don't wait and just return -1 */
352 +       }
353 +
354 +       newlk->lk_task = current;
355 +       set_current_state(TASK_UNINTERRUPTIBLE);
356 +       /* conflict, attach it on blocked list of curlk */
357 +       list_add_tail(&newln->ln_blocked_list, &curln->ln_blocked_list);
358 +       list_add(&newln->ln_alive_list, &curln->ln_alive_list);
359 +       ln_block_inc(dep, newln->ln_mode);
360 +
361 +       htree_spin_unlock(newlk->lk_head, dep);
362 +       /* wait to be given the lock */
363 +       if (newlk->lk_task != NULL)
364 +               schedule();
365 +       /* granted, no doubt, wake up will set me RUNNING */
366 +       if (event == NULL || htree_key_event_ignore(child, newln))
367 +               return 0; /* granted without lh_lock */
368 +
369 +       htree_spin_lock(newlk->lk_head, dep);
370 +       htree_key_event_enqueue(child, newln, dep, event);
371 +       return 1; /* still hold lh_lock */
372 +}
373 +
374 +/*
375 + * get PR/PW access to a particular tree-node according to @dep and @key,
376 + * it returns -1 if @wait is false and the lock can't be granted immediately.
377 + * All listeners (HTREE_LOCK_NL) on @dep and with the same @key will get
378 + * @event if it's not NULL.
379 + * NB: ALWAYS called holding lhead::lh_lock
380 + */
381 +static int
382 +htree_node_lock_internal(struct htree_lock_head *lhead, struct htree_lock *lck,
383 +                        htree_lock_mode_t mode, u32 key, unsigned dep,
384 +                        int wait, void *event)
385 +{
386 +       LIST_HEAD(list);
387 +       struct htree_lock       *tmp;
388 +       struct htree_lock       *tmp2;
389 +       u16                     major;
390 +       u16                     minor;
391 +       u8                      reverse;
392 +       u8                      ma_bits;
393 +       u8                      mi_bits;
394 +
395 +       BUG_ON(mode != HTREE_LOCK_PW && mode != HTREE_LOCK_PR);
396 +       BUG_ON(htree_node_is_granted(lck, dep));
397 +
398 +       key = hash_long(key, lhead->lh_hbits);
399 +
400 +       mi_bits = lhead->lh_hbits >> 1;
401 +       ma_bits = lhead->lh_hbits - mi_bits;
402 +
403 +       lck->lk_nodes[dep].ln_major_key = major = key & ((1U << ma_bits) - 1);
404 +       lck->lk_nodes[dep].ln_minor_key = minor = key >> ma_bits;
405 +       lck->lk_nodes[dep].ln_mode = mode;
406 +
407 +       /*
408 +        * The major key list is an ordered list, so searches are started
409 +        * at the end of the list that is numerically closer to major_key,
410 +        * so at most half of the list will be walked (for well-distributed
411 +        * keys). The list traversal aborts early if the expected key
412 +        * location is passed.
413 +        */
414 +       reverse = (major >= (1 << (ma_bits - 1)));
415 +
416 +       if (reverse) {
417 +               list_for_each_entry_reverse(tmp,
418 +                                       &lhead->lh_children[dep].lc_list,
419 +                                       lk_nodes[dep].ln_major_list) {
420 +                       if (tmp->lk_nodes[dep].ln_major_key == major) {
421 +                               goto search_minor;
422 +
423 +                       } else if (tmp->lk_nodes[dep].ln_major_key < major) {
424 +                               /* attach _after_ @tmp */
425 +                               list_add(&lck->lk_nodes[dep].ln_major_list,
426 +                                        &tmp->lk_nodes[dep].ln_major_list);
427 +                               goto out_grant_major;
428 +                       }
429 +               }
430 +
431 +               list_add(&lck->lk_nodes[dep].ln_major_list,
432 +                        &lhead->lh_children[dep].lc_list);
433 +               goto out_grant_major;
434 +
435 +       } else {
436 +               list_for_each_entry(tmp, &lhead->lh_children[dep].lc_list,
437 +                                   lk_nodes[dep].ln_major_list) {
438 +                       if (tmp->lk_nodes[dep].ln_major_key == major) {
439 +                               goto search_minor;
440 +
441 +                       } else if (tmp->lk_nodes[dep].ln_major_key > major) {
442 +                               /* insert _before_ @tmp */
443 +                               list_add_tail(&lck->lk_nodes[dep].ln_major_list,
444 +                                       &tmp->lk_nodes[dep].ln_major_list);
445 +                               goto out_grant_major;
446 +                       }
447 +               }
448 +
449 +               list_add_tail(&lck->lk_nodes[dep].ln_major_list,
450 +                             &lhead->lh_children[dep].lc_list);
451 +               goto out_grant_major;
452 +       }
453 +
454 + search_minor:
455 +       /*
456 +        * NB: the minor_key list doesn't have a "head"; @list is just a
457 +        * temporary stub to help the list search, so make sure it's removed
458 +        * after searching.
459 +        * The minor_key list is an ordered list too.
460 +        */
461 +       list_add_tail(&list, &tmp->lk_nodes[dep].ln_minor_list);
462 +
463 +       reverse = (minor >= (1 << (mi_bits - 1)));
464 +
465 +       if (reverse) {
466 +               list_for_each_entry_reverse(tmp2, &list,
467 +                                           lk_nodes[dep].ln_minor_list) {
468 +                       if (tmp2->lk_nodes[dep].ln_minor_key == minor) {
469 +                               goto out_enqueue;
470 +
471 +                       } else if (tmp2->lk_nodes[dep].ln_minor_key < minor) {
472 +                               /* attach _after_ @tmp2 */
473 +                               list_add(&lck->lk_nodes[dep].ln_minor_list,
474 +                                        &tmp2->lk_nodes[dep].ln_minor_list);
475 +                               goto out_grant_minor;
476 +                       }
477 +               }
478 +
479 +               list_add(&lck->lk_nodes[dep].ln_minor_list, &list);
480 +
481 +       } else {
482 +               list_for_each_entry(tmp2, &list,
483 +                                   lk_nodes[dep].ln_minor_list) {
484 +                       if (tmp2->lk_nodes[dep].ln_minor_key == minor) {
485 +                               goto out_enqueue;
486 +
487 +                       } else if (tmp2->lk_nodes[dep].ln_minor_key > minor) {
488 +                               /* insert _before_ @tmp2 */
489 +                               list_add_tail(&lck->lk_nodes[dep].ln_minor_list,
490 +                                       &tmp2->lk_nodes[dep].ln_minor_list);
491 +                               goto out_grant_minor;
492 +                       }
493 +               }
494 +
495 +               list_add_tail(&lck->lk_nodes[dep].ln_minor_list, &list);
496 +       }
497 +
498 + out_grant_minor:
499 +       if (list.next == &lck->lk_nodes[dep].ln_minor_list) {
500 +               /* new lock @lck is the first one on minor_key list, which
501 +                * means it has the smallest minor_key and it should
502 +                * replace @tmp as minor_key owner */
503 +               list_replace_init(&tmp->lk_nodes[dep].ln_major_list,
504 +                                 &lck->lk_nodes[dep].ln_major_list);
505 +       }
506 +       /* remove the temporary head */
507 +       list_del(&list);
508 +
509 + out_grant_major:
510 +       ln_grant_inc(dep, lck->lk_nodes[dep].ln_mode);
511 +       return 1; /* granted with holding lh_lock */
512 +
513 + out_enqueue:
514 +       list_del(&list); /* remove temprary head */
515 +       return htree_node_lock_enqueue(lck, tmp2, dep, wait, event);
516 +}
517 +
518 +/*
519 + * release the key of @lck at level @dep, and grant any blocked locks.
520 + * caller will still listen on @key if @event is not NULL, which means
521 + * caller can see an event (by event_cb) while granting any lock with
522 + * the same key at level @dep.
523 + * NB: ALWAYS called holding lhead::lh_lock
524 + * NB: listener will not block anyone because listening mode is HTREE_LOCK_NL
525 + */
526 +static void
527 +htree_node_unlock_internal(struct htree_lock_head *lhead,
528 +                          struct htree_lock *curlk, unsigned dep, void *event)
529 +{
530 +       struct htree_lock_node  *curln = &curlk->lk_nodes[dep];
531 +       struct htree_lock       *grtlk = NULL;
532 +       struct htree_lock_node  *grtln;
533 +       struct htree_lock       *poslk;
534 +       struct htree_lock       *tmplk;
535 +
536 +       if (!htree_node_is_granted(curlk, dep))
537 +               return;
538 +
539 +       if (!list_empty(&curln->ln_granted_list)) {
540 +               /* there is another granted lock */
541 +               grtlk = list_entry(curln->ln_granted_list.next,
542 +                                  struct htree_lock,
543 +                                  lk_nodes[dep].ln_granted_list);
544 +               list_del_init(&curln->ln_granted_list);
545 +       }
546 +
547 +       if (grtlk == NULL && !list_empty(&curln->ln_blocked_list)) {
548 +               /*
549 +                * @curlk is the only granted lock, so we confirmed:
550 +                * a) curln is key owner (attached on major/minor_list),
551 +                *    so if there is any blocked lock, it should be attached
552 +                *    on curln->ln_blocked_list
553 +                * b) we always can grant the first blocked lock
554 +                * b) we can always grant the first blocked lock
555 +               grtlk = list_entry(curln->ln_blocked_list.next,
556 +                                  struct htree_lock,
557 +                                  lk_nodes[dep].ln_blocked_list);
558 +               BUG_ON(grtlk->lk_task == NULL);
559 +               wake_up_process(grtlk->lk_task);
560 +       }
561 +
562 +       if (event != NULL &&
563 +           lhead->lh_children[dep].lc_events != HTREE_EVENT_DISABLE) {
564 +               curln->ln_ev_target = event;
565 +               curln->ln_mode = HTREE_LOCK_NL; /* listen! */
566 +       } else {
567 +               curln->ln_mode = HTREE_LOCK_INVAL;
568 +       }
569 +
570 +       if (grtlk == NULL) { /* I must be the only one locking this key */
571 +               struct htree_lock_node *tmpln;
572 +
573 +               BUG_ON(htree_key_list_empty(curln));
574 +
575 +               if (curln->ln_mode == HTREE_LOCK_NL) /* listening */
576 +                       return;
577 +
578 +               /* not listening */
579 +               if (list_empty(&curln->ln_alive_list)) { /* no more listener */
580 +                       htree_key_list_del_init(curln);
581 +                       return;
582 +               }
583 +
584 +               tmpln = list_entry(curln->ln_alive_list.next,
585 +                                  struct htree_lock_node, ln_alive_list);
586 +
587 +               BUG_ON(tmpln->ln_mode != HTREE_LOCK_NL);
588 +
589 +               htree_key_list_replace_init(curln, tmpln);
590 +               list_del_init(&curln->ln_alive_list);
591 +
592 +               return;
593 +       }
594 +
595 +       /* have a granted lock */
596 +       grtln = &grtlk->lk_nodes[dep];
597 +       if (!list_empty(&curln->ln_blocked_list)) {
598 +               /* only key owner can be on both lists */
599 +               BUG_ON(htree_key_list_empty(curln));
600 +
601 +               if (list_empty(&grtln->ln_blocked_list)) {
602 +                       list_add(&grtln->ln_blocked_list,
603 +                                &curln->ln_blocked_list);
604 +               }
605 +               list_del_init(&curln->ln_blocked_list);
606 +       }
607 +       /*
608 +        * NB: this is the tricky part:
609 +        * There are only two modes for a child-lock (PR and PW), and
610 +        * only the owner of the key (attached on major/minor_list) can be
611 +        * on both blocked_list and granted_list, so @grtlk must fall into
612 +        * one of these two cases:
613 +        *
614 +        * a) @grtlk is taken from granted_list, which means we've granted
615 +        *    more than one lock, so @grtlk has to be PR; the first blocked
616 +        *    lock must be PW and we can't grant it at all.
617 +        *    So even if @grtlk is not the owner of the key (empty
618 +        *    blocked_list), we don't care because we can't grant any lock.
619 +        * b) we just granted a new lock taken from the head of the blocked
620 +        *    list; it should be the first granted lock, and it should
621 +        *    be the first one linked on blocked_list.
622 +        *
623 +        * Either way, we get the correct result by iterating the
624 +        * blocked_list of @grtlk, without having to bother about the
625 +        * owner of the current key.
626 +        */
627 +       list_for_each_entry_safe(poslk, tmplk, &grtln->ln_blocked_list,
628 +                                lk_nodes[dep].ln_blocked_list) {
629 +               if (grtlk->lk_nodes[dep].ln_mode == HTREE_LOCK_PW ||
630 +                   poslk->lk_nodes[dep].ln_mode == HTREE_LOCK_PW)
631 +                       break;
632 +               /* grant all readers */
633 +               list_del_init(&poslk->lk_nodes[dep].ln_blocked_list);
634 +               list_add(&poslk->lk_nodes[dep].ln_granted_list,
635 +                        &grtln->ln_granted_list);
636 +
637 +               BUG_ON(poslk->lk_task == NULL);
638 +               wake_up_process(poslk->lk_task);
639 +       }
640 +
641 +       /* if @curln is the owner of this key, replace it with @grtln */
642 +       if (!htree_key_list_empty(curln))
643 +               htree_key_list_replace_init(curln, grtln);
644 +
645 +       if (curln->ln_mode == HTREE_LOCK_INVAL)
646 +               list_del_init(&curln->ln_alive_list);
647 +}
648 +
649 +/*
650 + * it's just a wrapper of htree_node_lock_internal; it returns 1 when granted
651 + * and 0 only if @wait is false and the lock can't be granted immediately
652 + */
653 +int
654 +htree_node_lock_try(struct htree_lock *lck, htree_lock_mode_t mode,
655 +                   u32 key, unsigned dep, int wait, void *event)
656 +{
657 +       struct htree_lock_head *lhead = lck->lk_head;
658 +       int rc;
659 +
660 +       BUG_ON(dep >= lck->lk_depth);
661 +       BUG_ON(lck->lk_mode == HTREE_LOCK_INVAL);
662 +
663 +       htree_spin_lock(lhead, dep);
664 +       rc = htree_node_lock_internal(lhead, lck, mode, key, dep, wait, event);
665 +       if (rc != 0)
666 +               htree_spin_unlock(lhead, dep);
667 +       return rc >= 0;
668 +}
669 +EXPORT_SYMBOL(htree_node_lock_try);
670 +
671 +/* it's a wrapper of htree_node_unlock_internal */
672 +void
673 +htree_node_unlock(struct htree_lock *lck, unsigned dep, void *event)
674 +{
675 +       struct htree_lock_head *lhead = lck->lk_head;
676 +
677 +       BUG_ON(dep >= lck->lk_depth);
678 +       BUG_ON(lck->lk_mode == HTREE_LOCK_INVAL);
679 +
680 +       htree_spin_lock(lhead, dep);
681 +       htree_node_unlock_internal(lhead, lck, dep, event);
682 +       htree_spin_unlock(lhead, dep);
683 +}
684 +EXPORT_SYMBOL(htree_node_unlock);
685 +
686 +/* stop listening on child-lock level @dep */
687 +void
688 +htree_node_stop_listen(struct htree_lock *lck, unsigned dep)
689 +{
690 +       struct htree_lock_node *ln = &lck->lk_nodes[dep];
691 +       struct htree_lock_node *tmp;
692 +
693 +       BUG_ON(htree_node_is_granted(lck, dep));
694 +       BUG_ON(!list_empty(&ln->ln_blocked_list));
695 +       BUG_ON(!list_empty(&ln->ln_granted_list));
696 +
697 +       if (!htree_node_is_listening(lck, dep))
698 +               return;
699 +
700 +       htree_spin_lock(lck->lk_head, dep);
701 +       ln->ln_mode = HTREE_LOCK_INVAL;
702 +       ln->ln_ev_target = NULL;
703 +
704 +       if (htree_key_list_empty(ln)) { /* not owner */
705 +               list_del_init(&ln->ln_alive_list);
706 +               goto out;
707 +       }
708 +
709 +       /* I'm the owner... */
710 +       if (list_empty(&ln->ln_alive_list)) { /* no more listener */
711 +               htree_key_list_del_init(ln);
712 +               goto out;
713 +       }
714 +
715 +       tmp = list_entry(ln->ln_alive_list.next,
716 +                        struct htree_lock_node, ln_alive_list);
717 +
718 +       BUG_ON(tmp->ln_mode != HTREE_LOCK_NL);
719 +       htree_key_list_replace_init(ln, tmp);
720 +       list_del_init(&ln->ln_alive_list);
721 + out:
722 +       htree_spin_unlock(lck->lk_head, dep);
723 +}
724 +EXPORT_SYMBOL(htree_node_stop_listen);
725 +
726 +/* release all child-locks if we have any */
727 +static void
728 +htree_node_release_all(struct htree_lock *lck)
729 +{
730 +       int     i;
731 +
732 +       for (i = 0; i < lck->lk_depth; i++) {
733 +               if (htree_node_is_granted(lck, i))
734 +                       htree_node_unlock(lck, i, NULL);
735 +               else if (htree_node_is_listening(lck, i))
736 +                       htree_node_stop_listen(lck, i);
737 +       }
738 +}
739 +
740 +/*
741 + * obtain htree lock, the caller may block inside if there's a conflict
742 + * with any granted or blocked lock and @wait is true.
743 + * NB: ALWAYS called holding lhead::lh_lock
744 + */
745 +static int
746 +htree_lock_internal(struct htree_lock *lck, int wait)
747 +{
748 +       struct htree_lock_head *lhead = lck->lk_head;
749 +       int     granted = 0;
750 +       int     blocked = 0;
751 +       int     i;
752 +
753 +       for (i = 0; i < HTREE_LOCK_MAX; i++) {
754 +               if (lhead->lh_ngranted[i] != 0)
755 +                       granted |= 1 << i;
756 +               if (lhead->lh_nblocked[i] != 0)
757 +                       blocked |= 1 << i;
758 +       }
759 +       if ((htree_lock_compat[lck->lk_mode] & granted) != granted ||
760 +           (htree_lock_compat[lck->lk_mode] & blocked) != blocked) {
761 +               /* will block the current lock even if it only conflicts with
762 +                * another blocked lock, so locks like EX won't starve */
763 +               if (!wait)
764 +                       return -1;
765 +               lhead->lh_nblocked[lck->lk_mode]++;
766 +               lk_block_inc(lck->lk_mode);
767 +
768 +               lck->lk_task = current;
769 +               list_add_tail(&lck->lk_blocked_list, &lhead->lh_blocked_list);
770 +
771 +retry:
772 +               set_current_state(TASK_UNINTERRUPTIBLE);
773 +               htree_spin_unlock(lhead, HTREE_DEP_ROOT);
774 +               /* wait to be given the lock */
775 +               if (lck->lk_task != NULL)
776 +                       schedule();
777 +               /* granted, no doubt. wake up will set me RUNNING.
778 +                * Since the thread could be woken up spuriously,
779 +                * we need to check again whether the lock was granted. */
780 +               if (!list_empty(&lck->lk_blocked_list)) {
781 +                       htree_spin_lock(lhead, HTREE_DEP_ROOT);
782 +                       if (list_empty(&lck->lk_blocked_list)) {
783 +                               htree_spin_unlock(lhead, HTREE_DEP_ROOT);
784 +                               return 0;
785 +                       }
786 +                       goto retry;
787 +               }
788 +               return 0; /* without lh_lock */
789 +       }
790 +       lhead->lh_ngranted[lck->lk_mode]++;
791 +       lk_grant_inc(lck->lk_mode);
792 +       return 1;
793 +}
794 +
795 +/* release htree lock. NB: ALWAYS called holding lhead::lh_lock */
796 +static void
797 +htree_unlock_internal(struct htree_lock *lck)
798 +{
799 +       struct htree_lock_head *lhead = lck->lk_head;
800 +       struct htree_lock *tmp;
801 +       struct htree_lock *tmp2;
802 +       int granted = 0;
803 +       int i;
804 +
805 +       BUG_ON(lhead->lh_ngranted[lck->lk_mode] == 0);
806 +
807 +       lhead->lh_ngranted[lck->lk_mode]--;
808 +       lck->lk_mode = HTREE_LOCK_INVAL;
809 +
810 +       for (i = 0; i < HTREE_LOCK_MAX; i++) {
811 +               if (lhead->lh_ngranted[i] != 0)
812 +                       granted |= 1 << i;
813 +       }
814 +       list_for_each_entry_safe(tmp, tmp2,
815 +                                &lhead->lh_blocked_list, lk_blocked_list) {
816 +               /* conflict with any granted lock? */
817 +               if ((htree_lock_compat[tmp->lk_mode] & granted) != granted)
818 +                       break;
819 +
820 +               list_del_init(&tmp->lk_blocked_list);
821 +
822 +               BUG_ON(lhead->lh_nblocked[tmp->lk_mode] == 0);
823 +
824 +               lhead->lh_nblocked[tmp->lk_mode]--;
825 +               lhead->lh_ngranted[tmp->lk_mode]++;
826 +               granted |= 1 << tmp->lk_mode;
827 +
828 +               BUG_ON(tmp->lk_task == NULL);
829 +               wake_up_process(tmp->lk_task);
830 +       }
831 +}
832 +
833 +/* it's a wrapper of htree_lock_internal and an exported interface.
834 + * It always returns 1 with the lock granted if @wait is true; it can return 0
835 + * if @wait is false and the locking request can't be granted immediately */
836 +int
837 +htree_lock_try(struct htree_lock *lck, struct htree_lock_head *lhead,
838 +              htree_lock_mode_t mode, int wait)
839 +{
840 +       int     rc;
841 +
842 +       BUG_ON(lck->lk_depth > lhead->lh_depth);
843 +       BUG_ON(lck->lk_head != NULL);
844 +       BUG_ON(lck->lk_task != NULL);
845 +
846 +       lck->lk_head = lhead;
847 +       lck->lk_mode = mode;
848 +
849 +       htree_spin_lock(lhead, HTREE_DEP_ROOT);
850 +       rc = htree_lock_internal(lck, wait);
851 +       if (rc != 0)
852 +               htree_spin_unlock(lhead, HTREE_DEP_ROOT);
853 +       return rc >= 0;
854 +}
855 +EXPORT_SYMBOL(htree_lock_try);
856 +
857 +/* it's a wrapper of htree_unlock_internal and an exported interface.
858 + * It will release all htree_node_locks and the htree_lock */
859 +void
860 +htree_unlock(struct htree_lock *lck)
861 +{
862 +       BUG_ON(lck->lk_head == NULL);
863 +       BUG_ON(lck->lk_mode == HTREE_LOCK_INVAL);
864 +
865 +       htree_node_release_all(lck);
866 +
867 +       htree_spin_lock(lck->lk_head, HTREE_DEP_ROOT);
868 +       htree_unlock_internal(lck);
869 +       htree_spin_unlock(lck->lk_head, HTREE_DEP_ROOT);
870 +       lck->lk_head = NULL;
871 +       lck->lk_task = NULL;
872 +}
873 +EXPORT_SYMBOL(htree_unlock);
874 +
875 +/* change lock mode */
876 +void
877 +htree_change_mode(struct htree_lock *lck, htree_lock_mode_t mode)
878 +{
879 +       BUG_ON(lck->lk_mode == HTREE_LOCK_INVAL);
880 +       lck->lk_mode = mode;
881 +}
882 +EXPORT_SYMBOL(htree_change_mode);
883 +
884 +/* release the htree lock, and lock it again with a new mode.
885 + * This function will first release all htree_node_locks and the htree_lock,
886 + * then try to re-acquire the htree_lock with the new @mode.
887 + * It always returns 1 with the lock granted if @wait is true; it can return 0
888 + * if @wait is false and the locking request can't be granted immediately */
889 +int
890 +htree_change_lock_try(struct htree_lock *lck, htree_lock_mode_t mode, int wait)
891 +{
892 +       struct htree_lock_head *lhead = lck->lk_head;
893 +       int rc;
894 +
895 +       BUG_ON(lhead == NULL);
896 +       BUG_ON(lck->lk_mode == mode);
897 +       BUG_ON(lck->lk_mode == HTREE_LOCK_INVAL || mode == HTREE_LOCK_INVAL);
898 +
899 +       htree_node_release_all(lck);
900 +
901 +       htree_spin_lock(lhead, HTREE_DEP_ROOT);
902 +       htree_unlock_internal(lck);
903 +       lck->lk_mode = mode;
904 +       rc = htree_lock_internal(lck, wait);
905 +       if (rc != 0)
906 +               htree_spin_unlock(lhead, HTREE_DEP_ROOT);
907 +       return rc >= 0;
908 +}
909 +EXPORT_SYMBOL(htree_change_lock_try);
910 +
911 +/* create a htree_lock head with @depth levels (number of child-locks),
912 + * it is a per-resource structure */
913 +struct htree_lock_head *
914 +htree_lock_head_alloc(unsigned depth, unsigned hbits, unsigned priv)
915 +{
916 +       struct htree_lock_head *lhead;
917 +       int  i;
918 +
919 +       if (depth > HTREE_LOCK_DEP_MAX) {
920 +               printk(KERN_ERR "%d is larger than max htree_lock depth %d\n",
921 +                       depth, HTREE_LOCK_DEP_MAX);
922 +               return NULL;
923 +       }
924 +
925 +       lhead = kzalloc(offsetof(struct htree_lock_head,
926 +                                lh_children[depth]) + priv, GFP_NOFS);
927 +       if (lhead == NULL)
928 +               return NULL;
929 +
930 +       if (hbits < HTREE_HBITS_MIN)
931 +               lhead->lh_hbits = HTREE_HBITS_MIN;
932 +       else if (hbits > HTREE_HBITS_MAX)
933 +               lhead->lh_hbits = HTREE_HBITS_MAX;
934 +
935 +       lhead->lh_lock = 0;
936 +       lhead->lh_depth = depth;
937 +       INIT_LIST_HEAD(&lhead->lh_blocked_list);
938 +       if (priv > 0) {
939 +               lhead->lh_private = (void *)lhead +
940 +                       offsetof(struct htree_lock_head, lh_children[depth]);
941 +       }
942 +
943 +       for (i = 0; i < depth; i++) {
944 +               INIT_LIST_HEAD(&lhead->lh_children[i].lc_list);
945 +               lhead->lh_children[i].lc_events = HTREE_EVENT_DISABLE;
946 +       }
947 +       return lhead;
948 +}
949 +EXPORT_SYMBOL(htree_lock_head_alloc);
950 +
951 +/* free the htree_lock head */
952 +void
953 +htree_lock_head_free(struct htree_lock_head *lhead)
954 +{
955 +       int     i;
956 +
957 +       BUG_ON(!list_empty(&lhead->lh_blocked_list));
958 +       for (i = 0; i < lhead->lh_depth; i++)
959 +               BUG_ON(!list_empty(&lhead->lh_children[i].lc_list));
960 +       kfree(lhead);
961 +}
962 +EXPORT_SYMBOL(htree_lock_head_free);
963 +
964 +/* register event callback for @events of child-lock at level @dep */
965 +void
966 +htree_lock_event_attach(struct htree_lock_head *lhead, unsigned dep,
967 +                       unsigned events, htree_event_cb_t callback)
968 +{
969 +       BUG_ON(lhead->lh_depth <= dep);
970 +       lhead->lh_children[dep].lc_events = events;
971 +       lhead->lh_children[dep].lc_callback = callback;
972 +}
973 +EXPORT_SYMBOL(htree_lock_event_attach);
974 +
975 +/* allocate a htree_lock, which is a per-thread structure; @pbytes is some
976 + * extra bytes reserved as private data for the caller */
977 +struct htree_lock *
978 +htree_lock_alloc(unsigned depth, unsigned pbytes)
979 +{
980 +       struct htree_lock *lck;
981 +       int i = offsetof(struct htree_lock, lk_nodes[depth]);
982 +
983 +       if (depth > HTREE_LOCK_DEP_MAX) {
984 +               printk(KERN_ERR "%d is larger than max htree_lock depth %d\n",
985 +                       depth, HTREE_LOCK_DEP_MAX);
986 +               return NULL;
987 +       }
988 +       lck = kzalloc(i + pbytes, GFP_NOFS);
989 +       if (lck == NULL)
990 +               return NULL;
991 +
992 +       if (pbytes != 0)
993 +               lck->lk_private = (void *)lck + i;
994 +       lck->lk_mode = HTREE_LOCK_INVAL;
995 +       lck->lk_depth = depth;
996 +       INIT_LIST_HEAD(&lck->lk_blocked_list);
997 +
998 +       for (i = 0; i < depth; i++) {
999 +               struct htree_lock_node *node = &lck->lk_nodes[i];
1000 +
1001 +               node->ln_mode = HTREE_LOCK_INVAL;
1002 +               INIT_LIST_HEAD(&node->ln_major_list);
1003 +               INIT_LIST_HEAD(&node->ln_minor_list);
1004 +               INIT_LIST_HEAD(&node->ln_alive_list);
1005 +               INIT_LIST_HEAD(&node->ln_blocked_list);
1006 +               INIT_LIST_HEAD(&node->ln_granted_list);
1007 +       }
1008 +
1009 +       return lck;
1010 +}
1011 +EXPORT_SYMBOL(htree_lock_alloc);
1012 +
1013 +/* free htree_lock node */
1014 +void
1015 +htree_lock_free(struct htree_lock *lck)
1016 +{
1017 +       BUG_ON(lck->lk_mode != HTREE_LOCK_INVAL);
1018 +       kfree(lck);
1019 +}
1020 +EXPORT_SYMBOL(htree_lock_free);
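
Since htree_lock.c above is a standalone locking primitive, a minimal lifecycle sketch may help (illustrative only, not part of the patch); it uses just the exported interfaces, and the depth, hash bits and key values are assumptions rather than values the patch mandates.

/* hedged sketch: full lifecycle of the htree_lock primitive, independent
 * of ext4; depth/hbits/key values are illustrative only */
static void htree_lock_lifecycle_sketch(void)
{
	struct htree_lock_head *lhead;
	struct htree_lock *lck;

	lhead = htree_lock_head_alloc(3, HTREE_HBITS_MIN, 0); /* 3 child levels */
	lck = htree_lock_alloc(3, 0);	/* per-thread handle, no private data */
	if (lhead == NULL || lck == NULL)
		goto out;

	/* tree-level lock: PW is only compatible with CR (see compat table) */
	htree_lock_try(lck, lhead, HTREE_LOCK_PW, 1 /* wait */);

	/* child-level lock at depth 0, keyed by e.g. a block number */
	htree_node_lock_try(lck, HTREE_LOCK_PR, 123 /* key */, 0, 1, NULL);

	/* ... access the resource guarded by key 123 ... */

	htree_node_unlock(lck, 0, NULL);	/* drop the child key */
	htree_unlock(lck);	/* releases any remaining child locks too */
out:
	if (lck != NULL)
		htree_lock_free(lck);
	if (lhead != NULL)
		htree_lock_head_free(lhead);
}
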
1021 diff --git a/fs/ext4/namei.c b/fs/ext4/namei.c
1022 index 2543b8f..e70b61a 100644
1023 --- a/fs/ext4/namei.c
1024 +++ b/fs/ext4/namei.c
1025 @@ -52,6 +52,7 @@ struct buffer_head *ext4_append(handle_t *handle,
1026                                         ext4_lblk_t *block)
1027  {
1028         struct buffer_head *bh;
1029 +       struct ext4_inode_info *ei = EXT4_I(inode);
1030         int err;
1031  
1032         if (unlikely(EXT4_SB(inode->i_sb)->s_max_dir_size_kb &&
1033 @@ -59,15 +60,22 @@ struct buffer_head *ext4_append(handle_t *handle,
1034                       EXT4_SB(inode->i_sb)->s_max_dir_size_kb)))
1035                 return ERR_PTR(-ENOSPC);
1036  
1037 +       /* with parallel dir operations all appends
1038 +       * have to be serialized -bzzz */
1039 +       down(&ei->i_append_sem);
1040 +
1041         *block = inode->i_size >> inode->i_sb->s_blocksize_bits;
1042  
1043         bh = ext4_bread(handle, inode, *block, EXT4_GET_BLOCKS_CREATE);
1044 -       if (IS_ERR(bh))
1045 +       if (IS_ERR(bh)) {
1046 +               up(&ei->i_append_sem);
1047                 return bh;
1048 +       }
1049         inode->i_size += inode->i_sb->s_blocksize;
1050         EXT4_I(inode)->i_disksize = inode->i_size;
1051         BUFFER_TRACE(bh, "get_write_access");
1052         err = ext4_journal_get_write_access(handle, bh);
1053 +       up(&ei->i_append_sem);
1054         if (err) {
1055                 brelse(bh);
1056                 ext4_std_error(inode->i_sb, err);
1057 @@ -247,7 +255,8 @@ static unsigned dx_node_limit(struct inode *dir);
1058  static struct dx_frame *dx_probe(struct ext4_filename *fname,
1059                                  struct inode *dir,
1060                                  struct dx_hash_info *hinfo,
1061 -                                struct dx_frame *frame);
1062 +                                struct dx_frame *frame,
1063 +                                struct htree_lock *lck);
1064  static void dx_release(struct dx_frame *frames);
1065  static int dx_make_map(struct inode *dir, struct ext4_dir_entry_2 *de,
1066                        unsigned blocksize, struct dx_hash_info *hinfo,
1067 @@ -261,12 +270,13 @@ static void dx_insert_block(struct dx_frame *frame,
1068  static int ext4_htree_next_block(struct inode *dir, __u32 hash,
1069                                  struct dx_frame *frame,
1070                                  struct dx_frame *frames,
1071 -                                __u32 *start_hash);
1072 +                                __u32 *start_hash, struct htree_lock *lck);
1073  static struct buffer_head * ext4_dx_find_entry(struct inode *dir,
1074                 struct ext4_filename *fname,
1075 -               struct ext4_dir_entry_2 **res_dir);
1076 +               struct ext4_dir_entry_2 **res_dir, struct htree_lock *lck);
1077  static int ext4_dx_add_entry(handle_t *handle, struct ext4_filename *fname,
1078 -                            struct dentry *dentry, struct inode *inode);
1079 +                            struct dentry *dentry, struct inode *inode,
1080 +                            struct htree_lock *lck);
1081  
1082  /* checksumming functions */
1083  void initialize_dirent_tail(struct ext4_dir_entry_tail *t,
1084 @@ -733,6 +743,227 @@ struct stats dx_show_entries(struct dx_hash_info *hinfo, struct inode *dir,
1085  }
1086  #endif /* DX_DEBUG */
1087  
1088 +/* private data for htree_lock */
1089 +struct ext4_dir_lock_data {
1090 +       unsigned                ld_flags;  /* bits-map for lock types */
1091 +       unsigned                ld_count;  /* # entries of the last DX block */
1092 +       struct dx_entry         ld_at_entry; /* copy of leaf dx_entry */
1093 +       struct dx_entry         *ld_at;    /* position of leaf dx_entry */
1094 +};
1095 +
1096 +#define ext4_htree_lock_data(l)        ((struct ext4_dir_lock_data *)(l)->lk_private)
1097 +#define ext4_find_entry(dir, name, dirent, inline) \
1098 +                       __ext4_find_entry(dir, name, dirent, inline, NULL)
1099 +#define ext4_add_entry(handle, dentry, inode) \
1100 +                       __ext4_add_entry(handle, dentry, inode, NULL)
1101 +
1102 +/* NB: ext4_lblk_t is 32 bits so we use high bits to identify invalid blk */
1103 +#define EXT4_HTREE_NODE_CHANGED        (0xcafeULL << 32)
1104 +
1105 +static void ext4_htree_event_cb(void *target, void *event)
1106 +{
1107 +       u64 *block = (u64 *)target;
1108 +
1109 +       if (*block == dx_get_block((struct dx_entry *)event))
1110 +               *block = EXT4_HTREE_NODE_CHANGED;
1111 +}
1112 +
1113 +struct htree_lock_head *ext4_htree_lock_head_alloc(unsigned hbits)
1114 +{
1115 +       struct htree_lock_head *lhead;
1116 +
1117 +       lhead = htree_lock_head_alloc(EXT4_LK_MAX, hbits, 0);
1118 +       if (lhead != NULL) {
1119 +               htree_lock_event_attach(lhead, EXT4_LK_SPIN, HTREE_EVENT_WR,
1120 +                                       ext4_htree_event_cb);
1121 +       }
1122 +       return lhead;
1123 +}
1124 +EXPORT_SYMBOL(ext4_htree_lock_head_alloc);
1125 +
1126 +struct htree_lock *ext4_htree_lock_alloc(void)
1127 +{
1128 +       return htree_lock_alloc(EXT4_LK_MAX,
1129 +                               sizeof(struct ext4_dir_lock_data));
1130 +}
1131 +EXPORT_SYMBOL(ext4_htree_lock_alloc);
1132 +
1133 +static htree_lock_mode_t ext4_htree_mode(unsigned flags)
1134 +{
1135 +       switch (flags) {
1136 +       default: /* 0 or unknown flags require EX lock */
1137 +               return HTREE_LOCK_EX;
1138 +       case EXT4_HLOCK_READDIR:
1139 +               return HTREE_LOCK_PR;
1140 +       case EXT4_HLOCK_LOOKUP:
1141 +               return HTREE_LOCK_CR;
1142 +       case EXT4_HLOCK_DEL:
1143 +       case EXT4_HLOCK_ADD:
1144 +               return HTREE_LOCK_CW;
1145 +       }
1146 +}
1147 +
1148 +/* return PR for read-only operations, otherwise return EX */
1149 +static inline htree_lock_mode_t ext4_htree_safe_mode(unsigned flags)
1150 +{
1151 +       int writer = (flags & EXT4_LB_DE) == EXT4_LB_DE;
1152 +
1153 +       /* 0 requires EX lock */
1154 +       return (flags == 0 || writer) ? HTREE_LOCK_EX : HTREE_LOCK_PR;
1155 +}
1156 +
1157 +static int ext4_htree_safe_locked(struct htree_lock *lck)
1158 +{
1159 +       int writer;
1160 +
1161 +       if (lck == NULL || lck->lk_mode == HTREE_LOCK_EX)
1162 +               return 1;
1163 +
1164 +       writer = (ext4_htree_lock_data(lck)->ld_flags & EXT4_LB_DE) ==
1165 +                EXT4_LB_DE;
1166 +       if (writer) /* all readers & writers are excluded? */
1167 +               return lck->lk_mode == HTREE_LOCK_EX;
1168 +
1169 +       /* all writers are excluded? */
1170 +       return lck->lk_mode == HTREE_LOCK_PR ||
1171 +              lck->lk_mode == HTREE_LOCK_PW ||
1172 +              lck->lk_mode == HTREE_LOCK_EX;
1173 +}
1174 +
1175 +/* relock htree_lock with EX mode if it's a change operation, otherwise
1176 + * relock it with PR mode. It's a noop if PDO is disabled. */
1177 +static void ext4_htree_safe_relock(struct htree_lock *lck)
1178 +{
1179 +       if (!ext4_htree_safe_locked(lck)) {
1180 +               unsigned flags = ext4_htree_lock_data(lck)->ld_flags;
1181 +
1182 +               htree_change_lock(lck, ext4_htree_safe_mode(flags));
1183 +       }
1184 +}
1185 +
1186 +void ext4_htree_lock(struct htree_lock *lck, struct htree_lock_head *lhead,
1187 +                    struct inode *dir, unsigned flags)
1188 +{
1189 +       htree_lock_mode_t mode = is_dx(dir) ? ext4_htree_mode(flags) :
1190 +                                             ext4_htree_safe_mode(flags);
1191 +
1192 +       ext4_htree_lock_data(lck)->ld_flags = flags;
1193 +       htree_lock(lck, lhead, mode);
1194 +       if (!is_dx(dir))
1195 +               ext4_htree_safe_relock(lck); /* make sure it's safe locked */
1196 +}
1197 +EXPORT_SYMBOL(ext4_htree_lock);
1198 +
1199 +static int ext4_htree_node_lock(struct htree_lock *lck, struct dx_entry *at,
1200 +                               unsigned lmask, int wait, void *ev)
1201 +{
1202 +       u32     key = (at == NULL) ? 0 : dx_get_block(at);
1203 +       u32     mode;
1204 +
1205 +       /* NOOP if htree is well protected or caller doesn't require the lock */
1206 +       if (ext4_htree_safe_locked(lck) ||
1207 +          !(ext4_htree_lock_data(lck)->ld_flags & lmask))
1208 +               return 1;
1209 +
1210 +       mode = (ext4_htree_lock_data(lck)->ld_flags & lmask) == lmask ?
1211 +               HTREE_LOCK_PW : HTREE_LOCK_PR;
1212 +       while (1) {
1213 +               if (htree_node_lock_try(lck, mode, key, ffz(~lmask), wait, ev))
1214 +                       return 1;
1215 +               if (!(lmask & EXT4_LB_SPIN)) /* not a spinlock */
1216 +                       return 0;
1217 +               cpu_relax(); /* spin until granted */
1218 +       }
1219 +}
1220 +
1221 +static int ext4_htree_node_locked(struct htree_lock *lck, unsigned lmask)
1222 +{
1223 +       return ext4_htree_safe_locked(lck) ||
1224 +              htree_node_is_granted(lck, ffz(~lmask));
1225 +}
1226 +
1227 +static void ext4_htree_node_unlock(struct htree_lock *lck,
1228 +                                  unsigned lmask, void *buf)
1229 +{
1230 +       /* NB: it's safe to call this multiple times, even if it's not locked */
1231 +       if (!ext4_htree_safe_locked(lck) &&
1232 +            htree_node_is_granted(lck, ffz(~lmask)))
1233 +               htree_node_unlock(lck, ffz(~lmask), buf);
1234 +}
1235 +
1236 +#define ext4_htree_dx_lock(lck, key)           \
1237 +       ext4_htree_node_lock(lck, key, EXT4_LB_DX, 1, NULL)
1238 +#define ext4_htree_dx_lock_try(lck, key)       \
1239 +       ext4_htree_node_lock(lck, key, EXT4_LB_DX, 0, NULL)
1240 +#define ext4_htree_dx_unlock(lck)              \
1241 +       ext4_htree_node_unlock(lck, EXT4_LB_DX, NULL)
1242 +#define ext4_htree_dx_locked(lck)              \
1243 +       ext4_htree_node_locked(lck, EXT4_LB_DX)
1244 +
1245 +static void ext4_htree_dx_need_lock(struct htree_lock *lck)
1246 +{
1247 +       struct ext4_dir_lock_data *ld;
1248 +
1249 +       if (ext4_htree_safe_locked(lck))
1250 +               return;
1251 +
1252 +       ld = ext4_htree_lock_data(lck);
1253 +       switch (ld->ld_flags) {
1254 +       default:
1255 +               return;
1256 +       case EXT4_HLOCK_LOOKUP:
1257 +               ld->ld_flags = EXT4_HLOCK_LOOKUP_SAFE;
1258 +               return;
1259 +       case EXT4_HLOCK_DEL:
1260 +               ld->ld_flags = EXT4_HLOCK_DEL_SAFE;
1261 +               return;
1262 +       case EXT4_HLOCK_ADD:
1263 +               ld->ld_flags = EXT4_HLOCK_SPLIT;
1264 +               return;
1265 +       }
1266 +}
1267 +
1268 +#define ext4_htree_de_lock(lck, key)           \
1269 +       ext4_htree_node_lock(lck, key, EXT4_LB_DE, 1, NULL)
1270 +#define ext4_htree_de_unlock(lck)              \
1271 +       ext4_htree_node_unlock(lck, EXT4_LB_DE, NULL)
1272 +
1273 +#define ext4_htree_spin_lock(lck, key, event)  \
1274 +       ext4_htree_node_lock(lck, key, EXT4_LB_SPIN, 0, event)
1275 +#define ext4_htree_spin_unlock(lck)            \
1276 +       ext4_htree_node_unlock(lck, EXT4_LB_SPIN, NULL)
1277 +#define ext4_htree_spin_unlock_listen(lck, p)  \
1278 +       ext4_htree_node_unlock(lck, EXT4_LB_SPIN, p)
1279 +
1280 +static void ext4_htree_spin_stop_listen(struct htree_lock *lck)
1281 +{
1282 +       if (!ext4_htree_safe_locked(lck) &&
1283 +           htree_node_is_listening(lck, ffz(~EXT4_LB_SPIN)))
1284 +               htree_node_stop_listen(lck, ffz(~EXT4_LB_SPIN));
1285 +}
1286 +
1287 +enum {
1288 +       DX_HASH_COL_IGNORE,     /* ignore collision while probing frames */
1289 +       DX_HASH_COL_YES,        /* there is collision and it does matter */
1290 +       DX_HASH_COL_NO,         /* there is no collision */
1291 +};
1292 +
1293 +static int dx_probe_hash_collision(struct htree_lock *lck,
1294 +                                  struct dx_entry *entries,
1295 +                                  struct dx_entry *at, u32 hash)
1296 +{
1297 +       if (!(lck && ext4_htree_lock_data(lck)->ld_flags & EXT4_LB_EXACT)) {
1298 +               return DX_HASH_COL_IGNORE; /* don't care about collision */
1299 +
1300 +       } else if (at == entries + dx_get_count(entries) - 1) {
1301 +               return DX_HASH_COL_IGNORE; /* not in any leaf of this DX */
1302 +
1303 +       } else { /* hash collision? */
1304 +               return ((dx_get_hash(at + 1) & ~1) == hash) ?
1305 +                       DX_HASH_COL_YES : DX_HASH_COL_NO;
1306 +       }
1307 +}
1308 +
1309  /*
1310   * Probe for a directory leaf block to search.
1311   *
1312 @@ -744,10 +975,11 @@ struct stats dx_show_entries(struct dx_hash_info *hinfo, struct inode *dir,
1313   */
1314  static struct dx_frame *
1315  dx_probe(struct ext4_filename *fname, struct inode *dir,
1316 -        struct dx_hash_info *hinfo, struct dx_frame *frame_in)
1317 +        struct dx_hash_info *hinfo, struct dx_frame *frame_in,
1318 +        struct htree_lock *lck)
1319  {
1320         unsigned count, indirect;
1321 -       struct dx_entry *at, *entries, *p, *q, *m;
1322 +       struct dx_entry *at, *entries, *p, *q, *m, *dx = NULL;
1323         struct dx_root_info *info;
1324         struct dx_frame *frame = frame_in;
1325         struct dx_frame *ret_err = ERR_PTR(ERR_BAD_DX_DIR);
1326 @@ -808,8 +1040,15 @@ dx_probe(struct ext4_filename *fname, struct inode *dir,
1327  
1328         dxtrace(printk("Look up %x", hash));
1329         while (1) {
1330 +               if (indirect == 0) { /* the last index level */
1331 +                       /* NB: ext4_htree_dx_lock() could be a noop if the
1332 +                        * DX-lock flag is not set for the current operation */
1333 +                       ext4_htree_dx_lock(lck, dx);
1334 +                       ext4_htree_spin_lock(lck, dx, NULL);
1335 +               }
1336                 count = dx_get_count(entries);
1337 -               if (!count || count > dx_get_limit(entries)) {
1338 +               if (count == 0 || count > dx_get_limit(entries)) {
1339 +                       ext4_htree_spin_unlock(lck); /* release spin */
1340                         ext4_warning_inode(dir,
1341                                            "dx entry: count %u beyond limit %u",
1342                                            count, dx_get_limit(entries));
1343 @@ -847,8 +1086,70 @@ dx_probe(struct ext4_filename *fname, struct inode *dir,
1344                                dx_get_block(at)));
1345                 frame->entries = entries;
1346                 frame->at = at;
1347 -               if (!indirect--)
1348 +
1349 +               if (indirect == 0) { /* the last index level */
1350 +                       struct ext4_dir_lock_data *ld;
1351 +                       u64 myblock;
1352 +
1353 +                       /* By default we only lock the DE-block; however, we
1354 +                        * will also lock the last level DX-block if:
1355 +                        * a) there is a hash collision
1356 +                        *    we set the DX-lock flag (a few lines below)
1357 +                        *    and retry to lock the DX-block,
1358 +                        *    see details in dx_probe_hash_collision()
1359 +                        * b) this is a retry from splitting
1360 +                        *    we need to lock the last level DX-block so nobody
1361 +                        *    else can split any leaf blocks under the same
1362 +                        *    DX-block, see details in ext4_dx_add_entry()
1363 +                        */
1364 +                       if (ext4_htree_dx_locked(lck)) {
1365 +                               /* DX-block is locked, just lock DE-block
1366 +                                * and return */
1367 +                               ext4_htree_spin_unlock(lck);
1368 +                               if (!ext4_htree_safe_locked(lck))
1369 +                                       ext4_htree_de_lock(lck, frame->at);
1370 +                               return frame;
1371 +                       }
1372 +                       /* it's pdirop and no DX lock */
1373 +                       if (dx_probe_hash_collision(lck, entries, at, hash) ==
1374 +                           DX_HASH_COL_YES) {
1375 +                               /* found a hash collision, set the DX-lock
1376 +                                * flag and retry to obtain the DX-lock */
1377 +                               ext4_htree_spin_unlock(lck);
1378 +                               ext4_htree_dx_need_lock(lck);
1379 +                               continue;
1380 +                       }
1381 +                       ld = ext4_htree_lock_data(lck);
1382 +                       /* because I don't lock DX, @at can't be trusted
1383 +                        * after I release the spinlock, so I have to save it */
1384 +                       ld->ld_at = at;
1385 +                       ld->ld_at_entry = *at;
1386 +                       ld->ld_count = dx_get_count(entries);
1387 +
1388 +                       frame->at = &ld->ld_at_entry;
1389 +                       myblock = dx_get_block(at);
1390 +
1391 +                       /* NB: the ordering of these lock operations matters */
1392 +                       ext4_htree_spin_unlock_listen(lck, &myblock);
1393 +                       /* another thread can split this DE-block because:
1394 +                        * a) I don't have the lock for the DE-block yet
1395 +                        * b) I released the spinlock on the DX-block
1396 +                        * if that happens I can detect it by listening for
1397 +                        * the split event on this DE-block */
1398 +                       ext4_htree_de_lock(lck, frame->at);
1399 +                       ext4_htree_spin_stop_listen(lck);
1400 +
1401 +                       if (myblock == EXT4_HTREE_NODE_CHANGED) {
1402 +                               /* someone split this DE-block before
1403 +                                * I locked it, so I need to retry and lock
1404 +                                * the valid DE-block */
1405 +                               ext4_htree_de_unlock(lck);
1406 +                               continue;
1407 +                       }
1408                         return frame;
1409 +               }
1410 +               dx = at;
1411 +               indirect--;
1412                 frame++;
1413                 frame->bh = ext4_read_dirblock(dir, dx_get_block(at), INDEX);
1414                 if (IS_ERR(frame->bh)) {
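The last-level handling added above amounts to a retry protocol. The sketch below is illustrative only and is not additional patch code: it reuses the helpers introduced by this patch, dx_binary_search() is a hypothetical stand-in for the binary search dx_probe performs inline, and the ext4_htree_safe_locked() case (whole tree held EX/PW, so per-block locks are skipped) is omitted.

	/* condensed, illustrative restatement of dx_probe's last-level locking */
	while (1) {
		ext4_htree_dx_lock(lck, dx);		/* no-op unless the DX flag is set */
		ext4_htree_spin_lock(lck, dx, NULL);
		at = dx_binary_search(entries, hash);	/* hypothetical stand-in */
		if (ext4_htree_dx_locked(lck)) {
			/* DX-block held (collision or split retry): the DE lock
			 * alone is enough, take it and return */
			ext4_htree_spin_unlock(lck);
			ext4_htree_de_lock(lck, at);
			return frame;
		}
		if (dx_probe_hash_collision(lck, entries, at, hash) ==
		    DX_HASH_COL_YES) {
			ext4_htree_spin_unlock(lck);
			ext4_htree_dx_need_lock(lck);	/* retry with DX locked */
			continue;
		}
		/* the real code first copies *at into ext4_dir_lock_data, since
		 * @at itself cannot be trusted once the spinlock is dropped */
		myblock = dx_get_block(at);
		ext4_htree_spin_unlock_listen(lck, &myblock);	/* listen for splits */
		ext4_htree_de_lock(lck, at);
		ext4_htree_spin_stop_listen(lck);
		if (myblock == EXT4_HTREE_NODE_CHANGED) {
			/* the leaf was split before the DE lock was taken: retry */
			ext4_htree_de_unlock(lck);
			continue;
		}
		return frame;
	}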
1415 @@ -915,7 +1216,7 @@ static void dx_release(struct dx_frame *frames)
1416  static int ext4_htree_next_block(struct inode *dir, __u32 hash,
1417                                  struct dx_frame *frame,
1418                                  struct dx_frame *frames,
1419 -                                __u32 *start_hash)
1420 +                                __u32 *start_hash, struct htree_lock *lck)
1421  {
1422         struct dx_frame *p;
1423         struct buffer_head *bh;
1424 @@ -930,12 +1231,22 @@ static int ext4_htree_next_block(struct inode *dir, __u32 hash,
1425          * this loop, num_frames indicates the number of interior
1426          * nodes need to be read.
1427          */
1428 +       ext4_htree_de_unlock(lck);
1429         while (1) {
1430 -               if (++(p->at) < p->entries + dx_get_count(p->entries))
1431 -                       break;
1432 +               if (num_frames > 0 || ext4_htree_dx_locked(lck)) {
1433 +                       /* num_frames > 0 :
1434 +                        *   DX block
1435 +                        * ext4_htree_dx_locked:
1436 +                        *   frame->at is a reliable pointer returned by dx_probe,
1437 +                        *   otherwise dx_probe already knew there was no collision */
1438 +                       if (++(p->at) < p->entries + dx_get_count(p->entries))
1439 +                               break;
1440 +               }
1441                 if (p == frames)
1442                         return 0;
1443                 num_frames++;
1444 +               if (num_frames == 1)
1445 +                       ext4_htree_dx_unlock(lck);
1446                 p--;
1447         }
1448  
1449 @@ -958,6 +1269,13 @@ static int ext4_htree_next_block(struct inode *dir, __u32 hash,
1450          * block so no check is necessary
1451          */
1452         while (num_frames--) {
1453 +               if (num_frames == 0) {
1454 +                       /* this is not always necessary, we just don't want
1455 +                        * to detect the hash collision again */
1456 +                       ext4_htree_dx_need_lock(lck);
1457 +                       ext4_htree_dx_lock(lck, p->at);
1458 +               }
1459 +
1460                 bh = ext4_read_dirblock(dir, dx_get_block(p->at), INDEX);
1461                 if (IS_ERR(bh))
1462                         return PTR_ERR(bh);
1463 @@ -966,6 +1284,7 @@ static int ext4_htree_next_block(struct inode *dir, __u32 hash,
1464                 p->bh = bh;
1465                 p->at = p->entries = ((struct dx_node *) bh->b_data)->entries;
1466         }
1467 +       ext4_htree_de_lock(lck, p->at);
1468         return 1;
1469  }
1470  
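Together with the dx_probe() change, ext4_htree_next_block() now hands the DE lock from one leaf to the next: it drops the caller's DE lock on entry and takes the lock on the new p->at before returning 1. The caller-side pattern this enables (essentially what ext4_dx_find_entry() does below) is roughly the following sketch; error handling is omitted and the leaf search is elided.

	/* illustrative scan loop, assuming @lck was prepared by the caller */
	frame = dx_probe(fname, dir, NULL, frames, lck);  /* leaf is DE-locked */
	do {
		bh = ext4_read_dirblock(dir, dx_get_block(frame->at), DIRENT);
		/* ... search this leaf while its DE lock is held ... */
		brelse(bh);
		/* drops the DE lock on this leaf, then locks the next one */
		ret = ext4_htree_next_block(dir, fname->hinfo.hash, frame,
					    frames, NULL, lck);
	} while (ret == 1);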
1471 @@ -1110,10 +1429,10 @@ int ext4_htree_fill_tree(struct file *dir_file, __u32 start_hash,
1472         }
1473         hinfo.hash = start_hash;
1474         hinfo.minor_hash = 0;
1475 -       frame = dx_probe(NULL, dir, &hinfo, frames);
1476 +       /* assume it's PR locked */
1477 +       frame = dx_probe(NULL, dir, &hinfo, frames, NULL);
1478         if (IS_ERR(frame))
1479                 return PTR_ERR(frame);
1480 -
1481         /* Add '.' and '..' from the htree header */
1482         if (!start_hash && !start_minor_hash) {
1483                 de = (struct ext4_dir_entry_2 *) frames[0].bh->b_data;
1484 @@ -1148,7 +1467,7 @@ int ext4_htree_fill_tree(struct file *dir_file, __u32 start_hash,
1485                 count += ret;
1486                 hashval = ~0;
1487                 ret = ext4_htree_next_block(dir, HASH_NB_ALWAYS,
1488 -                                           frame, frames, &hashval);
1489 +                                           frame, frames, &hashval, NULL);
1490                 *next_hash = hashval;
1491                 if (ret < 0) {
1492                         err = ret;
1493 @@ -1372,10 +1691,10 @@ static int is_dx_internal_node(struct inode *dir, ext4_lblk_t block,
1494   * The returned buffer_head has ->b_count elevated.  The caller is expected
1495   * to brelse() it when appropriate.
1496   */
1497 -static struct buffer_head * ext4_find_entry (struct inode *dir,
1498 +struct buffer_head *__ext4_find_entry(struct inode *dir,
1499                                         const struct qstr *d_name,
1500                                         struct ext4_dir_entry_2 **res_dir,
1501 -                                       int *inlined)
1502 +                                       int *inlined, struct htree_lock *lck)
1503  {
1504         struct super_block *sb;
1505         struct buffer_head *bh_use[NAMEI_RA_SIZE];
1506 @@ -1423,7 +1742,7 @@ static struct buffer_head * ext4_find_entry (struct inode *dir,
1507                 goto restart;
1508         }
1509         if (is_dx(dir)) {
1510 -               ret = ext4_dx_find_entry(dir, &fname, res_dir);
1511 +               ret = ext4_dx_find_entry(dir, &fname, res_dir, lck);
1512                 /*
1513                  * On success, or if the error was file not found,
1514                  * return.  Otherwise, fall back to doing a search the
1515 @@ -1433,6 +1752,7 @@ static struct buffer_head * ext4_find_entry (struct inode *dir,
1516                         goto cleanup_and_exit;
1517                 dxtrace(printk(KERN_DEBUG "ext4_find_entry: dx failed, "
1518                                "falling back\n"));
1519 +               ext4_htree_safe_relock(lck);
1520         }
1521         nblocks = dir->i_size >> EXT4_BLOCK_SIZE_BITS(sb);
1522         start = EXT4_I(dir)->i_dir_start_lookup;
1523 @@ -1528,10 +1848,12 @@ cleanup_and_exit:
1524         ext4_fname_free_filename(&fname);
1525         return ret;
1526  }
1527 +EXPORT_SYMBOL(__ext4_find_entry);
1528  
1529  static struct buffer_head * ext4_dx_find_entry(struct inode *dir,
1530                         struct ext4_filename *fname,
1531 -                       struct ext4_dir_entry_2 **res_dir)
1532 +                       struct ext4_dir_entry_2 **res_dir,
1533 +                       struct htree_lock *lck)
1534  {
1535         struct super_block * sb = dir->i_sb;
1536         struct dx_frame frames[EXT4_HTREE_LEVEL], *frame;
1537 @@ -1543,7 +1865,7 @@ static struct buffer_head * ext4_dx_find_entry(struct inode *dir,
1538  #ifdef CONFIG_EXT4_FS_ENCRYPTION
1539         *res_dir = NULL;
1540  #endif
1541 -       frame = dx_probe(fname, dir, NULL, frames);
1542 +       frame = dx_probe(fname, dir, NULL, frames, lck);
1543         if (IS_ERR(frame))
1544                 return (struct buffer_head *) frame;
1545         do {
1546 @@ -1565,7 +1887,7 @@ static struct buffer_head * ext4_dx_find_entry(struct inode *dir,
1547  
1548                 /* Check to see if we should continue to search */
1549                 retval = ext4_htree_next_block(dir, fname->hinfo.hash, frame,
1550 -                                              frames, NULL);
1551 +                                              frames, NULL, lck);
1552                 if (retval < 0) {
1553                         ext4_warning_inode(dir,
1554                                 "error %d reading directory index block",
1555 @@ -1738,8 +2060,9 @@ static struct ext4_dir_entry_2* dx_pack_dirents(char *base, unsigned blocksize)
1556   * Returns pointer to de in block into which the new entry will be inserted.
1557   */
1558  static struct ext4_dir_entry_2 *do_split(handle_t *handle, struct inode *dir,
1559 -                       struct buffer_head **bh,struct dx_frame *frame,
1560 -                       struct dx_hash_info *hinfo)
1561 +                       struct buffer_head **bh, struct dx_frame *frames,
1562 +                       struct dx_frame *frame, struct dx_hash_info *hinfo,
1563 +                       struct htree_lock *lck)
1564  {
1565         unsigned blocksize = dir->i_sb->s_blocksize;
1566         unsigned count, continued;
1567 @@ -1801,8 +2124,14 @@ static struct ext4_dir_entry_2 *do_split(handle_t *handle, struct inode *dir,
1568                                         hash2, split, count-split));
1569  
1570         /* Fancy dance to stay within two buffers */
1571 -       de2 = dx_move_dirents(data1, data2, map + split, count - split,
1572 -                             blocksize);
1573 +       if (hinfo->hash < hash2) {
1574 +               de2 = dx_move_dirents(data1, data2, map + split,
1575 +                                     count - split, blocksize);
1576 +       } else {
1577 +               /* make sure we will add the entry to the same block that
1578 +                * we have already locked */
1579 +               de2 = dx_move_dirents(data1, data2, map, split, blocksize);
1580 +       }
1581         de = dx_pack_dirents(data1, blocksize);
1582         de->rec_len = ext4_rec_len_to_disk(data1 + (blocksize - csum_size) -
1583                                            (char *) de,
1584 @@ -1823,12 +2152,21 @@ static struct ext4_dir_entry_2 *do_split(handle_t *handle, struct inode *dir,
1585         dxtrace(dx_show_leaf(dir, hinfo, (struct ext4_dir_entry_2 *) data2,
1586                         blocksize, 1));
1587  
1588 -       /* Which block gets the new entry? */
1589 -       if (hinfo->hash >= hash2) {
1590 -               swap(*bh, bh2);
1591 -               de = de2;
1592 +       ext4_htree_spin_lock(lck, frame > frames ? (frame - 1)->at : NULL,
1593 +                            frame->at); /* notify that the block is being split */
1594 +       if (hinfo->hash < hash2) {
1595 +               dx_insert_block(frame, hash2 + continued, newblock);
1596 +
1597 +       } else {
1598 +               /* switch block number */
1599 +               dx_insert_block(frame, hash2 + continued,
1600 +                               dx_get_block(frame->at));
1601 +               dx_set_block(frame->at, newblock);
1602 +               (frame->at)++;
1603         }
1604 -       dx_insert_block(frame, hash2 + continued, newblock);
1605 +       ext4_htree_spin_unlock(lck);
1606 +       ext4_htree_dx_unlock(lck);
1607 +
1608         err = ext4_handle_dirty_dirent_node(handle, dir, bh2);
1609         if (err)
1610                 goto journal_error;
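The split direction above is what keeps the insertion inside the DE-block that dx_probe() already locked. A worked example with hypothetical hash values:

	/* hypothetical values, for illustration only:
	 *   hash2       = 0x80000000   split boundary computed above
	 *   hinfo->hash = 0x90000000   the new entry belongs to the upper half
	 *
	 * upstream ext4: swap(*bh, bh2) and insert into the new, unlocked block
	 * with pdirop:   move the lower half (map..split) into @newblock, point
	 *                the existing dx_entry at @newblock, insert a dx_entry
	 *                for hash2 that keeps the original block number, and
	 *                advance frame->at, so the new entry is still added to
	 *                the block whose DE lock is already held
	 */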
1611 @@ -2121,7 +2459,7 @@ static int make_indexed_dir(handle_t *handle, struct ext4_filename *fname,
1612         if (retval)
1613                 goto out_frames;        
1614  
1615 -       de = do_split(handle,dir, &bh2, frame, &fname->hinfo);
1616 +       de = do_split(handle,dir, &bh2, frames, frame, &fname->hinfo, NULL);
1617         if (IS_ERR(de)) {
1618                 retval = PTR_ERR(de);
1619                 goto out_frames;
1620 @@ -2231,8 +2569,8 @@ out:
1621   * may not sleep between calling this and putting something into
1622   * the entry, as someone else might have used it while you slept.
1623   */
1624 -static int ext4_add_entry(handle_t *handle, struct dentry *dentry,
1625 -                         struct inode *inode)
1626 +int __ext4_add_entry(handle_t *handle, struct dentry *dentry,
1627 +                     struct inode *inode, struct htree_lock *lck)
1628  {
1629         struct inode *dir = d_inode(dentry->d_parent);
1630         struct buffer_head *bh = NULL;
1631 @@ -2273,9 +2611,10 @@ static int ext4_add_entry(handle_t *handle, struct dentry *dentry,
1632                 if (dentry->d_name.len == 2 &&
1633                     memcmp(dentry->d_name.name, "..", 2) == 0)
1634                         return ext4_update_dotdot(handle, dentry, inode);
1635 -               retval = ext4_dx_add_entry(handle, &fname, dentry, inode);
1636 +               retval = ext4_dx_add_entry(handle, &fname, dentry, inode, lck);
1637                 if (!retval || (retval != ERR_BAD_DX_DIR))
1638                         goto out;
1639 +               ext4_htree_safe_relock(lck);
1640                 ext4_clear_inode_flag(dir, EXT4_INODE_INDEX);
1641                 dx_fallback++;
1642                 ext4_mark_inode_dirty(handle, dir);
1643 @@ -2325,12 +2664,14 @@ out:
1644                 ext4_set_inode_state(inode, EXT4_STATE_NEWENTRY);
1645         return retval;
1646  }
1647 +EXPORT_SYMBOL(__ext4_add_entry);
1648  
1649  /*
1650   * Returns 0 for success, or a negative error value
1651   */
1652  static int ext4_dx_add_entry(handle_t *handle, struct ext4_filename *fname,
1653 -                            struct dentry *dentry, struct inode *inode)
1654 +                            struct dentry *dentry, struct inode *inode,
1655 +                            struct htree_lock *lck)
1656  {
1657         struct dx_frame frames[EXT4_HTREE_LEVEL], *frame;
1658         struct dx_entry *entries, *at;
1659 @@ -2343,7 +2684,7 @@ static int ext4_dx_add_entry(handle_t *handle, struct ext4_filename *fname,
1660  
1661  again:
1662         restart = 0;
1663 -       frame = dx_probe(fname, dir, NULL, frames);
1664 +       frame = dx_probe(fname, dir, NULL, frames, lck);
1665         if (IS_ERR(frame))
1666                 return PTR_ERR(frame);
1667         entries = frame->entries;
1668 @@ -2373,6 +2714,11 @@ again:
1669                 struct dx_node *node2;
1670                 struct buffer_head *bh2;
1671  
1672 +               if (!ext4_htree_safe_locked(lck)) { /* retry with EX lock */
1673 +                       ext4_htree_safe_relock(lck);
1674 +                       restart = 1;
1675 +                       goto cleanup;
1676 +               }
1677                 while (frame > frames) {
1678                         if (dx_get_count((frame - 1)->entries) <
1679                             dx_get_limit((frame - 1)->entries)) {
1680 @@ -2472,8 +2818,32 @@ again:
1681                         restart = 1;
1682                         goto journal_error;
1683                 }
1684 +       } else if (!ext4_htree_dx_locked(lck)) {
1685 +               struct ext4_dir_lock_data *ld = ext4_htree_lock_data(lck);
1686 +
1687 +               /* not well protected, require DX lock */
1688 +               ext4_htree_dx_need_lock(lck);
1689 +               at = frame > frames ? (frame - 1)->at : NULL;
1690 +
1691 +               /* NB: no risk of deadlock because it's just a try.
1692 +                *
1693 +                * NB: we check ld_count twice, the first time before
1694 +                * taking the DX lock, the second time while holding it.
1695 +                *
1696 +                * NB: we never free directory blocks so far, which
1697 +                * means the value returned by dx_get_count() should equal
1698 +                * ld->ld_count if nobody split any DE-block under @at,
1699 +                * and ld->ld_at still points to a valid dx_entry. */
1700 +               if ((ld->ld_count != dx_get_count(entries)) ||
1701 +                   !ext4_htree_dx_lock_try(lck, at) ||
1702 +                   (ld->ld_count != dx_get_count(entries))) {
1703 +                       restart = 1;
1704 +                       goto cleanup;
1705 +               }
1706 +               /* OK, I've got DX lock and nothing changed */
1707 +               frame->at = ld->ld_at;
1708         }
1709 -       de = do_split(handle, dir, &bh, frame, &fname->hinfo);
1710 +       de = do_split(handle, dir, &bh, frames, frame, &fname->hinfo, lck);
1711         if (IS_ERR(de)) {
1712                 err = PTR_ERR(de);
1713                 goto cleanup;
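The ld_count handling in the hunk above is an optimistic check/trylock/re-check sequence: snapshot the count during dx_probe(), try to take the DX lock without blocking, and re-validate once the lock is held, restarting from dx_probe() on any mismatch. A minimal, self-contained sketch of the same pattern with hypothetical names (not part of the patch):

	#include <linux/spinlock.h>
	#include <linux/types.h>

	struct dx_count_demo {
		spinlock_t	lock;
		unsigned	count;	/* bumped whenever a leaf is split */
	};

	/* returns true if @snap is still valid and @d->lock is now held */
	static bool demo_try_validate(struct dx_count_demo *d, unsigned snap)
	{
		if (d->count != snap)		/* cheap check before locking */
			return false;
		if (!spin_trylock(&d->lock))	/* never block: no deadlock risk */
			return false;
		if (d->count != snap) {		/* re-check under the lock */
			spin_unlock(&d->lock);
			return false;		/* caller restarts the probe */
		}
		return true;			/* caller unlocks when finished */
	}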
1714 @@ -2484,6 +2854,8 @@ again:
1715  journal_error:
1716         ext4_std_error(dir->i_sb, err); /* this is a no-op if err == 0 */
1717  cleanup:
1718 +       ext4_htree_dx_unlock(lck);
1719 +       ext4_htree_de_unlock(lck);
1720         brelse(bh);
1721         dx_release(frames);
1722         /* @restart is true means htree-path has been changed, we need to
1723 diff --git a/fs/ext4/super.c b/fs/ext4/super.c
1724 index 02fe65b..be65ad4 100644
1725 --- a/fs/ext4/super.c
1726 +++ b/fs/ext4/super.c
1727 @@ -896,6 +896,7 @@ static struct inode *ext4_alloc_inode(struct super_block *sb)
1728  
1729         ei->vfs_inode.i_version = 1;
1730         spin_lock_init(&ei->i_raw_lock);
1731 +       sema_init(&ei->i_append_sem, 1);
1732         INIT_LIST_HEAD(&ei->i_prealloc_list);
1733         spin_lock_init(&ei->i_prealloc_lock);
1734         ext4_es_init_tree(&ei->i_es_tree);
1735 diff --git a/include/linux/htree_lock.h b/include/linux/htree_lock.h
1736 new file mode 100644
1737 index 0000000..9dc7788
1738 --- /dev/null
1739 +++ b/include/linux/htree_lock.h
1740 @@ -0,0 +1,187 @@
1741 +/*
1742 + * include/linux/htree_lock.h
1743 + *
1744 + * Copyright (c) 2011, 2012, Intel Corporation.
1745 + *
1746 + * Author: Liang Zhen <liang@whamcloud.com>
1747 + */
1748 +
1749 +/*
1750 + * htree lock
1751 + *
1752 + * htree_lock is an advanced lock, it can support five lock modes (concept is
1753 + * htree_lock is an advanced lock: it supports five lock modes (the concept is
1754 + * taken from DLM) and it is a sleeping lock.
1755 + * most common use case is:
1756 + * the most common use case is:
1757 + * - create an htree_lock_head for the data
1758 + * - each thread (contender) creates its own htree_lock
1759 + * - a contender calls htree_lock(lock_node, mode) to protect the data and
1760 + *   calls htree_unlock to release the lock
1761 + * Also, there is advanced use-case which is more complex, user can have
1762 + * There is also a more complex, advanced use case: a user can hold a PW/PR
1763 + * lock on a particular key; this is mostly used while the user holds a shared
1764 + * lock on the htree (CW, CR)
1765 + * htree_lock(lock_node, HTREE_LOCK_CR); lock the htree with CR
1766 + * htree_node_lock(lock_node, HTREE_LOCK_PR, key...); lock @key with PR
1767 + * ...
1768 + * htree_node_unlock(lock_node);; unlock the key
1769 + * htree_node_unlock(lock_node); unlock the key
1770 + * Another tip is, we can have N-levels of this kind of keys, all we need to
1771 + * Another tip: we can have N levels of such keys; all we need to do is
1772 + * specify N levels while creating the htree_lock_head, then we can
1773 + * lock/unlock a specific level by:
1774 + * do something;
1775 + * htree_node_lock(lock_node, mode1, key2, level2...);
1776 + * do something;
1777 + * htree_node_unlock(lock_node, level2);
1778 + * htree_node_unlock(lock_node, level1);
1779 + *
1780 + * NB: for multi-level, should be careful about locking order to avoid deadlock
1781 + * NB: with multiple levels, be careful about lock ordering to avoid deadlock
1782 +
1783 +#ifndef _LINUX_HTREE_LOCK_H
1784 +#define _LINUX_HTREE_LOCK_H
1785 +
1786 +#include <linux/list.h>
1787 +#include <linux/spinlock.h>
1788 +#include <linux/sched.h>
1789 +
1790 +/*
1791 + * Lock Modes
1792 + * more details can be found here:
1793 + * http://en.wikipedia.org/wiki/Distributed_lock_manager
1794 + */
1795 +typedef enum {
1796 +       HTREE_LOCK_EX   = 0, /* exclusive lock: incompatible with all others */
1797 +       HTREE_LOCK_PW,       /* protected write: allows only CR users */
1798 +       HTREE_LOCK_PR,       /* protected read: allow PR, CR users */
1799 +       HTREE_LOCK_CW,       /* concurrent write: allow CR, CW users */
1800 +       HTREE_LOCK_CR,       /* concurrent read: allow all but EX users */
1801 +       HTREE_LOCK_MAX,      /* number of lock modes */
1802 +} htree_lock_mode_t;
1803 +
1804 +#define HTREE_LOCK_NL          HTREE_LOCK_MAX
1805 +#define HTREE_LOCK_INVAL       0xdead10c
1806 +
1807 +enum {
1808 +       HTREE_HBITS_MIN         = 2,
1809 +       HTREE_HBITS_DEF         = 14,
1810 +       HTREE_HBITS_MAX         = 32,
1811 +};
1812 +
1813 +enum {
1814 +       HTREE_EVENT_DISABLE     = (0),
1815 +       HTREE_EVENT_RD          = (1 << HTREE_LOCK_PR),
1816 +       HTREE_EVENT_WR          = (1 << HTREE_LOCK_PW),
1817 +       HTREE_EVENT_RDWR        = (HTREE_EVENT_RD | HTREE_EVENT_WR),
1818 +};
1819 +
1820 +struct htree_lock;
1821 +
1822 +typedef void (*htree_event_cb_t)(void *target, void *event);
1823 +
1824 +struct htree_lock_child {
1825 +       struct list_head        lc_list;        /* granted list */
1826 +       htree_event_cb_t        lc_callback;    /* event callback */
1827 +       unsigned                lc_events;      /* event types */
1828 +};
1829 +
1830 +struct htree_lock_head {
1831 +       unsigned long           lh_lock;        /* bits lock */
1832 +       /* blocked lock list (htree_lock) */
1833 +       struct list_head        lh_blocked_list;
1834 +       /* # key levels */
1835 +       u16                     lh_depth;
1836 +       /* hash bits for key and limit number of locks */
1837 +       u16                     lh_hbits;
1838 +       /* counters for blocked locks */
1839 +       u16                     lh_nblocked[HTREE_LOCK_MAX];
1840 +       /* counters for granted locks */
1841 +       u16                     lh_ngranted[HTREE_LOCK_MAX];
1842 +       /* private data */
1843 +       void                    *lh_private;
1844 +       /* array of children locks */
1845 +       struct htree_lock_child lh_children[0];
1846 +};
1847 +
1848 +/* htree_lock_node_t is child-lock for a specific key (ln_value) */
1849 +struct htree_lock_node {
1850 +       htree_lock_mode_t       ln_mode;
1851 +       /* major hash key */
1852 +       u16                     ln_major_key;
1853 +       /* minor hash key */
1854 +       u16                     ln_minor_key;
1855 +       struct list_head        ln_major_list;
1856 +       struct list_head        ln_minor_list;
1857 +       /* alive list, all locks (granted, blocked, listening) are on it */
1858 +       struct list_head        ln_alive_list;
1859 +       /* blocked list */
1860 +       struct list_head        ln_blocked_list;
1861 +       /* granted list */
1862 +       struct list_head        ln_granted_list;
1863 +       void                    *ln_ev_target;
1864 +};
1865 +
1866 +struct htree_lock {
1867 +       struct task_struct      *lk_task;
1868 +       struct htree_lock_head  *lk_head;
1869 +       void                    *lk_private;
1870 +       unsigned                lk_depth;
1871 +       htree_lock_mode_t       lk_mode;
1872 +       struct list_head        lk_blocked_list;
1873 +       struct htree_lock_node  lk_nodes[0];
1874 +};
1875 +
1876 +/* create a lock head, which stands for a resource */
1877 +struct htree_lock_head *htree_lock_head_alloc(unsigned depth,
1878 +                                             unsigned hbits, unsigned priv);
1879 +/* free a lock head */
1880 +void htree_lock_head_free(struct htree_lock_head *lhead);
1881 +/* register event callback for child lock at level @depth */
1882 +void htree_lock_event_attach(struct htree_lock_head *lhead, unsigned depth,
1883 +                            unsigned events, htree_event_cb_t callback);
1884 +/* create a lock handle, which stands for a thread */
1885 +struct htree_lock *htree_lock_alloc(unsigned depth, unsigned pbytes);
1886 +/* free a lock handle */
1887 +void htree_lock_free(struct htree_lock *lck);
1888 +/* lock the htree; when @wait is false, 0 is returned if the lock can't
1889 + * be granted immediately */
1890 +int htree_lock_try(struct htree_lock *lck, struct htree_lock_head *lhead,
1891 +                  htree_lock_mode_t mode, int wait);
1892 +/* unlock htree */
1893 +void htree_unlock(struct htree_lock *lck);
1894 +/* unlock and relock htree with @new_mode */
1895 +int htree_change_lock_try(struct htree_lock *lck,
1896 +                         htree_lock_mode_t new_mode, int wait);
1897 +void htree_change_mode(struct htree_lock *lck, htree_lock_mode_t mode);
1898 +/* acquire the child lock (key) of the htree at level @dep; @event will be sent
1899 + * to all listeners on this @key when the lock is granted */
1900 +int htree_node_lock_try(struct htree_lock *lck, htree_lock_mode_t mode,
1901 +                       u32 key, unsigned dep, int wait, void *event);
1902 +/* release the child lock at level @dep; this lock will listen on its key
1903 + * if @event isn't NULL, and event_cb will be called against @lck when granting
1904 + * any other lock at level @dep with the same key */
1905 +void htree_node_unlock(struct htree_lock *lck, unsigned dep, void *event);
1906 +/* stop listening on child lock at level @dep */
1907 +void htree_node_stop_listen(struct htree_lock *lck, unsigned dep);
1908 +/* for debug */
1909 +void htree_lock_stat_print(int depth);
1910 +void htree_lock_stat_reset(void);
1911 +
1912 +#define htree_lock(lck, lh, mode)      htree_lock_try(lck, lh, mode, 1)
1913 +#define htree_change_lock(lck, mode)   htree_change_lock_try(lck, mode, 1)
1914 +
1915 +#define htree_lock_mode(lck)           ((lck)->lk_mode)
1916 +
1917 +#define htree_node_lock(lck, mode, key, dep)   \
1918 +       htree_node_lock_try(lck, mode, key, dep, 1, NULL)
1919 +/* this is only safe in thread context of lock owner */
1920 +#define htree_node_is_granted(lck, dep)                \
1921 +       ((lck)->lk_nodes[dep].ln_mode != HTREE_LOCK_INVAL && \
1922 +        (lck)->lk_nodes[dep].ln_mode != HTREE_LOCK_NL)
1923 +/* this is only safe in thread context of lock owner */
1924 +#define htree_node_is_listening(lck, dep)      \
1925 +       ((lck)->lk_nodes[dep].ln_mode == HTREE_LOCK_NL)
1926 +
1927 +#endif
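For completeness, a minimal usage sketch of the API declared above, based only on these declarations and the header comment. The demo_* names and the key value are hypothetical, the locking depth is a single level, and error handling is mostly omitted.

	#include <linux/errno.h>
	#include <linux/types.h>
	#include <linux/htree_lock.h>

	/* mode compatibility, per the enum above:
	 *   EX: nothing    PW: CR only    PR: PR, CR
	 *   CW: CW, CR     CR: everything except EX
	 */

	static struct htree_lock_head *demo_lhead;	/* one per shared resource */

	static int demo_init(void)
	{
		/* one key level, default hash bits, no private bytes */
		demo_lhead = htree_lock_head_alloc(1, HTREE_HBITS_DEF, 0);
		return demo_lhead != NULL ? 0 : -ENOMEM;
	}

	static void demo_modify(u32 key)
	{
		/* each contender allocates its own handle: depth 1, no private data */
		struct htree_lock *lck = htree_lock_alloc(1, 0);

		if (lck == NULL)
			return;

		htree_lock(lck, demo_lhead, HTREE_LOCK_CW);	/* shared lock on the tree */
		htree_node_lock(lck, HTREE_LOCK_PW, key, 0);	/* PW on @key at level 0 */

		/* ... modify the data guarded by @key ... */

		htree_node_unlock(lck, 0, NULL);
		htree_unlock(lck);
		htree_lock_free(lck);
	}

	static void demo_fini(void)
	{
		htree_lock_head_free(demo_lhead);
	}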