LU-50 ldiskfs: pdirops patch for ldiskfs
fs/lustre-release.git: ldiskfs/kernel_patches/patches/ext4_pdirop-rhel6.patch
1 --- /dev/null   2011-12-08 11:16:52.000000000 +0800
2 +++ linux-2.6.32-131.6.1-pdo/include/linux/htree_lock.h 2011-12-02 17:09:34.000000000 +0800
3 @@ -0,0 +1,187 @@
4 +/*
5 + * include/linux/htree_lock.h
6 + *
7 + * Copyright (c) 2011 Whamcloud, Inc.
8 + *
9 + * Author: Liang Zhen <liang@whamcloud.com>
10 + */
11 +
12 +/*
13 + * htree lock
14 + *
15 + * htree_lock is an advanced lock that supports five lock modes (the concept
16 + * is taken from DLM) and it is a sleeping lock.
17 + *
18 + * The most common use case is:
19 + * - create a htree_lock_head for the data
20 + * - each thread (contender) creates its own htree_lock
21 + * - a contender calls htree_lock(lock_node, mode) to protect the data and
22 + *   calls htree_unlock to release the lock
23 + *
24 + * There is also a more complex, advanced use case: a user can hold a PW/PR
25 + * lock on a particular key, which is mostly used while holding a shared
26 + * lock (CW, CR) on the htree:
27 + *
28 + * htree_lock(lock_node, HTREE_LOCK_CR); lock the htree with CR
29 + * htree_node_lock(lock_node, HTREE_LOCK_PR, key...); lock @key with PR
30 + * ...
31 + * htree_node_unlock(lock_node); unlock the key
32 + *
33 + * Another tip: we can have N levels of this kind of key; all we need to
34 + * do is specify N levels while creating the htree_lock_head, then we can
35 + * lock/unlock a specific level by:
36 + * htree_node_lock(lock_node, mode1, key1, level1...);
37 + * do something;
38 + * htree_node_lock(lock_node, mode1, key2, level2...);
39 + * do something;
40 + * htree_node_unlock(lock_node, level2);
41 + * htree_node_unlock(lock_node, level1);
42 + *
43 + * NB: for multi-level locking, be careful about lock ordering to avoid deadlock
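+ *
+ * As a minimal sketch (the names "my_head", "my_lck" and "my_key" are only
+ * illustrative and not part of this API), a single-level user could do:
+ *
+ * my_head = htree_lock_head_alloc(1, HTREE_HBITS_DEF, 0);
+ * my_lck = htree_lock_alloc(1, 0);
+ * ...
+ * htree_lock(my_lck, my_head, HTREE_LOCK_CR); share-lock the whole htree
+ * htree_node_lock(my_lck, HTREE_LOCK_PR, my_key, 0); read-lock @my_key
+ * ... access the data protected by @my_key ...
+ * htree_node_unlock(my_lck, 0, NULL);
+ * htree_unlock(my_lck);
+ * ...
+ * htree_lock_free(my_lck);
+ * htree_lock_head_free(my_head);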
44 + */
45 +
46 +#ifndef _LINUX_HTREE_LOCK_H
47 +#define _LINUX_HTREE_LOCK_H
48 +
49 +#include <linux/list.h>
50 +#include <linux/spinlock.h>
51 +#include <linux/sched.h>
52 +
53 +/*
54 + * Lock Modes
55 + * more details can be found here:
56 + * http://en.wikipedia.org/wiki/Distributed_lock_manager
57 + */
58 +typedef enum {
59 +       HTREE_LOCK_EX   = 0, /* exclusive lock: incompatible with all others */
60 +       HTREE_LOCK_PW,       /* protected write: allows only CR users */
61 +       HTREE_LOCK_PR,       /* protected read: allow PR, CR users */
62 +       HTREE_LOCK_CW,       /* concurrent write: allow CR, CW users */
63 +       HTREE_LOCK_CR,       /* concurrent read: allow all but EX users */
64 +       HTREE_LOCK_MAX,      /* number of lock modes */
65 +} htree_lock_mode_t;
66 +
67 +#define HTREE_LOCK_NL          HTREE_LOCK_MAX
68 +#define HTREE_LOCK_INVAL       0xdead10c
69 +
70 +enum {
71 +       HTREE_HBITS_MIN         = 2,
72 +       HTREE_HBITS_DEF         = 14,
73 +       HTREE_HBITS_MAX         = 32,
74 +};
75 +
76 +enum {
77 +       HTREE_EVENT_DISABLE     = (0),
78 +       HTREE_EVENT_RD          = (1 << HTREE_LOCK_PR),
79 +       HTREE_EVENT_WR          = (1 << HTREE_LOCK_PW),
80 +       HTREE_EVENT_RDWR        = (HTREE_EVENT_RD | HTREE_EVENT_WR),
81 +};
82 +
83 +struct htree_lock;
84 +
85 +typedef void (*htree_event_cb_t)(void *target, void *event);
86 +
87 +struct htree_lock_child {
88 +       struct list_head        lc_list;        /* granted list */
89 +       htree_event_cb_t        lc_callback;    /* event callback */
90 +       unsigned                lc_events;      /* event types */
91 +};
92 +
93 +struct htree_lock_head {
94 +       unsigned long           lh_lock;        /* bits lock */
95 +       /* blocked lock list (htree_lock) */
96 +       struct list_head        lh_blocked_list;
97 +       /* # key levels */
98 +       u16                     lh_depth;
99 +       /* hash bits for key and limit number of locks */
100 +       u16                     lh_hbits;
101 +       /* counters for blocked locks */
102 +       u16                     lh_nblocked[HTREE_LOCK_MAX];
103 +       /* counters for granted locks */
104 +       u16                     lh_ngranted[HTREE_LOCK_MAX];
105 +       /* private data */
106 +       void                    *lh_private;
107 +       /* array of children locks */
108 +       struct htree_lock_child lh_children[0];
109 +};
110 +
111 +/* htree_lock_node_t is child-lock for a specific key (ln_value) */
112 +struct htree_lock_node {
113 +       htree_lock_mode_t       ln_mode;
114 +       /* major hash key */
115 +       u16                     ln_major_key;
116 +       /* minor hash key */
117 +       u16                     ln_minor_key;
118 +       struct list_head        ln_major_list;
119 +       struct list_head        ln_minor_list;
120 +       /* alive list, all locks (granted, blocked, listening) are on it */
121 +       struct list_head        ln_alive_list;
122 +       /* blocked list */
123 +       struct list_head        ln_blocked_list;
124 +       /* granted list */
125 +       struct list_head        ln_granted_list;
126 +       void                    *ln_ev_target;
127 +};
128 +
129 +struct htree_lock {
130 +       struct task_struct      *lk_task;
131 +       struct htree_lock_head  *lk_head;
132 +       void                    *lk_private;
133 +       unsigned                lk_depth;
134 +       htree_lock_mode_t       lk_mode;
135 +       struct list_head        lk_blocked_list;
136 +       struct htree_lock_node  lk_nodes[0];
137 +};
138 +
139 +/* create a lock head, which stands for a resource */
140 +struct htree_lock_head *htree_lock_head_alloc(unsigned depth,
141 +                                             unsigned hbits, unsigned priv);
142 +/* free a lock head */
143 +void htree_lock_head_free(struct htree_lock_head *lhead);
144 +/* register event callback for child lock at level @depth */
145 +void htree_lock_event_attach(struct htree_lock_head *lhead, unsigned depth,
146 +                            unsigned events, htree_event_cb_t callback);
147 +/* create a lock handle, which stands for a thread */
148 +struct htree_lock *htree_lock_alloc(unsigned depth, unsigned pbytes);
149 +/* free a lock handle */
150 +void htree_lock_free(struct htree_lock *lck);
151 +/* lock htree; when @wait is false, 0 is returned if the lock can't
152 + * be granted immediately */
153 +int htree_lock_try(struct htree_lock *lck, struct htree_lock_head *lhead,
154 +                  htree_lock_mode_t mode, int wait);
155 +/* unlock htree */
156 +void htree_unlock(struct htree_lock *lck);
157 +/* unlock and relock htree with @new_mode */
158 +int htree_change_lock_try(struct htree_lock *lck,
159 +                         htree_lock_mode_t new_mode, int wait);
160 +void htree_change_mode(struct htree_lock *lck, htree_lock_mode_t mode);
161 +/* acquire the child lock (key) of the htree at level @dep; @event will be
162 + * sent to all listeners on this @key while the lock is being granted */
163 +int htree_node_lock_try(struct htree_lock *lck, htree_lock_mode_t mode,
164 +                       u32 key, unsigned dep, int wait, void *event);
165 +/* release the child lock at level @dep; this lock will listen on its key
166 + * if @event isn't NULL, and event_cb will be called against @lck while
167 + * granting any other lock at level @dep with the same key */
168 +void htree_node_unlock(struct htree_lock *lck, unsigned dep, void *event);
169 +/* stop listening on child lock at level @dep */
170 +void htree_node_stop_listen(struct htree_lock *lck, unsigned dep);
171 +/* for debug */
172 +void htree_lock_stat_print(int depth);
173 +void htree_lock_stat_reset(void);
174 +
175 +#define htree_lock(lck, lh, mode)      htree_lock_try(lck, lh, mode, 1)
176 +#define htree_change_lock(lck, mode)   htree_change_lock_try(lck, mode, 1)
177 +
178 +#define htree_lock_mode(lck)           ((lck)->lk_mode)
179 +
180 +#define htree_node_lock(lck, mode, key, dep)   \
181 +       htree_node_lock_try(lck, mode, key, dep, 1, NULL)
182 +/* this is only safe in thread context of lock owner */
183 +#define htree_node_is_granted(lck, dep)                \
184 +       ((lck)->lk_nodes[dep].ln_mode != HTREE_LOCK_INVAL && \
185 +        (lck)->lk_nodes[dep].ln_mode != HTREE_LOCK_NL)
186 +/* this is only safe in thread context of lock owner */
187 +#define htree_node_is_listening(lck, dep)      \
188 +       ((lck)->lk_nodes[dep].ln_mode == HTREE_LOCK_NL)
189 +
190 +#endif
191 --- /dev/null   2011-12-08 11:16:52.000000000 +0800
192 +++ linux-2.6.32-131.6.1-pdo/fs/ext4/htree_lock.c       2011-12-08 18:18:18.000000000 +0800
193 @@ -0,0 +1,880 @@
194 +/*
195 + * fs/ext4/htree_lock.c
196 + *
197 + * Copyright (c) 2011 Whamcloud, Inc.
198 + *
199 + * Author: Liang Zhen <liang@whamcloud.com>
200 + */
201 +#include <linux/jbd2.h>
202 +#include <linux/hash.h>
203 +#include <linux/module.h>
204 +#include <linux/htree_lock.h>
205 +
206 +enum {
207 +       HTREE_LOCK_BIT_EX       = (1 << HTREE_LOCK_EX),
208 +       HTREE_LOCK_BIT_PW       = (1 << HTREE_LOCK_PW),
209 +       HTREE_LOCK_BIT_PR       = (1 << HTREE_LOCK_PR),
210 +       HTREE_LOCK_BIT_CW       = (1 << HTREE_LOCK_CW),
211 +       HTREE_LOCK_BIT_CR       = (1 << HTREE_LOCK_CR),
212 +};
213 +
214 +enum {
215 +       HTREE_LOCK_COMPAT_EX    = 0,
216 +       HTREE_LOCK_COMPAT_PW    = HTREE_LOCK_COMPAT_EX | HTREE_LOCK_BIT_CR,
217 +       HTREE_LOCK_COMPAT_PR    = HTREE_LOCK_COMPAT_PW | HTREE_LOCK_BIT_PR,
218 +       HTREE_LOCK_COMPAT_CW    = HTREE_LOCK_COMPAT_PW | HTREE_LOCK_BIT_CW,
219 +       HTREE_LOCK_COMPAT_CR    = HTREE_LOCK_COMPAT_CW | HTREE_LOCK_BIT_PR |
220 +                                 HTREE_LOCK_BIT_PW,
221 +};
222 +
223 +static int htree_lock_compat[] = {
224 +       [HTREE_LOCK_EX]         HTREE_LOCK_COMPAT_EX,
225 +       [HTREE_LOCK_PW]         HTREE_LOCK_COMPAT_PW,
226 +       [HTREE_LOCK_PR]         HTREE_LOCK_COMPAT_PR,
227 +       [HTREE_LOCK_CW]         HTREE_LOCK_COMPAT_CW,
228 +       [HTREE_LOCK_CR]         HTREE_LOCK_COMPAT_CR,
229 +};
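+
+/*
+ * The masks above expand to the following compatibility matrix ("+" means
+ * the two modes can be held concurrently); this matches the usual DLM
+ * matrix restricted to the five modes used here:
+ *
+ *         EX   PW   PR   CW   CR
+ *     EX   -    -    -    -    -
+ *     PW   -    -    -    -    +
+ *     PR   -    -    +    -    +
+ *     CW   -    -    -    +    +
+ *     CR   -    +    +    +    +
+ */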
230 +
231 +/* max allowed htree-lock depth.
232 + * We only need depth=3 for ext4 although user can have higher value. */
233 +#define HTREE_LOCK_DEP_MAX     16
234 +
235 +#ifdef HTREE_LOCK_DEBUG
236 +
237 +static char *hl_name[] = {
238 +       [HTREE_LOCK_EX]         "EX",
239 +       [HTREE_LOCK_PW]         "PW",
240 +       [HTREE_LOCK_PR]         "PR",
241 +       [HTREE_LOCK_CW]         "CW",
242 +       [HTREE_LOCK_CR]         "CR",
243 +};
244 +
245 +/* lock stats */
246 +struct htree_lock_node_stats {
247 +       unsigned long long      blocked[HTREE_LOCK_MAX];
248 +       unsigned long long      granted[HTREE_LOCK_MAX];
249 +       unsigned long long      retried[HTREE_LOCK_MAX];
250 +       unsigned long long      events;
251 +};
252 +
253 +struct htree_lock_stats {
254 +       struct htree_lock_node_stats    nodes[HTREE_LOCK_DEP_MAX];
255 +       unsigned long long      granted[HTREE_LOCK_MAX];
256 +       unsigned long long      blocked[HTREE_LOCK_MAX];
257 +};
258 +
259 +static struct htree_lock_stats hl_stats;
260 +
261 +void htree_lock_stat_reset(void)
262 +{
263 +       memset(&hl_stats, 0, sizeof(hl_stats));
264 +}
265 +
266 +void htree_lock_stat_print(int depth)
267 +{
268 +       int     i;
269 +       int     j;
270 +
271 +       printk(KERN_DEBUG "HTREE LOCK STATS:\n");
272 +       for (i = 0; i < HTREE_LOCK_MAX; i++) {
273 +               printk(KERN_DEBUG "[%s]: G [%10llu], B [%10llu]\n",
274 +                      hl_name[i], hl_stats.granted[i], hl_stats.blocked[i]);
275 +       }
276 +       for (i = 0; i < depth; i++) {
277 +               printk(KERN_DEBUG "HTREE CHILD [%d] STATS:\n", i);
278 +               for (j = 0; j < HTREE_LOCK_MAX; j++) {
279 +                       printk(KERN_DEBUG
280 +                               "[%s]: G [%10llu], B [%10llu], R [%10llu]\n",
281 +                               hl_name[j], hl_stats.nodes[i].granted[j],
282 +                               hl_stats.nodes[i].blocked[j],
283 +                               hl_stats.nodes[i].retried[j]);
284 +               }
285 +       }
286 +}
287 +
288 +#define lk_grant_inc(m)       do { hl_stats.granted[m]++; } while (0)
289 +#define lk_block_inc(m)       do { hl_stats.blocked[m]++; } while (0)
290 +#define ln_grant_inc(d, m)    do { hl_stats.nodes[d].granted[m]++; } while (0)
291 +#define ln_block_inc(d, m)    do { hl_stats.nodes[d].blocked[m]++; } while (0)
292 +#define ln_retry_inc(d, m)    do { hl_stats.nodes[d].retried[m]++; } while (0)
293 +#define ln_event_inc(d)       do { hl_stats.nodes[d].events++; } while (0)
294 +
295 +#else /* !DEBUG */
296 +
297 +void htree_lock_stat_reset(void) {}
298 +void htree_lock_stat_print(int depth) {}
299 +
300 +#define lk_grant_inc(m)              do {} while (0)
301 +#define lk_block_inc(m)              do {} while (0)
302 +#define ln_grant_inc(d, m)    do {} while (0)
303 +#define ln_block_inc(d, m)    do {} while (0)
304 +#define ln_retry_inc(d, m)    do {} while (0)
305 +#define ln_event_inc(d)              do {} while (0)
306 +
307 +#endif /* DEBUG */
308 +
309 +EXPORT_SYMBOL(htree_lock_stat_reset);
310 +EXPORT_SYMBOL(htree_lock_stat_print);
311 +
312 +#define HTREE_DEP_ROOT           (-1)
313 +
314 +#define htree_spin_lock(lhead, dep)                            \
315 +       bit_spin_lock((dep) + 1, &(lhead)->lh_lock)
316 +#define htree_spin_unlock(lhead, dep)                          \
317 +       bit_spin_unlock((dep) + 1, &(lhead)->lh_lock)
318 +
319 +#define htree_key_event_ignore(child, ln)                      \
320 +       (!((child)->lc_events & (1 << (ln)->ln_mode)))
321 +
322 +static int
323 +htree_key_list_empty(struct htree_lock_node *ln)
324 +{
325 +       return list_empty(&ln->ln_major_list) && list_empty(&ln->ln_minor_list);
326 +}
327 +
328 +static void
329 +htree_key_list_del_init(struct htree_lock_node *ln)
330 +{
331 +       struct htree_lock_node *tmp = NULL;
332 +
333 +       if (!list_empty(&ln->ln_minor_list)) {
334 +               tmp = list_entry(ln->ln_minor_list.next,
335 +                                struct htree_lock_node, ln_minor_list);
336 +               list_del_init(&ln->ln_minor_list);
337 +       }
338 +
339 +       if (list_empty(&ln->ln_major_list))
340 +               return;
341 +
342 +       if (tmp == NULL) { /* not on minor key list */
343 +               list_del_init(&ln->ln_major_list);
344 +       } else {
345 +               BUG_ON(!list_empty(&tmp->ln_major_list));
346 +               list_replace_init(&ln->ln_major_list, &tmp->ln_major_list);
347 +       }
348 +}
349 +
350 +static void
351 +htree_key_list_replace_init(struct htree_lock_node *old,
352 +                           struct htree_lock_node *new)
353 +{
354 +       if (!list_empty(&old->ln_major_list))
355 +               list_replace_init(&old->ln_major_list, &new->ln_major_list);
356 +
357 +       if (!list_empty(&old->ln_minor_list))
358 +               list_replace_init(&old->ln_minor_list, &new->ln_minor_list);
359 +}
360 +
361 +static void
362 +htree_key_event_enqueue(struct htree_lock_child *child,
363 +                       struct htree_lock_node *ln, int dep, void *event)
364 +{
365 +       struct htree_lock_node *tmp;
366 +
367 +       /* NB: ALWAYS called holding lhead::lh_lock(dep) */
368 +       BUG_ON(ln->ln_mode == HTREE_LOCK_NL);
369 +       if (event == NULL || htree_key_event_ignore(child, ln))
370 +               return;
371 +
372 +       /* shouldn't be a very long list */
373 +       list_for_each_entry(tmp, &ln->ln_alive_list, ln_alive_list) {
374 +               if (tmp->ln_mode == HTREE_LOCK_NL) {
375 +                       ln_event_inc(dep);
376 +                       if (child->lc_callback != NULL)
377 +                               child->lc_callback(tmp->ln_ev_target, event);
378 +               }
379 +       }
380 +}
381 +
382 +static int
383 +htree_node_lock_enqueue(struct htree_lock *newlk, struct htree_lock *curlk,
384 +                       unsigned dep, int wait, void *event)
385 +{
386 +       struct htree_lock_child *child = &newlk->lk_head->lh_children[dep];
387 +       struct htree_lock_node *newln = &newlk->lk_nodes[dep];
388 +       struct htree_lock_node *curln = &curlk->lk_nodes[dep];
389 +
390 +       /* NB: ALWAYS called holding lhead::lh_lock(dep) */
391 +       /* NB: we only expect PR/PW lock modes here; only these two modes are
392 +        * allowed for htree_node_lock (asserted in htree_node_lock_internal).
393 +        * NL is only used for listeners; users can't directly request NL mode */
394 +       if ((curln->ln_mode == HTREE_LOCK_NL) ||
395 +           (curln->ln_mode != HTREE_LOCK_PW &&
396 +            newln->ln_mode != HTREE_LOCK_PW)) {
397 +               /* no conflict, attach it on granted list of @curlk */
398 +               if (curln->ln_mode != HTREE_LOCK_NL) {
399 +                       list_add(&newln->ln_granted_list,
400 +                                &curln->ln_granted_list);
401 +               } else {
402 +                       /* replace key owner */
403 +                       htree_key_list_replace_init(curln, newln);
404 +               }
405 +
406 +               list_add(&newln->ln_alive_list, &curln->ln_alive_list);
407 +               htree_key_event_enqueue(child, newln, dep, event);
408 +               ln_grant_inc(dep, newln->ln_mode);
409 +               return 1; /* still hold lh_lock */
410 +       }
411 +
412 +       if (!wait) { /* can't grant and don't want to wait */
413 +               ln_retry_inc(dep, newln->ln_mode);
414 +               newln->ln_mode = HTREE_LOCK_INVAL;
415 +               return -1; /* don't wait and just return -1 */
416 +       }
417 +
418 +       newlk->lk_task = current;
419 +       set_current_state(TASK_UNINTERRUPTIBLE);
420 +       /* conflict, attach it on blocked list of curlk */
421 +       list_add_tail(&newln->ln_blocked_list, &curln->ln_blocked_list);
422 +       list_add(&newln->ln_alive_list, &curln->ln_alive_list);
423 +       ln_block_inc(dep, newln->ln_mode);
424 +
425 +       htree_spin_unlock(newlk->lk_head, dep);
426 +       /* wait to be given the lock */
427 +       if (newlk->lk_task != NULL)
428 +               schedule();
429 +       /* granted, no doubt, wake up will set me RUNNING */
430 +       if (event == NULL || htree_key_event_ignore(child, newln))
431 +               return 0; /* granted without lh_lock */
432 +
433 +       htree_spin_lock(newlk->lk_head, dep);
434 +       htree_key_event_enqueue(child, newln, dep, event);
435 +       return 1; /* still hold lh_lock */
436 +}
437 +
438 +/*
439 + * get PR/PW access to particular tree-node according to @dep and @key,
440 + * it will return -1 if @wait is false and can't immediately grant this lock.
441 + * All listeners(HTREE_LOCK_NL) on @dep and with the same @key will get
442 + * @event if it's not NULL.
443 + * NB: ALWAYS called holding lhead::lh_lock
444 + */
445 +static int
446 +htree_node_lock_internal(struct htree_lock_head *lhead, struct htree_lock *lck,
447 +                        htree_lock_mode_t mode, u32 key, unsigned dep,
448 +                        int wait, void *event)
449 +{
450 +       LIST_HEAD               (list);
451 +       struct htree_lock       *tmp;
452 +       struct htree_lock       *tmp2;
453 +       u16                     major;
454 +       u16                     minor;
455 +       u8                      reverse;
456 +       u8                      ma_bits;
457 +       u8                      mi_bits;
458 +
459 +       BUG_ON(mode != HTREE_LOCK_PW && mode != HTREE_LOCK_PR);
460 +       BUG_ON(htree_node_is_granted(lck, dep));
461 +
462 +       key = hash_long(key, lhead->lh_hbits);
463 +
464 +       mi_bits = lhead->lh_hbits >> 1;
465 +       ma_bits = lhead->lh_hbits - mi_bits;
466 +
467 +       lck->lk_nodes[dep].ln_major_key = major = key & ((1U << ma_bits) - 1);
468 +       lck->lk_nodes[dep].ln_minor_key = minor = key >> ma_bits;
469 +       lck->lk_nodes[dep].ln_mode = mode;
470 +
471 +       /*
472 +        * The major key list is an ordered list, so searches are started
473 +        * at the end of the list that is numerically closer to major_key,
474 +        * so at most half of the list will be walked (for well-distributed
475 +        * keys). The list traversal aborts early if the expected key
476 +        * location is passed.
477 +        */
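+       /*
+        * As a worked example (the values are illustrative only): with
+        * lh_hbits = 14 (HTREE_HBITS_DEF) we get ma_bits = mi_bits = 7, so a
+        * key hashed to 0x2a5c splits into major = 0x2a5c & 0x7f = 0x5c and
+        * minor = 0x2a5c >> 7 = 0x54; since 0x5c >= 0x40 (half of the 7-bit
+        * major space) the major list is searched in reverse.
+        */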
478 +       reverse = (major >= (1 << (ma_bits - 1)));
479 +
480 +       if (reverse) {
481 +               list_for_each_entry_reverse(tmp,
482 +                                       &lhead->lh_children[dep].lc_list,
483 +                                       lk_nodes[dep].ln_major_list) {
484 +                       if (tmp->lk_nodes[dep].ln_major_key == major) {
485 +                               goto search_minor;
486 +
487 +                       } else if (tmp->lk_nodes[dep].ln_major_key < major) {
488 +                               /* attach _after_ @tmp */
489 +                               list_add(&lck->lk_nodes[dep].ln_major_list,
490 +                                        &tmp->lk_nodes[dep].ln_major_list);
491 +                               goto out_grant_major;
492 +                       }
493 +               }
494 +
495 +               list_add(&lck->lk_nodes[dep].ln_major_list,
496 +                        &lhead->lh_children[dep].lc_list);
497 +               goto out_grant_major;
498 +
499 +       } else {
500 +               list_for_each_entry(tmp, &lhead->lh_children[dep].lc_list,
501 +                                   lk_nodes[dep].ln_major_list) {
502 +                       if (tmp->lk_nodes[dep].ln_major_key == major) {
503 +                               goto search_minor;
504 +
505 +                       } else if (tmp->lk_nodes[dep].ln_major_key > major) {
506 +                               /* insert _before_ @tmp */
507 +                               list_add_tail(&lck->lk_nodes[dep].ln_major_list,
508 +                                       &tmp->lk_nodes[dep].ln_major_list);
509 +                               goto out_grant_major;
510 +                       }
511 +               }
512 +
513 +               list_add_tail(&lck->lk_nodes[dep].ln_major_list,
514 +                             &lhead->lh_children[dep].lc_list);
515 +               goto out_grant_major;
516 +       }
517 +
518 + search_minor:
519 +       /*
520 +        * NB: minor_key list doesn't have a "head", @list is just a
521 +        * temporary stub for helping list searching, make sure it's removed
522 +        * after searching.
523 +        * minor_key list is an ordered list too.
524 +        */
525 +       list_add_tail(&list, &tmp->lk_nodes[dep].ln_minor_list);
526 +
527 +       reverse = (minor >= (1 << (mi_bits - 1)));
528 +
529 +       if (reverse) {
530 +               list_for_each_entry_reverse(tmp2, &list,
531 +                                           lk_nodes[dep].ln_minor_list) {
532 +                       if (tmp2->lk_nodes[dep].ln_minor_key == minor) {
533 +                               goto out_enqueue;
534 +
535 +                       } else if (tmp2->lk_nodes[dep].ln_minor_key < minor) {
536 +                               /* attach _after_ @tmp2 */
537 +                               list_add(&lck->lk_nodes[dep].ln_minor_list,
538 +                                        &tmp2->lk_nodes[dep].ln_minor_list);
539 +                               goto out_grant_minor;
540 +                       }
541 +               }
542 +
543 +               list_add(&lck->lk_nodes[dep].ln_minor_list, &list);
544 +
545 +       } else {
546 +               list_for_each_entry(tmp2, &list,
547 +                                   lk_nodes[dep].ln_minor_list) {
548 +                       if (tmp2->lk_nodes[dep].ln_minor_key == minor) {
549 +                               goto out_enqueue;
550 +
551 +                       } else if (tmp2->lk_nodes[dep].ln_minor_key > minor) {
552 +                               /* insert _before_ @tmp2 */
553 +                               list_add_tail(&lck->lk_nodes[dep].ln_minor_list,
554 +                                       &tmp2->lk_nodes[dep].ln_minor_list);
555 +                               goto out_grant_minor;
556 +                       }
557 +               }
558 +
559 +               list_add_tail(&lck->lk_nodes[dep].ln_minor_list, &list);
560 +       }
561 +
562 + out_grant_minor:
563 +       if (list.next == &lck->lk_nodes[dep].ln_minor_list) {
564 +               /* new lock @lck is the first one on minor_key list, which
565 +                * means it has the smallest minor_key and it should
566 +                * replace @tmp as minor_key owner */
567 +               list_replace_init(&tmp->lk_nodes[dep].ln_major_list,
568 +                                 &lck->lk_nodes[dep].ln_major_list);
569 +       }
570 +       /* remove the temporary head */
571 +       list_del(&list);
572 +
573 + out_grant_major:
574 +       ln_grant_inc(dep, lck->lk_nodes[dep].ln_mode);
575 +       return 1; /* granted with holding lh_lock */
576 +
577 + out_enqueue:
578 +       list_del(&list); /* remove temporary head */
579 +       return htree_node_lock_enqueue(lck, tmp2, dep, wait, event);
580 +}
581 +
582 +/*
583 + * release the key of @lck at level @dep, and grant any blocked locks.
584 + * caller will still listen on @key if @event is not NULL, which means
585 + * caller can see an event (by event_cb) while granting any lock with
586 + * the same key at level @dep.
587 + * NB: ALWAYS called holding lhead::lh_lock
588 + * NB: listener will not block anyone because listening mode is HTREE_LOCK_NL
589 + */
590 +static void
591 +htree_node_unlock_internal(struct htree_lock_head *lhead,
592 +                          struct htree_lock *curlk, unsigned dep, void *event)
593 +{
594 +       struct htree_lock_node  *curln = &curlk->lk_nodes[dep];
595 +       struct htree_lock       *grtlk = NULL;
596 +       struct htree_lock_node  *grtln;
597 +       struct htree_lock       *poslk;
598 +       struct htree_lock       *tmplk;
599 +
600 +       if (!htree_node_is_granted(curlk, dep))
601 +               return;
602 +
603 +       if (!list_empty(&curln->ln_granted_list)) {
604 +               /* there is another granted lock */
605 +               grtlk = list_entry(curln->ln_granted_list.next,
606 +                                  struct htree_lock,
607 +                                  lk_nodes[dep].ln_granted_list);
608 +               list_del_init(&curln->ln_granted_list);
609 +       }
610 +
611 +       if (grtlk == NULL && !list_empty(&curln->ln_blocked_list)) {
612 +               /*
613 +                * @curlk is the only granted lock, so we confirmed:
614 +                * a) curln is key owner (attached on major/minor_list),
615 +                *    so if there is any blocked lock, it should be attached
616 +                *    on curln->ln_blocked_list
617 +                * b) we always can grant the first blocked lock
618 +                */
619 +               grtlk = list_entry(curln->ln_blocked_list.next,
620 +                                  struct htree_lock,
621 +                                  lk_nodes[dep].ln_blocked_list);
622 +               BUG_ON(grtlk->lk_task == NULL);
623 +               wake_up_process(grtlk->lk_task);
624 +       }
625 +
626 +       if (event != NULL &&
627 +           lhead->lh_children[dep].lc_events != HTREE_EVENT_DISABLE) {
628 +               curln->ln_ev_target = event;
629 +               curln->ln_mode = HTREE_LOCK_NL; /* listen! */
630 +       } else {
631 +               curln->ln_mode = HTREE_LOCK_INVAL;
632 +       }
633 +
634 +       if (grtlk == NULL) { /* I must be the only one locking this key */
635 +               struct htree_lock_node *tmpln;
636 +
637 +               BUG_ON(htree_key_list_empty(curln));
638 +
639 +               if (curln->ln_mode == HTREE_LOCK_NL) /* listening */
640 +                       return;
641 +
642 +               /* not listening */
643 +               if (list_empty(&curln->ln_alive_list)) { /* no more listener */
644 +                       htree_key_list_del_init(curln);
645 +                       return;
646 +               }
647 +
648 +               tmpln = list_entry(curln->ln_alive_list.next,
649 +                                  struct htree_lock_node, ln_alive_list);
650 +
651 +               BUG_ON(tmpln->ln_mode != HTREE_LOCK_NL);
652 +
653 +               htree_key_list_replace_init(curln, tmpln);
654 +               list_del_init(&curln->ln_alive_list);
655 +
656 +               return;
657 +       }
658 +
659 +       /* have a granted lock */
660 +       grtln = &grtlk->lk_nodes[dep];
661 +       if (!list_empty(&curln->ln_blocked_list)) {
662 +               /* only the key owner can be on both lists */
663 +               BUG_ON(htree_key_list_empty(curln));
664 +
665 +               if (list_empty(&grtln->ln_blocked_list)) {
666 +                       list_add(&grtln->ln_blocked_list,
667 +                                &curln->ln_blocked_list);
668 +               }
669 +               list_del_init(&curln->ln_blocked_list);
670 +       }
671 +       /*
672 +        * NB: this is the tricky part:
673 +        * We have only two modes for child-lock (PR and PW), also,
674 +        * only owner of the key (attached on major/minor_list) can be on
675 +        * both blocked_list and granted_list, so @grtlk must be one
676 +        * of these two cases:
677 +        *
678 +        * a) @grtlk is taken from granted_list, which means we've granted
679 +        *    more than one lock so @grtlk has to be PR, the first blocked
680 +        *    lock must be PW and we can't grant it at all.
681 +        *    So even if @grtlk is not the owner of the key (empty blocked_list),
682 +        *    we don't care because we can't grant any lock.
683 +        * b) we just grant a new lock which is taken from head of blocked
684 +        *    list, and it should be the first granted lock, and it should
685 +        *    be the first one linked on blocked_list.
686 +        *
687 +        * Either way, we get the correct result by iterating the blocked_list
688 +        * of @grtlk, and don't have to bother finding out the owner of the
689 +        * current key.
690 +        */
691 +       list_for_each_entry_safe(poslk, tmplk, &grtln->ln_blocked_list,
692 +                                lk_nodes[dep].ln_blocked_list) {
693 +               if (grtlk->lk_nodes[dep].ln_mode == HTREE_LOCK_PW ||
694 +                   poslk->lk_nodes[dep].ln_mode == HTREE_LOCK_PW)
695 +                       break;
696 +               /* grant all readers */
697 +               list_del_init(&poslk->lk_nodes[dep].ln_blocked_list);
698 +               list_add(&poslk->lk_nodes[dep].ln_granted_list,
699 +                        &grtln->ln_granted_list);
700 +
701 +               BUG_ON(poslk->lk_task == NULL);
702 +               wake_up_process(poslk->lk_task);
703 +       }
704 +
705 +       /* if @curln is the owner of this key, replace it with @grtln */
706 +       if (!htree_key_list_empty(curln))
707 +               htree_key_list_replace_init(curln, grtln);
708 +
709 +       if (curln->ln_mode == HTREE_LOCK_INVAL)
710 +               list_del_init(&curln->ln_alive_list);
711 +}
712 +
713 +/*
714 + * it's just a wrapper of htree_node_lock_internal; it returns 1 when granted
715 + * and 0 only if @wait is false and the lock can't be granted immediately
716 + */
717 +int
718 +htree_node_lock_try(struct htree_lock *lck, htree_lock_mode_t mode,
719 +                   u32 key, unsigned dep, int wait, void *event)
720 +{
721 +       struct htree_lock_head *lhead = lck->lk_head;
722 +       int rc;
723 +
724 +       BUG_ON(dep >= lck->lk_depth);
725 +       BUG_ON(lck->lk_mode == HTREE_LOCK_INVAL);
726 +
727 +       htree_spin_lock(lhead, dep);
728 +       rc = htree_node_lock_internal(lhead, lck, mode, key, dep, wait, event);
729 +       if (rc != 0)
730 +               htree_spin_unlock(lhead, dep);
731 +       return rc >= 0;
732 +}
733 +EXPORT_SYMBOL(htree_node_lock_try);
734 +
735 +/* it's a wrapper of htree_node_unlock_internal */
736 +void
737 +htree_node_unlock(struct htree_lock *lck, unsigned dep, void *event)
738 +{
739 +       struct htree_lock_head *lhead = lck->lk_head;
740 +
741 +       BUG_ON(dep >= lck->lk_depth);
742 +       BUG_ON(lck->lk_mode == HTREE_LOCK_INVAL);
743 +
744 +       htree_spin_lock(lhead, dep);
745 +       htree_node_unlock_internal(lhead, lck, dep, event);
746 +       htree_spin_unlock(lhead, dep);
747 +}
748 +EXPORT_SYMBOL(htree_node_unlock);
749 +
750 +/* stop listening on child-lock level @dep */
751 +void
752 +htree_node_stop_listen(struct htree_lock *lck, unsigned dep)
753 +{
754 +       struct htree_lock_node *ln = &lck->lk_nodes[dep];
755 +       struct htree_lock_node *tmp;
756 +
757 +       BUG_ON(htree_node_is_granted(lck, dep));
758 +       BUG_ON(!list_empty(&ln->ln_blocked_list));
759 +       BUG_ON(!list_empty(&ln->ln_granted_list));
760 +
761 +       if (!htree_node_is_listening(lck, dep))
762 +               return;
763 +
764 +       htree_spin_lock(lck->lk_head, dep);
765 +       ln->ln_mode = HTREE_LOCK_INVAL;
766 +       ln->ln_ev_target = NULL;
767 +
768 +       if (htree_key_list_empty(ln)) { /* not owner */
769 +               list_del_init(&ln->ln_alive_list);
770 +               goto out;
771 +       }
772 +
773 +       /* I'm the owner... */
774 +       if (list_empty(&ln->ln_alive_list)) { /* no more listener */
775 +               htree_key_list_del_init(ln);
776 +               goto out;
777 +       }
778 +
779 +       tmp = list_entry(ln->ln_alive_list.next,
780 +                        struct htree_lock_node, ln_alive_list);
781 +
782 +       BUG_ON(tmp->ln_mode != HTREE_LOCK_NL);
783 +       htree_key_list_replace_init(ln, tmp);
784 +       list_del_init(&ln->ln_alive_list);
785 + out:
786 +       htree_spin_unlock(lck->lk_head, dep);
787 +}
788 +EXPORT_SYMBOL(htree_node_stop_listen);
789 +
790 +/* release all child-locks if we have any */
791 +static void
792 +htree_node_release_all(struct htree_lock *lck)
793 +{
794 +       int     i;
795 +
796 +       for (i = 0; i < lck->lk_depth; i++) {
797 +               if (htree_node_is_granted(lck, i))
798 +                       htree_node_unlock(lck, i, NULL);
799 +               else if (htree_node_is_listening(lck, i))
800 +                       htree_node_stop_listen(lck, i);
801 +       }
802 +}
803 +
804 +/*
805 + * obtain htree lock, it could be blocked inside if there's conflict
806 + * with any granted or blocked lock and @wait is true.
807 + * NB: ALWAYS called holding lhead::lh_lock
808 + */
809 +static int
810 +htree_lock_internal(struct htree_lock *lck, int wait)
811 +{
812 +       struct htree_lock_head *lhead = lck->lk_head;
813 +       int     granted = 0;
814 +       int     blocked = 0;
815 +       int     i;
816 +
817 +       for (i = 0; i < HTREE_LOCK_MAX; i++) {
818 +               if (lhead->lh_ngranted[i] != 0)
819 +                       granted |= 1 << i;
820 +               if (lhead->lh_nblocked[i] != 0)
821 +                       blocked |= 1 << i;
822 +       }
823 +       if ((htree_lock_compat[lck->lk_mode] & granted) != granted ||
824 +           (htree_lock_compat[lck->lk_mode] & blocked) != blocked) {
825 +               /* block the current lock even if it only conflicts with
826 +                * another blocked lock, so locks like EX won't starve */
827 +               if (!wait)
828 +                       return -1;
829 +               lhead->lh_nblocked[lck->lk_mode]++;
830 +               lk_block_inc(lck->lk_mode);
831 +
832 +               lck->lk_task = current;
833 +               list_add_tail(&lck->lk_blocked_list, &lhead->lh_blocked_list);
834 +
835 +               set_current_state(TASK_UNINTERRUPTIBLE);
836 +               htree_spin_unlock(lhead, HTREE_DEP_ROOT);
837 +               /* wait to be given the lock */
838 +               if (lck->lk_task != NULL)
839 +                       schedule();
840 +               /* granted, no doubt. wake up will set me RUNNING */
841 +               return 0; /* without lh_lock */
842 +       }
843 +       lhead->lh_ngranted[lck->lk_mode]++;
844 +       lk_grant_inc(lck->lk_mode);
845 +       return 1;
846 +}
847 +
848 +/* release htree lock. NB: ALWAYS called holding lhead::lh_lock */
849 +static void
850 +htree_unlock_internal(struct htree_lock *lck)
851 +{
852 +       struct htree_lock_head *lhead = lck->lk_head;
853 +       struct htree_lock *tmp;
854 +       struct htree_lock *tmp2;
855 +       int granted = 0;
856 +       int i;
857 +
858 +       BUG_ON(lhead->lh_ngranted[lck->lk_mode] == 0);
859 +
860 +       lhead->lh_ngranted[lck->lk_mode]--;
861 +       lck->lk_mode = HTREE_LOCK_INVAL;
862 +
863 +       for (i = 0; i < HTREE_LOCK_MAX; i++) {
864 +               if (lhead->lh_ngranted[i] != 0)
865 +                       granted |= 1 << i;
866 +       }
867 +       list_for_each_entry_safe(tmp, tmp2,
868 +                                &lhead->lh_blocked_list, lk_blocked_list) {
869 +               /* conflict with any granted lock? */
870 +               if ((htree_lock_compat[tmp->lk_mode] & granted) != granted)
871 +                       break;
872 +
873 +               list_del_init(&tmp->lk_blocked_list);
874 +
875 +               BUG_ON(lhead->lh_nblocked[tmp->lk_mode] == 0);
876 +
877 +               lhead->lh_nblocked[tmp->lk_mode]--;
878 +               lhead->lh_ngranted[tmp->lk_mode]++;
879 +               granted |= 1 << tmp->lk_mode;
880 +
881 +               BUG_ON(tmp->lk_task == NULL);
882 +               wake_up_process(tmp->lk_task);
883 +       }
884 +}
885 +
886 +/* it's a wrapper of htree_lock_internal and an exported interface.
887 + * It always returns 1 with the lock granted if @wait is true; it can return
888 + * 0 if @wait is false and the locking request can't be granted immediately */
889 +int
890 +htree_lock_try(struct htree_lock *lck, struct htree_lock_head *lhead,
891 +              htree_lock_mode_t mode, int wait)
892 +{
893 +       int     rc;
894 +
895 +       BUG_ON(lck->lk_depth > lhead->lh_depth);
896 +       BUG_ON(lck->lk_head != NULL);
897 +       BUG_ON(lck->lk_task != NULL);
898 +
899 +       lck->lk_head = lhead;
900 +       lck->lk_mode = mode;
901 +
902 +       htree_spin_lock(lhead, HTREE_DEP_ROOT);
903 +       rc = htree_lock_internal(lck, wait);
904 +       if (rc != 0)
905 +               htree_spin_unlock(lhead, HTREE_DEP_ROOT);
906 +       return rc >= 0;
907 +}
908 +EXPORT_SYMBOL(htree_lock_try);
909 +
910 +/* it's a wrapper of htree_unlock_internal and an exported interface.
911 + * It will release all htree_node_locks and the htree_lock */
912 +void
913 +htree_unlock(struct htree_lock *lck)
914 +{
915 +       BUG_ON(lck->lk_head == NULL);
916 +       BUG_ON(lck->lk_mode == HTREE_LOCK_INVAL);
917 +
918 +       htree_node_release_all(lck);
919 +
920 +       htree_spin_lock(lck->lk_head, HTREE_DEP_ROOT);
921 +       htree_unlock_internal(lck);
922 +       htree_spin_unlock(lck->lk_head, HTREE_DEP_ROOT);
923 +       lck->lk_head = NULL;
924 +       lck->lk_task = NULL;
925 +}
926 +EXPORT_SYMBOL(htree_unlock);
927 +
928 +/* change lock mode */
929 +void
930 +htree_change_mode(struct htree_lock *lck, htree_lock_mode_t mode)
931 +{
932 +       BUG_ON(lck->lk_mode == HTREE_LOCK_INVAL);
933 +       lck->lk_mode = mode;
934 +}
935 +EXPORT_SYMBOL(htree_change_mode);
936 +
937 +/* release htree lock, and lock it again with new mode.
938 + * This function will first release all htree_node_locks and htree_lock,
939 + * then try to gain htree_lock with new @mode.
940 + * It always returns 1 with the lock granted if @wait is true; it can return
941 + * 0 if @wait is false and the locking request can't be granted immediately */
942 +int
943 +htree_change_lock_try(struct htree_lock *lck, htree_lock_mode_t mode, int wait)
944 +{
945 +       struct htree_lock_head *lhead = lck->lk_head;
946 +       int rc;
947 +
948 +       BUG_ON(lhead == NULL);
949 +       BUG_ON(lck->lk_mode == mode);
950 +       BUG_ON(lck->lk_mode == HTREE_LOCK_INVAL || mode == HTREE_LOCK_INVAL);
951 +
952 +       htree_node_release_all(lck);
953 +
954 +       htree_spin_lock(lhead, HTREE_DEP_ROOT);
955 +       htree_unlock_internal(lck);
956 +       lck->lk_mode = mode;
957 +       rc = htree_lock_internal(lck, wait);
958 +       if (rc != 0)
959 +               htree_spin_unlock(lhead, HTREE_DEP_ROOT);
960 +       return rc >= 0;
961 +}
962 +EXPORT_SYMBOL(htree_change_lock_try);
963 +
964 +/* create a htree_lock head with @depth levels (number of child-locks),
965 + * it is a per-resource structure */
966 +struct htree_lock_head *
967 +htree_lock_head_alloc(unsigned depth, unsigned hbits, unsigned priv)
968 +{
969 +       struct htree_lock_head *lhead;
970 +       int  i;
971 +
972 +       if (depth > HTREE_LOCK_DEP_MAX) {
973 +               printk(KERN_ERR "%d is larger than max htree_lock depth %d\n",
974 +                       depth, HTREE_LOCK_DEP_MAX);
975 +               return NULL;
976 +       }
977 +
978 +       lhead = kzalloc(offsetof(struct htree_lock_head,
979 +                                lh_children[depth]) + priv, GFP_NOFS);
980 +       if (lhead == NULL)
981 +               return NULL;
982 +
983 +       lhead->lh_hbits = hbits;
984 +       if (hbits < HTREE_HBITS_MIN)
985 +               lhead->lh_hbits = HTREE_HBITS_MIN;
986 +       else if (hbits > HTREE_HBITS_MAX)
987 +               lhead->lh_hbits = HTREE_HBITS_MAX;
988 +       lhead->lh_lock = 0;
989 +       lhead->lh_depth = depth;
990 +       INIT_LIST_HEAD(&lhead->lh_blocked_list);
991 +       if (priv > 0) {
992 +               lhead->lh_private = (void *)lhead +
993 +                       offsetof(struct htree_lock_head, lh_children[depth]);
994 +       }
995 +
996 +       for (i = 0; i < depth; i++) {
997 +               INIT_LIST_HEAD(&lhead->lh_children[i].lc_list);
998 +               lhead->lh_children[i].lc_events = HTREE_EVENT_DISABLE;
999 +       }
1000 +       return lhead;
1001 +}
1002 +EXPORT_SYMBOL(htree_lock_head_alloc);
1003 +
1004 +/* free the htree_lock head */
1005 +void
1006 +htree_lock_head_free(struct htree_lock_head *lhead)
1007 +{
1008 +       int     i;
1009 +
1010 +       BUG_ON(!list_empty(&lhead->lh_blocked_list));
1011 +       for (i = 0; i < lhead->lh_depth; i++)
1012 +               BUG_ON(!list_empty(&lhead->lh_children[i].lc_list));
1013 +       kfree(lhead);
1014 +}
1015 +EXPORT_SYMBOL(htree_lock_head_free);
1016 +
1017 +/* register event callback for @events of child-lock at level @dep */
1018 +void
1019 +htree_lock_event_attach(struct htree_lock_head *lhead, unsigned dep,
1020 +                       unsigned events, htree_event_cb_t callback)
1021 +{
1022 +       BUG_ON(lhead->lh_depth <= dep);
1023 +       lhead->lh_children[dep].lc_events = events;
1024 +       lhead->lh_children[dep].lc_callback = callback;
1025 +}
1026 +EXPORT_SYMBOL(htree_lock_event_attach);
1027 +
1028 +/* allocate a htree_lock, which is a per-thread structure; @pbytes is the
1029 + * number of extra bytes of private data for the caller */
1030 +struct htree_lock *
1031 +htree_lock_alloc(unsigned depth, unsigned pbytes)
1032 +{
1033 +       struct htree_lock *lck;
1034 +       int i = offsetof(struct htree_lock, lk_nodes[depth]);
1035 +
1036 +       if (depth > HTREE_LOCK_DEP_MAX) {
1037 +               printk(KERN_ERR "%d is larger than max htree_lock depth %d\n",
1038 +                       depth, HTREE_LOCK_DEP_MAX);
1039 +               return NULL;
1040 +       }
1041 +       lck = kzalloc(i + pbytes, GFP_NOFS);
1042 +       if (lck == NULL)
1043 +               return NULL;
1044 +
1045 +       if (pbytes != 0)
1046 +               lck->lk_private = (void *)lck + i;
1047 +       lck->lk_mode = HTREE_LOCK_INVAL;
1048 +       lck->lk_depth = depth;
1049 +       INIT_LIST_HEAD(&lck->lk_blocked_list);
1050 +
1051 +       for (i = 0; i < depth; i++) {
1052 +               struct htree_lock_node *node = &lck->lk_nodes[i];
1053 +
1054 +               node->ln_mode = HTREE_LOCK_INVAL;
1055 +               INIT_LIST_HEAD(&node->ln_major_list);
1056 +               INIT_LIST_HEAD(&node->ln_minor_list);
1057 +               INIT_LIST_HEAD(&node->ln_alive_list);
1058 +               INIT_LIST_HEAD(&node->ln_blocked_list);
1059 +               INIT_LIST_HEAD(&node->ln_granted_list);
1060 +       }
1061 +
1062 +       return lck;
1063 +}
1064 +EXPORT_SYMBOL(htree_lock_alloc);
1065 +
1066 +/* free htree_lock node */
1067 +void
1068 +htree_lock_free(struct htree_lock *lck)
1069 +{
1070 +       BUG_ON(lck->lk_mode != HTREE_LOCK_INVAL);
1071 +       kfree(lck);
1072 +}
1073 +EXPORT_SYMBOL(htree_lock_free);
1074 --- linux-2.6.32-131.6.1/fs/ext4/ext4.h 2011-10-06 20:10:49.000000000 +0800
1075 +++ linux-2.6.32-131.6.1-pdo/fs/ext4/ext4.h     2011-12-08 18:25:00.000000000 +0800
1076 @@ -28,6 +28,7 @@
1077  #include <linux/mutex.h>
1078  #include <linux/timer.h>
1079  #include <linux/wait.h>
1080 +#include <linux/htree_lock.h>
1081  #include <linux/blockgroup_lock.h>
1082  #include <linux/percpu_counter.h>
1083  #ifdef __KERNEL__
1084 @@ -1277,6 +1278,7 @@ EXT4_INODE_BIT_FNS(state, state_flags)
1085  #define EXT4_FEATURE_INCOMPAT_MMP               0x0100
1086  #define EXT4_FEATURE_INCOMPAT_FLEX_BG          0x0200
1087  #define EXT4_FEATURE_INCOMPAT_DIRDATA          0x1000
1088 +#define EXT4_FEATURE_INCOMPAT_LARGEDIR         0x4000
1089  
1090  #define EXT4_FEATURE_COMPAT_SUPP       EXT2_FEATURE_COMPAT_EXT_ATTR
1091  #define EXT4_FEATURE_INCOMPAT_SUPP     (EXT4_FEATURE_INCOMPAT_FILETYPE| \
1092 @@ -1286,7 +1288,8 @@ EXT4_INODE_BIT_FNS(state, state_flags)
1093                                          EXT4_FEATURE_INCOMPAT_64BIT| \
1094                                          EXT4_FEATURE_INCOMPAT_FLEX_BG| \
1095                                          EXT4_FEATURE_INCOMPAT_MMP| \
1096 -                                        EXT4_FEATURE_INCOMPAT_DIRDATA)
1097 +                                        EXT4_FEATURE_INCOMPAT_DIRDATA| \
1098 +                                        EXT4_FEATURE_INCOMPAT_LARGEDIR)
1099  
1100  #define EXT4_FEATURE_RO_COMPAT_SUPP    (EXT4_FEATURE_RO_COMPAT_SPARSE_SUPER| \
1101                                          EXT4_FEATURE_RO_COMPAT_LARGE_FILE| \
1102 @@ -1536,6 +1539,76 @@ ext4_group_first_block_no(struct super_b
1103   */
1104  #define ERR_BAD_DX_DIR -75000
1105  
1106 +/* htree levels for ext4 */
1107 +#define EXT4_HTREE_LEVEL_COMPAT 2
1108 +#define EXT4_HTREE_LEVEL       3
1109 +
1110 +static inline int
1111 +ext4_dir_htree_level(struct super_block *sb)
1112 +{
1113 +       return EXT4_HAS_INCOMPAT_FEATURE(sb, EXT4_FEATURE_INCOMPAT_LARGEDIR) ?
1114 +               EXT4_HTREE_LEVEL : EXT4_HTREE_LEVEL_COMPAT;
1115 +}
1116 +
1117 +/* assume name-hash is protected by upper layer */
1118 +#define EXT4_HTREE_LOCK_HASH   0
1119 +
1120 +enum ext4_pdo_lk_types {
1121 +#if EXT4_HTREE_LOCK_HASH
1122 +       EXT4_LK_HASH,
1123 +#endif
1124 +       EXT4_LK_DX,             /* index block */
1125 +       EXT4_LK_DE,             /* directory entry block */
1126 +       EXT4_LK_SPIN,           /* spinlock */
1127 +       EXT4_LK_MAX,
1128 +};
1129 +
1130 +/* read-only bit */
1131 +#define EXT4_LB_RO(b)          (1 << (b))
1132 +/* read + write, high bits for writer */
1133 +#define EXT4_LB_RW(b)          ((1 << (b)) | (1 << (EXT4_LK_MAX + (b))))
1134 +
1135 +enum ext4_pdo_lock_bits {
1136 +       /* DX lock bits */
1137 +       EXT4_LB_DX_RO           = EXT4_LB_RO(EXT4_LK_DX),
1138 +       EXT4_LB_DX              = EXT4_LB_RW(EXT4_LK_DX),
1139 +       /* DE lock bits */
1140 +       EXT4_LB_DE_RO           = EXT4_LB_RO(EXT4_LK_DE),
1141 +       EXT4_LB_DE              = EXT4_LB_RW(EXT4_LK_DE),
1142 +       /* DX spinlock bits */
1143 +       EXT4_LB_SPIN_RO         = EXT4_LB_RO(EXT4_LK_SPIN),
1144 +       EXT4_LB_SPIN            = EXT4_LB_RW(EXT4_LK_SPIN),
1145 +       /* accurate searching */
1146 +       EXT4_LB_EXACT           = EXT4_LB_RO(EXT4_LK_MAX << 1),
1147 +};
1148 +
1149 +enum ext4_pdo_lock_opc {
1150 +       /* external */
1151 +       EXT4_HLOCK_READDIR      = (EXT4_LB_DE_RO | EXT4_LB_DX_RO),
1152 +       EXT4_HLOCK_LOOKUP       = (EXT4_LB_DE_RO | EXT4_LB_SPIN_RO |
1153 +                                  EXT4_LB_EXACT),
1154 +       EXT4_HLOCK_DEL          = (EXT4_LB_DE | EXT4_LB_SPIN_RO |
1155 +                                  EXT4_LB_EXACT),
1156 +       EXT4_HLOCK_ADD          = (EXT4_LB_DE | EXT4_LB_SPIN_RO),
1157 +
1158 +       /* internal */
1159 +       EXT4_HLOCK_LOOKUP_SAFE  = (EXT4_LB_DE_RO | EXT4_LB_DX_RO |
1160 +                                  EXT4_LB_EXACT),
1161 +       EXT4_HLOCK_DEL_SAFE     = (EXT4_LB_DE | EXT4_LB_DX_RO | EXT4_LB_EXACT),
1162 +       EXT4_HLOCK_SPLIT        = (EXT4_LB_DE | EXT4_LB_DX | EXT4_LB_SPIN),
1163 +};
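+
+/*
+ * With EXT4_HTREE_LOCK_HASH defined to 0 above (so EXT4_LK_DX = 0,
+ * EXT4_LK_DE = 1, EXT4_LK_SPIN = 2 and EXT4_LK_MAX = 3), the bits above
+ * work out to the following values (an illustrative expansion only):
+ *
+ *     EXT4_LB_DX_RO   = 0x01          EXT4_LB_DX   = 0x09
+ *     EXT4_LB_DE_RO   = 0x02          EXT4_LB_DE   = 0x12
+ *     EXT4_LB_SPIN_RO = 0x04          EXT4_LB_SPIN = 0x24
+ *     EXT4_LB_EXACT   = 0x40
+ *
+ * so, for example, EXT4_HLOCK_LOOKUP = 0x02 | 0x04 | 0x40 = 0x46.
+ */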
1164 +
1165 +extern struct htree_lock_head *ext4_htree_lock_head_alloc(unsigned hbits);
1166 +#define ext4_htree_lock_head_free(lhead)       htree_lock_head_free(lhead)
1167 +
1168 +extern struct htree_lock *ext4_htree_lock_alloc(void);
1169 +#define ext4_htree_lock_free(lck)              htree_lock_free(lck)
1170 +
1171 +extern void ext4_htree_lock(struct htree_lock *lck,
1172 +                           struct htree_lock_head *lhead,
1173 +                           struct inode *dir, unsigned flags);
1174 +#define ext4_htree_unlock(lck)                  htree_unlock(lck)
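+
+/*
+ * A minimal calling sequence (a sketch only: error handling is omitted, and
+ * "dir", "dentry", "bh", "de" and "lhead" are assumed to exist in the
+ * caller, with "lhead" allocated via ext4_htree_lock_head_alloc()):
+ *
+ *     struct htree_lock *lck = ext4_htree_lock_alloc();
+ *
+ *     ext4_htree_lock(lck, lhead, dir, EXT4_HLOCK_LOOKUP);
+ *     bh = ext4_find_entry(dir, &dentry->d_name, &de, lck);
+ *     ext4_htree_unlock(lck);
+ *     ext4_htree_lock_free(lck);
+ */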
1175 +
1176  void ext4_get_group_no_and_offset(struct super_block *sb, ext4_fsblk_t blocknr,
1177                         ext4_group_t *blockgrpp, ext4_grpblk_t *offsetp);
1178  
1179 @@ -1769,14 +1842,16 @@ extern int ext4_htree_fill_tree(struct f
1180  extern struct inode *ext4_create_inode(handle_t *handle,
1181                                        struct inode * dir, int mode);
1182  extern int ext4_add_entry(handle_t *handle, struct dentry *dentry,
1183 -                         struct inode *inode);
1184 +                         struct inode *inode, struct htree_lock *lck);
1185  extern int ext4_delete_entry(handle_t *handle, struct inode * dir,
1186                              struct ext4_dir_entry_2 * de_del,
1187                              struct buffer_head * bh);
1188  extern struct buffer_head * ext4_find_entry(struct inode *dir,
1189                                             const struct qstr *d_name,
1190 -                                           struct ext4_dir_entry_2 ** res_dir);
1191 -#define ll_ext4_find_entry(inode, dentry, res_dir) ext4_find_entry(inode, &(dentry)->d_name, res_dir)
1192 +                                           struct ext4_dir_entry_2 **res_dir,
1193 +                                           struct htree_lock *lck);
1194 +#define ll_ext4_find_entry(inode, dentry, res_dir, lck) \
1195 +       ext4_find_entry(inode, &(dentry)->d_name, res_dir, lck)
1196  extern int ext4_add_dot_dotdot(handle_t *handle, struct inode *dir,
1197                                struct inode *inode, const void *, const void *);
1198  extern struct buffer_head *ext4_append(handle_t *handle,
1199 @@ -1893,13 +1968,15 @@ static inline void ext4_r_blocks_count_s
1200         es->s_r_blocks_count_hi = cpu_to_le32(blk >> 32);
1201  }
1202  
1203 -static inline loff_t ext4_isize(struct ext4_inode *raw_inode)
1204 +static inline loff_t ext4_isize(struct super_block *sb,
1205 +                               struct ext4_inode *raw_inode)
1206  {
1207 -       if (S_ISREG(le16_to_cpu(raw_inode->i_mode)))
1208 +       if (EXT4_HAS_INCOMPAT_FEATURE(sb, EXT4_FEATURE_INCOMPAT_LARGEDIR) ||
1209 +           S_ISREG(le16_to_cpu(raw_inode->i_mode)))
1210                 return ((loff_t)le32_to_cpu(raw_inode->i_size_high) << 32) |
1211                         le32_to_cpu(raw_inode->i_size_lo);
1212 -       else
1213 -               return (loff_t) le32_to_cpu(raw_inode->i_size_lo);
1214 +
1215 +       return (loff_t) le32_to_cpu(raw_inode->i_size_lo);
1216  }
1217  
1218  static inline void ext4_isize_set(struct ext4_inode *raw_inode, loff_t i_size)
1219 --- linux-2.6.32-131.6.1/fs/ext4/namei.c        2011-10-06 20:10:49.000000000 +0800
1220 +++ linux-2.6.32-131.6.1-pdo/fs/ext4/namei.c    2011-12-08 15:40:39.000000000 +0800
1221 @@ -176,7 +176,7 @@ static struct dx_frame *dx_probe(const s
1222                                  struct inode *dir,
1223                                  struct dx_hash_info *hinfo,
1224                                  struct dx_frame *frame,
1225 -                                int *err);
1226 +                                struct htree_lock *lck, int *err);
1227  static void dx_release(struct dx_frame *frames);
1228  static int dx_make_map(struct ext4_dir_entry_2 *de, unsigned blocksize,
1229                        struct dx_hash_info *hinfo, struct dx_map_entry map[]);
1230 @@ -189,13 +189,13 @@ static void dx_insert_block(struct dx_fr
1231  static int ext4_htree_next_block(struct inode *dir, __u32 hash,
1232                                  struct dx_frame *frame,
1233                                  struct dx_frame *frames,
1234 -                                __u32 *start_hash);
1235 +                                __u32 *start_hash, struct htree_lock *lck);
1236  static struct buffer_head * ext4_dx_find_entry(struct inode *dir,
1237                 const struct qstr *d_name,
1238                 struct ext4_dir_entry_2 **res_dir,
1239 -               int *err);
1240 +               struct htree_lock *lck, int *err);
1241  static int ext4_dx_add_entry(handle_t *handle, struct dentry *dentry,
1242 -                            struct inode *inode);
1243 +                            struct inode *inode, struct htree_lock *lck);
1244  
1245  /*
1246   * p is at least 6 bytes before the end of page
1247 @@ -225,7 +225,7 @@ struct dx_root_info * dx_get_dx_info(str
1248  
1249  static inline ext4_lblk_t dx_get_block(struct dx_entry *entry)
1250  {
1251 -       return le32_to_cpu(entry->block) & 0x00ffffff;
1252 +       return le32_to_cpu(entry->block) & 0x0fffffff;
1253  }
1254  
1255  static inline void dx_set_block(struct dx_entry *entry, ext4_lblk_t value)
1256 @@ -298,7 +298,7 @@ static void dx_show_index(char * label, 
1257         printk("\n");
1258  }
1259  
1260 -struct stats
1261 +struct stats
1262  {
1263         unsigned names;
1264         unsigned space;
1265 @@ -368,6 +368,223 @@ struct stats dx_show_entries(struct dx_h
1266  }
1267  #endif /* DX_DEBUG */
1268  
1269 +/* private data for htree_lock */
1270 +struct ext4_dir_lock_data {
1271 +       unsigned                ld_flags;  /* bits-map for lock types */
1272 +       unsigned                ld_count;  /* # entries of the last DX block */
1273 +       struct dx_entry         ld_at_entry; /* copy of leaf dx_entry */
1274 +       struct dx_entry         *ld_at;    /* position of leaf dx_entry */
1275 +};
1276 +
1277 +#define ext4_htree_lock_data(l)        ((struct ext4_dir_lock_data *)(l)->lk_private)
1278 +
1279 +/* NB: ext4_lblk_t is 32 bits so we use high bits to identify invalid blk */
1280 +#define EXT4_HTREE_NODE_CHANGED        (0xcafeULL << 32)
1281 +
1282 +static void ext4_htree_event_cb(void *target, void *event)
1283 +{
1284 +       u64 *block = (u64 *)target;
1285 +
1286 +       if (*block == dx_get_block((struct dx_entry *)event))
1287 +               *block = EXT4_HTREE_NODE_CHANGED;
1288 +}
1289 +
1290 +struct htree_lock_head *ext4_htree_lock_head_alloc(unsigned hbits)
1291 +{
1292 +       struct htree_lock_head *lhead;
1293 +
1294 +       lhead = htree_lock_head_alloc(EXT4_LK_MAX, hbits, 0);
1295 +       if (lhead != NULL) {
1296 +               htree_lock_event_attach(lhead, EXT4_LK_SPIN, HTREE_EVENT_WR,
1297 +                                       ext4_htree_event_cb);
1298 +       }
1299 +       return lhead;
1300 +}
1301 +EXPORT_SYMBOL(ext4_htree_lock_head_alloc);
1302 +
1303 +struct htree_lock *ext4_htree_lock_alloc(void)
1304 +{
1305 +       return htree_lock_alloc(EXT4_LK_MAX,
1306 +                               sizeof(struct ext4_dir_lock_data));
1307 +}
1308 +EXPORT_SYMBOL(ext4_htree_lock_alloc);
1309 +
1310 +static htree_lock_mode_t ext4_htree_mode(unsigned flags)
1311 +{
1312 +       switch (flags) {
1313 +       default: /* 0 or unknown flags require EX lock */
1314 +               return HTREE_LOCK_EX;
1315 +       case EXT4_HLOCK_READDIR:
1316 +               return HTREE_LOCK_PR;
1317 +       case EXT4_HLOCK_LOOKUP:
1318 +               return HTREE_LOCK_CR;
1319 +       case EXT4_HLOCK_DEL:
1320 +       case EXT4_HLOCK_ADD:
1321 +               return HTREE_LOCK_CW;
1322 +       }
1323 +}
1324 +
1325 +/* return PR for read-only operations, otherwise return EX */
1326 +static inline htree_lock_mode_t ext4_htree_safe_mode(unsigned flags)
1327 +{
1328 +       int writer = (flags & EXT4_LB_DE) == EXT4_LB_DE;
1329 +
1330 +       /* 0 requires EX lock */
1331 +       return (flags == 0 || writer) ? HTREE_LOCK_EX : HTREE_LOCK_PR;
1332 +}
1333 +
1334 +static int ext4_htree_safe_locked(struct htree_lock *lck)
1335 +{
1336 +       int writer;
1337 +
1338 +       if (lck == NULL || lck->lk_mode == HTREE_LOCK_EX)
1339 +               return 1;
1340 +
1341 +       writer = (ext4_htree_lock_data(lck)->ld_flags & EXT4_LB_DE) ==
1342 +                EXT4_LB_DE;
1343 +       if (writer) /* all readers & writers are excluded? */
1344 +               return lck->lk_mode == HTREE_LOCK_EX;
1345 +
1346 +       /* all writers are excluded? */
1347 +       return lck->lk_mode == HTREE_LOCK_PR ||
1348 +              lck->lk_mode == HTREE_LOCK_PW ||
1349 +              lck->lk_mode == HTREE_LOCK_EX;
1350 +}
1351 +
1352 +/* relock htree_lock with EX mode if it's a change operation, otherwise
1353 + * relock it with PR mode. It's a noop if PDO is disabled. */
1354 +static void ext4_htree_safe_relock(struct htree_lock *lck)
1355 +{
1356 +       if (!ext4_htree_safe_locked(lck)) {
1357 +               unsigned flags = ext4_htree_lock_data(lck)->ld_flags;
1358 +
1359 +               htree_change_lock(lck, ext4_htree_safe_mode(flags));
1360 +       }
1361 +}
1362 +
1363 +void ext4_htree_lock(struct htree_lock *lck, struct htree_lock_head *lhead,
1364 +                    struct inode *dir, unsigned flags)
1365 +{
1366 +       htree_lock_mode_t mode = is_dx(dir) ? ext4_htree_mode(flags) :
1367 +                                             ext4_htree_safe_mode(flags);
1368 +
1369 +       ext4_htree_lock_data(lck)->ld_flags = flags;
1370 +       htree_lock(lck, lhead, mode);
1371 +       if (!is_dx(dir))
1372 +               ext4_htree_safe_relock(lck); /* make sure it's safe locked */
1373 +}
1374 +EXPORT_SYMBOL(ext4_htree_lock);
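
The exported trio ext4_htree_lock_head_alloc() / ext4_htree_lock_alloc() / ext4_htree_lock() is what an external caller (in Lustre, the osd-ldiskfs layer) drives: one lock head per directory, one htree_lock per thread, taken around the namei call with the operation's flags. A minimal usage sketch follows; ext4_htree_unlock() and ext4_htree_lock_free() are assumed to be provided by the ext4.h hunk of this patch and are not shown in this section, and error handling is trimmed:

static struct buffer_head *pdo_lookup_sketch(struct inode *dir,
					     struct htree_lock_head *lhead,
					     const struct qstr *name,
					     struct ext4_dir_entry_2 **de)
{
	struct htree_lock *lck = ext4_htree_lock_alloc();
	struct buffer_head *bh;

	if (lck == NULL)
		return NULL;

	/* CR on the whole tree; dx_probe() will PR/PW-lock just the
	 * index and leaf blocks this lookup actually touches */
	ext4_htree_lock(lck, lhead, dir, EXT4_HLOCK_LOOKUP);
	bh = ext4_find_entry(dir, name, de, lck);
	ext4_htree_unlock(lck);		/* assumed counterpart of the above */
	ext4_htree_lock_free(lck);	/* assumed, from the ext4.h hunk    */
	return bh;
}
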
1375 +
1376 +static int ext4_htree_node_lock(struct htree_lock *lck, struct dx_entry *at,
1377 +                               unsigned lmask, int wait, void *ev)
1378 +{
1379 +       u32     key = (at == NULL) ? 0 : dx_get_block(at);
1380 +       u32     mode;
1381 +
1382 +       /* NOOP if htree is well protected or caller doesn't require the lock */
1383 +       if (ext4_htree_safe_locked(lck) ||
1384 +          !(ext4_htree_lock_data(lck)->ld_flags & lmask))
1385 +               return 1;
1386 +
1387 +       mode = (ext4_htree_lock_data(lck)->ld_flags & lmask) == lmask ?
1388 +               HTREE_LOCK_PW : HTREE_LOCK_PR;
1389 +       while (1) {
1390 +               if (htree_node_lock_try(lck, mode, key, ffz(~lmask), wait, ev))
1391 +                       return 1;
1392 +               if (!(lmask & EXT4_LB_SPIN)) /* not a spinlock */
1393 +                       return 0;
1394 +               cpu_relax(); /* spin until granted */
1395 +       }
1396 +}
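
Two idioms in ext4_htree_node_lock() are worth spelling out. First, if every bit of @lmask is set in ld_flags the operation intends to modify that level, so the node is locked PW, otherwise PR. Second, ffz(~lmask) converts a single-bit lock mask into the numeric lock level the generic htree_lock layer expects. A tiny illustration with a made-up mask value (the real EXT4_LB_xx / EXT4_LK_xx constants live in the ext4.h hunk of this patch):

#include <linux/bitops.h>	/* ffz() */

#define LB_EXAMPLE	(1 << 2)	/* imagine EXT4_LB_DE == 1 << EXT4_LK_DE */

static int lb_to_level_example(void)
{
	/* ~LB_EXAMPLE has its only zero bit at position 2, so ffz()
	 * returns 2, i.e. the lock level matching the mask bit */
	return ffz(~LB_EXAMPLE);
}
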
1397 +
1398 +static int ext4_htree_node_locked(struct htree_lock *lck, unsigned lmask)
1399 +{
1400 +       return ext4_htree_safe_locked(lck) ||
1401 +              htree_node_is_granted(lck, ffz(~lmask));
1402 +}
1403 +
1404 +static void ext4_htree_node_unlock(struct htree_lock *lck,
1405 +                                  unsigned lmask, void *buf)
1406 +{
1407 +       /* NB: it's safe to call multiple times, even if it's not locked */
1408 +       if (!ext4_htree_safe_locked(lck) &&
1409 +            htree_node_is_granted(lck, ffz(~lmask)))
1410 +               htree_node_unlock(lck, ffz(~lmask), buf);
1411 +}
1412 +
1413 +#define ext4_htree_dx_lock(lck, key)           \
1414 +       ext4_htree_node_lock(lck, key, EXT4_LB_DX, 1, NULL)
1415 +#define ext4_htree_dx_lock_try(lck, key)       \
1416 +       ext4_htree_node_lock(lck, key, EXT4_LB_DX, 0, NULL)
1417 +#define ext4_htree_dx_unlock(lck)              \
1418 +       ext4_htree_node_unlock(lck, EXT4_LB_DX, NULL)
1419 +#define ext4_htree_dx_locked(lck)              \
1420 +       ext4_htree_node_locked(lck, EXT4_LB_DX)
1421 +
1422 +static void ext4_htree_dx_need_lock(struct htree_lock *lck)
1423 +{
1424 +       struct ext4_dir_lock_data *ld;
1425 +
1426 +       if (ext4_htree_safe_locked(lck))
1427 +               return;
1428 +
1429 +       ld = ext4_htree_lock_data(lck);
1430 +       switch (ld->ld_flags) {
1431 +       default:
1432 +               return;
1433 +       case EXT4_HLOCK_LOOKUP:
1434 +               ld->ld_flags = EXT4_HLOCK_LOOKUP_SAFE;
1435 +               return;
1436 +       case EXT4_HLOCK_DEL:
1437 +               ld->ld_flags = EXT4_HLOCK_DEL_SAFE;
1438 +               return;
1439 +       case EXT4_HLOCK_ADD:
1440 +               ld->ld_flags = EXT4_HLOCK_SPLIT;
1441 +               return;
1442 +       }
1443 +}
1444 +
1445 +#define ext4_htree_de_lock(lck, key)           \
1446 +       ext4_htree_node_lock(lck, key, EXT4_LB_DE, 1, NULL)
1447 +#define ext4_htree_de_unlock(lck)              \
1448 +       ext4_htree_node_unlock(lck, EXT4_LB_DE, NULL)
1449 +
1450 +#define ext4_htree_spin_lock(lck, key, event)  \
1451 +       ext4_htree_node_lock(lck, key, EXT4_LB_SPIN, 0, event)
1452 +#define ext4_htree_spin_unlock(lck)            \
1453 +       ext4_htree_node_unlock(lck, EXT4_LB_SPIN, NULL)
1454 +#define ext4_htree_spin_unlock_listen(lck, p)  \
1455 +       ext4_htree_node_unlock(lck, EXT4_LB_SPIN, p)
1456 +
1457 +static void ext4_htree_spin_stop_listen(struct htree_lock *lck)
1458 +{
1459 +       if (!ext4_htree_safe_locked(lck) &&
1460 +           htree_node_is_listening(lck, ffz(~EXT4_LB_SPIN)))
1461 +               htree_node_stop_listen(lck, ffz(~EXT4_LB_SPIN));
1462 +}
1463 +
1464 +enum {
1465 +       DX_HASH_COL_IGNORE,     /* ignore collision while probing frames */
1466 +       DX_HASH_COL_YES,        /* there is collision and it does matter */
1467 +       DX_HASH_COL_NO,         /* there is no collision */
1468 +};
1469 +
1470 +static int dx_probe_hash_collision(struct htree_lock *lck,
1471 +                                  struct dx_entry *entries,
1472 +                                  struct dx_entry *at, u32 hash)
1473 +{
1474 +       if (!(ext4_htree_lock_data(lck)->ld_flags & EXT4_LB_EXACT)) {
1475 +               return DX_HASH_COL_IGNORE; /* don't care about collision */
1476 +
1477 +       } else if (at == entries + dx_get_count(entries) - 1) {
1478 +               return DX_HASH_COL_IGNORE; /* not in any leaf of this DX */
1479 +
1480 +       } else { /* hash collision? */
1481 +               return ((dx_get_hash(at + 1) & ~1) == hash) ?
1482 +                       DX_HASH_COL_YES : DX_HASH_COL_NO;
1483 +       }
1484 +}
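
dx_probe_hash_collision() only matters for exact-match operations (EXT4_LB_EXACT). Index entries store the hash of the first name in their leaf with bit 0 reserved as the continuation marker (compare the hash2 + continued insertion in do_split() further down), so masking bit 0 off the next entry's hash and comparing it with the probed hash tells whether names with this hash may continue into the neighbouring leaf. A small worked example (values and helper name are illustrative only):

static int dx_collision_example(void)
{
	u32 hash      = 0x4a3c0000;	/* hash being probed                */
	u32 next_hash = 0x4a3c0001;	/* dx_get_hash(at + 1), continued=1 */

	/* strip the continuation bit before comparing; equal hashes mean
	 * the probed name may also live in the next leaf, so DX-lock it */
	return (next_hash & ~1) == hash;	/* 1, i.e. DX_HASH_COL_YES */
}
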
1485 +
1486  /*
1487   * Probe for a directory leaf block to search.
1488   *
1489 @@ -379,16 +596,17 @@ struct stats dx_show_entries(struct dx_h
1490   */
1491  static struct dx_frame *
1492  dx_probe(const struct qstr *d_name, struct inode *dir,
1493 -        struct dx_hash_info *hinfo, struct dx_frame *frame_in, int *err)
1494 +        struct dx_hash_info *hinfo, struct dx_frame *frame_in,
1495 +        struct htree_lock *lck, int *err)
1496  {
1497         unsigned count, indirect;
1498 -       struct dx_entry *at, *entries, *p, *q, *m;
1499 +       struct dx_entry *at, *entries, *p, *q, *m, *dx = NULL;
1500         struct dx_root_info * info;
1501         struct buffer_head *bh;
1502         struct dx_frame *frame = frame_in;
1503         u32 hash;
1504  
1505 -       frame->bh = NULL;
1506 +       memset(frame_in, 0, EXT4_HTREE_LEVEL * sizeof(frame_in[0]));
1507         if (!(bh = ext4_bread (NULL,dir, 0, 0, err)))
1508                 goto fail;
1509  
1510 @@ -418,9 +636,16 @@ dx_probe(const struct qstr *d_name, stru
1511                 goto fail;
1512         }
1513  
1514 -       if ((indirect = info->indirect_levels) > 1) {
1515 -               ext4_warning(dir->i_sb, "Unimplemented inode hash depth: %#06x",
1516 -                            info->indirect_levels);
1517 +       indirect = info->indirect_levels;
1518 +       if (indirect >= ext4_dir_htree_level(dir->i_sb)) {
1519 +               ext4_warning(dir->i_sb,
1520 +                            "Directory (ino: %lu) htree depth %#06x exceeds "
1521 +                            "supported value", dir->i_ino,
1522 +                            ext4_dir_htree_level(dir->i_sb));
1523 +               if (ext4_dir_htree_level(dir->i_sb) < EXT4_HTREE_LEVEL) {
1524 +                       ext4_warning(dir->i_sb, "Enable large directory "
1525 +                                               "feature to access it");
1526 +               }
1527                 brelse(bh);
1528                 *err = ERR_BAD_DX_DIR;
1529                 goto fail;
1530 @@ -440,8 +665,15 @@ dx_probe(const struct qstr *d_name, stru
1531         dxtrace(printk("Look up %x", hash));
1532         while (1)
1533         {
1534 +               if (indirect == 0) { /* the last index level */
1535 +                       /* NB: ext4_htree_dx_lock() could be a noop if
1536 +                        * the DX-lock flag is not set for the current operation */
1537 +                       ext4_htree_dx_lock(lck, dx);
1538 +                       ext4_htree_spin_lock(lck, dx, NULL);
1539 +               }
1540                 count = dx_get_count(entries);
1541 -               if (!count || count > dx_get_limit(entries)) {
1542 +               if (count == 0 || count > dx_get_limit(entries)) {
1543 +                       ext4_htree_spin_unlock(lck); /* release spin */
1544                         ext4_warning(dir->i_sb,
1545                                      "dx entry: no count or count > limit");
1546                         brelse(bh);
1547 @@ -482,9 +714,73 @@ dx_probe(const struct qstr *d_name, stru
1548                 frame->bh = bh;
1549                 frame->entries = entries;
1550                 frame->at = at;
1551 -               if (!indirect--) return frame;
1552 +
1553 +               if (indirect == 0) { /* the last index level */
1554 +                       struct ext4_dir_lock_data *ld;
1555 +                       u64 myblock;
1556 +
1557 +                       /* By default we only lock DE-block, however, we will
1558 +                        * also lock the last level DX-block if:
1559 +                        * a) there is a hash collision
1560 +                        *    we will set DX-lock flag (a few lines below)
1561 +                        *    and redo to lock DX-block
1562 +                        *    see detail in dx_probe_hash_collision()
1563 +                        * b) it's a retry from splitting
1564 +                        *    we need to lock the last level DX-block so nobody
1565 +                        *    else can split any leaf blocks under the same
1566 +                        *    DX-block, see detail in ext4_dx_add_entry()
1567 +                        */
1568 +                       if (ext4_htree_dx_locked(lck)) {
1569 +                               /* DX-block is locked, just lock DE-block
1570 +                                * and return */
1571 +                               ext4_htree_spin_unlock(lck);
1572 +                               if (!ext4_htree_safe_locked(lck))
1573 +                                       ext4_htree_de_lock(lck, frame->at);
1574 +                               return frame;
1575 +                       }
1576 +                       /* it's pdirop and no DX lock */
1577 +                       if (dx_probe_hash_collision(lck, entries, at, hash) ==
1578 +                           DX_HASH_COL_YES) {
1579 +                               /* found hash collision, set DX-lock flag
1580 +                                * and retry to obtain DX-lock */
1581 +                               ext4_htree_spin_unlock(lck);
1582 +                               ext4_htree_dx_need_lock(lck);
1583 +                               continue;
1584 +                       }
1585 +                       ld = ext4_htree_lock_data(lck);
1586 +                       /* we don't hold the DX lock, so @at can't be trusted
1587 +                        * after the spinlock is released; save a copy of it */
1588 +                       ld->ld_at = at;
1589 +                       ld->ld_at_entry = *at;
1590 +                       ld->ld_count = dx_get_count(entries);
1591 +
1592 +                       frame->at = &ld->ld_at_entry;
1593 +                       myblock = dx_get_block(at);
1594 +
1595 +                       /* NB: ordering locking */
1596 +                       ext4_htree_spin_unlock_listen(lck, &myblock);
1597 +                       /* another thread can split this DE-block because:
1598 +                        * a) we don't hold the lock on the DE-block yet
1599 +                        * b) we released the spinlock on the DX-block
1600 +                        * if that happens we can detect it by listening
1601 +                        * for the split event on this DE-block */
1602 +                       ext4_htree_de_lock(lck, frame->at);
1603 +                       ext4_htree_spin_stop_listen(lck);
1604 +
1605 +                       if (myblock == EXT4_HTREE_NODE_CHANGED) {
1606 +                               /* someone split this DE-block before
1607 +                                * we locked it; retry and lock the
1608 +                                * valid DE-block */
1609 +                               ext4_htree_de_unlock(lck);
1610 +                               continue;
1611 +                       }
1612 +                       return frame;
1613 +               }
1614 +               dx = at;
1615 +               indirect--;
1616                 if (!(bh = ext4_bread (NULL,dir, dx_get_block(at), 0, err)))
1617                         goto fail2;
1618 +
1619                 at = entries = ((struct dx_node *) bh->b_data)->entries;
1620                 if (dx_get_limit(entries) != dx_node_limit (dir)) {
1621                         ext4_warning(dir->i_sb,
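
The ordering in the last index level of dx_probe() above is the core of the patch: the position and contents of the chosen dx_entry are copied into the per-lock ext4_dir_lock_data while the DX spinlock is still held, the spinlock is then dropped while listening on the resolved leaf block, and only afterwards is the DE lock taken; if the listened-on value has been replaced with EXT4_HTREE_NODE_CHANGED the leaf was split in that window and the probe restarts. A condensed sketch of just that sequence (the helper name is hypothetical; @lck, @frame, @at and @entries stand for dx_probe()'s locals, and the DX spinlock is assumed held on entry, exactly as in the hunk above):

static struct dx_frame *dx_probe_last_level_sketch(struct htree_lock *lck,
						   struct dx_frame *frame,
						   struct dx_entry *at,
						   struct dx_entry *entries)
{
	struct ext4_dir_lock_data *ld = ext4_htree_lock_data(lck);
	u64 myblock;

	ld->ld_at = at;			/* only valid while spin is held */
	ld->ld_at_entry = *at;		/* private copy we can trust     */
	ld->ld_count = dx_get_count(entries);
	frame->at = &ld->ld_at_entry;
	myblock = dx_get_block(at);

	ext4_htree_spin_unlock_listen(lck, &myblock);	/* 1: drop spin, listen */
	ext4_htree_de_lock(lck, frame->at);		/* 2: lock the leaf     */
	ext4_htree_spin_stop_listen(lck);		/* 3: stop listening    */

	if (myblock == EXT4_HTREE_NODE_CHANGED) {
		/* the leaf was split between 1 and 2: drop the DE lock and
		 * let the caller re-run the probe loop */
		ext4_htree_de_unlock(lck);
		return NULL;
	}
	return frame;			/* frame->at points at our copy */
}
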
1622 @@ -512,13 +808,18 @@ fail:
1623  static void dx_release (struct dx_frame *frames)
1624  {
1625         struct dx_root_info *info;
1626 +       int i;
1627 +
1628         if (frames[0].bh == NULL)
1629                 return;
1630  
1631         info = dx_get_dx_info((struct ext4_dir_entry_2*)frames[0].bh->b_data);
1632 -       if (info->indirect_levels)
1633 -               brelse(frames[1].bh);
1634 -       brelse(frames[0].bh);
1635 +       for (i = 0; i <= info->indirect_levels; i++) {
1636 +               if (frames[i].bh == NULL)
1637 +                       break;
1638 +               brelse(frames[i].bh);
1639 +               frames[i].bh = NULL;
1640 +       }
1641  }
1642  
1643  /*
1644 @@ -541,7 +842,7 @@ static void dx_release (struct dx_frame 
1645  static int ext4_htree_next_block(struct inode *dir, __u32 hash,
1646                                  struct dx_frame *frame,
1647                                  struct dx_frame *frames,
1648 -                                __u32 *start_hash)
1649 +                                __u32 *start_hash, struct htree_lock *lck)
1650  {
1651         struct dx_frame *p;
1652         struct buffer_head *bh;
1653 @@ -556,12 +857,22 @@ static int ext4_htree_next_block(struct 
1654          * this loop, num_frames indicates the number of interior
1655          * nodes need to be read.
1656          */
1657 +       ext4_htree_de_unlock(lck);
1658         while (1) {
1659 -               if (++(p->at) < p->entries + dx_get_count(p->entries))
1660 -                       break;
1661 +               if (num_frames > 0 || ext4_htree_dx_locked(lck)) {
1662 +                       /* num_frames > 0 :
1663 +                        *   DX block
1664 +                        * ext4_htree_dx_locked:
1665 +                        *   frame->at is reliable pointer returned by dx_probe,
1666 +                        *   otherwise dx_probe already knew no collision */
1667 +                       if (++(p->at) < p->entries + dx_get_count(p->entries))
1668 +                               break;
1669 +               }
1670                 if (p == frames)
1671                         return 0;
1672                 num_frames++;
1673 +               if (num_frames == 1)
1674 +                       ext4_htree_dx_unlock(lck);
1675                 p--;
1676         }
1677  
1678 @@ -584,6 +895,13 @@ static int ext4_htree_next_block(struct 
1679          * block so no check is necessary
1680          */
1681         while (num_frames--) {
1682 +               if (num_frames == 0) {
1683 +                       /* it's not always necessary, we just don't want to
1684 +                        * detect hash collision again */
1685 +                       ext4_htree_dx_need_lock(lck);
1686 +                       ext4_htree_dx_lock(lck, p->at);
1687 +               }
1688 +
1689                 if (!(bh = ext4_bread(NULL, dir, dx_get_block(p->at),
1690                                       0, &err)))
1691                         return err; /* Failure */
1692 @@ -592,6 +910,7 @@ static int ext4_htree_next_block(struct 
1693                 p->bh = bh;
1694                 p->at = p->entries = ((struct dx_node *) bh->b_data)->entries;
1695         }
1696 +       ext4_htree_de_lock(lck, p->at);
1697         return 1;
1698  }
1699  
1700 @@ -661,7 +980,7 @@ int ext4_htree_fill_tree(struct file *di
1701  {
1702         struct dx_hash_info hinfo;
1703         struct ext4_dir_entry_2 *de;
1704 -       struct dx_frame frames[2], *frame;
1705 +       struct dx_frame frames[EXT4_HTREE_LEVEL], *frame;
1706         struct inode *dir;
1707         ext4_lblk_t block;
1708         int count = 0;
1709 @@ -684,10 +1003,10 @@ int ext4_htree_fill_tree(struct file *di
1710         }
1711         hinfo.hash = start_hash;
1712         hinfo.minor_hash = 0;
1713 -       frame = dx_probe(NULL, dir, &hinfo, frames, &err);
1714 +       /* assume it's PR locked */
1715 +       frame = dx_probe(NULL, dir, &hinfo, frames, NULL, &err);
1716         if (!frame)
1717                 return err;
1718 -
1719         /* Add '.' and '..' from the htree header */
1720         if (!start_hash && !start_minor_hash) {
1721                 de = (struct ext4_dir_entry_2 *) frames[0].bh->b_data;
1722 @@ -714,7 +1033,7 @@ int ext4_htree_fill_tree(struct file *di
1723                 count += ret;
1724                 hashval = ~0;
1725                 ret = ext4_htree_next_block(dir, HASH_NB_ALWAYS,
1726 -                                           frame, frames, &hashval);
1727 +                                           frame, frames, &hashval, NULL);
1728                 *next_hash = hashval;
1729                 if (ret < 0) {
1730                         err = ret;
1731 @@ -814,9 +1133,17 @@ static void dx_insert_block(struct dx_fr
1732  
1733  static void ext4_update_dx_flag(struct inode *inode)
1734  {
1735 +       /* Disable it for ldiskfs, because going from a DX directory to
1736 +        * a non-DX directory while it is in use will completely break
1737 +        * the htree-locking.
1738 +        * If we really want to support this operation in the future,
1739 +        * we would need to exclusively lock the directory here, which
1740 +        * would increase the complexity of the code */
1741 +#if 0
1742         if (!EXT4_HAS_COMPAT_FEATURE(inode->i_sb,
1743                                      EXT4_FEATURE_COMPAT_DIR_INDEX))
1744                 ext4_clear_inode_flag(inode, EXT4_INODE_INDEX);
1745 +#endif
1746  }
1747  
1748  /*
1749 @@ -889,8 +1216,9 @@ static inline int search_dirblock(struct
1750   * to brelse() it when appropriate.
1751   */
1752  struct buffer_head * ext4_find_entry(struct inode *dir,
1753 -                                     const struct qstr *d_name,
1754 -                                     struct ext4_dir_entry_2 ** res_dir)
1755 +                                    const struct qstr *d_name,
1756 +                                    struct ext4_dir_entry_2 **res_dir,
1757 +                                    struct htree_lock *lck)
1758  {
1759         struct super_block *sb;
1760         struct buffer_head *bh_use[NAMEI_RA_SIZE];
1761 @@ -911,7 +1239,7 @@ struct buffer_head * ext4_find_entry(str
1762         if (namelen > EXT4_NAME_LEN)
1763                 return NULL;
1764         if (is_dx(dir)) {
1765 -               bh = ext4_dx_find_entry(dir, d_name, res_dir, &err);
1766 +               bh = ext4_dx_find_entry(dir, d_name, res_dir, lck, &err);
1767                 /*
1768                  * On success, or if the error was file not found,
1769                  * return.  Otherwise, fall back to doing a search the
1770 @@ -921,6 +1249,7 @@ struct buffer_head * ext4_find_entry(str
1771                         return bh;
1772                 dxtrace(printk(KERN_DEBUG "ext4_find_entry: dx failed, "
1773                                "falling back\n"));
1774 +               ext4_htree_safe_relock(lck);
1775         }
1776         nblocks = dir->i_size >> EXT4_BLOCK_SIZE_BITS(sb);
1777         start = EXT4_I(dir)->i_dir_start_lookup;
1778 @@ -998,13 +1327,15 @@ cleanup_and_exit:
1779  }
1780  EXPORT_SYMBOL(ext4_find_entry);
1781  
1782 -static struct buffer_head * ext4_dx_find_entry(struct inode *dir, const struct qstr *d_name,
1783 -                      struct ext4_dir_entry_2 **res_dir, int *err)
1784 +static struct buffer_head * ext4_dx_find_entry(struct inode *dir,
1785 +                               const struct qstr *d_name,
1786 +                               struct ext4_dir_entry_2 **res_dir,
1787 +                               struct htree_lock *lck, int *err)
1788  {
1789         struct super_block * sb;
1790         struct dx_hash_info     hinfo;
1791         u32 hash;
1792 -       struct dx_frame frames[2], *frame;
1793 +       struct dx_frame frames[EXT4_HTREE_LEVEL], *frame;
1794         struct ext4_dir_entry_2 *de, *top;
1795         struct buffer_head *bh;
1796         ext4_lblk_t block;
1797 @@ -1015,13 +1346,16 @@ static struct buffer_head * ext4_dx_find
1798         sb = dir->i_sb;
1799         /* NFS may look up ".." - look at dx_root directory block */
1800         if (namelen > 2 || name[0] != '.'||(name[1] != '.' && name[1] != '\0')){
1801 -               if (!(frame = dx_probe(d_name, dir, &hinfo, frames, err)))
1802 +               if (!(frame = dx_probe(d_name, dir, &hinfo, frames, lck, err)))
1803                         return NULL;
1804         } else {
1805                 frame = frames;
1806                 frame->bh = NULL;                       /* for dx_release() */
1807                 frame->at = (struct dx_entry *)frames;  /* hack for zero entry*/
1808                 dx_set_block(frame->at, 0);             /* dx_root block is 0 */
1809 +               /* "." and ".." are stored in root DX lock */
1810 +               ext4_htree_dx_need_lock(lck);
1811 +               ext4_htree_dx_lock(lck, NULL);
1812         }
1813         hash = hinfo.hash;
1814         do {
1815 @@ -1050,7 +1384,7 @@ static struct buffer_head * ext4_dx_find
1816                 brelse(bh);
1817                 /* Check to see if we should continue to search */
1818                 retval = ext4_htree_next_block(dir, hash, frame,
1819 -                                              frames, NULL);
1820 +                                              frames, NULL, lck);
1821                 if (retval < 0) {
1822                         ext4_warning(sb,
1823                              "error reading index page in directory #%lu",
1824 @@ -1076,7 +1410,7 @@ static struct dentry *ext4_lookup(struct
1825         if (dentry->d_name.len > EXT4_NAME_LEN)
1826                 return ERR_PTR(-ENAMETOOLONG);
1827  
1828 -       bh = ext4_find_entry(dir, &dentry->d_name, &de);
1829 +       bh = ext4_find_entry(dir, &dentry->d_name, &de, NULL);
1830         inode = NULL;
1831         if (bh) {
1832                 __u32 ino = le32_to_cpu(de->inode);
1833 @@ -1144,7 +1478,7 @@ struct dentry *ext4_get_parent(struct de
1834         struct ext4_dir_entry_2 * de;
1835         struct buffer_head *bh;
1836  
1837 -       bh = ext4_find_entry(child->d_inode, &dotdot, &de);
1838 +       bh = ext4_find_entry(child->d_inode, &dotdot, &de, NULL);
1839         inode = NULL;
1840         if (!bh)
1841                 return ERR_PTR(-ENOENT);
1842 @@ -1233,8 +1567,9 @@ static struct ext4_dir_entry_2* dx_pack_
1843   * Returns pointer to de in block into which the new entry will be inserted.
1844   */
1845  static struct ext4_dir_entry_2 *do_split(handle_t *handle, struct inode *dir,
1846 -                       struct buffer_head **bh,struct dx_frame *frame,
1847 -                       struct dx_hash_info *hinfo, int *error)
1848 +                       struct buffer_head **bh, struct dx_frame *frames,
1849 +                       struct dx_frame *frame, struct dx_hash_info *hinfo,
1850 +                       struct htree_lock *lck, int *error)
1851  {
1852         unsigned blocksize = dir->i_sb->s_blocksize;
1853         unsigned count, continued;
1854 @@ -1291,7 +1626,14 @@ static struct ext4_dir_entry_2 *do_split
1855                                         hash2, split, count-split));
1856  
1857         /* Fancy dance to stay within two buffers */
1858 -       de2 = dx_move_dirents(data1, data2, map + split, count - split, blocksize);
1859 +       if (hinfo->hash < hash2) {
1860 +               de2 = dx_move_dirents(data1, data2, map + split,
1861 +                                     count - split, blocksize);
1862 +       } else {
1863 +                       /* make sure we add the entry to the same block
1864 +                        * that we have already locked */
1865 +               de2 = dx_move_dirents(data1, data2, map, split, blocksize);
1866 +       }
1867         de = dx_pack_dirents(data1, blocksize);
1868         de->rec_len = ext4_rec_len_to_disk(data1 + blocksize - (char *) de,
1869                                            blocksize);
1870 @@ -1300,13 +1642,21 @@ static struct ext4_dir_entry_2 *do_split
1871         dxtrace(dx_show_leaf (hinfo, (struct ext4_dir_entry_2 *) data1, blocksize, 1));
1872         dxtrace(dx_show_leaf (hinfo, (struct ext4_dir_entry_2 *) data2, blocksize, 1));
1873  
1874 -       /* Which block gets the new entry? */
1875 -       if (hinfo->hash >= hash2)
1876 -       {
1877 -               swap(*bh, bh2);
1878 -               de = de2;
1879 +       ext4_htree_spin_lock(lck, frame > frames ? (frame - 1)->at : NULL,
1880 +                            frame->at); /* notify block is being split */
1881 +       if (hinfo->hash < hash2) {
1882 +               dx_insert_block(frame, hash2 + continued, newblock);
1883 +
1884 +       } else {
1885 +               /* switch block number */
1886 +               dx_insert_block(frame, hash2 + continued,
1887 +                               dx_get_block(frame->at));
1888 +               dx_set_block(frame->at, newblock);
1889 +               (frame->at)++;
1890         }
1891 -       dx_insert_block(frame, hash2 + continued, newblock);
1892 +       ext4_htree_spin_unlock(lck);
1893 +       ext4_htree_dx_unlock(lck);
1894 +
1895         err = ext4_handle_dirty_metadata(handle, dir, bh2);
1896         if (err)
1897                 goto journal_error;
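
In the do_split() changes above, upstream simply swaps *bh and inserts a new index entry for the high-hash half, but here the thread already holds the DE lock on the original block, so when the new name's hash is >= hash2 the low-hash half is moved out instead and the index is rewritten so that the locked block keeps the half the new entry belongs to. A worked picture of the index around frame->at for that case (values illustrative, not part of the patch):

/*
 * before the split:  at   -> { hash 0x1000, block A }   A holds all names,
 *                                                       A is DE-locked
 * after the split:   at-1 -> { hash 0x1000, block B }   B = newblock, got
 *                                                       the low-hash half
 *                    at   -> { hash2,       block A }   A kept the high half
 *
 * The new name (hash >= hash2) is therefore still added to block A, the
 * block this thread already holds locked, and no swap(*bh, bh2) is needed.
 */
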
1898 @@ -1418,7 +1768,7 @@ static int add_dirent_to_buf(handle_t *h
1899         if (!IS_NOCMTIME(dir))
1900                 dir->i_mtime = dir->i_ctime = ext4_current_time(dir);
1901         ext4_update_dx_flag(dir);
1902 -       dir->i_version++;
1903 +       inode_inc_iversion(dir);
1904         ext4_mark_inode_dirty(handle, dir);
1905         BUFFER_TRACE(bh, "call ext4_handle_dirty_metadata");
1906         err = ext4_handle_dirty_metadata(handle, dir, bh);
1907 @@ -1438,7 +1788,7 @@ static int make_indexed_dir(handle_t *ha
1908         const char      *name = dentry->d_name.name;
1909         int             namelen = dentry->d_name.len;
1910         struct buffer_head *bh2;
1911 -       struct dx_frame frames[2], *frame;
1912 +       struct dx_frame frames[EXT4_HTREE_LEVEL], *frame;
1913         struct dx_entry *entries;
1914         struct ext4_dir_entry_2 *de, *de2, *dot_de, *dotdot_de;
1915         char            *data1, *top;
1916 @@ -1517,7 +1867,7 @@ static int make_indexed_dir(handle_t *ha
1917         frame->at = entries;
1918         frame->bh = bh;
1919         bh = bh2;
1920 -       de = do_split(handle,dir, &bh, frame, &hinfo, &retval);
1921 +       de = do_split(handle,dir, &bh, frames, frame, &hinfo, NULL, &retval);
1922         dx_release (frames);
1923         if (!(de))
1924                 return retval;
1925 @@ -1616,7 +1966,7 @@ out:
1926   * the entry, as someone else might have used it while you slept.
1927   */
1928  int ext4_add_entry(handle_t *handle, struct dentry *dentry,
1929 -                  struct inode *inode)
1930 +                  struct inode *inode, struct htree_lock *lck)
1931  {
1932         struct inode *dir = dentry->d_parent->d_inode;
1933         struct buffer_head *bh;
1934 @@ -1635,9 +1985,10 @@ int ext4_add_entry(handle_t *handle, str
1935                 if (dentry->d_name.len == 2 &&
1936                     memcmp(dentry->d_name.name, "..", 2) == 0)
1937                         return ext4_update_dotdot(handle, dentry, inode);
1938 -               retval = ext4_dx_add_entry(handle, dentry, inode);
1939 +               retval = ext4_dx_add_entry(handle, dentry, inode, lck);
1940                 if (!retval || (retval != ERR_BAD_DX_DIR))
1941                         return retval;
1942 +               ext4_htree_safe_relock(lck);
1943                 ext4_clear_inode_flag(dir, EXT4_INODE_INDEX);
1944                 dx_fallback++;
1945                 ext4_mark_inode_dirty(handle, dir);
1946 @@ -1674,18 +2025,21 @@ EXPORT_SYMBOL(ext4_add_entry);
1947   * Returns 0 for success, or a negative error value
1948   */
1949  static int ext4_dx_add_entry(handle_t *handle, struct dentry *dentry,
1950 -                            struct inode *inode)
1951 +                            struct inode *inode, struct htree_lock *lck)
1952  {
1953 -       struct dx_frame frames[2], *frame;
1954 +       struct dx_frame frames[EXT4_HTREE_LEVEL], *frame;
1955         struct dx_entry *entries, *at;
1956         struct dx_hash_info hinfo;
1957         struct buffer_head *bh;
1958         struct inode *dir = dentry->d_parent->d_inode;
1959         struct super_block *sb = dir->i_sb;
1960         struct ext4_dir_entry_2 *de;
1961 +       int restart;
1962         int err;
1963  
1964 -       frame = dx_probe(&dentry->d_name, dir, &hinfo, frames, &err);
1965 +again:
1966 +       restart = 0;
1967 +       frame = dx_probe(&dentry->d_name, dir, &hinfo, frames, lck, &err);
1968         if (!frame)
1969                 return err;
1970         entries = frame->entries;
1971 @@ -1694,33 +2048,53 @@ static int ext4_dx_add_entry(handle_t *h
1972         if (!(bh = ext4_bread(handle,dir, dx_get_block(frame->at), 0, &err)))
1973                 goto cleanup;
1974  
1975 -       BUFFER_TRACE(bh, "get_write_access");
1976 -       err = ext4_journal_get_write_access(handle, bh);
1977 -       if (err)
1978 -               goto journal_error;
1979 -
1980         err = add_dirent_to_buf(handle, dentry, inode, NULL, bh);
1981         if (err != -ENOSPC)
1982                 goto cleanup;
1983  
1984 +       err = 0;
1985         /* Block full, should compress but for now just split */
1986         dxtrace(printk(KERN_DEBUG "using %u of %u node entries\n",
1987                        dx_get_count(entries), dx_get_limit(entries)));
1988         /* Need to split index? */
1989         if (dx_get_count(entries) == dx_get_limit(entries)) {
1990                 ext4_lblk_t newblock;
1991 -               unsigned icount = dx_get_count(entries);
1992 -               int levels = frame - frames;
1993 +               int levels = frame - frames + 1;
1994 +               unsigned icount;
1995 +               int add_level = 1;
1996                 struct dx_entry *entries2;
1997                 struct dx_node *node2;
1998                 struct buffer_head *bh2;
1999  
2000 -               if (levels && (dx_get_count(frames->entries) ==
2001 -                              dx_get_limit(frames->entries))) {
2002 -                       ext4_warning(sb, "Directory index full!");
2003 +               if (!ext4_htree_safe_locked(lck)) { /* retry with EX lock */
2004 +                       ext4_htree_safe_relock(lck);
2005 +                       restart = 1;
2006 +                       goto cleanup;
2007 +               }
2008 +               while (frame > frames) {
2009 +                       if (dx_get_count((frame - 1)->entries) <
2010 +                           dx_get_limit((frame - 1)->entries)) {
2011 +                               add_level = 0;
2012 +                               break;
2013 +                       }
2014 +                       frame--; /* split higher index block */
2015 +                       at = frame->at;
2016 +                       entries = frame->entries;
2017 +                       restart = 1;
2018 +               }
2019 +               if (add_level && levels == ext4_dir_htree_level(sb)) {
2020 +                       ext4_warning(sb, "Directory (ino: %lu) index full, "
2021 +                                        "reached max htree level: %d",
2022 +                                        dir->i_ino, levels);
2023 +                       if (ext4_dir_htree_level(sb) < EXT4_HTREE_LEVEL) {
2024 +                               ext4_warning(sb, "Large directory feature is "
2025 +                                                "not enabled on this "
2026 +                                                "filesystem");
2027 +                       }
2028                         err = -ENOSPC;
2029                         goto cleanup;
2030                 }
2031 +               icount = dx_get_count(entries);
2032                 bh2 = ext4_append (handle, dir, &newblock, &err);
2033                 if (!(bh2))
2034                         goto cleanup;
2035 @@ -1733,7 +2107,7 @@ static int ext4_dx_add_entry(handle_t *h
2036                 err = ext4_journal_get_write_access(handle, frame->bh);
2037                 if (err)
2038                         goto journal_error;
2039 -               if (levels) {
2040 +               if (!add_level) {
2041                         unsigned icount1 = icount/2, icount2 = icount - icount1;
2042                         unsigned hash2 = dx_get_hash(entries + icount1);
2043                         dxtrace(printk(KERN_DEBUG "Split index %i/%i\n",
2044 @@ -1741,7 +2115,7 @@ static int ext4_dx_add_entry(handle_t *h
2045  
2046                         BUFFER_TRACE(frame->bh, "get_write_access"); /* index root */
2047                         err = ext4_journal_get_write_access(handle,
2048 -                                                            frames[0].bh);
2049 +                                                           (frame - 1)->bh);
2050                         if (err)
2051                                 goto journal_error;
2052  
2053 @@ -1757,18 +2131,24 @@ static int ext4_dx_add_entry(handle_t *h
2054                                 frame->entries = entries = entries2;
2055                                 swap(frame->bh, bh2);
2056                         }
2057 -                       dx_insert_block(frames + 0, hash2, newblock);
2058 -                       dxtrace(dx_show_index("node", frames[1].entries));
2059 +                       dx_insert_block((frame - 1), hash2, newblock);
2060 +                       dxtrace(dx_show_index("node", frame->entries));
2061                         dxtrace(dx_show_index("node",
2062                                ((struct dx_node *) bh2->b_data)->entries));
2063                         err = ext4_handle_dirty_metadata(handle, inode, bh2);
2064                         if (err)
2065                                 goto journal_error;
2066                         brelse (bh2);
2067 +                       ext4_handle_dirty_metadata(handle, inode,
2068 +                                                  (frame - 1)->bh);
2069 +                       if (restart) {
2070 +                               ext4_handle_dirty_metadata(handle, inode,
2071 +                                                          frame->bh);
2072 +                               goto cleanup;
2073 +                       }
2074                 } else {
2075                         struct dx_root_info * info;
2076 -                       dxtrace(printk(KERN_DEBUG
2077 -                                      "Creating second level index...\n"));
2078 +
2079                         memcpy((char *) entries2, (char *) entries,
2080                                icount * sizeof(struct dx_entry));
2081                         dx_set_limit(entries2, dx_node_limit(dir));
2082 @@ -1778,32 +2158,60 @@ static int ext4_dx_add_entry(handle_t *h
2083                         dx_set_block(entries + 0, newblock);
2084                         info = dx_get_dx_info((struct ext4_dir_entry_2*)
2085                                         frames[0].bh->b_data);
2086 -                       info->indirect_levels = 1;
2087 +                       info->indirect_levels += 1;
2088 +                       dxtrace(printk(KERN_DEBUG
2089 +                                      "Creating %d level index...\n",
2090 +                                      info->indirect_levels));
2091 +                       ext4_handle_dirty_metadata(handle, inode, frame->bh);
2092 +                       ext4_handle_dirty_metadata(handle, inode, bh2);
2093 +                       brelse(bh2);
2094 +                       restart = 1;
2095 +                       goto cleanup;
2096 +               }
2097 +       } else if (!ext4_htree_dx_locked(lck)) {
2098 +               struct ext4_dir_lock_data *ld = ext4_htree_lock_data(lck);
2099  
2100 -                       /* Add new access path frame */
2101 -                       frame = frames + 1;
2102 -                       frame->at = at = at - entries + entries2;
2103 -                       frame->entries = entries = entries2;
2104 -                       frame->bh = bh2;
2105 -                       err = ext4_journal_get_write_access(handle,
2106 -                                                            frame->bh);
2107 -                       if (err)
2108 -                               goto journal_error;
2109 +               /* not well protected, require DX lock */
2110 +               ext4_htree_dx_need_lock(lck);
2111 +               at = frame > frames ? (frame - 1)->at : NULL;
2112 +
2113 +               /* NB: no risk of deadlock because it's just a try.
2114 +                *
2115 +                * NB: we check ld_count twice: the first time before
2116 +                * taking the DX lock, the second time after holding it.
2117 +                *
2118 +                * NB: we never free directory blocks so far, which means
2119 +                * the value returned by dx_get_count() should equal
2120 +                * ld->ld_count if nobody has split any DE-block under @at,
2121 +                * and ld->ld_at still points to a valid dx_entry. */
2122 +               if ((ld->ld_count != dx_get_count(entries)) ||
2123 +                   !ext4_htree_dx_lock_try(lck, at) ||
2124 +                   (ld->ld_count != dx_get_count(entries))) {
2125 +                       restart = 1;
2126 +                       goto cleanup;
2127                 }
2128 -               ext4_handle_dirty_metadata(handle, inode, frames[0].bh);
2129 +               /* OK, I've got DX lock and nothing changed */
2130 +               frame->at = ld->ld_at;
2131         }
2132 -       de = do_split(handle, dir, &bh, frame, &hinfo, &err);
2133 +       de = do_split(handle, dir, &bh, frames, frame, &hinfo, lck, &err);
2134         if (!de)
2135                 goto cleanup;
2136 +
2137         err = add_dirent_to_buf(handle, dentry, inode, de, bh);
2138         goto cleanup;
2139  
2140  journal_error:
2141         ext4_std_error(dir->i_sb, err);
2142  cleanup:
2143 +       ext4_htree_dx_unlock(lck);
2144 +       ext4_htree_de_unlock(lck);
2145         if (bh)
2146                 brelse(bh);
2147         dx_release(frames);
2148 +       /* @restart being true means the htree-path has changed; we need
2149 +        * to repeat dx_probe() to find a valid htree-path */
2150 +       if (restart && err == 0)
2151 +               goto again;
2152         return err;
2153  }
2154  
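
In ext4_dx_add_entry() above, when the leaf is full but the last-level DX block is not yet locked, the code escalates to a DX lock with a try-lock and validates the cached probe result by comparing ld_count with dx_get_count() both before and after the try-lock; any mismatch or try-lock failure sets restart and the whole operation is redone from dx_probe() under stronger locking. The pattern, isolated into a sketch (helper name hypothetical; on failure the caller's cleanup label drops whatever is held, as in the code above):

static int dx_recheck_and_lock_sketch(struct htree_lock *lck,
				      struct dx_entry *entries,
				      struct dx_entry *parent_at)
{
	struct ext4_dir_lock_data *ld = ext4_htree_lock_data(lck);

	ext4_htree_dx_need_lock(lck);	/* escalate ld_flags first        */

	if (ld->ld_count != dx_get_count(entries))
		return 0;		/* a split already happened       */
	if (!ext4_htree_dx_lock_try(lck, parent_at))
		return 0;		/* don't block: just restart      */
	if (ld->ld_count != dx_get_count(entries))
		return 0;		/* a split raced the try-lock     */

	return 1;			/* ld->ld_at is still trustworthy */
}
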
2155 @@ -1838,7 +2246,7 @@ int ext4_delete_entry(handle_t *handle,
2156                                         blocksize);
2157                         else
2158                                 de->inode = 0;
2159 -                       dir->i_version++;
2160 +                       inode_inc_iversion(dir);
2161                         BUFFER_TRACE(bh, "call ext4_handle_dirty_metadata");
2162                         ext4_handle_dirty_metadata(handle, dir, bh);
2163                         return 0;
2164 @@ -1882,7 +2290,7 @@ static void ext4_dec_count(handle_t *han
2165  static int ext4_add_nondir(handle_t *handle,
2166                 struct dentry *dentry, struct inode *inode)
2167  {
2168 -       int err = ext4_add_entry(handle, dentry, inode);
2169 +       int err = ext4_add_entry(handle, dentry, inode, NULL);
2170         if (!err) {
2171                 ext4_mark_inode_dirty(handle, inode);
2172                 d_instantiate(dentry, inode);
2173 @@ -2112,7 +2520,7 @@ retry:
2174                 goto out_stop;
2175         }
2176  
2177 -       err = ext4_add_entry(handle, dentry, inode);
2178 +       err = ext4_add_entry(handle, dentry, inode, NULL);
2179         if (err) {
2180                 clear_nlink(inode);
2181                 unlock_new_inode(inode);
2182 @@ -2381,7 +2789,7 @@ static int ext4_rmdir(struct inode *dir,
2183                 return PTR_ERR(handle);
2184  
2185         retval = -ENOENT;
2186 -       bh = ext4_find_entry(dir, &dentry->d_name, &de);
2187 +       bh = ext4_find_entry(dir, &dentry->d_name, &de, NULL);
2188         if (!bh)
2189                 goto end_rmdir;
2190  
2191 @@ -2443,7 +2851,7 @@ static int ext4_unlink(struct inode *dir
2192                 ext4_handle_sync(handle);
2193  
2194         retval = -ENOENT;
2195 -       bh = ext4_find_entry(dir, &dentry->d_name, &de);
2196 +       bh = ext4_find_entry(dir, &dentry->d_name, &de, NULL);
2197         if (!bh)
2198                 goto end_unlink;
2199  
2200 @@ -2567,7 +2975,7 @@ retry:
2201         ext4_inc_count(handle, inode);
2202         atomic_inc(&inode->i_count);
2203  
2204 -       err = ext4_add_entry(handle, dentry, inode);
2205 +       err = ext4_add_entry(handle, dentry, inode, NULL);
2206         if (!err) {
2207                 ext4_mark_inode_dirty(handle, inode);
2208                 d_instantiate(dentry, inode);
2209 @@ -2612,7 +3020,7 @@ static int ext4_rename(struct inode *old
2210         if (IS_DIRSYNC(old_dir) || IS_DIRSYNC(new_dir))
2211                 ext4_handle_sync(handle);
2212  
2213 -       old_bh = ext4_find_entry(old_dir, &old_dentry->d_name, &old_de);
2214 +       old_bh = ext4_find_entry(old_dir, &old_dentry->d_name, &old_de, NULL);
2215         /*
2216          *  Check for inode number is _not_ due to possible IO errors.
2217          *  We might rmdir the source, keep it as pwd of some process
2218 @@ -2625,7 +3033,7 @@ static int ext4_rename(struct inode *old
2219                 goto end_rename;
2220  
2221         new_inode = new_dentry->d_inode;
2222 -       new_bh = ext4_find_entry(new_dir, &new_dentry->d_name, &new_de);
2223 +       new_bh = ext4_find_entry(new_dir, &new_dentry->d_name, &new_de, NULL);
2224         if (new_bh) {
2225                 if (!new_inode) {
2226                         brelse(new_bh);
2227 @@ -2651,7 +3059,7 @@ static int ext4_rename(struct inode *old
2228                         goto end_rename;
2229         }
2230         if (!new_bh) {
2231 -               retval = ext4_add_entry(handle, new_dentry, old_inode);
2232 +               retval = ext4_add_entry(handle, new_dentry, old_inode, NULL);
2233                 if (retval)
2234                         goto end_rename;
2235         } else {
2236 @@ -2693,7 +3101,8 @@ static int ext4_rename(struct inode *old
2237                 struct buffer_head *old_bh2;
2238                 struct ext4_dir_entry_2 *old_de2;
2239  
2240 -               old_bh2 = ext4_find_entry(old_dir, &old_dentry->d_name, &old_de2);
2241 +               old_bh2 = ext4_find_entry(old_dir, &old_dentry->d_name,
2242 +                                         &old_de2, NULL);
2243                 if (old_bh2) {
2244                         retval = ext4_delete_entry(handle, old_dir,
2245                                                    old_de2, old_bh2);
2246 --- linux-2.6.32-131.6.1/fs/ext4/inode.c        2011-10-06 20:10:49.000000000 +0800
2247 +++ linux-2.6.32-131.6.1-pdo/fs/ext4/inode.c    2011-12-01 22:02:11.000000000 +0800
2248 @@ -5112,7 +5112,7 @@ struct inode *ext4_iget(struct super_blo
2249         if (EXT4_HAS_INCOMPAT_FEATURE(sb, EXT4_FEATURE_INCOMPAT_64BIT))
2250                 ei->i_file_acl |=
2251                         ((__u64)le16_to_cpu(raw_inode->i_file_acl_high)) << 32;
2252 -       inode->i_size = ext4_isize(raw_inode);
2253 +       inode->i_size = ext4_isize(sb, raw_inode);
2254         ei->i_disksize = inode->i_size;
2255  #ifdef CONFIG_QUOTA
2256         ei->i_reserved_quota = 0;
2257 --- linux-2.6.32-131.6.1/fs/ext4/Makefile       2011-10-06 20:10:49.000000000 +0800
2258 +++ linux-2.6.32-131.6.1-pdo/fs/ext4/Makefile   2011-10-06 12:21:30.000000000 +0800
2259 @@ -7,7 +7,7 @@ obj-$(CONFIG_EXT4_FS) += ext4.o
2260  ext4-y := balloc.o bitmap.o dir.o file.o fsync.o ialloc.o inode.o \
2261                 ioctl.o namei.o super.o symlink.o hash.o resize.o extents.o \
2262                 ext4_jbd2.o migrate.o mballoc.o block_validity.o move_extent.o \
2263 -               mmp.o dynlocks.o
2264 +               htree_lock.o mmp.o dynlocks.o
2265  
2266  ext4-$(CONFIG_EXT4_FS_XATTR)           += xattr.o xattr_user.o xattr_trusted.o
2267  ext4-$(CONFIG_EXT4_FS_POSIX_ACL)       += acl.o