[fs/lustre-release.git] / ldiskfs / kernel_patches / patches / rhel7.7 / ext4-pdirop.patch
1 Single directory performance is critical for HPC workloads. In a
2 typical use case an application creates a separate output file for
3 each node and task in a job. As nodes and tasks increase, hundreds
4 of thousands of files may be created in a single directory within
5 a short window of time.
6 Today, both filename lookup and file-system-modifying operations
7 (such as create and unlink) are protected by a single lock for
8 an entire ldiskfs directory. The PDO project removes this
9 bottleneck by introducing a parallel locking mechanism for entire
10 ldiskfs directories. This work enables multiple application
11 threads to perform lookup, create and unlink operations in parallel.
12
13 This patch contains:
14  - pdirops support for ldiskfs
15  - integrate with osd-ldiskfs
16
17 Index: linux-3.10.0-229.1.2.fc21.x86_64/include/linux/htree_lock.h
18 ===================================================================
19 --- /dev/null
20 +++ linux-3.10.0-229.1.2.fc21.x86_64/include/linux/htree_lock.h
21 @@ -0,0 +1,187 @@
22 +/*
23 + * include/linux/htree_lock.h
24 + *
25 + * Copyright (c) 2011, 2012, Intel Corporation.
26 + *
27 + * Author: Liang Zhen <liang@whamcloud.com>
28 + */
29 +
30 +/*
31 + * htree lock
32 + *
33 + * htree_lock is an advanced lock: it supports five lock modes (the concept is
34 + * taken from DLM) and it is a sleeping lock.
35 + *
36 + * The most common use case is:
37 + * - create a htree_lock_head for the data
38 + * - each thread (contender) creates its own htree_lock
39 + * - a contender calls htree_lock(lock_node, mode) to protect the data and
40 + *   calls htree_unlock to release the lock
41 + *
42 + * There is also a more complex, advanced use case: a user can take a PW/PR
43 + * lock on a particular key; this is mostly used while the user is holding a
44 + * shared lock on the htree (CW, CR)
45 + *
46 + * htree_lock(lock_node, HTREE_LOCK_CR); lock the htree with CR
47 + * htree_node_lock(lock_node, HTREE_LOCK_PR, key...); lock @key with PR
48 + * ...
49 + * htree_node_unlock(lock_node); unlock the key
50 + *
51 + * Another tip: we can have N levels of this kind of key; all we need to
52 + * do is specify N levels while creating the htree_lock_head, then we can
53 + * lock/unlock a specific level with:
54 + * htree_node_lock(lock_node, mode1, key1, level1...);
55 + * do something;
56 + * htree_node_lock(lock_node, mode1, key2, level2...);
57 + * do something;
58 + * htree_node_unlock(lock_node, level2);
59 + * htree_node_unlock(lock_node, level1);
60 + *
61 + * NB: with multiple levels, be careful about the locking order to avoid deadlock
62 + */
63 +
64 +#ifndef _LINUX_HTREE_LOCK_H
65 +#define _LINUX_HTREE_LOCK_H
66 +
67 +#include <linux/list.h>
68 +#include <linux/spinlock.h>
69 +#include <linux/sched.h>
70 +
71 +/*
72 + * Lock Modes
73 + * more details can be found here:
74 + * http://en.wikipedia.org/wiki/Distributed_lock_manager
75 + */
76 +typedef enum {
77 +       HTREE_LOCK_EX   = 0, /* exclusive lock: incompatible with all others */
78 +       HTREE_LOCK_PW,       /* protected write: allows only CR users */
79 +       HTREE_LOCK_PR,       /* protected read: allows PR, CR users */
80 +       HTREE_LOCK_CW,       /* concurrent write: allows CR, CW users */
81 +       HTREE_LOCK_CR,       /* concurrent read: allows all but EX users */
82 +       HTREE_LOCK_MAX,      /* number of lock modes */
83 +} htree_lock_mode_t;
84 +
85 +#define HTREE_LOCK_NL          HTREE_LOCK_MAX
86 +#define HTREE_LOCK_INVAL       0xdead10c
87 +
88 +enum {
89 +       HTREE_HBITS_MIN         = 2,
90 +       HTREE_HBITS_DEF         = 14,
91 +       HTREE_HBITS_MAX         = 32,
92 +};
93 +
94 +enum {
95 +       HTREE_EVENT_DISABLE     = (0),
96 +       HTREE_EVENT_RD          = (1 << HTREE_LOCK_PR),
97 +       HTREE_EVENT_WR          = (1 << HTREE_LOCK_PW),
98 +       HTREE_EVENT_RDWR        = (HTREE_EVENT_RD | HTREE_EVENT_WR),
99 +};
100 +
101 +struct htree_lock;
102 +
103 +typedef void (*htree_event_cb_t)(void *target, void *event);
104 +
105 +struct htree_lock_child {
106 +       struct list_head        lc_list;        /* granted list */
107 +       htree_event_cb_t        lc_callback;    /* event callback */
108 +       unsigned                lc_events;      /* event types */
109 +};
110 +
111 +struct htree_lock_head {
112 +       unsigned long           lh_lock;        /* bits lock */
113 +       /* blocked lock list (htree_lock) */
114 +       struct list_head        lh_blocked_list;
115 +       /* # key levels */
116 +       u16                     lh_depth;
117 +       /* hash bits for key and limit number of locks */
118 +       u16                     lh_hbits;
119 +       /* counters for blocked locks */
120 +       u16                     lh_nblocked[HTREE_LOCK_MAX];
121 +       /* counters for granted locks */
122 +       u16                     lh_ngranted[HTREE_LOCK_MAX];
123 +       /* private data */
124 +       void                    *lh_private;
125 +       /* array of children locks */
126 +       struct htree_lock_child lh_children[0];
127 +};
128 +
129 +/* htree_lock_node_t is child-lock for a specific key (ln_value) */
130 +struct htree_lock_node {
131 +       htree_lock_mode_t       ln_mode;
132 +       /* major hash key */
133 +       u16                     ln_major_key;
134 +       /* minor hash key */
135 +       u16                     ln_minor_key;
136 +       struct list_head        ln_major_list;
137 +       struct list_head        ln_minor_list;
138 +       /* alive list, all locks (granted, blocked, listening) are on it */
139 +       struct list_head        ln_alive_list;
140 +       /* blocked list */
141 +       struct list_head        ln_blocked_list;
142 +       /* granted list */
143 +       struct list_head        ln_granted_list;
144 +       void                    *ln_ev_target;
145 +};
146 +
147 +struct htree_lock {
148 +       struct task_struct      *lk_task;
149 +       struct htree_lock_head  *lk_head;
150 +       void                    *lk_private;
151 +       unsigned                lk_depth;
152 +       htree_lock_mode_t       lk_mode;
153 +       struct list_head        lk_blocked_list;
154 +       struct htree_lock_node  lk_nodes[0];
155 +};
156 +
157 +/* create a lock head, which stands for a resource */
158 +struct htree_lock_head *htree_lock_head_alloc(unsigned depth,
159 +                                             unsigned hbits, unsigned priv);
160 +/* free a lock head */
161 +void htree_lock_head_free(struct htree_lock_head *lhead);
162 +/* register event callback for child lock at level @depth */
163 +void htree_lock_event_attach(struct htree_lock_head *lhead, unsigned depth,
164 +                            unsigned events, htree_event_cb_t callback);
165 +/* create a lock handle, which stands for a thread */
166 +struct htree_lock *htree_lock_alloc(unsigned depth, unsigned pbytes);
167 +/* free a lock handle */
168 +void htree_lock_free(struct htree_lock *lck);
169 +/* lock htree; when @wait is false, 0 is returned if the lock can't
170 + * be granted immediately */
171 +int htree_lock_try(struct htree_lock *lck, struct htree_lock_head *lhead,
172 +                  htree_lock_mode_t mode, int wait);
173 +/* unlock htree */
174 +void htree_unlock(struct htree_lock *lck);
175 +/* unlock and relock htree with @new_mode */
176 +int htree_change_lock_try(struct htree_lock *lck,
177 +                         htree_lock_mode_t new_mode, int wait);
178 +void htree_change_mode(struct htree_lock *lck, htree_lock_mode_t mode);
179 +/* acquire the child lock (key) of the htree at level @dep; @event will be sent
180 + * to all listeners on this @key when the lock is granted */
181 +int htree_node_lock_try(struct htree_lock *lck, htree_lock_mode_t mode,
182 +                       u32 key, unsigned dep, int wait, void *event);
183 +/* release the child lock at level @dep; this lock will listen on its key
184 + * if @event isn't NULL, and event_cb will be called against @lck when granting
185 + * any other lock at level @dep with the same key */
186 +void htree_node_unlock(struct htree_lock *lck, unsigned dep, void *event);
187 +/* stop listening on child lock at level @dep */
188 +void htree_node_stop_listen(struct htree_lock *lck, unsigned dep);
189 +/* for debug */
190 +void htree_lock_stat_print(int depth);
191 +void htree_lock_stat_reset(void);
192 +
193 +#define htree_lock(lck, lh, mode)      htree_lock_try(lck, lh, mode, 1)
194 +#define htree_change_lock(lck, mode)   htree_change_lock_try(lck, mode, 1)
195 +
196 +#define htree_lock_mode(lck)           ((lck)->lk_mode)
197 +
198 +#define htree_node_lock(lck, mode, key, dep)   \
199 +       htree_node_lock_try(lck, mode, key, dep, 1, NULL)
200 +/* this is only safe in thread context of lock owner */
201 +#define htree_node_is_granted(lck, dep)                \
202 +       ((lck)->lk_nodes[dep].ln_mode != HTREE_LOCK_INVAL && \
203 +        (lck)->lk_nodes[dep].ln_mode != HTREE_LOCK_NL)
204 +/* this is only safe in thread context of lock owner */
205 +#define htree_node_is_listening(lck, dep)      \
206 +       ((lck)->lk_nodes[dep].ln_mode == HTREE_LOCK_NL)
207 +
208 +#endif
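
The block comment at the top of this header describes the common calling pattern only in prose. The following is a minimal usage sketch, not part of the patch, with hypothetical names (my_resource_init, my_worker): it shows how a caller might combine a shared tree lock with an exclusive per-key child lock, assuming one htree_lock_head per shared resource and one child-lock level.

#include <linux/errno.h>
#include <linux/htree_lock.h>

#define MY_DEPTH	1			/* one level of child locks */

static struct htree_lock_head *my_lhead;	/* one head per shared resource */

static int my_resource_init(void)
{
	my_lhead = htree_lock_head_alloc(MY_DEPTH, HTREE_HBITS_DEF, 0);
	return my_lhead != NULL ? 0 : -ENOMEM;
}

static void my_worker(u32 key)
{
	/* one lock handle per thread (contender) */
	struct htree_lock *lck = htree_lock_alloc(MY_DEPTH, 0);

	if (lck == NULL)
		return;

	/* shared (CW) lock on the whole tree ... */
	htree_lock(lck, my_lhead, HTREE_LOCK_CW);
	/* ... plus an exclusive (PW) child lock on one key at level 0 */
	htree_node_lock(lck, HTREE_LOCK_PW, key, 0);

	/* modify the part of the data identified by @key here */

	htree_node_unlock(lck, 0, NULL);
	htree_unlock(lck);	/* also releases any remaining child locks */
	htree_lock_free(lck);
}

Two threads running my_worker() with different keys proceed in parallel (CW is compatible with CW, and their PW child locks hash to different keys), while a thread asking for EX on the same head blocks until both finish.
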
209 Index: linux-3.10.0-229.1.2.fc21.x86_64/fs/ext4/htree_lock.c
210 ===================================================================
211 --- /dev/null
212 +++ linux-3.10.0-229.1.2.fc21.x86_64/fs/ext4/htree_lock.c
213 @@ -0,0 +1,895 @@
214 +/*
215 + * fs/ext4/htree_lock.c
216 + *
217 + * Copyright (c) 2011, 2012, Intel Corporation.
218 + *
219 + * Author: Liang Zhen <liang@whamcloud.com>
220 + */
221 +#include <linux/jbd2.h>
222 +#include <linux/hash.h>
223 +#include <linux/module.h>
224 +#include <linux/htree_lock.h>
225 +
226 +enum {
227 +       HTREE_LOCK_BIT_EX       = (1 << HTREE_LOCK_EX),
228 +       HTREE_LOCK_BIT_PW       = (1 << HTREE_LOCK_PW),
229 +       HTREE_LOCK_BIT_PR       = (1 << HTREE_LOCK_PR),
230 +       HTREE_LOCK_BIT_CW       = (1 << HTREE_LOCK_CW),
231 +       HTREE_LOCK_BIT_CR       = (1 << HTREE_LOCK_CR),
232 +};
233 +
234 +enum {
235 +       HTREE_LOCK_COMPAT_EX    = 0,
236 +       HTREE_LOCK_COMPAT_PW    = HTREE_LOCK_COMPAT_EX | HTREE_LOCK_BIT_CR,
237 +       HTREE_LOCK_COMPAT_PR    = HTREE_LOCK_COMPAT_PW | HTREE_LOCK_BIT_PR,
238 +       HTREE_LOCK_COMPAT_CW    = HTREE_LOCK_COMPAT_PW | HTREE_LOCK_BIT_CW,
239 +       HTREE_LOCK_COMPAT_CR    = HTREE_LOCK_COMPAT_CW | HTREE_LOCK_BIT_PR |
240 +                                 HTREE_LOCK_BIT_PW,
241 +};
242 +
243 +static int htree_lock_compat[] = {
244 +       [HTREE_LOCK_EX]         HTREE_LOCK_COMPAT_EX,
245 +       [HTREE_LOCK_PW]         HTREE_LOCK_COMPAT_PW,
246 +       [HTREE_LOCK_PR]         HTREE_LOCK_COMPAT_PR,
247 +       [HTREE_LOCK_CW]         HTREE_LOCK_COMPAT_CW,
248 +       [HTREE_LOCK_CR]         HTREE_LOCK_COMPAT_CR,
249 +};
250 +
251 +/* max allowed htree-lock depth.
252 + * We only need depth=3 for ext4, although users can specify a higher value. */
253 +#define HTREE_LOCK_DEP_MAX     16
254 +
255 +#ifdef HTREE_LOCK_DEBUG
256 +
257 +static char *hl_name[] = {
258 +       [HTREE_LOCK_EX]         "EX",
259 +       [HTREE_LOCK_PW]         "PW",
260 +       [HTREE_LOCK_PR]         "PR",
261 +       [HTREE_LOCK_CW]         "CW",
262 +       [HTREE_LOCK_CR]         "CR",
263 +};
264 +
265 +/* lock stats */
266 +struct htree_lock_node_stats {
267 +       unsigned long long      blocked[HTREE_LOCK_MAX];
268 +       unsigned long long      granted[HTREE_LOCK_MAX];
269 +       unsigned long long      retried[HTREE_LOCK_MAX];
270 +       unsigned long long      events;
271 +};
272 +
273 +struct htree_lock_stats {
274 +       struct htree_lock_node_stats    nodes[HTREE_LOCK_DEP_MAX];
275 +       unsigned long long      granted[HTREE_LOCK_MAX];
276 +       unsigned long long      blocked[HTREE_LOCK_MAX];
277 +};
278 +
279 +static struct htree_lock_stats hl_stats;
280 +
281 +void htree_lock_stat_reset(void)
282 +{
283 +       memset(&hl_stats, 0, sizeof(hl_stats));
284 +}
285 +
286 +void htree_lock_stat_print(int depth)
287 +{
288 +       int     i;
289 +       int     j;
290 +
291 +       printk(KERN_DEBUG "HTREE LOCK STATS:\n");
292 +       for (i = 0; i < HTREE_LOCK_MAX; i++) {
293 +               printk(KERN_DEBUG "[%s]: G [%10llu], B [%10llu]\n",
294 +                      hl_name[i], hl_stats.granted[i], hl_stats.blocked[i]);
295 +       }
296 +       for (i = 0; i < depth; i++) {
297 +               printk(KERN_DEBUG "HTREE CHILD [%d] STATS:\n", i);
298 +               for (j = 0; j < HTREE_LOCK_MAX; j++) {
299 +                       printk(KERN_DEBUG
300 +                               "[%s]: G [%10llu], B [%10llu], R [%10llu]\n",
301 +                               hl_name[j], hl_stats.nodes[i].granted[j],
302 +                               hl_stats.nodes[i].blocked[j],
303 +                               hl_stats.nodes[i].retried[j]);
304 +               }
305 +       }
306 +}
307 +
308 +#define lk_grant_inc(m)       do { hl_stats.granted[m]++; } while (0)
309 +#define lk_block_inc(m)       do { hl_stats.blocked[m]++; } while (0)
310 +#define ln_grant_inc(d, m)    do { hl_stats.nodes[d].granted[m]++; } while (0)
311 +#define ln_block_inc(d, m)    do { hl_stats.nodes[d].blocked[m]++; } while (0)
312 +#define ln_retry_inc(d, m)    do { hl_stats.nodes[d].retried[m]++; } while (0)
313 +#define ln_event_inc(d)       do { hl_stats.nodes[d].events++; } while (0)
314 +
315 +#else /* !DEBUG */
316 +
317 +void htree_lock_stat_reset(void) {}
318 +void htree_lock_stat_print(int depth) {}
319 +
320 +#define lk_grant_inc(m)              do {} while (0)
321 +#define lk_block_inc(m)              do {} while (0)
322 +#define ln_grant_inc(d, m)    do {} while (0)
323 +#define ln_block_inc(d, m)    do {} while (0)
324 +#define ln_retry_inc(d, m)    do {} while (0)
325 +#define ln_event_inc(d)              do {} while (0)
326 +
327 +#endif /* DEBUG */
328 +
329 +EXPORT_SYMBOL(htree_lock_stat_reset);
330 +EXPORT_SYMBOL(htree_lock_stat_print);
331 +
332 +#define HTREE_DEP_ROOT           (-1)
333 +
334 +#define htree_spin_lock(lhead, dep)                            \
335 +       bit_spin_lock((dep) + 1, &(lhead)->lh_lock)
336 +#define htree_spin_unlock(lhead, dep)                          \
337 +       bit_spin_unlock((dep) + 1, &(lhead)->lh_lock)
338 +
339 +#define htree_key_event_ignore(child, ln)                      \
340 +       (!((child)->lc_events & (1 << (ln)->ln_mode)))
341 +
342 +static int
343 +htree_key_list_empty(struct htree_lock_node *ln)
344 +{
345 +       return list_empty(&ln->ln_major_list) && list_empty(&ln->ln_minor_list);
346 +}
347 +
348 +static void
349 +htree_key_list_del_init(struct htree_lock_node *ln)
350 +{
351 +       struct htree_lock_node *tmp = NULL;
352 +
353 +       if (!list_empty(&ln->ln_minor_list)) {
354 +               tmp = list_entry(ln->ln_minor_list.next,
355 +                                struct htree_lock_node, ln_minor_list);
356 +               list_del_init(&ln->ln_minor_list);
357 +       }
358 +
359 +       if (list_empty(&ln->ln_major_list))
360 +               return;
361 +
362 +       if (tmp == NULL) { /* not on minor key list */
363 +               list_del_init(&ln->ln_major_list);
364 +       } else {
365 +               BUG_ON(!list_empty(&tmp->ln_major_list));
366 +               list_replace_init(&ln->ln_major_list, &tmp->ln_major_list);
367 +       }
368 +}
369 +
370 +static void
371 +htree_key_list_replace_init(struct htree_lock_node *old,
372 +                           struct htree_lock_node *new)
373 +{
374 +       if (!list_empty(&old->ln_major_list))
375 +               list_replace_init(&old->ln_major_list, &new->ln_major_list);
376 +
377 +       if (!list_empty(&old->ln_minor_list))
378 +               list_replace_init(&old->ln_minor_list, &new->ln_minor_list);
379 +}
380 +
381 +static void
382 +htree_key_event_enqueue(struct htree_lock_child *child,
383 +                       struct htree_lock_node *ln, int dep, void *event)
384 +{
385 +       struct htree_lock_node *tmp;
386 +
387 +       /* NB: ALWAYS called holding lhead::lh_lock(dep) */
388 +       BUG_ON(ln->ln_mode == HTREE_LOCK_NL);
389 +       if (event == NULL || htree_key_event_ignore(child, ln))
390 +               return;
391 +
392 +       /* shouldn't be a very long list */
393 +       list_for_each_entry(tmp, &ln->ln_alive_list, ln_alive_list) {
394 +               if (tmp->ln_mode == HTREE_LOCK_NL) {
395 +                       ln_event_inc(dep);
396 +                       if (child->lc_callback != NULL)
397 +                               child->lc_callback(tmp->ln_ev_target, event);
398 +               }
399 +       }
400 +}
401 +
402 +static int
403 +htree_node_lock_enqueue(struct htree_lock *newlk, struct htree_lock *curlk,
404 +                       unsigned dep, int wait, void *event)
405 +{
406 +       struct htree_lock_child *child = &newlk->lk_head->lh_children[dep];
407 +       struct htree_lock_node *newln = &newlk->lk_nodes[dep];
408 +       struct htree_lock_node *curln = &curlk->lk_nodes[dep];
409 +
410 +       /* NB: ALWAYS called holding lhead::lh_lock(dep) */
411 +       /* NB: we only expect PR/PW lock modes here; only these two modes are
412 +        * allowed for htree_node_lock (asserted in htree_node_lock_internal),
413 +        * NL is only used for listeners, users can't directly request NL mode */
414 +       if ((curln->ln_mode == HTREE_LOCK_NL) ||
415 +           (curln->ln_mode != HTREE_LOCK_PW &&
416 +            newln->ln_mode != HTREE_LOCK_PW)) {
417 +               /* no conflict, attach it on granted list of @curlk */
418 +               if (curln->ln_mode != HTREE_LOCK_NL) {
419 +                       list_add(&newln->ln_granted_list,
420 +                                &curln->ln_granted_list);
421 +               } else {
422 +                       /* replace key owner */
423 +                       htree_key_list_replace_init(curln, newln);
424 +               }
425 +
426 +               list_add(&newln->ln_alive_list, &curln->ln_alive_list);
427 +               htree_key_event_enqueue(child, newln, dep, event);
428 +               ln_grant_inc(dep, newln->ln_mode);
429 +               return 1; /* still hold lh_lock */
430 +       }
431 +
432 +       if (!wait) { /* can't grant and don't want to wait */
433 +               ln_retry_inc(dep, newln->ln_mode);
434 +               newln->ln_mode = HTREE_LOCK_INVAL;
435 +               return -1; /* don't wait and just return -1 */
436 +       }
437 +
438 +       newlk->lk_task = current;
439 +       /* conflict, attach it on blocked list of curlk */
440 +       list_add_tail(&newln->ln_blocked_list, &curln->ln_blocked_list);
441 +       list_add(&newln->ln_alive_list, &curln->ln_alive_list);
442 +       ln_block_inc(dep, newln->ln_mode);
443 +
444 +retry:
445 +       set_current_state(TASK_UNINTERRUPTIBLE);
446 +       htree_spin_unlock(newlk->lk_head, dep);
447 +       /* wait to be given the lock */
448 +       if (newlk->lk_task != NULL)
449 +               schedule();
450 +       /* granted, no doubt, wake up will set me RUNNING */
451 +       htree_spin_lock(newlk->lk_head, dep);
452 +       /* Need to check the lock was really granted; the thread may have been woken up spuriously */
453 +       if (list_empty(&newln->ln_granted_list) && htree_key_list_empty(newln))
454 +               goto retry;
455 +       if (event && !htree_key_event_ignore(child, newln))
456 +               htree_key_event_enqueue(child, newln, dep, event);
457 +
458 +       return 1; /* still hold lh_lock */
459 +}
460 +
461 +/*
462 + * get PR/PW access to a particular tree-node according to @dep and @key,
463 + * it will return -1 if @wait is false and the lock can't be granted immediately.
464 + * All listeners (HTREE_LOCK_NL) on @dep and with the same @key will get
465 + * @event if it's not NULL.
466 + * NB: ALWAYS called holding lhead::lh_lock
467 + */
468 +static int
469 +htree_node_lock_internal(struct htree_lock_head *lhead, struct htree_lock *lck,
470 +                        htree_lock_mode_t mode, u32 key, unsigned dep,
471 +                        int wait, void *event)
472 +{
473 +       LIST_HEAD(list);
474 +       struct htree_lock       *tmp;
475 +       struct htree_lock       *tmp2;
476 +       u16                     major;
477 +       u16                     minor;
478 +       u8                      reverse;
479 +       u8                      ma_bits;
480 +       u8                      mi_bits;
481 +
482 +       BUG_ON(mode != HTREE_LOCK_PW && mode != HTREE_LOCK_PR);
483 +       BUG_ON(htree_node_is_granted(lck, dep));
484 +
485 +       key = hash_long(key, lhead->lh_hbits);
486 +
487 +       mi_bits = lhead->lh_hbits >> 1;
488 +       ma_bits = lhead->lh_hbits - mi_bits;
489 +
490 +       lck->lk_nodes[dep].ln_major_key = major = key & ((1U << ma_bits) - 1);
491 +       lck->lk_nodes[dep].ln_minor_key = minor = key >> ma_bits;
492 +       lck->lk_nodes[dep].ln_mode = mode;
493 +
494 +       /*
495 +        * The major key list is an ordered list, so searches are started
496 +        * at the end of the list that is numerically closer to major_key,
497 +        * so at most half of the list will be walked (for well-distributed
498 +        * keys). The list traversal aborts early if the expected key
499 +        * location is passed.
500 +        */
501 +       reverse = (major >= (1 << (ma_bits - 1)));
502 +
503 +       if (reverse) {
504 +               list_for_each_entry_reverse(tmp,
505 +                                       &lhead->lh_children[dep].lc_list,
506 +                                       lk_nodes[dep].ln_major_list) {
507 +                       if (tmp->lk_nodes[dep].ln_major_key == major) {
508 +                               goto search_minor;
509 +
510 +                       } else if (tmp->lk_nodes[dep].ln_major_key < major) {
511 +                               /* attach _after_ @tmp */
512 +                               list_add(&lck->lk_nodes[dep].ln_major_list,
513 +                                        &tmp->lk_nodes[dep].ln_major_list);
514 +                               goto out_grant_major;
515 +                       }
516 +               }
517 +
518 +               list_add(&lck->lk_nodes[dep].ln_major_list,
519 +                        &lhead->lh_children[dep].lc_list);
520 +               goto out_grant_major;
521 +
522 +       } else {
523 +               list_for_each_entry(tmp, &lhead->lh_children[dep].lc_list,
524 +                                   lk_nodes[dep].ln_major_list) {
525 +                       if (tmp->lk_nodes[dep].ln_major_key == major) {
526 +                               goto search_minor;
527 +
528 +                       } else if (tmp->lk_nodes[dep].ln_major_key > major) {
529 +                               /* insert _before_ @tmp */
530 +                               list_add_tail(&lck->lk_nodes[dep].ln_major_list,
531 +                                       &tmp->lk_nodes[dep].ln_major_list);
532 +                               goto out_grant_major;
533 +                       }
534 +               }
535 +
536 +               list_add_tail(&lck->lk_nodes[dep].ln_major_list,
537 +                             &lhead->lh_children[dep].lc_list);
538 +               goto out_grant_major;
539 +       }
540 +
541 + search_minor:
542 +       /*
543 +        * NB: minor_key list doesn't have a "head", @list is just a
544 +        * temporary stub for helping list searching, make sure it's removed
545 +        * after searching.
546 +        * minor_key list is an ordered list too.
547 +        */
548 +       list_add_tail(&list, &tmp->lk_nodes[dep].ln_minor_list);
549 +
550 +       reverse = (minor >= (1 << (mi_bits - 1)));
551 +
552 +       if (reverse) {
553 +               list_for_each_entry_reverse(tmp2, &list,
554 +                                           lk_nodes[dep].ln_minor_list) {
555 +                       if (tmp2->lk_nodes[dep].ln_minor_key == minor) {
556 +                               goto out_enqueue;
557 +
558 +                       } else if (tmp2->lk_nodes[dep].ln_minor_key < minor) {
559 +                               /* attach _after_ @tmp2 */
560 +                               list_add(&lck->lk_nodes[dep].ln_minor_list,
561 +                                        &tmp2->lk_nodes[dep].ln_minor_list);
562 +                               goto out_grant_minor;
563 +                       }
564 +               }
565 +
566 +               list_add(&lck->lk_nodes[dep].ln_minor_list, &list);
567 +
568 +       } else {
569 +               list_for_each_entry(tmp2, &list,
570 +                                   lk_nodes[dep].ln_minor_list) {
571 +                       if (tmp2->lk_nodes[dep].ln_minor_key == minor) {
572 +                               goto out_enqueue;
573 +
574 +                       } else if (tmp2->lk_nodes[dep].ln_minor_key > minor) {
575 +                               /* insert _before_ @tmp2 */
576 +                               list_add_tail(&lck->lk_nodes[dep].ln_minor_list,
577 +                                       &tmp2->lk_nodes[dep].ln_minor_list);
578 +                               goto out_grant_minor;
579 +                       }
580 +               }
581 +
582 +               list_add_tail(&lck->lk_nodes[dep].ln_minor_list, &list);
583 +       }
584 +
585 + out_grant_minor:
586 +       if (list.next == &lck->lk_nodes[dep].ln_minor_list) {
587 +               /* new lock @lck is the first one on minor_key list, which
588 +                * means it has the smallest minor_key and it should
589 +                * replace @tmp as minor_key owner */
590 +               list_replace_init(&tmp->lk_nodes[dep].ln_major_list,
591 +                                 &lck->lk_nodes[dep].ln_major_list);
592 +       }
593 +       /* remove the temporary head */
594 +       list_del(&list);
595 +
596 + out_grant_major:
597 +       ln_grant_inc(dep, lck->lk_nodes[dep].ln_mode);
598 +       return 1; /* granted with holding lh_lock */
599 +
600 + out_enqueue:
601 +       list_del(&list); /* remove temporary head */
602 +       return htree_node_lock_enqueue(lck, tmp2, dep, wait, event);
603 +}
604 +
605 +/*
606 + * release the key of @lck at level @dep, and grant any blocked locks.
607 + * the caller will still listen on @key if @event is not NULL, which means
608 + * the caller can see an event (via event_cb) when any other lock with
609 + * the same key at level @dep is granted.
610 + * NB: ALWAYS called holding lhead::lh_lock
611 + * NB: listener will not block anyone because listening mode is HTREE_LOCK_NL
612 + */
613 +static void
614 +htree_node_unlock_internal(struct htree_lock_head *lhead,
615 +                          struct htree_lock *curlk, unsigned dep, void *event)
616 +{
617 +       struct htree_lock_node  *curln = &curlk->lk_nodes[dep];
618 +       struct htree_lock       *grtlk = NULL;
619 +       struct htree_lock_node  *grtln;
620 +       struct htree_lock       *poslk;
621 +       struct htree_lock       *tmplk;
622 +
623 +       if (!htree_node_is_granted(curlk, dep))
624 +               return;
625 +
626 +       if (!list_empty(&curln->ln_granted_list)) {
627 +               /* there is another granted lock */
628 +               grtlk = list_entry(curln->ln_granted_list.next,
629 +                                  struct htree_lock,
630 +                                  lk_nodes[dep].ln_granted_list);
631 +               list_del_init(&curln->ln_granted_list);
632 +       }
633 +
634 +       if (grtlk == NULL && !list_empty(&curln->ln_blocked_list)) {
635 +               /*
636 +                * @curlk is the only granted lock, so we confirmed:
637 +                * a) curln is key owner (attached on major/minor_list),
638 +                *    so if there is any blocked lock, it should be attached
639 +                *    on curln->ln_blocked_list
640 +                * b) we always can grant the first blocked lock
641 +                */
642 +               grtlk = list_entry(curln->ln_blocked_list.next,
643 +                                  struct htree_lock,
644 +                                  lk_nodes[dep].ln_blocked_list);
645 +               BUG_ON(grtlk->lk_task == NULL);
646 +               wake_up_process(grtlk->lk_task);
647 +       }
648 +
649 +       if (event != NULL &&
650 +           lhead->lh_children[dep].lc_events != HTREE_EVENT_DISABLE) {
651 +               curln->ln_ev_target = event;
652 +               curln->ln_mode = HTREE_LOCK_NL; /* listen! */
653 +       } else {
654 +               curln->ln_mode = HTREE_LOCK_INVAL;
655 +       }
656 +
657 +       if (grtlk == NULL) { /* I must be the only one locking this key */
658 +               struct htree_lock_node *tmpln;
659 +
660 +               BUG_ON(htree_key_list_empty(curln));
661 +
662 +               if (curln->ln_mode == HTREE_LOCK_NL) /* listening */
663 +                       return;
664 +
665 +               /* not listening */
666 +               if (list_empty(&curln->ln_alive_list)) { /* no more listener */
667 +                       htree_key_list_del_init(curln);
668 +                       return;
669 +               }
670 +
671 +               tmpln = list_entry(curln->ln_alive_list.next,
672 +                                  struct htree_lock_node, ln_alive_list);
673 +
674 +               BUG_ON(tmpln->ln_mode != HTREE_LOCK_NL);
675 +
676 +               htree_key_list_replace_init(curln, tmpln);
677 +               list_del_init(&curln->ln_alive_list);
678 +
679 +               return;
680 +       }
681 +
682 +       /* have a granted lock */
683 +       grtln = &grtlk->lk_nodes[dep];
684 +       if (!list_empty(&curln->ln_blocked_list)) {
685 +               /* only key owner can be on both lists */
686 +               BUG_ON(htree_key_list_empty(curln));
687 +
688 +               if (list_empty(&grtln->ln_blocked_list)) {
689 +                       list_add(&grtln->ln_blocked_list,
690 +                                &curln->ln_blocked_list);
691 +               }
692 +               list_del_init(&curln->ln_blocked_list);
693 +       }
694 +       /*
695 +        * NB: this is the tricky part:
696 +        * We have only two modes for child-lock (PR and PW), also,
697 +        * only owner of the key (attached on major/minor_list) can be on
698 +        * both blocked_list and granted_list, so @grtlk must be one
699 +        * of these two cases:
700 +        *
701 +        * a) @grtlk is taken from granted_list, which means we've granted
702 +        *    more than one lock so @grtlk has to be PR, the first blocked
703 +        *    lock must be PW and we can't grant it at all.
704 +        *    So even if @grtlk is not the owner of the key (empty blocked_list),
705 +        *    we don't care because we can't grant any lock.
706 +        * b) we just granted a new lock taken from the head of the blocked
707 +        *    list, and it should be the first granted lock, and it should
708 +        *    be the first one linked on blocked_list.
709 +        *
710 +        * Either way, we can get correct result by iterating blocked_list
711 +        * of @grtlk, and don't have to bother on how to find out
712 +        * owner of current key.
713 +        */
714 +       list_for_each_entry_safe(poslk, tmplk, &grtln->ln_blocked_list,
715 +                                lk_nodes[dep].ln_blocked_list) {
716 +               if (grtlk->lk_nodes[dep].ln_mode == HTREE_LOCK_PW ||
717 +                   poslk->lk_nodes[dep].ln_mode == HTREE_LOCK_PW)
718 +                       break;
719 +               /* grant all readers */
720 +               list_del_init(&poslk->lk_nodes[dep].ln_blocked_list);
721 +               list_add(&poslk->lk_nodes[dep].ln_granted_list,
722 +                        &grtln->ln_granted_list);
723 +
724 +               BUG_ON(poslk->lk_task == NULL);
725 +               wake_up_process(poslk->lk_task);
726 +       }
727 +
728 +       /* if @curln is the owner of this key, replace it with @grtln */
729 +       if (!htree_key_list_empty(curln))
730 +               htree_key_list_replace_init(curln, grtln);
731 +
732 +       if (curln->ln_mode == HTREE_LOCK_INVAL)
733 +               list_del_init(&curln->ln_alive_list);
734 +}
735 +
736 +/*
737 + * it's just a wrapper of htree_node_lock_internal; it returns 1 when granted
738 + * and 0 only if @wait is false and the lock can't be granted immediately
739 + */
740 +int
741 +htree_node_lock_try(struct htree_lock *lck, htree_lock_mode_t mode,
742 +                   u32 key, unsigned dep, int wait, void *event)
743 +{
744 +       struct htree_lock_head *lhead = lck->lk_head;
745 +       int rc;
746 +
747 +       BUG_ON(dep >= lck->lk_depth);
748 +       BUG_ON(lck->lk_mode == HTREE_LOCK_INVAL);
749 +
750 +       htree_spin_lock(lhead, dep);
751 +       rc = htree_node_lock_internal(lhead, lck, mode, key, dep, wait, event);
752 +       if (rc != 0)
753 +               htree_spin_unlock(lhead, dep);
754 +       return rc >= 0;
755 +}
756 +EXPORT_SYMBOL(htree_node_lock_try);
757 +
758 +/* it's wrapper of htree_node_unlock_internal */
759 +void
760 +htree_node_unlock(struct htree_lock *lck, unsigned dep, void *event)
761 +{
762 +       struct htree_lock_head *lhead = lck->lk_head;
763 +
764 +       BUG_ON(dep >= lck->lk_depth);
765 +       BUG_ON(lck->lk_mode == HTREE_LOCK_INVAL);
766 +
767 +       htree_spin_lock(lhead, dep);
768 +       htree_node_unlock_internal(lhead, lck, dep, event);
769 +       htree_spin_unlock(lhead, dep);
770 +}
771 +EXPORT_SYMBOL(htree_node_unlock);
772 +
773 +/* stop listening on child-lock level @dep */
774 +void
775 +htree_node_stop_listen(struct htree_lock *lck, unsigned dep)
776 +{
777 +       struct htree_lock_node *ln = &lck->lk_nodes[dep];
778 +       struct htree_lock_node *tmp;
779 +
780 +       BUG_ON(htree_node_is_granted(lck, dep));
781 +       BUG_ON(!list_empty(&ln->ln_blocked_list));
782 +       BUG_ON(!list_empty(&ln->ln_granted_list));
783 +
784 +       if (!htree_node_is_listening(lck, dep))
785 +               return;
786 +
787 +       htree_spin_lock(lck->lk_head, dep);
788 +       ln->ln_mode = HTREE_LOCK_INVAL;
789 +       ln->ln_ev_target = NULL;
790 +
791 +       if (htree_key_list_empty(ln)) { /* not owner */
792 +               list_del_init(&ln->ln_alive_list);
793 +               goto out;
794 +       }
795 +
796 +       /* I'm the owner... */
797 +       if (list_empty(&ln->ln_alive_list)) { /* no more listener */
798 +               htree_key_list_del_init(ln);
799 +               goto out;
800 +       }
801 +
802 +       tmp = list_entry(ln->ln_alive_list.next,
803 +                        struct htree_lock_node, ln_alive_list);
804 +
805 +       BUG_ON(tmp->ln_mode != HTREE_LOCK_NL);
806 +       htree_key_list_replace_init(ln, tmp);
807 +       list_del_init(&ln->ln_alive_list);
808 + out:
809 +       htree_spin_unlock(lck->lk_head, dep);
810 +}
811 +EXPORT_SYMBOL(htree_node_stop_listen);
812 +
813 +/* release all child-locks if we have any */
814 +static void
815 +htree_node_release_all(struct htree_lock *lck)
816 +{
817 +       int     i;
818 +
819 +       for (i = 0; i < lck->lk_depth; i++) {
820 +               if (htree_node_is_granted(lck, i))
821 +                       htree_node_unlock(lck, i, NULL);
822 +               else if (htree_node_is_listening(lck, i))
823 +                       htree_node_stop_listen(lck, i);
824 +       }
825 +}
826 +
827 +/*
828 + * obtain the htree lock; it may block if there's a conflict
829 + * with any granted or blocked lock and @wait is true.
830 + * NB: ALWAYS called holding lhead::lh_lock
831 + */
832 +static int
833 +htree_lock_internal(struct htree_lock *lck, int wait)
834 +{
835 +       struct htree_lock_head *lhead = lck->lk_head;
836 +       int     granted = 0;
837 +       int     blocked = 0;
838 +       int     i;
839 +
840 +       for (i = 0; i < HTREE_LOCK_MAX; i++) {
841 +               if (lhead->lh_ngranted[i] != 0)
842 +                       granted |= 1 << i;
843 +               if (lhead->lh_nblocked[i] != 0)
844 +                       blocked |= 1 << i;
845 +       }
846 +       if ((htree_lock_compat[lck->lk_mode] & granted) != granted ||
847 +           (htree_lock_compat[lck->lk_mode] & blocked) != blocked) {
848 +               /* block the current lock even if it just conflicts with any
849 +                * other blocked lock, so a lock like EX won't starve */
850 +               if (!wait)
851 +                       return -1;
852 +               lhead->lh_nblocked[lck->lk_mode]++;
853 +               lk_block_inc(lck->lk_mode);
854 +
855 +               lck->lk_task = current;
856 +               list_add_tail(&lck->lk_blocked_list, &lhead->lh_blocked_list);
857 +
858 +retry:
859 +               set_current_state(TASK_UNINTERRUPTIBLE);
860 +               htree_spin_unlock(lhead, HTREE_DEP_ROOT);
861 +               /* wait to be given the lock */
862 +               if (lck->lk_task != NULL)
863 +                       schedule();
864 +               /* granted, no doubt. wake up will set me RUNNING.
865 +                * Since the thread may have been woken up spuriously,
866 +                * we need to check again whether the lock was granted. */
867 +               if (!list_empty(&lck->lk_blocked_list)) {
868 +                       htree_spin_lock(lhead, HTREE_DEP_ROOT);
869 +                       if (list_empty(&lck->lk_blocked_list)) {
870 +                               htree_spin_unlock(lhead, HTREE_DEP_ROOT);
871 +                               return 0;
872 +                       }
873 +                       goto retry;
874 +               }
875 +               return 0; /* without lh_lock */
876 +       }
877 +       lhead->lh_ngranted[lck->lk_mode]++;
878 +       lk_grant_inc(lck->lk_mode);
879 +       return 1;
880 +}
881 +
882 +/* release htree lock. NB: ALWAYS called holding lhead::lh_lock */
883 +static void
884 +htree_unlock_internal(struct htree_lock *lck)
885 +{
886 +       struct htree_lock_head *lhead = lck->lk_head;
887 +       struct htree_lock *tmp;
888 +       struct htree_lock *tmp2;
889 +       int granted = 0;
890 +       int i;
891 +
892 +       BUG_ON(lhead->lh_ngranted[lck->lk_mode] == 0);
893 +
894 +       lhead->lh_ngranted[lck->lk_mode]--;
895 +       lck->lk_mode = HTREE_LOCK_INVAL;
896 +
897 +       for (i = 0; i < HTREE_LOCK_MAX; i++) {
898 +               if (lhead->lh_ngranted[i] != 0)
899 +                       granted |= 1 << i;
900 +       }
901 +       list_for_each_entry_safe(tmp, tmp2,
902 +                                &lhead->lh_blocked_list, lk_blocked_list) {
903 +               /* conflict with any granted lock? */
904 +               if ((htree_lock_compat[tmp->lk_mode] & granted) != granted)
905 +                       break;
906 +
907 +               list_del_init(&tmp->lk_blocked_list);
908 +
909 +               BUG_ON(lhead->lh_nblocked[tmp->lk_mode] == 0);
910 +
911 +               lhead->lh_nblocked[tmp->lk_mode]--;
912 +               lhead->lh_ngranted[tmp->lk_mode]++;
913 +               granted |= 1 << tmp->lk_mode;
914 +
915 +               BUG_ON(tmp->lk_task == NULL);
916 +               wake_up_process(tmp->lk_task);
917 +       }
918 +}
919 +
920 +/* it's a wrapper of htree_lock_internal and an exported interface.
921 + * It always returns 1 with the lock granted if @wait is true; it can return 0
922 + * if @wait is false and the locking request can't be granted immediately */
923 +int
924 +htree_lock_try(struct htree_lock *lck, struct htree_lock_head *lhead,
925 +              htree_lock_mode_t mode, int wait)
926 +{
927 +       int     rc;
928 +
929 +       BUG_ON(lck->lk_depth > lhead->lh_depth);
930 +       BUG_ON(lck->lk_head != NULL);
931 +       BUG_ON(lck->lk_task != NULL);
932 +
933 +       lck->lk_head = lhead;
934 +       lck->lk_mode = mode;
935 +
936 +       htree_spin_lock(lhead, HTREE_DEP_ROOT);
937 +       rc = htree_lock_internal(lck, wait);
938 +       if (rc != 0)
939 +               htree_spin_unlock(lhead, HTREE_DEP_ROOT);
940 +       return rc >= 0;
941 +}
942 +EXPORT_SYMBOL(htree_lock_try);
943 +
944 +/* it's a wrapper of htree_unlock_internal and an exported interface.
945 + * It will release all htree_node_locks and the htree_lock */
946 +void
947 +htree_unlock(struct htree_lock *lck)
948 +{
949 +       BUG_ON(lck->lk_head == NULL);
950 +       BUG_ON(lck->lk_mode == HTREE_LOCK_INVAL);
951 +
952 +       htree_node_release_all(lck);
953 +
954 +       htree_spin_lock(lck->lk_head, HTREE_DEP_ROOT);
955 +       htree_unlock_internal(lck);
956 +       htree_spin_unlock(lck->lk_head, HTREE_DEP_ROOT);
957 +       lck->lk_head = NULL;
958 +       lck->lk_task = NULL;
959 +}
960 +EXPORT_SYMBOL(htree_unlock);
961 +
962 +/* change lock mode */
963 +void
964 +htree_change_mode(struct htree_lock *lck, htree_lock_mode_t mode)
965 +{
966 +       BUG_ON(lck->lk_mode == HTREE_LOCK_INVAL);
967 +       lck->lk_mode = mode;
968 +}
969 +EXPORT_SYMBOL(htree_change_mode);
970 +
971 +/* release the htree lock, and lock it again with a new mode.
972 + * This function will first release all htree_node_locks and the htree_lock,
973 + * then try to acquire the htree_lock with the new @mode.
974 + * It always returns 1 with the lock granted if @wait is true; it can return 0
975 + * if @wait is false and the locking request can't be granted immediately */
976 +int
977 +htree_change_lock_try(struct htree_lock *lck, htree_lock_mode_t mode, int wait)
978 +{
979 +       struct htree_lock_head *lhead = lck->lk_head;
980 +       int rc;
981 +
982 +       BUG_ON(lhead == NULL);
983 +       BUG_ON(lck->lk_mode == mode);
984 +       BUG_ON(lck->lk_mode == HTREE_LOCK_INVAL || mode == HTREE_LOCK_INVAL);
985 +
986 +       htree_node_release_all(lck);
987 +
988 +       htree_spin_lock(lhead, HTREE_DEP_ROOT);
989 +       htree_unlock_internal(lck);
990 +       lck->lk_mode = mode;
991 +       rc = htree_lock_internal(lck, wait);
992 +       if (rc != 0)
993 +               htree_spin_unlock(lhead, HTREE_DEP_ROOT);
994 +       return rc >= 0;
995 +}
996 +EXPORT_SYMBOL(htree_change_lock_try);
997 +
998 +/* create a htree_lock head with @depth levels (number of child-locks),
999 + * it is a per-resource structure */
1000 +struct htree_lock_head *
1001 +htree_lock_head_alloc(unsigned depth, unsigned hbits, unsigned priv)
1002 +{
1003 +       struct htree_lock_head *lhead;
1004 +       int  i;
1005 +
1006 +       if (depth > HTREE_LOCK_DEP_MAX) {
1007 +               printk(KERN_ERR "%d is larger than max htree_lock depth %d\n",
1008 +                       depth, HTREE_LOCK_DEP_MAX);
1009 +               return NULL;
1010 +       }
1011 +
1012 +       lhead = kzalloc(offsetof(struct htree_lock_head,
1013 +                                lh_children[depth]) + priv, GFP_NOFS);
1014 +       if (lhead == NULL)
1015 +               return NULL;
1016 +
1017 +       if (hbits < HTREE_HBITS_MIN)
1018 +               hbits = HTREE_HBITS_MIN;
1019 +       else if (hbits > HTREE_HBITS_MAX)
1020 +               hbits = HTREE_HBITS_MAX;
1021 +
1022 +       lhead->lh_hbits = hbits;
1023 +       lhead->lh_lock = 0;
1024 +       lhead->lh_depth = depth;
1025 +       INIT_LIST_HEAD(&lhead->lh_blocked_list);
1026 +       if (priv > 0) {
1027 +               lhead->lh_private = (void *)lhead +
1028 +                       offsetof(struct htree_lock_head, lh_children[depth]);
1029 +       }
1030 +
1031 +       for (i = 0; i < depth; i++) {
1032 +               INIT_LIST_HEAD(&lhead->lh_children[i].lc_list);
1033 +               lhead->lh_children[i].lc_events = HTREE_EVENT_DISABLE;
1034 +       }
1035 +       return lhead;
1036 +}
1037 +EXPORT_SYMBOL(htree_lock_head_alloc);
1038 +
1039 +/* free the htree_lock head */
1040 +void
1041 +htree_lock_head_free(struct htree_lock_head *lhead)
1042 +{
1043 +       int     i;
1044 +
1045 +       BUG_ON(!list_empty(&lhead->lh_blocked_list));
1046 +       for (i = 0; i < lhead->lh_depth; i++)
1047 +               BUG_ON(!list_empty(&lhead->lh_children[i].lc_list));
1048 +       kfree(lhead);
1049 +}
1050 +EXPORT_SYMBOL(htree_lock_head_free);
1051 +
1052 +/* register event callback for @events of child-lock at level @dep */
1053 +void
1054 +htree_lock_event_attach(struct htree_lock_head *lhead, unsigned dep,
1055 +                       unsigned events, htree_event_cb_t callback)
1056 +{
1057 +       BUG_ON(lhead->lh_depth <= dep);
1058 +       lhead->lh_children[dep].lc_events = events;
1059 +       lhead->lh_children[dep].lc_callback = callback;
1060 +}
1061 +EXPORT_SYMBOL(htree_lock_event_attach);
1062 +
1063 +/* allocate a htree_lock, which is a per-thread structure; @pbytes is the
1064 + * number of extra bytes reserved as private data for the caller */
1065 +struct htree_lock *
1066 +htree_lock_alloc(unsigned depth, unsigned pbytes)
1067 +{
1068 +       struct htree_lock *lck;
1069 +       int i = offsetof(struct htree_lock, lk_nodes[depth]);
1070 +
1071 +       if (depth > HTREE_LOCK_DEP_MAX) {
1072 +               printk(KERN_ERR "%d is larger than max htree_lock depth %d\n",
1073 +                       depth, HTREE_LOCK_DEP_MAX);
1074 +               return NULL;
1075 +       }
1076 +       lck = kzalloc(i + pbytes, GFP_NOFS);
1077 +       if (lck == NULL)
1078 +               return NULL;
1079 +
1080 +       if (pbytes != 0)
1081 +               lck->lk_private = (void *)lck + i;
1082 +       lck->lk_mode = HTREE_LOCK_INVAL;
1083 +       lck->lk_depth = depth;
1084 +       INIT_LIST_HEAD(&lck->lk_blocked_list);
1085 +
1086 +       for (i = 0; i < depth; i++) {
1087 +               struct htree_lock_node *node = &lck->lk_nodes[i];
1088 +
1089 +               node->ln_mode = HTREE_LOCK_INVAL;
1090 +               INIT_LIST_HEAD(&node->ln_major_list);
1091 +               INIT_LIST_HEAD(&node->ln_minor_list);
1092 +               INIT_LIST_HEAD(&node->ln_alive_list);
1093 +               INIT_LIST_HEAD(&node->ln_blocked_list);
1094 +               INIT_LIST_HEAD(&node->ln_granted_list);
1095 +       }
1096 +
1097 +       return lck;
1098 +}
1099 +EXPORT_SYMBOL(htree_lock_alloc);
1100 +
1101 +/* free htree_lock node */
1102 +void
1103 +htree_lock_free(struct htree_lock *lck)
1104 +{
1105 +       BUG_ON(lck->lk_mode != HTREE_LOCK_INVAL);
1106 +       kfree(lck);
1107 +}
1108 +EXPORT_SYMBOL(htree_lock_free);
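
The HTREE_LOCK_COMPAT_* table near the top of this file encodes which modes may be granted together, and htree_lock_internal() consults it with a plain bitmask test. The helper below is not in the source; it is an illustrative restatement of that check (htree_lock_compat is static to this file, so this only works as exposition here).

/* true when @mode can coexist with every mode set in @held_bitmap,
 * where @held_bitmap has one HTREE_LOCK_BIT_* bit per granted/blocked mode;
 * this mirrors the test done in htree_lock_internal() */
static inline int htree_mode_compatible(htree_lock_mode_t mode,
					unsigned held_bitmap)
{
	return (htree_lock_compat[mode] & held_bitmap) == held_bitmap;
}

For example, with HTREE_LOCK_BIT_CW already granted, another CW request is compatible (parallel creates/unlinks proceed), while PR or EX requests are not and would block.
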
1109 Index: linux-3.10.0-229.1.2.fc21.x86_64/fs/ext4/Makefile
1110 ===================================================================
1111 --- linux-3.10.0-229.1.2.fc21.x86_64.orig/fs/ext4/Makefile
1112 +++ linux-3.10.0-229.1.2.fc21.x86_64/fs/ext4/Makefile
1113 @@ -6,6 +6,7 @@ obj-$(CONFIG_EXT4_FS) += ext4.o
1114  
1115  ext4-y := balloc.o bitmap.o dir.o file.o fsync.o ialloc.o inode.o page-io.o \
1116                 ioctl.o namei.o super.o symlink.o hash.o resize.o extents.o \
1117 +               htree_lock.o \
1118                 ext4_jbd2.o migrate.o mballoc.o block_validity.o move_extent.o \
1119                 mmp.o indirect.o extents_status.o xattr.o xattr_user.o \
1120                 xattr_trusted.o inline.o
1121 Index: linux-3.10.0-229.1.2.fc21.x86_64/fs/ext4/ext4.h
1122 ===================================================================
1123 --- linux-3.10.0-229.1.2.fc21.x86_64.orig/fs/ext4/ext4.h
1124 +++ linux-3.10.0-229.1.2.fc21.x86_64/fs/ext4/ext4.h
1125 @@ -27,6 +27,7 @@
1126  #include <linux/mutex.h>
1127  #include <linux/timer.h>
1128  #include <linux/wait.h>
1129 +#include <linux/htree_lock.h>
1130  #include <linux/blockgroup_lock.h>
1131  #include <linux/percpu_counter.h>
1132  #include <linux/ratelimit.h>
1133 @@ -821,6 +822,9 @@ struct ext4_inode_info {
1134         __u32   i_dtime;
1135         ext4_fsblk_t    i_file_acl;
1136  
1137 +       /* following fields for parallel directory operations -bzzz */
1138 +       struct semaphore i_append_sem;
1139 +
1140         /*
1141          * i_block_group is the number of the block group which contains
1142          * this file's inode.  Constant across the lifetime of the inode,
1143 @@ -1846,6 +1850,71 @@ struct dx_hash_info
1144   */
1145  #define HASH_NB_ALWAYS         1
1146  
1147 +/* assume name-hash is protected by upper layer */
1148 +#define EXT4_HTREE_LOCK_HASH   0
1149 +
1150 +enum ext4_pdo_lk_types {
1151 +#if EXT4_HTREE_LOCK_HASH
1152 +       EXT4_LK_HASH,
1153 +#endif
1154 +       EXT4_LK_DX,             /* index block */
1155 +       EXT4_LK_DE,             /* directory entry block */
1156 +       EXT4_LK_SPIN,           /* spinlock */
1157 +       EXT4_LK_MAX,
1158 +};
1159 +
1160 +/* read-only bit */
1161 +#define EXT4_LB_RO(b)          (1 << (b))
1162 +/* read + write, high bits for writer */
1163 +#define EXT4_LB_RW(b)          ((1 << (b)) | (1 << (EXT4_LK_MAX + (b))))
1164 +
1165 +enum ext4_pdo_lock_bits {
1166 +       /* DX lock bits */
1167 +       EXT4_LB_DX_RO           = EXT4_LB_RO(EXT4_LK_DX),
1168 +       EXT4_LB_DX              = EXT4_LB_RW(EXT4_LK_DX),
1169 +       /* DE lock bits */
1170 +       EXT4_LB_DE_RO           = EXT4_LB_RO(EXT4_LK_DE),
1171 +       EXT4_LB_DE              = EXT4_LB_RW(EXT4_LK_DE),
1172 +       /* DX spinlock bits */
1173 +       EXT4_LB_SPIN_RO         = EXT4_LB_RO(EXT4_LK_SPIN),
1174 +       EXT4_LB_SPIN            = EXT4_LB_RW(EXT4_LK_SPIN),
1175 +       /* exact-match searching */
1176 +       EXT4_LB_EXACT           = EXT4_LB_RO(EXT4_LK_MAX << 1),
1177 +};
1178 +
1179 +enum ext4_pdo_lock_opc {
1180 +       /* external */
1181 +       EXT4_HLOCK_READDIR      = (EXT4_LB_DE_RO | EXT4_LB_DX_RO),
1182 +       EXT4_HLOCK_LOOKUP       = (EXT4_LB_DE_RO | EXT4_LB_SPIN_RO |
1183 +                                  EXT4_LB_EXACT),
1184 +       EXT4_HLOCK_DEL          = (EXT4_LB_DE | EXT4_LB_SPIN_RO |
1185 +                                  EXT4_LB_EXACT),
1186 +       EXT4_HLOCK_ADD          = (EXT4_LB_DE | EXT4_LB_SPIN_RO),
1187 +
1188 +       /* internal */
1189 +       EXT4_HLOCK_LOOKUP_SAFE  = (EXT4_LB_DE_RO | EXT4_LB_DX_RO |
1190 +                                  EXT4_LB_EXACT),
1191 +       EXT4_HLOCK_DEL_SAFE     = (EXT4_LB_DE | EXT4_LB_DX_RO | EXT4_LB_EXACT),
1192 +       EXT4_HLOCK_SPLIT        = (EXT4_LB_DE | EXT4_LB_DX | EXT4_LB_SPIN),
1193 +};
1194 +
1195 +extern struct htree_lock_head *ext4_htree_lock_head_alloc(unsigned hbits);
1196 +#define ext4_htree_lock_head_free(lhead)       htree_lock_head_free(lhead)
1197 +
1198 +extern struct htree_lock *ext4_htree_lock_alloc(void);
1199 +#define ext4_htree_lock_free(lck)              htree_lock_free(lck)
1200 +
1201 +extern void ext4_htree_lock(struct htree_lock *lck,
1202 +                           struct htree_lock_head *lhead,
1203 +                           struct inode *dir, unsigned flags);
1204 +#define ext4_htree_unlock(lck)                  htree_unlock(lck)
1205 +
1206 +extern struct buffer_head *__ext4_find_entry(struct inode *dir,
1207 +                                       const struct qstr *d_name,
1208 +                                       struct ext4_dir_entry_2 **res_dir,
1209 +                                       int *inlined, struct htree_lock *lck);
1210 +extern int __ext4_add_entry(handle_t *handle, struct dentry *dentry,
1211 +                     struct inode *inode, struct htree_lock *lck);
1212  
1213  /*
1214   * Describe an inode's exact location on disk and in memory
1215 @@ -2088,9 +2157,17 @@ void ext4_insert_dentry(struct inode *in
1216                         const char *name, int namelen, void *data);
1217  static inline void ext4_update_dx_flag(struct inode *inode)
1218  {
1219 +       /* Disable it for ldiskfs, because going from a DX directory to
1220 +        * a non-DX directory while it is in use will completely break
1221 +        * the htree-locking.
1222 +        * If we really want to support this operation in the future,
1223 +        * we would need to exclusively lock the directory here, which
1224 +        * would increase the complexity of the code */
1225 +#if 0
1226         if (!EXT4_HAS_COMPAT_FEATURE(inode->i_sb,
1227                                      EXT4_FEATURE_COMPAT_DIR_INDEX))
1228                 ext4_clear_inode_flag(inode, EXT4_INODE_INDEX);
1229 +#endif
1230  }
1231  static unsigned char ext4_filetype_table[] = {
1232         DT_UNKNOWN, DT_REG, DT_DIR, DT_CHR, DT_BLK, DT_FIFO, DT_SOCK, DT_LNK
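
Not part of the patch: a hypothetical sketch of how a caller such as osd-ldiskfs is expected to drive the interface declared above, assuming a per-directory lock head was allocated earlier with ext4_htree_lock_head_alloc(); the function name my_dir_lookup is illustrative only.

static struct buffer_head *my_dir_lookup(struct inode *dir,
					 struct htree_lock_head *lhead,
					 const struct qstr *name,
					 struct ext4_dir_entry_2 **de)
{
	struct htree_lock *lck = ext4_htree_lock_alloc();
	struct buffer_head *bh;

	if (lck == NULL)
		return ERR_PTR(-ENOMEM);

	/* EXT4_HLOCK_LOOKUP maps to a shared (CR) lock on the directory tree,
	 * so concurrent lookups and modifications of other blocks can run */
	ext4_htree_lock(lck, lhead, dir, EXT4_HLOCK_LOOKUP);
	bh = __ext4_find_entry(dir, name, de, NULL, lck);
	ext4_htree_unlock(lck);
	ext4_htree_lock_free(lck);
	return bh;
}

Creates and unlinks follow the same shape with EXT4_HLOCK_ADD or EXT4_HLOCK_DEL, which take a CW tree lock plus per-block child locks inside __ext4_add_entry()/__ext4_find_entry().
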
1233 Index: linux-3.10.0-1062.el7.x86_64/fs/ext4/namei.c
1234 ===================================================================
1235 --- linux-3.10.0-1062.el7.x86_64.orig/fs/ext4/namei.c
1236 +++ linux-3.10.0-1062.el7.x86_64/fs/ext4/namei.c
1237 @@ -53,6 +53,7 @@ struct buffer_head *ext4_append(handle_t
1238                                         ext4_lblk_t *block)
1239  {
1240         struct buffer_head *bh;
1241 +       struct ext4_inode_info *ei = EXT4_I(inode);
1242         int err = 0;
1243  
1244         if (unlikely(EXT4_SB(inode->i_sb)->s_max_dir_size_kb &&
1245 @@ -60,15 +61,22 @@ struct buffer_head *ext4_append(handle_t
1246                       EXT4_SB(inode->i_sb)->s_max_dir_size_kb)))
1247                 return ERR_PTR(-ENOSPC);
1248  
1249 +       /* with parallel dir operations all appends
1250 +       * have to be serialized -bzzz */
1251 +       down(&ei->i_append_sem);
1252 +
1253         *block = inode->i_size >> inode->i_sb->s_blocksize_bits;
1254  
1255         bh = ext4_bread(handle, inode, *block, 1, &err);
1256 -       if (!bh)
1257 +       if (!bh) {
1258 +               up(&ei->i_append_sem);
1259                 return ERR_PTR(err);
1260 +       }
1261         inode->i_size += inode->i_sb->s_blocksize;
1262         EXT4_I(inode)->i_disksize = inode->i_size;
1263         BUFFER_TRACE(bh, "get_write_access");
1264         err = ext4_journal_get_write_access(handle, bh);
1265 +       up(&ei->i_append_sem);
1266         if (err) {
1267                 brelse(bh);
1268                 ext4_std_error(inode->i_sb, err);
1269 @@ -247,7 +255,7 @@ static struct dx_frame *dx_probe(const s
1270                                  struct inode *dir,
1271                                  struct dx_hash_info *hinfo,
1272                                  struct dx_frame *frame,
1273 -                                int *err);
1274 +                                struct htree_lock *lck, int *err);
1275  static void dx_release(struct dx_frame *frames);
1276  static int dx_make_map(struct ext4_dir_entry_2 *de, unsigned blocksize,
1277                        struct dx_hash_info *hinfo, struct dx_map_entry map[]);
1278 @@ -260,13 +268,13 @@ static void dx_insert_block(struct dx_fr
1279  static int ext4_htree_next_block(struct inode *dir, __u32 hash,
1280                                  struct dx_frame *frame,
1281                                  struct dx_frame *frames,
1282 -                                __u32 *start_hash);
1283 +                                __u32 *start_hash, struct htree_lock *lck);
1284  static struct buffer_head * ext4_dx_find_entry(struct inode *dir,
1285                 const struct qstr *d_name,
1286                 struct ext4_dir_entry_2 **res_dir,
1287 -               int *err);
1288 +               struct htree_lock *lck, int *err);
1289  static int ext4_dx_add_entry(handle_t *handle, struct dentry *dentry,
1290 -                            struct inode *inode);
1291 +                            struct inode *inode, struct htree_lock *lck);
1292  
1293  /* checksumming functions */
1294  void initialize_dirent_tail(struct ext4_dir_entry_tail *t,
1295 @@ -670,6 +678,227 @@ struct stats dx_show_entries(struct dx_h
1296  }
1297  #endif /* DX_DEBUG */
1298  
1299 +/* private data for htree_lock */
1300 +struct ext4_dir_lock_data {
1301 +       unsigned                ld_flags;  /* bits-map for lock types */
1302 +       unsigned                ld_count;  /* # entries of the last DX block */
1303 +       struct dx_entry         ld_at_entry; /* copy of leaf dx_entry */
1304 +       struct dx_entry         *ld_at;    /* position of leaf dx_entry */
1305 +};
1306 +
1307 +#define ext4_htree_lock_data(l)        ((struct ext4_dir_lock_data *)(l)->lk_private)
1308 +#define ext4_find_entry(dir, name, dirent, inline) \
1309 +                       __ext4_find_entry(dir, name, dirent, inline, NULL)
1310 +#define ext4_add_entry(handle, dentry, inode) \
1311 +                       __ext4_add_entry(handle, dentry, inode, NULL)
1312 +
1313 +/* NB: ext4_lblk_t is 32 bits so we use high bits to identify invalid blk */
1314 +#define EXT4_HTREE_NODE_CHANGED        (0xcafeULL << 32)
1315 +
1316 +static void ext4_htree_event_cb(void *target, void *event)
1317 +{
1318 +       u64 *block = (u64 *)target;
1319 +
1320 +       if (*block == dx_get_block((struct dx_entry *)event))
1321 +               *block = EXT4_HTREE_NODE_CHANGED;
1322 +}
1323 +
1324 +struct htree_lock_head *ext4_htree_lock_head_alloc(unsigned hbits)
1325 +{
1326 +       struct htree_lock_head *lhead;
1327 +
1328 +       lhead = htree_lock_head_alloc(EXT4_LK_MAX, hbits, 0);
1329 +       if (lhead != NULL) {
1330 +               htree_lock_event_attach(lhead, EXT4_LK_SPIN, HTREE_EVENT_WR,
1331 +                                       ext4_htree_event_cb);
1332 +       }
1333 +       return lhead;
1334 +}
1335 +EXPORT_SYMBOL(ext4_htree_lock_head_alloc);
1336 +
1337 +struct htree_lock *ext4_htree_lock_alloc(void)
1338 +{
1339 +       return htree_lock_alloc(EXT4_LK_MAX,
1340 +                               sizeof(struct ext4_dir_lock_data));
1341 +}
1342 +EXPORT_SYMBOL(ext4_htree_lock_alloc);
1343 +
1344 +static htree_lock_mode_t ext4_htree_mode(unsigned flags)
1345 +{
1346 +       switch (flags) {
1347 +       default: /* 0 or unknown flags require EX lock */
1348 +               return HTREE_LOCK_EX;
1349 +       case EXT4_HLOCK_READDIR:
1350 +               return HTREE_LOCK_PR;
1351 +       case EXT4_HLOCK_LOOKUP:
1352 +               return HTREE_LOCK_CR;
1353 +       case EXT4_HLOCK_DEL:
1354 +       case EXT4_HLOCK_ADD:
1355 +               return HTREE_LOCK_CW;
1356 +       }
1357 +}
1358 +
1359 +/* return PR for read-only operations, otherwise return EX */
1360 +static inline htree_lock_mode_t ext4_htree_safe_mode(unsigned flags)
1361 +{
1362 +       int writer = (flags & EXT4_LB_DE) == EXT4_LB_DE;
1363 +
1364 +       /* 0 requires EX lock */
1365 +       return (flags == 0 || writer) ? HTREE_LOCK_EX : HTREE_LOCK_PR;
1366 +}
1367 +
1368 +static int ext4_htree_safe_locked(struct htree_lock *lck)
1369 +{
1370 +       int writer;
1371 +
1372 +       if (lck == NULL || lck->lk_mode == HTREE_LOCK_EX)
1373 +               return 1;
1374 +
1375 +       writer = (ext4_htree_lock_data(lck)->ld_flags & EXT4_LB_DE) ==
1376 +                EXT4_LB_DE;
1377 +       if (writer) /* all readers & writers are excluded? */
1378 +               return lck->lk_mode == HTREE_LOCK_EX;
1379 +
1380 +       /* all writers are excluded? */
1381 +       return lck->lk_mode == HTREE_LOCK_PR ||
1382 +              lck->lk_mode == HTREE_LOCK_PW ||
1383 +              lck->lk_mode == HTREE_LOCK_EX;
1384 +}
1385 +
1386 +/* relock htree_lock with EX mode if it's a change operation, otherwise
1387 + * relock it with PR mode. It's a no-op if PDO is disabled. */
1388 +static void ext4_htree_safe_relock(struct htree_lock *lck)
1389 +{
1390 +       if (!ext4_htree_safe_locked(lck)) {
1391 +               unsigned flags = ext4_htree_lock_data(lck)->ld_flags;
1392 +
1393 +               htree_change_lock(lck, ext4_htree_safe_mode(flags));
1394 +       }
1395 +}
1396 +
1397 +void ext4_htree_lock(struct htree_lock *lck, struct htree_lock_head *lhead,
1398 +                    struct inode *dir, unsigned flags)
1399 +{
1400 +       htree_lock_mode_t mode = is_dx(dir) ? ext4_htree_mode(flags) :
1401 +                                             ext4_htree_safe_mode(flags);
1402 +
1403 +       ext4_htree_lock_data(lck)->ld_flags = flags;
1404 +       htree_lock(lck, lhead, mode);
1405 +       if (!is_dx(dir))
1406 +               ext4_htree_safe_relock(lck); /* make sure it's safe locked */
1407 +}
1408 +EXPORT_SYMBOL(ext4_htree_lock);
1409 +
1410 +static int ext4_htree_node_lock(struct htree_lock *lck, struct dx_entry *at,
1411 +                               unsigned lmask, int wait, void *ev)
1412 +{
1413 +       u32     key = (at == NULL) ? 0 : dx_get_block(at);
1414 +       u32     mode;
1415 +
1416 +       /* NOOP if htree is well protected or caller doesn't require the lock */
1417 +       if (ext4_htree_safe_locked(lck) ||
1418 +          !(ext4_htree_lock_data(lck)->ld_flags & lmask))
1419 +               return 1;
1420 +
1421 +       mode = (ext4_htree_lock_data(lck)->ld_flags & lmask) == lmask ?
1422 +               HTREE_LOCK_PW : HTREE_LOCK_PR;
1423 +       while (1) {
1424 +               if (htree_node_lock_try(lck, mode, key, ffz(~lmask), wait, ev))
1425 +                       return 1;
1426 +               if (!(lmask & EXT4_LB_SPIN)) /* not a spinlock */
1427 +                       return 0;
1428 +               cpu_relax(); /* spin until granted */
1429 +       }
1430 +}
1431 +
1432 +static int ext4_htree_node_locked(struct htree_lock *lck, unsigned lmask)
1433 +{
1434 +       return ext4_htree_safe_locked(lck) ||
1435 +              htree_node_is_granted(lck, ffz(~lmask));
1436 +}
1437 +
1438 +static void ext4_htree_node_unlock(struct htree_lock *lck,
1439 +                                  unsigned lmask, void *buf)
1440 +{
1441 +       /* NB: it's safe to call multiple times, or even when it's not locked */
1442 +       if (!ext4_htree_safe_locked(lck) &&
1443 +            htree_node_is_granted(lck, ffz(~lmask)))
1444 +               htree_node_unlock(lck, ffz(~lmask), buf);
1445 +}
1446 +
1447 +#define ext4_htree_dx_lock(lck, key)           \
1448 +       ext4_htree_node_lock(lck, key, EXT4_LB_DX, 1, NULL)
1449 +#define ext4_htree_dx_lock_try(lck, key)       \
1450 +       ext4_htree_node_lock(lck, key, EXT4_LB_DX, 0, NULL)
1451 +#define ext4_htree_dx_unlock(lck)              \
1452 +       ext4_htree_node_unlock(lck, EXT4_LB_DX, NULL)
1453 +#define ext4_htree_dx_locked(lck)              \
1454 +       ext4_htree_node_locked(lck, EXT4_LB_DX)
1455 +
1456 +static void ext4_htree_dx_need_lock(struct htree_lock *lck)
1457 +{
1458 +       struct ext4_dir_lock_data *ld;
1459 +
1460 +       if (ext4_htree_safe_locked(lck))
1461 +               return;
1462 +
1463 +       ld = ext4_htree_lock_data(lck);
1464 +       switch (ld->ld_flags) {
1465 +       default:
1466 +               return;
1467 +       case EXT4_HLOCK_LOOKUP:
1468 +               ld->ld_flags = EXT4_HLOCK_LOOKUP_SAFE;
1469 +               return;
1470 +       case EXT4_HLOCK_DEL:
1471 +               ld->ld_flags = EXT4_HLOCK_DEL_SAFE;
1472 +               return;
1473 +       case EXT4_HLOCK_ADD:
1474 +               ld->ld_flags = EXT4_HLOCK_SPLIT;
1475 +               return;
1476 +       }
1477 +}
1478 +
1479 +#define ext4_htree_de_lock(lck, key)           \
1480 +       ext4_htree_node_lock(lck, key, EXT4_LB_DE, 1, NULL)
1481 +#define ext4_htree_de_unlock(lck)              \
1482 +       ext4_htree_node_unlock(lck, EXT4_LB_DE, NULL)
1483 +
1484 +#define ext4_htree_spin_lock(lck, key, event)  \
1485 +       ext4_htree_node_lock(lck, key, EXT4_LB_SPIN, 0, event)
1486 +#define ext4_htree_spin_unlock(lck)            \
1487 +       ext4_htree_node_unlock(lck, EXT4_LB_SPIN, NULL)
1488 +#define ext4_htree_spin_unlock_listen(lck, p)  \
1489 +       ext4_htree_node_unlock(lck, EXT4_LB_SPIN, p)
1490 +
1491 +static void ext4_htree_spin_stop_listen(struct htree_lock *lck)
1492 +{
1493 +       if (!ext4_htree_safe_locked(lck) &&
1494 +           htree_node_is_listening(lck, ffz(~EXT4_LB_SPIN)))
1495 +               htree_node_stop_listen(lck, ffz(~EXT4_LB_SPIN));
1496 +}
1497 +
1498 +enum {
1499 +       DX_HASH_COL_IGNORE,     /* ignore collision while probing frames */
1500 +       DX_HASH_COL_YES,        /* there is collision and it does matter */
1501 +       DX_HASH_COL_NO,         /* there is no collision */
1502 +};
1503 +
1504 +static int dx_probe_hash_collision(struct htree_lock *lck,
1505 +                                  struct dx_entry *entries,
1506 +                                  struct dx_entry *at, u32 hash)
1507 +{
1508 +       if (!(ext4_htree_lock_data(lck)->ld_flags & EXT4_LB_EXACT)) {
1509 +               return DX_HASH_COL_IGNORE; /* don't care about collision */
1510 +
1511 +       } else if (at == entries + dx_get_count(entries) - 1) {
1512 +               return DX_HASH_COL_IGNORE; /* not in any leaf of this DX */
1513 +
1514 +       } else { /* hash collision? */
1515 +               return ((dx_get_hash(at + 1) & ~1) == hash) ?
1516 +                       DX_HASH_COL_YES : DX_HASH_COL_NO;
1517 +       }
1518 +}
1519 +
1520  /*
1521   * Probe for a directory leaf block to search.
1522   *
1523 @@ -681,10 +910,11 @@ struct stats dx_show_entries(struct dx_h
1524   */
1525  static struct dx_frame *
1526  dx_probe(const struct qstr *d_name, struct inode *dir,
1527 -        struct dx_hash_info *hinfo, struct dx_frame *frame_in, int *err)
1528 +        struct dx_hash_info *hinfo, struct dx_frame *frame_in,
1529 +        struct htree_lock *lck, int *err)
1530  {
1531         unsigned count, indirect;
1532 -       struct dx_entry *at, *entries, *p, *q, *m;
1533 +       struct dx_entry *at, *entries, *p, *q, *m, *dx = NULL;
1534         struct dx_root_info *info;
1535         struct buffer_head *bh;
1536         struct dx_frame *frame = frame_in;
1537 @@ -758,8 +988,15 @@ dx_probe(const struct qstr *d_name, stru
1538         dxtrace(printk("Look up %x", hash));
1539         while (1)
1540         {
1541 +               if (indirect == 0) { /* the last index level */
1542 +                       /* NB: ext4_htree_dx_lock() could be a no-op if
1543 +                        * the DX-lock flag is not set for the current operation */
1544 +                       ext4_htree_dx_lock(lck, dx);
1545 +                       ext4_htree_spin_lock(lck, dx, NULL);
1546 +               }
1547                 count = dx_get_count(entries);
1548 -               if (!count || count > dx_get_limit(entries)) {
1549 +               if (count == 0 || count > dx_get_limit(entries)) {
1550 +                       ext4_htree_spin_unlock(lck); /* release spin */
1551                         ext4_warning(dir->i_sb,
1552                                      "dx entry: no count or count > limit");
1553                         brelse(bh);
1554 @@ -800,7 +1037,70 @@ dx_probe(const struct qstr *d_name, stru
1555                 frame->bh = bh;
1556                 frame->entries = entries;
1557                 frame->at = at;
1558 -               if (!indirect--) return frame;
1559 +
1560 +               if (indirect == 0) { /* the last index level */
1561 +                       struct ext4_dir_lock_data *ld;
1562 +                       u64 myblock;
1563 +
1564 +                       /* By default we only lock the DE-block; however, we will
1565 +                        * also lock the last-level DX-block if:
1566 +                        * a) there is a hash collision
1567 +                        *    we set the DX-lock flag (a few lines below)
1568 +                        *    and retry to lock the DX-block,
1569 +                        *    see details in dx_probe_hash_collision()
1570 +                        * b) it's a retry from splitting
1571 +                        *    we need to lock the last-level DX-block so nobody
1572 +                        *    else can split any leaf blocks under the same
1573 +                        *    DX-block, see details in ext4_dx_add_entry()
1574 +                        */
1575 +                       if (ext4_htree_dx_locked(lck)) {
1576 +                               /* DX-block is locked, just lock DE-block
1577 +                                * and return */
1578 +                               ext4_htree_spin_unlock(lck);
1579 +                               if (!ext4_htree_safe_locked(lck))
1580 +                                       ext4_htree_de_lock(lck, frame->at);
1581 +                               return frame;
1582 +                       }
1583 +                       /* it's a pdirop and there is no DX lock yet */
1584 +                       if (dx_probe_hash_collision(lck, entries, at, hash) ==
1585 +                           DX_HASH_COL_YES) {
1586 +                               /* found a hash collision, set the DX-lock flag
1587 +                                * and retry to obtain the DX-lock */
1588 +                               ext4_htree_spin_unlock(lck);
1589 +                               ext4_htree_dx_need_lock(lck);
1590 +                               continue;
1591 +                       }
1592 +                       ld = ext4_htree_lock_data(lck);
1593 +                       /* because I don't lock DX, @at can't be trusted
1594 +                        * after I release the spinlock, so I have to save it */
1595 +                       ld->ld_at = at;
1596 +                       ld->ld_at_entry = *at;
1597 +                       ld->ld_count = dx_get_count(entries);
1598 +
1599 +                       frame->at = &ld->ld_at_entry;
1600 +                       myblock = dx_get_block(at);
1601 +
1602 +                       /* NB: lock ordering: drop the spinlock (while still
1603                          * listening for split events) before taking the DE-lock */
1603 +                       ext4_htree_spin_unlock_listen(lck, &myblock);
1604 +                       /* another thread can split this DE-block because:
1605 +                        * a) I don't hold the lock for the DE-block yet
1606 +                        * b) I released the spinlock on the DX-block
1607 +                        * if that happens, I can detect it by listening for
1608 +                        * the split event on this DE-block */
1609 +                       ext4_htree_de_lock(lck, frame->at);
1610 +                       ext4_htree_spin_stop_listen(lck);
1611 +
1612 +                       if (myblock == EXT4_HTREE_NODE_CHANGED) {
1613 +                               /* someone split this DE-block before
1614 +                                * I locked it; retry and lock the
1615 +                                * valid DE-block */
1616 +                               ext4_htree_de_unlock(lck);
1617 +                               continue;
1618 +                       }
1619 +                       return frame;
1620 +               }
1621 +               dx = at;
1622 +               indirect--;
1623                 bh = ext4_read_dirblock(dir, dx_get_block(at), INDEX);
1624                 if (IS_ERR(bh)) {
1625                         *err = PTR_ERR(bh);
1626 @@ -868,7 +1168,7 @@ static void dx_release (struct dx_frame 
1627  static int ext4_htree_next_block(struct inode *dir, __u32 hash,
1628                                  struct dx_frame *frame,
1629                                  struct dx_frame *frames,
1630 -                                __u32 *start_hash)
1631 +                                __u32 *start_hash, struct htree_lock *lck)
1632  {
1633         struct dx_frame *p;
1634         struct buffer_head *bh;
1635 @@ -883,12 +1183,22 @@ static int ext4_htree_next_block(struct 
1636          * this loop, num_frames indicates the number of interior
1637          * nodes need to be read.
1638          */
1639 +       ext4_htree_de_unlock(lck);
1640         while (1) {
1641 -               if (++(p->at) < p->entries + dx_get_count(p->entries))
1642 -                       break;
1643 +               if (num_frames > 0 || ext4_htree_dx_locked(lck)) {
1644 +                       /* num_frames > 0:
1645 +                        *   this is an interior DX block
1646 +                        * ext4_htree_dx_locked:
1647 +                        *   frame->at is a reliable pointer returned by dx_probe,
1648 +                        *   otherwise dx_probe already determined there is no collision */
1649 +                       if (++(p->at) < p->entries + dx_get_count(p->entries))
1650 +                               break;
1651 +               }
1652                 if (p == frames)
1653                         return 0;
1654                 num_frames++;
1655 +               if (num_frames == 1)
1656 +                       ext4_htree_dx_unlock(lck);
1657                 p--;
1658         }
1659  
1660 @@ -911,6 +1221,13 @@ static int ext4_htree_next_block(struct 
1661          * block so no check is necessary
1662          */
1663         while (num_frames--) {
1664 +               if (num_frames == 0) {
1665 +                       /* this is not always necessary; we just don't want to
1666 +                        * detect a hash collision again */
1667 +                       ext4_htree_dx_need_lock(lck);
1668 +                       ext4_htree_dx_lock(lck, p->at);
1669 +               }
1670 +
1671                 bh = ext4_read_dirblock(dir, dx_get_block(p->at), INDEX);
1672                 if (IS_ERR(bh))
1673                         return PTR_ERR(bh);
1674 @@ -919,6 +1236,7 @@ static int ext4_htree_next_block(struct 
1675                 p->bh = bh;
1676                 p->at = p->entries = ((struct dx_node *) bh->b_data)->entries;
1677         }
1678 +       ext4_htree_de_lock(lck, p->at);
1679         return 1;
1680  }
1681  
1682 @@ -1021,10 +1339,10 @@ int ext4_htree_fill_tree(struct file *di
1683         }
1684         hinfo.hash = start_hash;
1685         hinfo.minor_hash = 0;
1686 -       frame = dx_probe(NULL, dir, &hinfo, frames, &err);
1687 +       /* assume the directory is already PR-locked by the caller */
1688 +       frame = dx_probe(NULL, dir, &hinfo, frames, NULL, &err);
1689         if (!frame)
1690                 return err;
1691 -
1692         /* Add '.' and '..' from the htree header */
1693         if (!start_hash && !start_minor_hash) {
1694                 de = (struct ext4_dir_entry_2 *) frames[0].bh->b_data;
1695 @@ -1051,7 +1369,7 @@ int ext4_htree_fill_tree(struct file *di
1696                 count += ret;
1697                 hashval = ~0;
1698                 ret = ext4_htree_next_block(dir, HASH_NB_ALWAYS,
1699 -                                           frame, frames, &hashval);
1700 +                                           frame, frames, &hashval, NULL);
1701                 *next_hash = hashval;
1702                 if (ret < 0) {
1703                         err = ret;
1704 @@ -1244,10 +1562,10 @@ static int is_dx_internal_node(struct in
1705   * The returned buffer_head has ->b_count elevated.  The caller is expected
1706   * to brelse() it when appropriate.
1707   */
1708 -static struct buffer_head * ext4_find_entry (struct inode *dir,
1709 +struct buffer_head *__ext4_find_entry(struct inode *dir,
1710                                         const struct qstr *d_name,
1711                                         struct ext4_dir_entry_2 **res_dir,
1712 -                                       int *inlined)
1713 +                                       int *inlined, struct htree_lock *lck)
1714  {
1715         struct super_block *sb;
1716         struct buffer_head *bh_use[NAMEI_RA_SIZE];
1717 @@ -1291,7 +1609,7 @@ static struct buffer_head * ext4_find_en
1718                 goto restart;
1719         }
1720         if (is_dx(dir)) {
1721 -               bh = ext4_dx_find_entry(dir, d_name, res_dir, &err);
1722 +               bh = ext4_dx_find_entry(dir, d_name, res_dir, lck, &err);
1723                 /*
1724                  * On success, or if the error was file not found,
1725                  * return.  Otherwise, fall back to doing a search the
1726 @@ -1305,6 +1623,7 @@ static struct buffer_head * ext4_find_en
1727                         return bh;
1728                 dxtrace(printk(KERN_DEBUG "ext4_find_entry: dx failed, "
1729                                "falling back\n"));
1730 +               ext4_htree_safe_relock(lck);
1731                 ret = NULL;
1732         }
1733         nblocks = dir->i_size >> EXT4_BLOCK_SIZE_BITS(sb);
1734 @@ -1402,9 +1721,12 @@ cleanup_and_exit:
1735                 brelse(bh_use[ra_ptr]);
1736         return ret;
1737  }
1738 +EXPORT_SYMBOL(__ext4_find_entry);
1739  
1740 -static struct buffer_head * ext4_dx_find_entry(struct inode *dir, const struct qstr *d_name,
1741 -                      struct ext4_dir_entry_2 **res_dir, int *err)
1742 +static struct buffer_head *ext4_dx_find_entry(struct inode *dir,
1743 +                               const struct qstr *d_name,
1744 +                               struct ext4_dir_entry_2 **res_dir,
1745 +                               struct htree_lock *lck, int *err)
1746  {
1747         struct super_block * sb = dir->i_sb;
1748         struct dx_hash_info     hinfo;
1749 @@ -1413,7 +1735,7 @@ static struct buffer_head * ext4_dx_find
1750         ext4_lblk_t block;
1751         int retval;
1752  
1753 -       if (!(frame = dx_probe(d_name, dir, &hinfo, frames, err)))
1754 +       if (!(frame = dx_probe(d_name, dir, &hinfo, frames, lck, err)))
1755                 return NULL;
1756         do {
1757                 block = dx_get_block(frame->at);
1758 @@ -1437,7 +1759,7 @@ static struct buffer_head * ext4_dx_find
1759  
1760                 /* Check to see if we should continue to search */
1761                 retval = ext4_htree_next_block(dir, hinfo.hash, frame,
1762 -                                              frames, NULL);
1763 +                                              frames, NULL, lck);
1764                 if (retval < 0) {
1765                         ext4_warning(sb,
1766                              "error reading index page in directory #%lu",
1767 @@ -1597,8 +1919,9 @@ static struct ext4_dir_entry_2* dx_pack_
1768   * Returns pointer to de in block into which the new entry will be inserted.
1769   */
1770  static struct ext4_dir_entry_2 *do_split(handle_t *handle, struct inode *dir,
1771 -                       struct buffer_head **bh,struct dx_frame *frame,
1772 -                       struct dx_hash_info *hinfo, int *error)
1773 +                       struct buffer_head **bh, struct dx_frame *frames,
1774 +                       struct dx_frame *frame, struct dx_hash_info *hinfo,
1775 +                       struct htree_lock *lck, int *error)
1776  {
1777         unsigned blocksize = dir->i_sb->s_blocksize;
1778         unsigned count, continued;
1779 @@ -1661,7 +1984,14 @@ static struct ext4_dir_entry_2 *do_split
1780                                         hash2, split, count-split));
1781  
1782         /* Fancy dance to stay within two buffers */
1783 -       de2 = dx_move_dirents(data1, data2, map + split, count - split, blocksize);
1784 +       if (hinfo->hash < hash2) {
1785 +               de2 = dx_move_dirents(data1, data2, map + split,
1786 +                                     count - split, blocksize);
1787 +       } else {
1788 +               /* make sure we add the entry to the same block that
1789 +                * we have already locked */
1790 +               de2 = dx_move_dirents(data1, data2, map, split, blocksize);
1791 +       }
1792         de = dx_pack_dirents(data1, blocksize);
1793         de->rec_len = ext4_rec_len_to_disk(data1 + (blocksize - csum_size) -
1794                                            (char *) de,
1795 @@ -1680,13 +2010,21 @@ static struct ext4_dir_entry_2 *do_split
1796         dxtrace(dx_show_leaf (hinfo, (struct ext4_dir_entry_2 *) data1, blocksize, 1));
1797         dxtrace(dx_show_leaf (hinfo, (struct ext4_dir_entry_2 *) data2, blocksize, 1));
1798  
1799 -       /* Which block gets the new entry? */
1800 -       if (hinfo->hash >= hash2)
1801 -       {
1802 -               swap(*bh, bh2);
1803 -               de = de2;
1804 +       ext4_htree_spin_lock(lck, frame > frames ? (frame - 1)->at : NULL,
1805 +                            frame->at); /* notify that this block is being split */
1806 +       if (hinfo->hash < hash2) {
1807 +               dx_insert_block(frame, hash2 + continued, newblock);
1808 +
1809 +       } else {
1810 +               /* switch block number */
1811 +               dx_insert_block(frame, hash2 + continued,
1812 +                               dx_get_block(frame->at));
1813 +               dx_set_block(frame->at, newblock);
1814 +               (frame->at)++;
1815         }
1816 -       dx_insert_block(frame, hash2 + continued, newblock);
1817 +       ext4_htree_spin_unlock(lck);
1818 +       ext4_htree_dx_unlock(lck);
1819 +
1820         err = ext4_handle_dirty_dirent_node(handle, dir, bh2);
1821         if (err)
1822                 goto journal_error;
1823 @@ -1965,7 +2303,7 @@ static int make_indexed_dir(handle_t *ha
1824         if (retval)
1825                 goto out_frames;        
1826  
1827 -       de = do_split(handle,dir, &bh2, frame, &hinfo, &retval);
1828 +       de = do_split(handle, dir, &bh2, frames, frame, &hinfo, NULL, &retval);
1829         if (!de) {
1830                 goto out_frames;
1831         }
1832 @@ -2072,8 +2410,8 @@ out:
1833   * may not sleep between calling this and putting something into
1834   * the entry, as someone else might have used it while you slept.
1835   */
1836 -static int ext4_add_entry(handle_t *handle, struct dentry *dentry,
1837 -                         struct inode *inode)
1838 +int __ext4_add_entry(handle_t *handle, struct dentry *dentry,
1839 +                     struct inode *inode, struct htree_lock *lck)
1840  {
1841         struct inode *dir = dentry->d_parent->d_inode;
1842         struct buffer_head *bh = NULL;
1843 @@ -2108,9 +2446,10 @@ static int ext4_add_entry(handle_t *hand
1844                 if (dentry->d_name.len == 2 &&
1845                     memcmp(dentry->d_name.name, "..", 2) == 0)
1846                         return ext4_update_dotdot(handle, dentry, inode);
1847 -               retval = ext4_dx_add_entry(handle, dentry, inode);
1848 +               retval = ext4_dx_add_entry(handle, dentry, inode, lck);
1849                 if (!retval || (retval != ERR_BAD_DX_DIR))
1850                         goto out;
1851 +               ext4_htree_safe_relock(lck);
1852                 ext4_clear_inode_flag(dir, EXT4_INODE_INDEX);
1853                 dx_fallback++;
1854                 ext4_mark_inode_dirty(handle, dir);
1855 @@ -2152,12 +2491,13 @@ out:
1856                 ext4_set_inode_state(inode, EXT4_STATE_NEWENTRY);
1857         return retval;
1858  }
1859 +EXPORT_SYMBOL(__ext4_add_entry);
1860  
1861  /*
1862   * Returns 0 for success, or a negative error value
1863   */
1864  static int ext4_dx_add_entry(handle_t *handle, struct dentry *dentry,
1865 -                            struct inode *inode)
1866 +                            struct inode *inode, struct htree_lock *lck)
1867  {
1868         struct dx_frame frames[EXT4_HTREE_LEVEL], *frame;
1869         struct dx_entry *entries, *at;
1870 @@ -2171,7 +2511,7 @@ static int ext4_dx_add_entry(handle_t *h
1871  
1872  again:
1873         restart = 0;
1874 -       frame = dx_probe(&dentry->d_name, dir, &hinfo, frames, &err);
1875 +       frame = dx_probe(&dentry->d_name, dir, &hinfo, frames, lck, &err);
1876         if (!frame)
1877                 return err;
1878         entries = frame->entries;
1879 @@ -2201,6 +2541,11 @@ again:
1880                 struct dx_node *node2;
1881                 struct buffer_head *bh2;
1882  
1883 +               if (!ext4_htree_safe_locked(lck)) { /* retry with EX lock */
1884 +                       ext4_htree_safe_relock(lck);
1885 +                       restart = 1;
1886 +                       goto cleanup;
1887 +               }
1888                 while (frame > frames) {
1889                         if (dx_get_count((frame - 1)->entries) <
1890                             dx_get_limit((frame - 1)->entries)) {
1891 @@ -2304,16 +2649,43 @@ again:
1892                         restart = 1;
1893                         goto journal_error;
1894                 }
1895 +       } else if (!ext4_htree_dx_locked(lck)) {
1896 +               struct ext4_dir_lock_data *ld = ext4_htree_lock_data(lck);
1897 +
1898 +               /* not fully protected, need the DX lock */
1899 +               ext4_htree_dx_need_lock(lck);
1900 +               at = frame > frames ? (frame - 1)->at : NULL;
1901 +
1902 +               /* NB: no risk of deadlock because it's just a try.
1903 +                *
1904 +                * NB: we check ld_count twice: the first time before
1905 +                * taking the DX lock, the second time after holding it.
1906 +                *
1907 +                * NB: we never free directory blocks so far, which
1908 +                * means the value returned by dx_get_count() should equal
1909 +                * ld->ld_count if nobody has split any DE-block under @at,
1910 +                * and ld->ld_at still points to a valid dx_entry. */
1911 +               if ((ld->ld_count != dx_get_count(entries)) ||
1912 +                   !ext4_htree_dx_lock_try(lck, at) ||
1913 +                   (ld->ld_count != dx_get_count(entries))) {
1914 +                       restart = 1;
1915 +                       goto cleanup;
1916 +               }
1917 +               /* OK, I've got DX lock and nothing changed */
1918 +               frame->at = ld->ld_at;
1919         }
1920 -       de = do_split(handle, dir, &bh, frame, &hinfo, &err);
1921 +       de = do_split(handle, dir, &bh, frames, frame, &hinfo, lck, &err);
1922         if (!de)
1923                 goto cleanup;
1924 +
1925         err = add_dirent_to_buf(handle, dentry, inode, de, bh);
1926         goto cleanup;
1927  
1928  journal_error:
1929         ext4_std_error(dir->i_sb, err); /* this is a no-op if err == 0 */
1930  cleanup:
1931 +       ext4_htree_dx_unlock(lck);
1932 +       ext4_htree_de_unlock(lck);
1933         brelse(bh);
1934         dx_release(frames);
1935         /* @restart is true means htree-path has been changed, we need to
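(Editor's illustration, not part of the patch.)  The namei.c hunks above expose a small
pdirops API: a per-directory htree_lock_head, per-thread htree_lock handles, and
__ext4_find_entry()/__ext4_add_entry() variants that take a lock handle.  Below is a
minimal usage sketch for a caller such as osd-ldiskfs; ext4_htree_unlock(),
htree_lock_free() and htree_lock_head_free() are assumed counterparts defined elsewhere
in this patch or in htree_lock.h, and the lock head is assumed to be allocated once per
directory with ext4_htree_lock_head_alloc().

/* Illustration only -- a sketch of driving the pdirops API added above. */
static int pdo_add_entry_sketch(handle_t *handle, struct dentry *dentry,
				struct inode *inode,
				struct htree_lock_head *lhead)
{
	struct inode *dir = dentry->d_parent->d_inode;
	struct htree_lock *lck;
	int rc;

	lck = ext4_htree_lock_alloc();		/* per-thread lock handle */
	if (lck == NULL)
		return -ENOMEM;

	/* CW on the whole htree plus per-block child locks, so creators
	 * working on different leaf blocks of @dir can run in parallel */
	ext4_htree_lock(lck, lhead, dir, EXT4_HLOCK_ADD);

	rc = __ext4_add_entry(handle, dentry, inode, lck);

	ext4_htree_unlock(lck);		/* assumed counterpart of ext4_htree_lock() */
	htree_lock_free(lck);		/* assumed helper from htree_lock.h */
	return rc;
}

A lookup follows the same pattern with EXT4_HLOCK_LOOKUP and
__ext4_find_entry(dir, &dentry->d_name, &de, NULL, lck).
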
1936 Index: linux-3.10.0-229.1.2.fc21.x86_64/fs/ext4/super.c
1937 ===================================================================
1938 --- linux-3.10.0-229.1.2.fc21.x86_64.orig/fs/ext4/super.c
1939 +++ linux-3.10.0-229.1.2.fc21.x86_64/fs/ext4/super.c
1940 @@ -875,6 +875,7 @@ static struct inode *ext4_alloc_inode(st
1941  
1942         ei->vfs_inode.i_version = 1;
1943         spin_lock_init(&ei->i_raw_lock);
1944 +       sema_init(&ei->i_append_sem, 1);
1945         INIT_LIST_HEAD(&ei->i_prealloc_list);
1946         spin_lock_init(&ei->i_prealloc_lock);
1947         ext4_es_init_tree(&ei->i_es_tree);
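
(Editor's illustration, not part of the patch.)  The sema_init() above pairs with the
i_append_sem uses visible at the top of this excerpt: ext4_append() is serialized so
only one thread at a time grows the directory, while operations on existing blocks stay
parallel under the htree locks.  A hedged sketch of the intended pairing follows; the
down() call is assumed to be added in the earlier, un-shown part of the ext4_append()
hunk, and append_block_sketch() is a hypothetical name (the real patch folds this logic
into ext4_append() itself).

/* Illustration only -- intended i_append_sem pairing in ext4_append(). */
static struct buffer_head *append_block_sketch(handle_t *handle,
					       struct inode *inode,
					       ext4_lblk_t *block, int *err)
{
	struct ext4_inode_info *ei = EXT4_I(inode);
	struct buffer_head *bh;

	down(&ei->i_append_sem);		/* one appender at a time */
	*block = inode->i_size >> inode->i_sb->s_blocksize_bits;
	bh = ext4_bread(handle, inode, *block, 1, err);
	if (!bh) {
		up(&ei->i_append_sem);
		return NULL;
	}
	inode->i_size += inode->i_sb->s_blocksize;
	ei->i_disksize = inode->i_size;
	*err = ext4_journal_get_write_access(handle, bh);
	up(&ei->i_append_sem);		/* i_size/i_disksize are consistent now */
	if (*err) {
		brelse(bh);
		bh = NULL;
	}
	return bh;
}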