[fs/lustre-release.git] / ldiskfs / kernel_patches / patches / sles12 / ext4-pdirop.patch
1 Single directory performance is critical for HPC workloads. In a
2 typical use case an application creates a separate output file for
3 each node and task in a job. As nodes and tasks increase, hundreds
4 of thousands of files may be created in a single directory within
5 a short window of time.
6 Today, both filename lookup and file system modifying operations
7 (such as create and unlink) are protected with a single lock for
8 an entire ldiskfs directory. The PDO project removes this
9 bottleneck by introducing a parallel locking mechanism for entire
10 ldiskfs directories. This work enables multiple application
11 threads to perform lookups, creates and unlinks in parallel.
12     
13 This patch contains:
14  - pdirops support for ldiskfs
15  - integration with osd-ldiskfs
16
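As a quick orientation, here is a minimal sketch of the calling pattern for the
htree_lock API added below: one lock head per directory (resource), one lock
handle per thread (contender), a tree-level lock plus a per-key child lock. It
is illustrative only and not part of the patch; error handling is omitted and
the depth/key values are placeholders.

#include <linux/htree_lock.h>

/* illustrative sketch only -- not part of this patch */
static void pdo_example(void)
{
	struct htree_lock_head *lhead;
	struct htree_lock *lck;

	/* one lock head per resource, two key levels, default hash bits */
	lhead = htree_lock_head_alloc(2, HTREE_HBITS_DEF, 0);
	/* one lock handle per thread, no private bytes */
	lck = htree_lock_alloc(2, 0);

	/* shared (CW) lock on the whole tree, then PW lock on one key */
	htree_lock(lck, lhead, HTREE_LOCK_CW);
	htree_node_lock(lck, HTREE_LOCK_PW, /* key */ 123, /* level */ 0);

	/* ... modify the block protected by this key ... */

	htree_node_unlock(lck, 0, NULL);	/* drop the key, don't listen */
	htree_unlock(lck);			/* drop the tree lock */

	htree_lock_free(lck);
	htree_lock_head_free(lhead);
}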
17 Index: linux-3.10.0-229.1.2.fc21.x86_64/include/linux/htree_lock.h
18 ===================================================================
19 --- /dev/null
20 +++ linux-3.10.0-229.1.2.fc21.x86_64/include/linux/htree_lock.h
21 @@ -0,0 +1,187 @@
22 +/*
23 + * include/linux/htree_lock.h
24 + *
25 + * Copyright (c) 2011, 2012, Intel Corporation.
26 + *
27 + * Author: Liang Zhen <liang@whamcloud.com>
28 + */
29 +
30 +/*
31 + * htree lock
32 + *
33 + * htree_lock is an advanced lock that supports five lock modes (the concept
34 + * is taken from the DLM) and it is a sleeping lock.
35 + *
36 + * The most common use case is:
37 + * - create a htree_lock_head for the data
38 + * - each thread (contender) creates its own htree_lock
39 + * - a contender calls htree_lock(lock_node, mode) to protect the data and
40 + *   calls htree_unlock to release the lock
41 + *
42 + * There is also a more complex, advanced use-case: a user can take a PW/PR
43 + * lock on a particular key; this is mostly used while the user is holding a
44 + * shared lock on the htree (CW, CR)
45 + *
46 + * htree_lock(lock_node, HTREE_LOCK_CR); lock the htree with CR
47 + * htree_node_lock(lock_node, HTREE_LOCK_PR, key...); lock @key with PR
48 + * ...
49 + * htree_node_unlock(lock_node); unlock the key
50 + *
51 + * We can also have N levels of such keys: all we need to do is specify the
52 + * number of levels when creating the htree_lock_head, then we can
53 + * lock/unlock a specific level with:
54 + * htree_node_lock(lock_node, mode1, key1, level1...);
55 + * do something;
56 + * htree_node_lock(lock_node, mode1, key2, level2...);
57 + * do something;
58 + * htree_node_unlock(lock_node, level2);
59 + * htree_node_unlock(lock_node, level1);
60 + *
61 + * NB: with multiple levels, be careful about locking order to avoid deadlocks
62 + */
63 +
64 +#ifndef _LINUX_HTREE_LOCK_H
65 +#define _LINUX_HTREE_LOCK_H
66 +
67 +#include <linux/list.h>
68 +#include <linux/spinlock.h>
69 +#include <linux/sched.h>
70 +
71 +/*
72 + * Lock Modes
73 + * more details can be found here:
74 + * http://en.wikipedia.org/wiki/Distributed_lock_manager
75 + */
76 +typedef enum {
77 +       HTREE_LOCK_EX   = 0, /* exclusive lock: incompatible with all others */
78 +       HTREE_LOCK_PW,       /* protected write: allows only CR users */
79 +       HTREE_LOCK_PR,       /* protected read: allow PR, CR users */
80 +       HTREE_LOCK_CW,       /* concurrent write: allow CR, CW users */
81 +       HTREE_LOCK_CR,       /* concurrent read: allow all but EX users */
82 +       HTREE_LOCK_MAX,      /* number of lock modes */
83 +} htree_lock_mode_t;
84 +
85 +#define HTREE_LOCK_NL          HTREE_LOCK_MAX
86 +#define HTREE_LOCK_INVAL       0xdead10c
87 +
88 +enum {
89 +       HTREE_HBITS_MIN         = 2,
90 +       HTREE_HBITS_DEF         = 14,
91 +       HTREE_HBITS_MAX         = 32,
92 +};
93 +
94 +enum {
95 +       HTREE_EVENT_DISABLE     = (0),
96 +       HTREE_EVENT_RD          = (1 << HTREE_LOCK_PR),
97 +       HTREE_EVENT_WR          = (1 << HTREE_LOCK_PW),
98 +       HTREE_EVENT_RDWR        = (HTREE_EVENT_RD | HTREE_EVENT_WR),
99 +};
100 +
101 +struct htree_lock;
102 +
103 +typedef void (*htree_event_cb_t)(void *target, void *event);
104 +
105 +struct htree_lock_child {
106 +       struct list_head        lc_list;        /* granted list */
107 +       htree_event_cb_t        lc_callback;    /* event callback */
108 +       unsigned                lc_events;      /* event types */
109 +};
110 +
111 +struct htree_lock_head {
112 +       unsigned long           lh_lock;        /* bits lock */
113 +       /* blocked lock list (htree_lock) */
114 +       struct list_head        lh_blocked_list;
115 +       /* # key levels */
116 +       u16                     lh_depth;
117 +       /* hash bits for key and limit number of locks */
118 +       u16                     lh_hbits;
119 +       /* counters for blocked locks */
120 +       u16                     lh_nblocked[HTREE_LOCK_MAX];
121 +       /* counters for granted locks */
122 +       u16                     lh_ngranted[HTREE_LOCK_MAX];
123 +       /* private data */
124 +       void                    *lh_private;
125 +       /* array of children locks */
126 +       struct htree_lock_child lh_children[0];
127 +};
128 +
129 +/* htree_lock_node_t is child-lock for a specific key (ln_value) */
130 +struct htree_lock_node {
131 +       htree_lock_mode_t       ln_mode;
132 +       /* major hash key */
133 +       u16                     ln_major_key;
134 +       /* minor hash key */
135 +       u16                     ln_minor_key;
136 +       struct list_head        ln_major_list;
137 +       struct list_head        ln_minor_list;
138 +       /* alive list, all locks (granted, blocked, listening) are on it */
139 +       struct list_head        ln_alive_list;
140 +       /* blocked list */
141 +       struct list_head        ln_blocked_list;
142 +       /* granted list */
143 +       struct list_head        ln_granted_list;
144 +       void                    *ln_ev_target;
145 +};
146 +
147 +struct htree_lock {
148 +       struct task_struct      *lk_task;
149 +       struct htree_lock_head  *lk_head;
150 +       void                    *lk_private;
151 +       unsigned                lk_depth;
152 +       htree_lock_mode_t       lk_mode;
153 +       struct list_head        lk_blocked_list;
154 +       struct htree_lock_node  lk_nodes[0];
155 +};
156 +
157 +/* create a lock head, which stands for a resource */
158 +struct htree_lock_head *htree_lock_head_alloc(unsigned depth,
159 +                                             unsigned hbits, unsigned priv);
160 +/* free a lock head */
161 +void htree_lock_head_free(struct htree_lock_head *lhead);
162 +/* register event callback for child lock at level @depth */
163 +void htree_lock_event_attach(struct htree_lock_head *lhead, unsigned depth,
164 +                            unsigned events, htree_event_cb_t callback);
165 +/* create a lock handle, which stands for a thread */
166 +struct htree_lock *htree_lock_alloc(unsigned depth, unsigned pbytes);
167 +/* free a lock handle */
168 +void htree_lock_free(struct htree_lock *lck);
169 +/* lock htree, when @wait is false, 0 is returned if the lock can't
170 + * be granted immediately */
171 +int htree_lock_try(struct htree_lock *lck, struct htree_lock_head *lhead,
172 +                  htree_lock_mode_t mode, int wait);
173 +/* unlock htree */
174 +void htree_unlock(struct htree_lock *lck);
175 +/* unlock and relock htree with @new_mode */
176 +int htree_change_lock_try(struct htree_lock *lck,
177 +                         htree_lock_mode_t new_mode, int wait);
178 +void htree_change_mode(struct htree_lock *lck, htree_lock_mode_t mode);
179 +/* acquire child lock (key) of htree at level @dep, @event will be sent to all
180 + * listeners on this @key while the lock is being granted */
181 +int htree_node_lock_try(struct htree_lock *lck, htree_lock_mode_t mode,
182 +                       u32 key, unsigned dep, int wait, void *event);
183 +/* release child lock at level @dep, this lock will listen on its key
184 + * if @event isn't NULL, event_cb will be called against @lck while granting
185 + * any other lock at level @dep with the same key */
186 +void htree_node_unlock(struct htree_lock *lck, unsigned dep, void *event);
187 +/* stop listening on child lock at level @dep */
188 +void htree_node_stop_listen(struct htree_lock *lck, unsigned dep);
189 +/* for debug */
190 +void htree_lock_stat_print(int depth);
191 +void htree_lock_stat_reset(void);
192 +
193 +#define htree_lock(lck, lh, mode)      htree_lock_try(lck, lh, mode, 1)
194 +#define htree_change_lock(lck, mode)   htree_change_lock_try(lck, mode, 1)
195 +
196 +#define htree_lock_mode(lck)           ((lck)->lk_mode)
197 +
198 +#define htree_node_lock(lck, mode, key, dep)   \
199 +       htree_node_lock_try(lck, mode, key, dep, 1, NULL)
200 +/* this is only safe in thread context of lock owner */
201 +#define htree_node_is_granted(lck, dep)                \
202 +       ((lck)->lk_nodes[dep].ln_mode != HTREE_LOCK_INVAL && \
203 +        (lck)->lk_nodes[dep].ln_mode != HTREE_LOCK_NL)
204 +/* this is only safe in thread context of lock owner */
205 +#define htree_node_is_listening(lck, dep)      \
206 +       ((lck)->lk_nodes[dep].ln_mode == HTREE_LOCK_NL)
207 +
208 +#endif
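The event/listener mechanism described in the header above is easiest to see
with a small sketch. This is not part of the patch and assumes @lhead and @lck
were allocated with at least one key level (as in the example in the commit
message); the callback simply records the event pointer it is handed. A thread
drops its child lock but keeps listening on the key, and a later writer on the
same key that passes an event gets that event delivered through the callback
registered with htree_lock_event_attach().

#include <linux/htree_lock.h>

/* illustrative sketch only: remember the last event seen for this target */
static void example_event_cb(void *target, void *event)
{
	*(void **)target = event;
}

static void listener_example(struct htree_lock_head *lhead,
			     struct htree_lock *lck)
{
	void *seen = NULL;

	/* deliver write (PW) events on level 0 to example_event_cb */
	htree_lock_event_attach(lhead, 0, HTREE_EVENT_WR, example_event_cb);

	htree_lock(lck, lhead, HTREE_LOCK_CR);
	htree_node_lock(lck, HTREE_LOCK_PR, /* key */ 42, /* level */ 0);
	/* ... read under the key ... */

	/* release the key but keep listening; &seen is the callback target */
	htree_node_unlock(lck, 0, &seen);

	/* if another thread meanwhile was granted a PW lock on key 42 and
	 * passed an event, example_event_cb() has stored that event in 'seen' */
	if (htree_node_is_listening(lck, 0))
		htree_node_stop_listen(lck, 0);

	htree_unlock(lck);
}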
209 Index: linux-3.10.0-229.1.2.fc21.x86_64/fs/ext4/htree_lock.c
210 ===================================================================
211 --- /dev/null
212 +++ linux-3.10.0-229.1.2.fc21.x86_64/fs/ext4/htree_lock.c
213 @@ -0,0 +1,880 @@
214 +/*
215 + * fs/ext4/htree_lock.c
216 + *
217 + * Copyright (c) 2011, 2012, Intel Corporation.
218 + *
219 + * Author: Liang Zhen <liang@whamcloud.com>
220 + */
221 +#include <linux/jbd2.h>
222 +#include <linux/hash.h>
223 +#include <linux/module.h>
224 +#include <linux/htree_lock.h>
225 +
226 +enum {
227 +       HTREE_LOCK_BIT_EX       = (1 << HTREE_LOCK_EX),
228 +       HTREE_LOCK_BIT_PW       = (1 << HTREE_LOCK_PW),
229 +       HTREE_LOCK_BIT_PR       = (1 << HTREE_LOCK_PR),
230 +       HTREE_LOCK_BIT_CW       = (1 << HTREE_LOCK_CW),
231 +       HTREE_LOCK_BIT_CR       = (1 << HTREE_LOCK_CR),
232 +};
233 +
234 +enum {
235 +       HTREE_LOCK_COMPAT_EX    = 0,
236 +       HTREE_LOCK_COMPAT_PW    = HTREE_LOCK_COMPAT_EX | HTREE_LOCK_BIT_CR,
237 +       HTREE_LOCK_COMPAT_PR    = HTREE_LOCK_COMPAT_PW | HTREE_LOCK_BIT_PR,
238 +       HTREE_LOCK_COMPAT_CW    = HTREE_LOCK_COMPAT_PW | HTREE_LOCK_BIT_CW,
239 +       HTREE_LOCK_COMPAT_CR    = HTREE_LOCK_COMPAT_CW | HTREE_LOCK_BIT_PR |
240 +                                 HTREE_LOCK_BIT_PW,
241 +};
242 +
243 +static int htree_lock_compat[] = {
244 +       [HTREE_LOCK_EX]         HTREE_LOCK_COMPAT_EX,
245 +       [HTREE_LOCK_PW]         HTREE_LOCK_COMPAT_PW,
246 +       [HTREE_LOCK_PR]         HTREE_LOCK_COMPAT_PR,
247 +       [HTREE_LOCK_CW]         HTREE_LOCK_COMPAT_CW,
248 +       [HTREE_LOCK_CR]         HTREE_LOCK_COMPAT_CR,
249 +};
250 +
251 +/* max allowed htree-lock depth.
252 + * We only need depth=3 for ext4, although a user can specify a higher value. */
253 +#define HTREE_LOCK_DEP_MAX     16
254 +
255 +#ifdef HTREE_LOCK_DEBUG
256 +
257 +static char *hl_name[] = {
258 +       [HTREE_LOCK_EX]         "EX",
259 +       [HTREE_LOCK_PW]         "PW",
260 +       [HTREE_LOCK_PR]         "PR",
261 +       [HTREE_LOCK_CW]         "CW",
262 +       [HTREE_LOCK_CR]         "CR",
263 +};
264 +
265 +/* lock stats */
266 +struct htree_lock_node_stats {
267 +       unsigned long long      blocked[HTREE_LOCK_MAX];
268 +       unsigned long long      granted[HTREE_LOCK_MAX];
269 +       unsigned long long      retried[HTREE_LOCK_MAX];
270 +       unsigned long long      events;
271 +};
272 +
273 +struct htree_lock_stats {
274 +       struct htree_lock_node_stats    nodes[HTREE_LOCK_DEP_MAX];
275 +       unsigned long long      granted[HTREE_LOCK_MAX];
276 +       unsigned long long      blocked[HTREE_LOCK_MAX];
277 +};
278 +
279 +static struct htree_lock_stats hl_stats;
280 +
281 +void htree_lock_stat_reset(void)
282 +{
283 +       memset(&hl_stats, 0, sizeof(hl_stats));
284 +}
285 +
286 +void htree_lock_stat_print(int depth)
287 +{
288 +       int     i;
289 +       int     j;
290 +
291 +       printk(KERN_DEBUG "HTREE LOCK STATS:\n");
292 +       for (i = 0; i < HTREE_LOCK_MAX; i++) {
293 +               printk(KERN_DEBUG "[%s]: G [%10llu], B [%10llu]\n",
294 +                      hl_name[i], hl_stats.granted[i], hl_stats.blocked[i]);
295 +       }
296 +       for (i = 0; i < depth; i++) {
297 +               printk(KERN_DEBUG "HTREE CHILD [%d] STATS:\n", i);
298 +               for (j = 0; j < HTREE_LOCK_MAX; j++) {
299 +                       printk(KERN_DEBUG
300 +                               "[%s]: G [%10llu], B [%10llu], R [%10llu]\n",
301 +                               hl_name[j], hl_stats.nodes[i].granted[j],
302 +                               hl_stats.nodes[i].blocked[j],
303 +                               hl_stats.nodes[i].retried[j]);
304 +               }
305 +       }
306 +}
307 +
308 +#define lk_grant_inc(m)       do { hl_stats.granted[m]++; } while (0)
309 +#define lk_block_inc(m)       do { hl_stats.blocked[m]++; } while (0)
310 +#define ln_grant_inc(d, m)    do { hl_stats.nodes[d].granted[m]++; } while (0)
311 +#define ln_block_inc(d, m)    do { hl_stats.nodes[d].blocked[m]++; } while (0)
312 +#define ln_retry_inc(d, m)    do { hl_stats.nodes[d].retried[m]++; } while (0)
313 +#define ln_event_inc(d)       do { hl_stats.nodes[d].events++; } while (0)
314 +
315 +#else /* !DEBUG */
316 +
317 +void htree_lock_stat_reset(void) {}
318 +void htree_lock_stat_print(int depth) {}
319 +
320 +#define lk_grant_inc(m)              do {} while (0)
321 +#define lk_block_inc(m)              do {} while (0)
322 +#define ln_grant_inc(d, m)    do {} while (0)
323 +#define ln_block_inc(d, m)    do {} while (0)
324 +#define ln_retry_inc(d, m)    do {} while (0)
325 +#define ln_event_inc(d)              do {} while (0)
326 +
327 +#endif /* DEBUG */
328 +
329 +EXPORT_SYMBOL(htree_lock_stat_reset);
330 +EXPORT_SYMBOL(htree_lock_stat_print);
331 +
332 +#define HTREE_DEP_ROOT           (-1)
333 +
334 +#define htree_spin_lock(lhead, dep)                            \
335 +       bit_spin_lock((dep) + 1, &(lhead)->lh_lock)
336 +#define htree_spin_unlock(lhead, dep)                          \
337 +       bit_spin_unlock((dep) + 1, &(lhead)->lh_lock)
338 +
339 +#define htree_key_event_ignore(child, ln)                      \
340 +       (!((child)->lc_events & (1 << (ln)->ln_mode)))
341 +
342 +static int
343 +htree_key_list_empty(struct htree_lock_node *ln)
344 +{
345 +       return list_empty(&ln->ln_major_list) && list_empty(&ln->ln_minor_list);
346 +}
347 +
348 +static void
349 +htree_key_list_del_init(struct htree_lock_node *ln)
350 +{
351 +       struct htree_lock_node *tmp = NULL;
352 +
353 +       if (!list_empty(&ln->ln_minor_list)) {
354 +               tmp = list_entry(ln->ln_minor_list.next,
355 +                                struct htree_lock_node, ln_minor_list);
356 +               list_del_init(&ln->ln_minor_list);
357 +       }
358 +
359 +       if (list_empty(&ln->ln_major_list))
360 +               return;
361 +
362 +       if (tmp == NULL) { /* not on minor key list */
363 +               list_del_init(&ln->ln_major_list);
364 +       } else {
365 +               BUG_ON(!list_empty(&tmp->ln_major_list));
366 +               list_replace_init(&ln->ln_major_list, &tmp->ln_major_list);
367 +       }
368 +}
369 +
370 +static void
371 +htree_key_list_replace_init(struct htree_lock_node *old,
372 +                           struct htree_lock_node *new)
373 +{
374 +       if (!list_empty(&old->ln_major_list))
375 +               list_replace_init(&old->ln_major_list, &new->ln_major_list);
376 +
377 +       if (!list_empty(&old->ln_minor_list))
378 +               list_replace_init(&old->ln_minor_list, &new->ln_minor_list);
379 +}
380 +
381 +static void
382 +htree_key_event_enqueue(struct htree_lock_child *child,
383 +                       struct htree_lock_node *ln, int dep, void *event)
384 +{
385 +       struct htree_lock_node *tmp;
386 +
387 +       /* NB: ALWAYS called holding lhead::lh_lock(dep) */
388 +       BUG_ON(ln->ln_mode == HTREE_LOCK_NL);
389 +       if (event == NULL || htree_key_event_ignore(child, ln))
390 +               return;
391 +
392 +       /* shouldn't be a very long list */
393 +       list_for_each_entry(tmp, &ln->ln_alive_list, ln_alive_list) {
394 +               if (tmp->ln_mode == HTREE_LOCK_NL) {
395 +                       ln_event_inc(dep);
396 +                       if (child->lc_callback != NULL)
397 +                               child->lc_callback(tmp->ln_ev_target, event);
398 +               }
399 +       }
400 +}
401 +
402 +static int
403 +htree_node_lock_enqueue(struct htree_lock *newlk, struct htree_lock *curlk,
404 +                       unsigned dep, int wait, void *event)
405 +{
406 +       struct htree_lock_child *child = &newlk->lk_head->lh_children[dep];
407 +       struct htree_lock_node *newln = &newlk->lk_nodes[dep];
408 +       struct htree_lock_node *curln = &curlk->lk_nodes[dep];
409 +
410 +       /* NB: ALWAYS called holding lhead::lh_lock(dep) */
411 +       /* NB: we only expect PR/PW lock modes here, only these two modes are
412 +        * allowed for htree_node_lock (asserted in htree_node_lock_internal),
413 +        * NL is only used for listeners, a user can't directly request NL mode */
414 +       if ((curln->ln_mode == HTREE_LOCK_NL) ||
415 +           (curln->ln_mode != HTREE_LOCK_PW &&
416 +            newln->ln_mode != HTREE_LOCK_PW)) {
417 +               /* no conflict, attach it on granted list of @curlk */
418 +               if (curln->ln_mode != HTREE_LOCK_NL) {
419 +                       list_add(&newln->ln_granted_list,
420 +                                &curln->ln_granted_list);
421 +               } else {
422 +                       /* replace key owner */
423 +                       htree_key_list_replace_init(curln, newln);
424 +               }
425 +
426 +               list_add(&newln->ln_alive_list, &curln->ln_alive_list);
427 +               htree_key_event_enqueue(child, newln, dep, event);
428 +               ln_grant_inc(dep, newln->ln_mode);
429 +               return 1; /* still hold lh_lock */
430 +       }
431 +
432 +       if (!wait) { /* can't grant and don't want to wait */
433 +               ln_retry_inc(dep, newln->ln_mode);
434 +               newln->ln_mode = HTREE_LOCK_INVAL;
435 +               return -1; /* don't wait and just return -1 */
436 +       }
437 +
438 +       newlk->lk_task = current;
439 +       set_current_state(TASK_UNINTERRUPTIBLE);
440 +       /* conflict, attach it on blocked list of curlk */
441 +       list_add_tail(&newln->ln_blocked_list, &curln->ln_blocked_list);
442 +       list_add(&newln->ln_alive_list, &curln->ln_alive_list);
443 +       ln_block_inc(dep, newln->ln_mode);
444 +
445 +       htree_spin_unlock(newlk->lk_head, dep);
446 +       /* wait to be given the lock */
447 +       if (newlk->lk_task != NULL)
448 +               schedule();
449 +       /* granted, no doubt, wake up will set me RUNNING */
450 +       if (event == NULL || htree_key_event_ignore(child, newln))
451 +               return 0; /* granted without lh_lock */
452 +
453 +       htree_spin_lock(newlk->lk_head, dep);
454 +       htree_key_event_enqueue(child, newln, dep, event);
455 +       return 1; /* still hold lh_lock */
456 +}
457 +
458 +/*
459 + * get PR/PW access to particular tree-node according to @dep and @key,
460 + * it will return -1 if @wait is false and can't immediately grant this lock.
461 + * All listeners (HTREE_LOCK_NL) on @dep and with the same @key will get
462 + * @event if it's not NULL.
463 + * NB: ALWAYS called holding lhead::lh_lock
464 + */
465 +static int
466 +htree_node_lock_internal(struct htree_lock_head *lhead, struct htree_lock *lck,
467 +                        htree_lock_mode_t mode, u32 key, unsigned dep,
468 +                        int wait, void *event)
469 +{
470 +       LIST_HEAD(list);
471 +       struct htree_lock       *tmp;
472 +       struct htree_lock       *tmp2;
473 +       u16                     major;
474 +       u16                     minor;
475 +       u8                      reverse;
476 +       u8                      ma_bits;
477 +       u8                      mi_bits;
478 +
479 +       BUG_ON(mode != HTREE_LOCK_PW && mode != HTREE_LOCK_PR);
480 +       BUG_ON(htree_node_is_granted(lck, dep));
481 +
482 +       key = hash_long(key, lhead->lh_hbits);
483 +
484 +       mi_bits = lhead->lh_hbits >> 1;
485 +       ma_bits = lhead->lh_hbits - mi_bits;
486 +
487 +       lck->lk_nodes[dep].ln_major_key = major = key & ((1U << ma_bits) - 1);
488 +       lck->lk_nodes[dep].ln_minor_key = minor = key >> ma_bits;
489 +       lck->lk_nodes[dep].ln_mode = mode;
490 +
491 +       /*
492 +        * The major key list is an ordered list, so searches are started
493 +        * at the end of the list that is numerically closer to major_key,
494 +        * so at most half of the list will be walked (for well-distributed
495 +        * keys). The list traversal aborts early if the expected key
496 +        * location is passed.
497 +        */
498 +       reverse = (major >= (1 << (ma_bits - 1)));
499 +
500 +       if (reverse) {
501 +               list_for_each_entry_reverse(tmp,
502 +                                       &lhead->lh_children[dep].lc_list,
503 +                                       lk_nodes[dep].ln_major_list) {
504 +                       if (tmp->lk_nodes[dep].ln_major_key == major) {
505 +                               goto search_minor;
506 +
507 +                       } else if (tmp->lk_nodes[dep].ln_major_key < major) {
508 +                               /* attach _after_ @tmp */
509 +                               list_add(&lck->lk_nodes[dep].ln_major_list,
510 +                                        &tmp->lk_nodes[dep].ln_major_list);
511 +                               goto out_grant_major;
512 +                       }
513 +               }
514 +
515 +               list_add(&lck->lk_nodes[dep].ln_major_list,
516 +                        &lhead->lh_children[dep].lc_list);
517 +               goto out_grant_major;
518 +
519 +       } else {
520 +               list_for_each_entry(tmp, &lhead->lh_children[dep].lc_list,
521 +                                   lk_nodes[dep].ln_major_list) {
522 +                       if (tmp->lk_nodes[dep].ln_major_key == major) {
523 +                               goto search_minor;
524 +
525 +                       } else if (tmp->lk_nodes[dep].ln_major_key > major) {
526 +                               /* insert _before_ @tmp */
527 +                               list_add_tail(&lck->lk_nodes[dep].ln_major_list,
528 +                                       &tmp->lk_nodes[dep].ln_major_list);
529 +                               goto out_grant_major;
530 +                       }
531 +               }
532 +
533 +               list_add_tail(&lck->lk_nodes[dep].ln_major_list,
534 +                             &lhead->lh_children[dep].lc_list);
535 +               goto out_grant_major;
536 +       }
537 +
538 + search_minor:
539 +       /*
540 +        * NB: minor_key list doesn't have a "head", @list is just a
541 +        * temporary stub for helping list searching, make sure it's removed
542 +        * after searching.
543 +        * minor_key list is an ordered list too.
544 +        */
545 +       list_add_tail(&list, &tmp->lk_nodes[dep].ln_minor_list);
546 +
547 +       reverse = (minor >= (1 << (mi_bits - 1)));
548 +
549 +       if (reverse) {
550 +               list_for_each_entry_reverse(tmp2, &list,
551 +                                           lk_nodes[dep].ln_minor_list) {
552 +                       if (tmp2->lk_nodes[dep].ln_minor_key == minor) {
553 +                               goto out_enqueue;
554 +
555 +                       } else if (tmp2->lk_nodes[dep].ln_minor_key < minor) {
556 +                               /* attach _after_ @tmp2 */
557 +                               list_add(&lck->lk_nodes[dep].ln_minor_list,
558 +                                        &tmp2->lk_nodes[dep].ln_minor_list);
559 +                               goto out_grant_minor;
560 +                       }
561 +               }
562 +
563 +               list_add(&lck->lk_nodes[dep].ln_minor_list, &list);
564 +
565 +       } else {
566 +               list_for_each_entry(tmp2, &list,
567 +                                   lk_nodes[dep].ln_minor_list) {
568 +                       if (tmp2->lk_nodes[dep].ln_minor_key == minor) {
569 +                               goto out_enqueue;
570 +
571 +                       } else if (tmp2->lk_nodes[dep].ln_minor_key > minor) {
572 +                               /* insert _before_ @tmp2 */
573 +                               list_add_tail(&lck->lk_nodes[dep].ln_minor_list,
574 +                                       &tmp2->lk_nodes[dep].ln_minor_list);
575 +                               goto out_grant_minor;
576 +                       }
577 +               }
578 +
579 +               list_add_tail(&lck->lk_nodes[dep].ln_minor_list, &list);
580 +       }
581 +
582 + out_grant_minor:
583 +       if (list.next == &lck->lk_nodes[dep].ln_minor_list) {
584 +               /* new lock @lck is the first one on minor_key list, which
585 +                * means it has the smallest minor_key and it should
586 +                * replace @tmp as minor_key owner */
587 +               list_replace_init(&tmp->lk_nodes[dep].ln_major_list,
588 +                                 &lck->lk_nodes[dep].ln_major_list);
589 +       }
590 +       /* remove the temporary head */
591 +       list_del(&list);
592 +
593 + out_grant_major:
594 +       ln_grant_inc(dep, lck->lk_nodes[dep].ln_mode);
595 +       return 1; /* granted with holding lh_lock */
596 +
597 + out_enqueue:
598 +       list_del(&list); /* remove temporary head */
599 +       return htree_node_lock_enqueue(lck, tmp2, dep, wait, event);
600 +}
601 +
602 +/*
603 + * release the key of @lck at level @dep, and grant any blocked locks.
604 + * caller will still listen on @key if @event is not NULL, which means
605 + * caller can see an event (by event_cb) while granting any lock with
606 + * the same key at level @dep.
607 + * NB: ALWAYS called holding lhead::lh_lock
608 + * NB: listener will not block anyone because listening mode is HTREE_LOCK_NL
609 + */
610 +static void
611 +htree_node_unlock_internal(struct htree_lock_head *lhead,
612 +                          struct htree_lock *curlk, unsigned dep, void *event)
613 +{
614 +       struct htree_lock_node  *curln = &curlk->lk_nodes[dep];
615 +       struct htree_lock       *grtlk = NULL;
616 +       struct htree_lock_node  *grtln;
617 +       struct htree_lock       *poslk;
618 +       struct htree_lock       *tmplk;
619 +
620 +       if (!htree_node_is_granted(curlk, dep))
621 +               return;
622 +
623 +       if (!list_empty(&curln->ln_granted_list)) {
624 +               /* there is another granted lock */
625 +               grtlk = list_entry(curln->ln_granted_list.next,
626 +                                  struct htree_lock,
627 +                                  lk_nodes[dep].ln_granted_list);
628 +               list_del_init(&curln->ln_granted_list);
629 +       }
630 +
631 +       if (grtlk == NULL && !list_empty(&curln->ln_blocked_list)) {
632 +               /*
633 +                * @curlk is the only granted lock, so we confirmed:
634 +                * a) curln is key owner (attached on major/minor_list),
635 +                *    so if there is any blocked lock, it should be attached
636 +                *    on curln->ln_blocked_list
637 +                * b) we always can grant the first blocked lock
638 +                */
639 +               grtlk = list_entry(curln->ln_blocked_list.next,
640 +                                  struct htree_lock,
641 +                                  lk_nodes[dep].ln_blocked_list);
642 +               BUG_ON(grtlk->lk_task == NULL);
643 +               wake_up_process(grtlk->lk_task);
644 +       }
645 +
646 +       if (event != NULL &&
647 +           lhead->lh_children[dep].lc_events != HTREE_EVENT_DISABLE) {
648 +               curln->ln_ev_target = event;
649 +               curln->ln_mode = HTREE_LOCK_NL; /* listen! */
650 +       } else {
651 +               curln->ln_mode = HTREE_LOCK_INVAL;
652 +       }
653 +
654 +       if (grtlk == NULL) { /* I must be the only one locking this key */
655 +               struct htree_lock_node *tmpln;
656 +
657 +               BUG_ON(htree_key_list_empty(curln));
658 +
659 +               if (curln->ln_mode == HTREE_LOCK_NL) /* listening */
660 +                       return;
661 +
662 +               /* not listening */
663 +               if (list_empty(&curln->ln_alive_list)) { /* no more listener */
664 +                       htree_key_list_del_init(curln);
665 +                       return;
666 +               }
667 +
668 +               tmpln = list_entry(curln->ln_alive_list.next,
669 +                                  struct htree_lock_node, ln_alive_list);
670 +
671 +               BUG_ON(tmpln->ln_mode != HTREE_LOCK_NL);
672 +
673 +               htree_key_list_replace_init(curln, tmpln);
674 +               list_del_init(&curln->ln_alive_list);
675 +
676 +               return;
677 +       }
678 +
679 +       /* have a granted lock */
680 +       grtln = &grtlk->lk_nodes[dep];
681 +       if (!list_empty(&curln->ln_blocked_list)) {
682 +               /* only key owner can be on both lists */
683 +               BUG_ON(htree_key_list_empty(curln));
684 +
685 +               if (list_empty(&grtln->ln_blocked_list)) {
686 +                       list_add(&grtln->ln_blocked_list,
687 +                                &curln->ln_blocked_list);
688 +               }
689 +               list_del_init(&curln->ln_blocked_list);
690 +       }
691 +       /*
692 +        * NB: this is the tricky part:
693 +        * We have only two modes for child-lock (PR and PW), also,
694 +        * only owner of the key (attached on major/minor_list) can be on
695 +        * both blocked_list and granted_list, so @grtlk must be one
696 +        * of these two cases:
697 +        *
698 +        * a) @grtlk is taken from granted_list, which means we've granted
699 +        *    more than one lock so @grtlk has to be PR, the first blocked
700 +        *    lock must be PW and we can't grant it at all.
701 +        *    So even @grtlk is not owner of the key (empty blocked_list),
702 +        *    we don't care because we can't grant any lock.
703 +        * b) we just grant a new lock which is taken from head of blocked
704 +        *    list, and it should be the first granted lock, and it should
705 +        *    be the first one linked on blocked_list.
706 +        *
707 +        * Either way, we can get correct result by iterating blocked_list
708 +        * of @grtlk, and don't have to bother on how to find out
709 +        * owner of current key.
710 +        */
711 +       list_for_each_entry_safe(poslk, tmplk, &grtln->ln_blocked_list,
712 +                                lk_nodes[dep].ln_blocked_list) {
713 +               if (grtlk->lk_nodes[dep].ln_mode == HTREE_LOCK_PW ||
714 +                   poslk->lk_nodes[dep].ln_mode == HTREE_LOCK_PW)
715 +                       break;
716 +               /* grant all readers */
717 +               list_del_init(&poslk->lk_nodes[dep].ln_blocked_list);
718 +               list_add(&poslk->lk_nodes[dep].ln_granted_list,
719 +                        &grtln->ln_granted_list);
720 +
721 +               BUG_ON(poslk->lk_task == NULL);
722 +               wake_up_process(poslk->lk_task);
723 +       }
724 +
725 +       /* if @curln is the owner of this key, replace it with @grtln */
726 +       if (!htree_key_list_empty(curln))
727 +               htree_key_list_replace_init(curln, grtln);
728 +
729 +       if (curln->ln_mode == HTREE_LOCK_INVAL)
730 +               list_del_init(&curln->ln_alive_list);
731 +}
732 +
733 +/*
734 + * it's just a wrapper of htree_node_lock_internal; it returns 1 when granted
735 + * and 0 only if @wait is false and the lock can't be granted immediately
736 + */
737 +int
738 +htree_node_lock_try(struct htree_lock *lck, htree_lock_mode_t mode,
739 +                   u32 key, unsigned dep, int wait, void *event)
740 +{
741 +       struct htree_lock_head *lhead = lck->lk_head;
742 +       int rc;
743 +
744 +       BUG_ON(dep >= lck->lk_depth);
745 +       BUG_ON(lck->lk_mode == HTREE_LOCK_INVAL);
746 +
747 +       htree_spin_lock(lhead, dep);
748 +       rc = htree_node_lock_internal(lhead, lck, mode, key, dep, wait, event);
749 +       if (rc != 0)
750 +               htree_spin_unlock(lhead, dep);
751 +       return rc >= 0;
752 +}
753 +EXPORT_SYMBOL(htree_node_lock_try);
754 +
755 +/* it's a wrapper of htree_node_unlock_internal */
756 +void
757 +htree_node_unlock(struct htree_lock *lck, unsigned dep, void *event)
758 +{
759 +       struct htree_lock_head *lhead = lck->lk_head;
760 +
761 +       BUG_ON(dep >= lck->lk_depth);
762 +       BUG_ON(lck->lk_mode == HTREE_LOCK_INVAL);
763 +
764 +       htree_spin_lock(lhead, dep);
765 +       htree_node_unlock_internal(lhead, lck, dep, event);
766 +       htree_spin_unlock(lhead, dep);
767 +}
768 +EXPORT_SYMBOL(htree_node_unlock);
769 +
770 +/* stop listening on child-lock level @dep */
771 +void
772 +htree_node_stop_listen(struct htree_lock *lck, unsigned dep)
773 +{
774 +       struct htree_lock_node *ln = &lck->lk_nodes[dep];
775 +       struct htree_lock_node *tmp;
776 +
777 +       BUG_ON(htree_node_is_granted(lck, dep));
778 +       BUG_ON(!list_empty(&ln->ln_blocked_list));
779 +       BUG_ON(!list_empty(&ln->ln_granted_list));
780 +
781 +       if (!htree_node_is_listening(lck, dep))
782 +               return;
783 +
784 +       htree_spin_lock(lck->lk_head, dep);
785 +       ln->ln_mode = HTREE_LOCK_INVAL;
786 +       ln->ln_ev_target = NULL;
787 +
788 +       if (htree_key_list_empty(ln)) { /* not owner */
789 +               list_del_init(&ln->ln_alive_list);
790 +               goto out;
791 +       }
792 +
793 +       /* I'm the owner... */
794 +       if (list_empty(&ln->ln_alive_list)) { /* no more listener */
795 +               htree_key_list_del_init(ln);
796 +               goto out;
797 +       }
798 +
799 +       tmp = list_entry(ln->ln_alive_list.next,
800 +                        struct htree_lock_node, ln_alive_list);
801 +
802 +       BUG_ON(tmp->ln_mode != HTREE_LOCK_NL);
803 +       htree_key_list_replace_init(ln, tmp);
804 +       list_del_init(&ln->ln_alive_list);
805 + out:
806 +       htree_spin_unlock(lck->lk_head, dep);
807 +}
808 +EXPORT_SYMBOL(htree_node_stop_listen);
809 +
810 +/* release all child-locks if we have any */
811 +static void
812 +htree_node_release_all(struct htree_lock *lck)
813 +{
814 +       int     i;
815 +
816 +       for (i = 0; i < lck->lk_depth; i++) {
817 +               if (htree_node_is_granted(lck, i))
818 +                       htree_node_unlock(lck, i, NULL);
819 +               else if (htree_node_is_listening(lck, i))
820 +                       htree_node_stop_listen(lck, i);
821 +       }
822 +}
823 +
824 +/*
825 + * obtain htree lock, it could be blocked inside if there's conflict
826 + * with any granted or blocked lock and @wait is true.
827 + * NB: ALWAYS called holding lhead::lh_lock
828 + */
829 +static int
830 +htree_lock_internal(struct htree_lock *lck, int wait)
831 +{
832 +       struct htree_lock_head *lhead = lck->lk_head;
833 +       int     granted = 0;
834 +       int     blocked = 0;
835 +       int     i;
836 +
837 +       for (i = 0; i < HTREE_LOCK_MAX; i++) {
838 +               if (lhead->lh_ngranted[i] != 0)
839 +                       granted |= 1 << i;
840 +               if (lhead->lh_nblocked[i] != 0)
841 +                       blocked |= 1 << i;
842 +       }
843 +       if ((htree_lock_compat[lck->lk_mode] & granted) != granted ||
844 +           (htree_lock_compat[lck->lk_mode] & blocked) != blocked) {
845 +               /* will block the current lock even if it just conflicts with any
846 +                * other blocked lock, so a lock like EX won't starve */
847 +               if (!wait)
848 +                       return -1;
849 +               lhead->lh_nblocked[lck->lk_mode]++;
850 +               lk_block_inc(lck->lk_mode);
851 +
852 +               lck->lk_task = current;
853 +               list_add_tail(&lck->lk_blocked_list, &lhead->lh_blocked_list);
854 +
855 +               set_current_state(TASK_UNINTERRUPTIBLE);
856 +               htree_spin_unlock(lhead, HTREE_DEP_ROOT);
857 +               /* wait to be given the lock */
858 +               if (lck->lk_task != NULL)
859 +                       schedule();
860 +               /* granted, no doubt. wake up will set me RUNNING */
861 +               return 0; /* without lh_lock */
862 +       }
863 +       lhead->lh_ngranted[lck->lk_mode]++;
864 +       lk_grant_inc(lck->lk_mode);
865 +       return 1;
866 +}
867 +
868 +/* release htree lock. NB: ALWAYS called holding lhead::lh_lock */
869 +static void
870 +htree_unlock_internal(struct htree_lock *lck)
871 +{
872 +       struct htree_lock_head *lhead = lck->lk_head;
873 +       struct htree_lock *tmp;
874 +       struct htree_lock *tmp2;
875 +       int granted = 0;
876 +       int i;
877 +
878 +       BUG_ON(lhead->lh_ngranted[lck->lk_mode] == 0);
879 +
880 +       lhead->lh_ngranted[lck->lk_mode]--;
881 +       lck->lk_mode = HTREE_LOCK_INVAL;
882 +
883 +       for (i = 0; i < HTREE_LOCK_MAX; i++) {
884 +               if (lhead->lh_ngranted[i] != 0)
885 +                       granted |= 1 << i;
886 +       }
887 +       list_for_each_entry_safe(tmp, tmp2,
888 +                                &lhead->lh_blocked_list, lk_blocked_list) {
889 +               /* conflict with any granted lock? */
890 +               if ((htree_lock_compat[tmp->lk_mode] & granted) != granted)
891 +                       break;
892 +
893 +               list_del_init(&tmp->lk_blocked_list);
894 +
895 +               BUG_ON(lhead->lh_nblocked[tmp->lk_mode] == 0);
896 +
897 +               lhead->lh_nblocked[tmp->lk_mode]--;
898 +               lhead->lh_ngranted[tmp->lk_mode]++;
899 +               granted |= 1 << tmp->lk_mode;
900 +
901 +               BUG_ON(tmp->lk_task == NULL);
902 +               wake_up_process(tmp->lk_task);
903 +       }
904 +}
905 +
906 +/* it's a wrapper of htree_lock_internal and the exported interface.
907 + * It always returns 1 with the lock granted if @wait is true; it can return 0
908 + * if @wait is false and the locking request can't be granted immediately */
909 +int
910 +htree_lock_try(struct htree_lock *lck, struct htree_lock_head *lhead,
911 +              htree_lock_mode_t mode, int wait)
912 +{
913 +       int     rc;
914 +
915 +       BUG_ON(lck->lk_depth > lhead->lh_depth);
916 +       BUG_ON(lck->lk_head != NULL);
917 +       BUG_ON(lck->lk_task != NULL);
918 +
919 +       lck->lk_head = lhead;
920 +       lck->lk_mode = mode;
921 +
922 +       htree_spin_lock(lhead, HTREE_DEP_ROOT);
923 +       rc = htree_lock_internal(lck, wait);
924 +       if (rc != 0)
925 +               htree_spin_unlock(lhead, HTREE_DEP_ROOT);
926 +       return rc >= 0;
927 +}
928 +EXPORT_SYMBOL(htree_lock_try);
929 +
930 +/* it's a wrapper of htree_unlock_internal and the exported interface.
931 + * It will release all htree_node_locks and the htree_lock */
932 +void
933 +htree_unlock(struct htree_lock *lck)
934 +{
935 +       BUG_ON(lck->lk_head == NULL);
936 +       BUG_ON(lck->lk_mode == HTREE_LOCK_INVAL);
937 +
938 +       htree_node_release_all(lck);
939 +
940 +       htree_spin_lock(lck->lk_head, HTREE_DEP_ROOT);
941 +       htree_unlock_internal(lck);
942 +       htree_spin_unlock(lck->lk_head, HTREE_DEP_ROOT);
943 +       lck->lk_head = NULL;
944 +       lck->lk_task = NULL;
945 +}
946 +EXPORT_SYMBOL(htree_unlock);
947 +
948 +/* change lock mode */
949 +void
950 +htree_change_mode(struct htree_lock *lck, htree_lock_mode_t mode)
951 +{
952 +       BUG_ON(lck->lk_mode == HTREE_LOCK_INVAL);
953 +       lck->lk_mode = mode;
954 +}
955 +EXPORT_SYMBOL(htree_change_mode);
956 +
957 +/* release htree lock, and lock it again with new mode.
958 + * This function will first release all htree_node_locks and htree_lock,
959 + * then try to take the htree_lock with the new @mode.
960 + * It always returns 1 with the lock granted if @wait is true; it can return 0
961 + * if @wait is false and the locking request can't be granted immediately */
962 +int
963 +htree_change_lock_try(struct htree_lock *lck, htree_lock_mode_t mode, int wait)
964 +{
965 +       struct htree_lock_head *lhead = lck->lk_head;
966 +       int rc;
967 +
968 +       BUG_ON(lhead == NULL);
969 +       BUG_ON(lck->lk_mode == mode);
970 +       BUG_ON(lck->lk_mode == HTREE_LOCK_INVAL || mode == HTREE_LOCK_INVAL);
971 +
972 +       htree_node_release_all(lck);
973 +
974 +       htree_spin_lock(lhead, HTREE_DEP_ROOT);
975 +       htree_unlock_internal(lck);
976 +       lck->lk_mode = mode;
977 +       rc = htree_lock_internal(lck, wait);
978 +       if (rc != 0)
979 +               htree_spin_unlock(lhead, HTREE_DEP_ROOT);
980 +       return rc >= 0;
981 +}
982 +EXPORT_SYMBOL(htree_change_lock_try);
983 +
984 +/* create a htree_lock head with @depth levels (number of child-locks),
985 + * it is a per-resource structure */
986 +struct htree_lock_head *
987 +htree_lock_head_alloc(unsigned depth, unsigned hbits, unsigned priv)
988 +{
989 +       struct htree_lock_head *lhead;
990 +       int  i;
991 +
992 +       if (depth > HTREE_LOCK_DEP_MAX) {
993 +               printk(KERN_ERR "%d is larger than max htree_lock depth %d\n",
994 +                       depth, HTREE_LOCK_DEP_MAX);
995 +               return NULL;
996 +       }
997 +
998 +       lhead = kzalloc(offsetof(struct htree_lock_head,
999 +                                lh_children[depth]) + priv, GFP_NOFS);
1000 +       if (lhead == NULL)
1001 +               return NULL;
1002 +
1003 +       if (hbits < HTREE_HBITS_MIN)
1004 +               lhead->lh_hbits = HTREE_HBITS_MIN;
1005 +       else
1006 +               lhead->lh_hbits = min_t(u16, hbits, HTREE_HBITS_MAX);
1007 +
1008 +       lhead->lh_lock = 0;
1009 +       lhead->lh_depth = depth;
1010 +       INIT_LIST_HEAD(&lhead->lh_blocked_list);
1011 +       if (priv > 0) {
1012 +               lhead->lh_private = (void *)lhead +
1013 +                       offsetof(struct htree_lock_head, lh_children[depth]);
1014 +       }
1015 +
1016 +       for (i = 0; i < depth; i++) {
1017 +               INIT_LIST_HEAD(&lhead->lh_children[i].lc_list);
1018 +               lhead->lh_children[i].lc_events = HTREE_EVENT_DISABLE;
1019 +       }
1020 +       return lhead;
1021 +}
1022 +EXPORT_SYMBOL(htree_lock_head_alloc);
1023 +
1024 +/* free the htree_lock head */
1025 +void
1026 +htree_lock_head_free(struct htree_lock_head *lhead)
1027 +{
1028 +       int     i;
1029 +
1030 +       BUG_ON(!list_empty(&lhead->lh_blocked_list));
1031 +       for (i = 0; i < lhead->lh_depth; i++)
1032 +               BUG_ON(!list_empty(&lhead->lh_children[i].lc_list));
1033 +       kfree(lhead);
1034 +}
1035 +EXPORT_SYMBOL(htree_lock_head_free);
1036 +
1037 +/* register event callback for @events of child-lock at level @dep */
1038 +void
1039 +htree_lock_event_attach(struct htree_lock_head *lhead, unsigned dep,
1040 +                       unsigned events, htree_event_cb_t callback)
1041 +{
1042 +       BUG_ON(lhead->lh_depth <= dep);
1043 +       lhead->lh_children[dep].lc_events = events;
1044 +       lhead->lh_children[dep].lc_callback = callback;
1045 +}
1046 +EXPORT_SYMBOL(htree_lock_event_attach);
1047 +
1048 +/* allocate a htree_lock, which is a per-thread structure, @pbytes is some
1049 + * extra bytes of private data for the caller */
1050 +struct htree_lock *
1051 +htree_lock_alloc(unsigned depth, unsigned pbytes)
1052 +{
1053 +       struct htree_lock *lck;
1054 +       int i = offsetof(struct htree_lock, lk_nodes[depth]);
1055 +
1056 +       if (depth > HTREE_LOCK_DEP_MAX) {
1057 +               printk(KERN_ERR "%d is larger than max htree_lock depth %d\n",
1058 +                       depth, HTREE_LOCK_DEP_MAX);
1059 +               return NULL;
1060 +       }
1061 +       lck = kzalloc(i + pbytes, GFP_NOFS);
1062 +       if (lck == NULL)
1063 +               return NULL;
1064 +
1065 +       if (pbytes != 0)
1066 +               lck->lk_private = (void *)lck + i;
1067 +       lck->lk_mode = HTREE_LOCK_INVAL;
1068 +       lck->lk_depth = depth;
1069 +       INIT_LIST_HEAD(&lck->lk_blocked_list);
1070 +
1071 +       for (i = 0; i < depth; i++) {
1072 +               struct htree_lock_node *node = &lck->lk_nodes[i];
1073 +
1074 +               node->ln_mode = HTREE_LOCK_INVAL;
1075 +               INIT_LIST_HEAD(&node->ln_major_list);
1076 +               INIT_LIST_HEAD(&node->ln_minor_list);
1077 +               INIT_LIST_HEAD(&node->ln_alive_list);
1078 +               INIT_LIST_HEAD(&node->ln_blocked_list);
1079 +               INIT_LIST_HEAD(&node->ln_granted_list);
1080 +       }
1081 +
1082 +       return lck;
1083 +}
1084 +EXPORT_SYMBOL(htree_lock_alloc);
1085 +
1086 +/* free htree_lock node */
1087 +void
1088 +htree_lock_free(struct htree_lock *lck)
1089 +{
1090 +       BUG_ON(lck->lk_mode != HTREE_LOCK_INVAL);
1091 +       kfree(lck);
1092 +}
1093 +EXPORT_SYMBOL(htree_lock_free);
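The compatibility table near the top of htree_lock.c above can be exercised
outside the kernel. The stand-alone user-space sketch below (not part of the
patch) re-creates the mode bitmasks and the check performed by
htree_lock_internal(): a request is granted only if its mode is compatible with
every granted and every blocked mode, which is why a queued EX request is not
starved by a stream of CR/CW requests.

#include <stdio.h>

enum { EX, PW, PR, CW, CR, MAX };

/* same values as HTREE_LOCK_COMPAT_* in htree_lock.c */
static const int compat[MAX] = {
	[EX] = 0,
	[PW] = (1 << CR),
	[PR] = (1 << CR) | (1 << PR),
	[CW] = (1 << CR) | (1 << CW),
	[CR] = (1 << CR) | (1 << CW) | (1 << PR) | (1 << PW),
};

static int can_grant(int mode, int granted, int blocked)
{
	return (compat[mode] & granted) == granted &&
	       (compat[mode] & blocked) == blocked;
}

int main(void)
{
	/* creates in one directory hold CW; a lookup (CR) still fits */
	printf("CR vs granted CW: %d\n", can_grant(CR, 1 << CW, 0));
	/* an EX waiter is queued; new CR requests must queue behind it */
	printf("CR vs blocked EX: %d\n", can_grant(CR, 1 << CW, 1 << EX));
	return 0;
}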
1094 Index: linux-3.10.0-229.1.2.fc21.x86_64/fs/ext4/Makefile
1095 ===================================================================
1096 --- linux-3.10.0-229.1.2.fc21.x86_64.orig/fs/ext4/Makefile
1097 +++ linux-3.10.0-229.1.2.fc21.x86_64/fs/ext4/Makefile
1098 @@ -6,6 +6,7 @@ obj-$(CONFIG_EXT4_FS) += ext4.o
1099  
1100  ext4-y := balloc.o bitmap.o dir.o file.o fsync.o ialloc.o inode.o page-io.o \
1101                 ioctl.o namei.o super.o symlink.o hash.o resize.o extents.o \
1102 +               htree_lock.o \
1103                 ext4_jbd2.o migrate.o mballoc.o block_validity.o move_extent.o \
1104                 mmp.o indirect.o extents_status.o xattr.o xattr_user.o \
1105                 xattr_trusted.o inline.o
1106 Index: linux-3.10.0-229.1.2.fc21.x86_64/fs/ext4/ext4.h
1107 ===================================================================
1108 --- linux-3.10.0-229.1.2.fc21.x86_64.orig/fs/ext4/ext4.h
1109 +++ linux-3.10.0-229.1.2.fc21.x86_64/fs/ext4/ext4.h
1110 @@ -27,6 +27,7 @@
1111  #include <linux/mutex.h>
1112  #include <linux/timer.h>
1113  #include <linux/wait.h>
1114 +#include <linux/htree_lock.h>
1115  #include <linux/blockgroup_lock.h>
1116  #include <linux/percpu_counter.h>
1117  #include <linux/ratelimit.h>
1118 @@ -821,6 +822,9 @@ struct ext4_inode_info {
1119         __u32   i_dtime;
1120         ext4_fsblk_t    i_file_acl;
1121  
1122 +       /* following fields for parallel directory operations -bzzz */
1123 +       struct semaphore i_append_sem;
1124 +
1125         /*
1126          * i_block_group is the number of the block group which contains
1127          * this file's inode.  Constant across the lifetime of the inode,
1128 @@ -1846,6 +1850,71 @@ struct dx_hash_info
1129   */
1130  #define HASH_NB_ALWAYS         1
1131  
1132 +/* assume name-hash is protected by upper layer */
1133 +#define EXT4_HTREE_LOCK_HASH   0
1134 +
1135 +enum ext4_pdo_lk_types {
1136 +#if EXT4_HTREE_LOCK_HASH
1137 +       EXT4_LK_HASH,
1138 +#endif
1139 +       EXT4_LK_DX,             /* index block */
1140 +       EXT4_LK_DE,             /* directory entry block */
1141 +       EXT4_LK_SPIN,           /* spinlock */
1142 +       EXT4_LK_MAX,
1143 +};
1144 +
1145 +/* read-only bit */
1146 +#define EXT4_LB_RO(b)          (1 << (b))
1147 +/* read + write, high bits for writer */
1148 +#define EXT4_LB_RW(b)          ((1 << (b)) | (1 << (EXT4_LK_MAX + (b))))
1149 +
1150 +enum ext4_pdo_lock_bits {
1151 +       /* DX lock bits */
1152 +       EXT4_LB_DX_RO           = EXT4_LB_RO(EXT4_LK_DX),
1153 +       EXT4_LB_DX              = EXT4_LB_RW(EXT4_LK_DX),
1154 +       /* DE lock bits */
1155 +       EXT4_LB_DE_RO           = EXT4_LB_RO(EXT4_LK_DE),
1156 +       EXT4_LB_DE              = EXT4_LB_RW(EXT4_LK_DE),
1157 +       /* DX spinlock bits */
1158 +       EXT4_LB_SPIN_RO         = EXT4_LB_RO(EXT4_LK_SPIN),
1159 +       EXT4_LB_SPIN            = EXT4_LB_RW(EXT4_LK_SPIN),
1160 +       /* accurate searching */
1161 +       EXT4_LB_EXACT           = EXT4_LB_RO(EXT4_LK_MAX << 1),
1162 +};
1163 +
1164 +enum ext4_pdo_lock_opc {
1165 +       /* external */
1166 +       EXT4_HLOCK_READDIR      = (EXT4_LB_DE_RO | EXT4_LB_DX_RO),
1167 +       EXT4_HLOCK_LOOKUP       = (EXT4_LB_DE_RO | EXT4_LB_SPIN_RO |
1168 +                                  EXT4_LB_EXACT),
1169 +       EXT4_HLOCK_DEL          = (EXT4_LB_DE | EXT4_LB_SPIN_RO |
1170 +                                  EXT4_LB_EXACT),
1171 +       EXT4_HLOCK_ADD          = (EXT4_LB_DE | EXT4_LB_SPIN_RO),
1172 +
1173 +       /* internal */
1174 +       EXT4_HLOCK_LOOKUP_SAFE  = (EXT4_LB_DE_RO | EXT4_LB_DX_RO |
1175 +                                  EXT4_LB_EXACT),
1176 +       EXT4_HLOCK_DEL_SAFE     = (EXT4_LB_DE | EXT4_LB_DX_RO | EXT4_LB_EXACT),
1177 +       EXT4_HLOCK_SPLIT        = (EXT4_LB_DE | EXT4_LB_DX | EXT4_LB_SPIN),
1178 +};
1179 +
1180 +extern struct htree_lock_head *ext4_htree_lock_head_alloc(unsigned hbits);
1181 +#define ext4_htree_lock_head_free(lhead)       htree_lock_head_free(lhead)
1182 +
1183 +extern struct htree_lock *ext4_htree_lock_alloc(void);
1184 +#define ext4_htree_lock_free(lck)              htree_lock_free(lck)
1185 +
1186 +extern void ext4_htree_lock(struct htree_lock *lck,
1187 +                           struct htree_lock_head *lhead,
1188 +                           struct inode *dir, unsigned flags);
1189 +#define ext4_htree_unlock(lck)                  htree_unlock(lck)
1190 +
1191 +extern struct buffer_head *__ext4_find_entry(struct inode *dir,
1192 +                                       const struct qstr *d_name,
1193 +                                       struct ext4_dir_entry_2 **res_dir,
1194 +                                       int *inlined, struct htree_lock *lck);
1195 +extern int __ext4_add_entry(handle_t *handle, struct dentry *dentry,
1196 +                     struct inode *inode, struct htree_lock *lck);
1197  
1198  /*
1199   * Describe an inode's exact location on disk and in memory
1200 @@ -2088,9 +2157,17 @@ void ext4_insert_dentry(struct inode *in
1201                         const char *name, int namelen, void *data);
1202  static inline void ext4_update_dx_flag(struct inode *inode)
1203  {
1204 +       /* Disable it for ldiskfs, because going from a DX directory to
1205 +        * a non-DX directory while it is in use will completely break
1206 +        * the htree-locking.
1207 +        * If we really want to support this operation in the future,
1208 +        * we need to exclusively lock the directory here, which will
1209 +        * increase the complexity of the code */
1210 +#if 0
1211         if (!EXT4_HAS_COMPAT_FEATURE(inode->i_sb,
1212                                      EXT4_FEATURE_COMPAT_DIR_INDEX))
1213                 ext4_clear_inode_flag(inode, EXT4_INODE_INDEX);
1214 +#endif
1215  }
1216  static unsigned char ext4_filetype_table[] = {
1217         DT_UNKNOWN, DT_REG, DT_DIR, DT_CHR, DT_BLK, DT_FIFO, DT_SOCK, DT_LNK
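To show how the pdirops lock bits and opcodes declared above are meant to be
consumed, here is a hedged sketch of a caller (for example osd-ldiskfs, which
this patch integrates with) inserting a directory entry under pdirops. It is
illustrative only: the per-directory htree_lock_head is assumed to be cached by
the caller, and error handling is minimal.

/* illustrative sketch only -- not part of this patch */
static int pdo_add_entry(handle_t *handle, struct dentry *dentry,
			 struct inode *inode, struct htree_lock_head *lhead)
{
	struct inode *dir = dentry->d_parent->d_inode;
	struct htree_lock *lck = ext4_htree_lock_alloc();
	int rc;

	if (lck == NULL)
		return -ENOMEM;

	/* EXT4_HLOCK_ADD = writer on DE blocks + reader on the DX spinlock;
	 * ext4_htree_mode() (in namei.c below) maps this to a shared CW lock
	 * on the directory tree */
	ext4_htree_lock(lck, lhead, dir, EXT4_HLOCK_ADD);
	rc = __ext4_add_entry(handle, dentry, inode, lck);
	ext4_htree_unlock(lck);

	ext4_htree_lock_free(lck);
	return rc;
}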
1218 Index: linux-3.10.0-229.1.2.fc21.x86_64/fs/ext4/namei.c
1219 ===================================================================
1220 --- linux-3.10.0-229.1.2.fc21.x86_64.orig/fs/ext4/namei.c
1221 +++ linux-3.10.0-229.1.2.fc21.x86_64/fs/ext4/namei.c
1222 @@ -53,6 +53,7 @@ struct buffer_head *ext4_append(handle_t
1223                                         ext4_lblk_t *block)
1224  {
1225         struct buffer_head *bh;
1226 +       struct ext4_inode_info *ei = EXT4_I(inode);
1227         int err = 0;
1228  
1229         if (unlikely(EXT4_SB(inode->i_sb)->s_max_dir_size_kb &&
1230 @@ -60,15 +61,22 @@ struct buffer_head *ext4_append(handle_t
1231                       EXT4_SB(inode->i_sb)->s_max_dir_size_kb)))
1232                 return ERR_PTR(-ENOSPC);
1233  
1234 +       /* with parallel dir operations all appends
1235 +       * have to be serialized -bzzz */
1236 +       down(&ei->i_append_sem);
1237 +
1238         *block = inode->i_size >> inode->i_sb->s_blocksize_bits;
1239  
1240         bh = ext4_bread(handle, inode, *block, 1, &err);
1241 -       if (!bh)
1242 +       if (!bh) {
1243 +               up(&ei->i_append_sem);
1244                 return ERR_PTR(err);
1245 +       }
1246         inode->i_size += inode->i_sb->s_blocksize;
1247         EXT4_I(inode)->i_disksize = inode->i_size;
1248         BUFFER_TRACE(bh, "get_write_access");
1249         err = ext4_journal_get_write_access(handle, bh);
1250 +       up(&ei->i_append_sem);
1251         if (err) {
1252                 brelse(bh);
1253                 ext4_std_error(inode->i_sb, err);
1254 @@ -246,7 +254,7 @@ static struct dx_frame *dx_probe(const s
1255                                  struct inode *dir,
1256                                  struct dx_hash_info *hinfo,
1257                                  struct dx_frame *frame,
1258 -                                int *err);
1259 +                                struct htree_lock *lck, int *err);
1260  static void dx_release(struct dx_frame *frames);
1261  static int dx_make_map(struct ext4_dir_entry_2 *de, unsigned blocksize,
1262                        struct dx_hash_info *hinfo, struct dx_map_entry map[]);
1263 @@ -259,13 +267,13 @@ static void dx_insert_block(struct dx_fr
1264  static int ext4_htree_next_block(struct inode *dir, __u32 hash,
1265                                  struct dx_frame *frame,
1266                                  struct dx_frame *frames,
1267 -                                __u32 *start_hash);
1268 +                                __u32 *start_hash, struct htree_lock *lck);
1269  static struct buffer_head * ext4_dx_find_entry(struct inode *dir,
1270                 const struct qstr *d_name,
1271                 struct ext4_dir_entry_2 **res_dir,
1272 -               int *err);
1273 +               struct htree_lock *lck, int *err);
1274  static int ext4_dx_add_entry(handle_t *handle, struct dentry *dentry,
1275 -                            struct inode *inode);
1276 +                            struct inode *inode, struct htree_lock *lck);
1277  
1278  /* checksumming functions */
1279  void initialize_dirent_tail(struct ext4_dir_entry_tail *t,
1280 @@ -668,6 +676,227 @@ struct stats dx_show_entries(struct dx_h
1281  }
1282  #endif /* DX_DEBUG */
1283  
1284 +/* private data for htree_lock */
1285 +struct ext4_dir_lock_data {
1286 +       unsigned                ld_flags;  /* bits-map for lock types */
1287 +       unsigned                ld_count;  /* # entries of the last DX block */
1288 +       struct dx_entry         ld_at_entry; /* copy of leaf dx_entry */
1289 +       struct dx_entry         *ld_at;    /* position of leaf dx_entry */
1290 +};
1291 +
1292 +#define ext4_htree_lock_data(l)        ((struct ext4_dir_lock_data *)(l)->lk_private)
1293 +#define ext4_find_entry(dir, name, dirent, inline) \
1294 +                       __ext4_find_entry(dir, name, dirent, inline, NULL)
1295 +#define ext4_add_entry(handle, dentry, inode) \
1296 +                       __ext4_add_entry(handle, dentry, inode, NULL)
1297 +
1298 +/* NB: ext4_lblk_t is 32 bits so we use high bits to identify invalid blk */
1299 +#define EXT4_HTREE_NODE_CHANGED        (0xcafeULL << 32)
1300 +
1301 +static void ext4_htree_event_cb(void *target, void *event)
1302 +{
1303 +       u64 *block = (u64 *)target;
1304 +
1305 +       if (*block == dx_get_block((struct dx_entry *)event))
1306 +               *block = EXT4_HTREE_NODE_CHANGED;
1307 +}
1308 +
1309 +struct htree_lock_head *ext4_htree_lock_head_alloc(unsigned hbits)
1310 +{
1311 +       struct htree_lock_head *lhead;
1312 +
1313 +       lhead = htree_lock_head_alloc(EXT4_LK_MAX, hbits, 0);
1314 +       if (lhead != NULL) {
1315 +               htree_lock_event_attach(lhead, EXT4_LK_SPIN, HTREE_EVENT_WR,
1316 +                                       ext4_htree_event_cb);
1317 +       }
1318 +       return lhead;
1319 +}
1320 +EXPORT_SYMBOL(ext4_htree_lock_head_alloc);
1321 +
1322 +struct htree_lock *ext4_htree_lock_alloc(void)
1323 +{
1324 +       return htree_lock_alloc(EXT4_LK_MAX,
1325 +                               sizeof(struct ext4_dir_lock_data));
1326 +}
1327 +EXPORT_SYMBOL(ext4_htree_lock_alloc);
1328 +
1329 +static htree_lock_mode_t ext4_htree_mode(unsigned flags)
1330 +{
1331 +       switch (flags) {
1332 +       default: /* 0 or unknown flags require EX lock */
1333 +               return HTREE_LOCK_EX;
1334 +       case EXT4_HLOCK_READDIR:
1335 +               return HTREE_LOCK_PR;
1336 +       case EXT4_HLOCK_LOOKUP:
1337 +               return HTREE_LOCK_CR;
1338 +       case EXT4_HLOCK_DEL:
1339 +       case EXT4_HLOCK_ADD:
1340 +               return HTREE_LOCK_CW;
1341 +       }
1342 +}
1343 +
1344 +/* return PR for read-only operations, otherwise return EX */
1345 +static inline htree_lock_mode_t ext4_htree_safe_mode(unsigned flags)
1346 +{
1347 +       int writer = (flags & EXT4_LB_DE) == EXT4_LB_DE;
1348 +
1349 +       /* 0 requires EX lock */
1350 +       return (flags == 0 || writer) ? HTREE_LOCK_EX : HTREE_LOCK_PR;
1351 +}
1352 +
1353 +static int ext4_htree_safe_locked(struct htree_lock *lck)
1354 +{
1355 +       int writer;
1356 +
1357 +       if (lck == NULL || lck->lk_mode == HTREE_LOCK_EX)
1358 +               return 1;
1359 +
1360 +       writer = (ext4_htree_lock_data(lck)->ld_flags & EXT4_LB_DE) ==
1361 +                EXT4_LB_DE;
1362 +       if (writer) /* all readers & writers are excluded? */
1363 +               return lck->lk_mode == HTREE_LOCK_EX;
1364 +
1365 +       /* all writers are excluded? */
1366 +       return lck->lk_mode == HTREE_LOCK_PR ||
1367 +              lck->lk_mode == HTREE_LOCK_PW ||
1368 +              lck->lk_mode == HTREE_LOCK_EX;
1369 +}
1370 +
1371 +/* relock htree_lock with EX mode if it's a change operation, otherwise
1372 + * relock it with PR mode. It's a no-op if PDO is disabled. */
1373 +static void ext4_htree_safe_relock(struct htree_lock *lck)
1374 +{
1375 +       if (!ext4_htree_safe_locked(lck)) {
1376 +               unsigned flags = ext4_htree_lock_data(lck)->ld_flags;
1377 +
1378 +               htree_change_lock(lck, ext4_htree_safe_mode(flags));
1379 +       }
1380 +}
1381 +
1382 +void ext4_htree_lock(struct htree_lock *lck, struct htree_lock_head *lhead,
1383 +                    struct inode *dir, unsigned flags)
1384 +{
1385 +       htree_lock_mode_t mode = is_dx(dir) ? ext4_htree_mode(flags) :
1386 +                                             ext4_htree_safe_mode(flags);
1387 +
1388 +       ext4_htree_lock_data(lck)->ld_flags = flags;
1389 +       htree_lock(lck, lhead, mode);
1390 +       if (!is_dx(dir))
1391 +               ext4_htree_safe_relock(lck); /* make sure it's safely locked */
1392 +}
1393 +EXPORT_SYMBOL(ext4_htree_lock);
1394 +
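For orientation, here is a minimal sketch (editorial, not part of the patch) of how a caller such as osd-ldiskfs might drive the exported pdirop API for a lookup. The lock head would come from ext4_htree_lock_head_alloc() and live with the directory; the unlock/free counterparts (htree_unlock(), htree_lock_free()) are assumed from the htree_lock layer rather than shown in this hunk:

/* Sketch only: pdirop-aware lookup; error handling trimmed, and
 * htree_unlock()/htree_lock_free() are assumed htree_lock.h counterparts. */
static struct buffer_head *pdo_lookup_example(struct inode *dir,
					      const struct qstr *name,
					      struct ext4_dir_entry_2 **res_dir,
					      struct htree_lock_head *lhead)
{
	struct htree_lock *lck = ext4_htree_lock_alloc();	/* per thread */
	struct buffer_head *bh;

	if (lck == NULL)
		return NULL;
	/* CR on the whole htree when the directory is indexed; dx_probe()
	 * takes the per-block DE/DX locks itself based on EXT4_HLOCK_LOOKUP */
	ext4_htree_lock(lck, lhead, dir, EXT4_HLOCK_LOOKUP);
	bh = __ext4_find_entry(dir, name, res_dir, NULL, lck);
	htree_unlock(lck);	/* assumed counterpart from htree_lock.h */
	htree_lock_free(lck);	/* assumed */
	return bh;
}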
1395 +static int ext4_htree_node_lock(struct htree_lock *lck, struct dx_entry *at,
1396 +                               unsigned lmask, int wait, void *ev)
1397 +{
1398 +       u32     key = (at == NULL) ? 0 : dx_get_block(at);
1399 +       u32     mode;
1400 +
1401 +       /* no-op if the htree is well protected or the caller doesn't require the lock */
1402 +       if (ext4_htree_safe_locked(lck) ||
1403 +          !(ext4_htree_lock_data(lck)->ld_flags & lmask))
1404 +               return 1;
1405 +
1406 +       mode = (ext4_htree_lock_data(lck)->ld_flags & lmask) == lmask ?
1407 +               HTREE_LOCK_PW : HTREE_LOCK_PR;
1408 +       while (1) {
1409 +               if (htree_node_lock_try(lck, mode, key, ffz(~lmask), wait, ev))
1410 +                       return 1;
1411 +               if (!(lmask & EXT4_LB_SPIN)) /* not a spinlock */
1412 +                       return 0;
1413 +               cpu_relax(); /* spin until granted */
1414 +       }
1415 +}
1416 +
1417 +static int ext4_htree_node_locked(struct htree_lock *lck, unsigned lmask)
1418 +{
1419 +       return ext4_htree_safe_locked(lck) ||
1420 +              htree_node_is_granted(lck, ffz(~lmask));
1421 +}
1422 +
1423 +static void ext4_htree_node_unlock(struct htree_lock *lck,
1424 +                                  unsigned lmask, void *buf)
1425 +{
1426 +       /* NB: it's safe to call multiple times, even if it's not locked */
1427 +       if (!ext4_htree_safe_locked(lck) &&
1428 +            htree_node_is_granted(lck, ffz(~lmask)))
1429 +               htree_node_unlock(lck, ffz(~lmask), buf);
1430 +}
1431 +
1432 +#define ext4_htree_dx_lock(lck, key)           \
1433 +       ext4_htree_node_lock(lck, key, EXT4_LB_DX, 1, NULL)
1434 +#define ext4_htree_dx_lock_try(lck, key)       \
1435 +       ext4_htree_node_lock(lck, key, EXT4_LB_DX, 0, NULL)
1436 +#define ext4_htree_dx_unlock(lck)              \
1437 +       ext4_htree_node_unlock(lck, EXT4_LB_DX, NULL)
1438 +#define ext4_htree_dx_locked(lck)              \
1439 +       ext4_htree_node_locked(lck, EXT4_LB_DX)
1440 +
1441 +static void ext4_htree_dx_need_lock(struct htree_lock *lck)
1442 +{
1443 +       struct ext4_dir_lock_data *ld;
1444 +
1445 +       if (ext4_htree_safe_locked(lck))
1446 +               return;
1447 +
1448 +       ld = ext4_htree_lock_data(lck);
1449 +       switch (ld->ld_flags) {
1450 +       default:
1451 +               return;
1452 +       case EXT4_HLOCK_LOOKUP:
1453 +               ld->ld_flags = EXT4_HLOCK_LOOKUP_SAFE;
1454 +               return;
1455 +       case EXT4_HLOCK_DEL:
1456 +               ld->ld_flags = EXT4_HLOCK_DEL_SAFE;
1457 +               return;
1458 +       case EXT4_HLOCK_ADD:
1459 +               ld->ld_flags = EXT4_HLOCK_SPLIT;
1460 +               return;
1461 +       }
1462 +}
1463 +
1464 +#define ext4_htree_de_lock(lck, key)           \
1465 +       ext4_htree_node_lock(lck, key, EXT4_LB_DE, 1, NULL)
1466 +#define ext4_htree_de_unlock(lck)              \
1467 +       ext4_htree_node_unlock(lck, EXT4_LB_DE, NULL)
1468 +
1469 +#define ext4_htree_spin_lock(lck, key, event)  \
1470 +       ext4_htree_node_lock(lck, key, EXT4_LB_SPIN, 0, event)
1471 +#define ext4_htree_spin_unlock(lck)            \
1472 +       ext4_htree_node_unlock(lck, EXT4_LB_SPIN, NULL)
1473 +#define ext4_htree_spin_unlock_listen(lck, p)  \
1474 +       ext4_htree_node_unlock(lck, EXT4_LB_SPIN, p)
1475 +
1476 +static void ext4_htree_spin_stop_listen(struct htree_lock *lck)
1477 +{
1478 +       if (!ext4_htree_safe_locked(lck) &&
1479 +           htree_node_is_listening(lck, ffz(~EXT4_LB_SPIN)))
1480 +               htree_node_stop_listen(lck, ffz(~EXT4_LB_SPIN));
1481 +}
1482 +
1483 +enum {
1484 +       DX_HASH_COL_IGNORE,     /* ignore collision while probing frames */
1485 +       DX_HASH_COL_YES,        /* there is a collision and it matters */
1486 +       DX_HASH_COL_NO,         /* there is no collision */
1487 +};
1488 +
1489 +static int dx_probe_hash_collision(struct htree_lock *lck,
1490 +                                  struct dx_entry *entries,
1491 +                                  struct dx_entry *at, u32 hash)
1492 +{
1493 +       if (!(ext4_htree_lock_data(lck)->ld_flags & EXT4_LB_EXACT)) {
1494 +               return DX_HASH_COL_IGNORE; /* don't care about collision */
1495 +
1496 +       } else if (at == entries + dx_get_count(entries) - 1) {
1497 +               return DX_HASH_COL_IGNORE; /* not in any leaf of this DX */
1498 +
1499 +       } else { /* hash collision? */
1500 +               return ((dx_get_hash(at + 1) & ~1) == hash) ?
1501 +                       DX_HASH_COL_YES : DX_HASH_COL_NO;
1502 +       }
1503 +}
1504 +
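A note on the check above (editorial, not part of the patch): bit 0 of a hash stored in a dx_entry is the "continued" marker, which do_split() sets via the `hash2 + continued` insertion further down, so masking with ~1 compares major hashes. DX_HASH_COL_YES therefore means entries with this hash may spill into the next leaf block, and dx_probe() will retry with the DX-block locked. A standalone toy illustration of the comparison:

/* Userspace toy (hypothetical helper, not in the patch) mirroring
 * (dx_get_hash(at + 1) & ~1) == hash from dx_probe_hash_collision(). */
#include <stdio.h>

static int same_major_hash(unsigned int next_stored, unsigned int hash)
{
	return (next_stored & ~1u) == hash; /* clear the "continued" bit */
}

int main(void)
{
	printf("%d\n", same_major_hash(0x3c29, 0x3c28)); /* 1: collision matters */
	printf("%d\n", same_major_hash(0x4000, 0x3c28)); /* 0: no collision */
	return 0;
}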
1505  /*
1506   * Probe for a directory leaf block to search.
1507   *
1508 @@ -679,10 +908,11 @@ struct stats dx_show_entries(struct dx_h
1509   */
1510  static struct dx_frame *
1511  dx_probe(const struct qstr *d_name, struct inode *dir,
1512 -        struct dx_hash_info *hinfo, struct dx_frame *frame_in, int *err)
1513 +        struct dx_hash_info *hinfo, struct dx_frame *frame_in,
1514 +        struct htree_lock *lck, int *err)
1515  {
1516         unsigned count, indirect;
1517 -       struct dx_entry *at, *entries, *p, *q, *m;
1518 +       struct dx_entry *at, *entries, *p, *q, *m, *dx = NULL;
1519         struct dx_root_info *info;
1520         struct buffer_head *bh;
1521         struct dx_frame *frame = frame_in;
1522 @@ -750,8 +980,15 @@ dx_probe(const struct qstr *d_name, stru
1523         dxtrace(printk("Look up %x", hash));
1524         while (1)
1525         {
1526 +               if (indirect == 0) { /* the last index level */
1527 +                       /* NB: ext4_htree_dx_lock() could be a no-op if the
1528 +                        * DX-lock flag is not set for the current operation */
1529 +                       ext4_htree_dx_lock(lck, dx);
1530 +                       ext4_htree_spin_lock(lck, dx, NULL);
1531 +               }
1532                 count = dx_get_count(entries);
1533 -               if (!count || count > dx_get_limit(entries)) {
1534 +               if (count == 0 || count > dx_get_limit(entries)) {
1535 +                       ext4_htree_spin_unlock(lck); /* release spin */
1536                         ext4_warning(dir->i_sb,
1537                                      "dx entry: no count or count > limit");
1538                         brelse(bh);
1539 @@ -792,7 +1029,70 @@ dx_probe(const struct qstr *d_name, stru
1540                 frame->bh = bh;
1541                 frame->entries = entries;
1542                 frame->at = at;
1543 -               if (!indirect--) return frame;
1544 +
1545 +               if (indirect == 0) { /* the last index level */
1546 +                       struct ext4_dir_lock_data *ld;
1547 +                       u64 myblock;
1548 +
1549 +                       /* By default we only lock the DE-block; however, we
1550 +                        * will also lock the last-level DX-block if:
1551 +                        * a) there is a hash collision
1552 +                        *    we will set the DX-lock flag (a few lines below)
1553 +                        *    and retry to lock the DX-block,
1554 +                        *    see details in dx_probe_hash_collision()
1555 +                        * b) it's a retry because a split is needed
1556 +                        *    we need to lock the last-level DX-block so nobody
1557 +                        *    else can split any leaf blocks under the same
1558 +                        *    DX-block, see details in ext4_dx_add_entry()
1559 +                        */
1560 +                       if (ext4_htree_dx_locked(lck)) {
1561 +                               /* DX-block is locked, just lock DE-block
1562 +                                * and return */
1563 +                               ext4_htree_spin_unlock(lck);
1564 +                               if (!ext4_htree_safe_locked(lck))
1565 +                                       ext4_htree_de_lock(lck, frame->at);
1566 +                               return frame;
1567 +                       }
1568 +                       /* it's a pdirop and the DX-block is not locked */
1569 +                       if (dx_probe_hash_collision(lck, entries, at, hash) ==
1570 +                           DX_HASH_COL_YES) {
1571 +                               /* found a hash collision, set the DX-lock
1572 +                                * flag and retry to obtain the DX-lock */
1573 +                               ext4_htree_spin_unlock(lck);
1574 +                               ext4_htree_dx_need_lock(lck);
1575 +                               continue;
1576 +                       }
1577 +                       ld = ext4_htree_lock_data(lck);
1578 +                       /* since we don't hold the DX lock, @at can't be
1579 +                        * trusted after the spinlock is released, so save it */
1580 +                       ld->ld_at = at;
1581 +                       ld->ld_at_entry = *at;
1582 +                       ld->ld_count = dx_get_count(entries);
1583 +
1584 +                       frame->at = &ld->ld_at_entry;
1585 +                       myblock = dx_get_block(at);
1586 +
1587 +                       /* NB: mind the locking order below */
1588 +                       ext4_htree_spin_unlock_listen(lck, &myblock);
1589 +                       /* another thread can split this DE-block because:
1590 +                        * a) we don't hold the DE-block lock yet
1591 +                        * b) we released the spinlock on the DX-block
1592 +                        * if that happens we can detect it by listening for
1593 +                        * the split event on this DE-block */
1594 +                       ext4_htree_de_lock(lck, frame->at);
1595 +                       ext4_htree_spin_stop_listen(lck);
1596 +
1597 +                       if (myblock == EXT4_HTREE_NODE_CHANGED) {
1598 +                               /* someone split this DE-block before we
1599 +                                * locked it; retry and lock the valid
1600 +                                * DE-block */
1601 +                               ext4_htree_de_unlock(lck);
1602 +                               continue;
1603 +                       }
1604 +                       return frame;
1605 +               }
1606 +               dx = at;
1607 +               indirect--;
1608                 bh = ext4_read_dirblock(dir, dx_get_block(at), INDEX);
1609                 if (IS_ERR(bh)) {
1610                         *err = PTR_ERR(bh);
1611 @@ -860,7 +1160,7 @@ static void dx_release (struct dx_frame
1612  static int ext4_htree_next_block(struct inode *dir, __u32 hash,
1613                                  struct dx_frame *frame,
1614                                  struct dx_frame *frames,
1615 -                                __u32 *start_hash)
1616 +                                __u32 *start_hash, struct htree_lock *lck)
1617  {
1618         struct dx_frame *p;
1619         struct buffer_head *bh;
1620 @@ -875,12 +1175,22 @@ static int ext4_htree_next_block(struct
1621          * this loop, num_frames indicates the number of interior
1622          * nodes need to be read.
1623          */
1624 +       ext4_htree_de_unlock(lck);
1625         while (1) {
1626 -               if (++(p->at) < p->entries + dx_get_count(p->entries))
1627 -                       break;
1628 +               if (num_frames > 0 || ext4_htree_dx_locked(lck)) {
1629 +                       /* num_frames > 0:
1630 +                        *   this is an interior DX block, p->at is valid
1631 +                        * ext4_htree_dx_locked:
1632 +                        *   frame->at is a reliable pointer returned by
1633 +                        *   dx_probe, otherwise dx_probe saw no collision */
1634 +                       if (++(p->at) < p->entries + dx_get_count(p->entries))
1635 +                               break;
1636 +               }
1637                 if (p == frames)
1638                         return 0;
1639                 num_frames++;
1640 +               if (num_frames == 1)
1641 +                       ext4_htree_dx_unlock(lck);
1642                 p--;
1643         }
1644  
1645 @@ -903,6 +1213,13 @@ static int ext4_htree_next_block(struct
1646          * block so no check is necessary
1647          */
1648         while (num_frames--) {
1649 +               if (num_frames == 0) {
1650 +                       /* this is not always necessary; we just don't want
1651 +                        * to detect the hash collision again */
1652 +                       ext4_htree_dx_need_lock(lck);
1653 +                       ext4_htree_dx_lock(lck, p->at);
1654 +               }
1655 +
1656                 bh = ext4_read_dirblock(dir, dx_get_block(p->at), INDEX);
1657                 if (IS_ERR(bh))
1658                         return PTR_ERR(bh);
1659 @@ -911,6 +1228,7 @@ static int ext4_htree_next_block(struct
1660                 p->bh = bh;
1661                 p->at = p->entries = ((struct dx_node *) bh->b_data)->entries;
1662         }
1663 +       ext4_htree_de_lock(lck, p->at);
1664         return 1;
1665  }
1666  
1667 @@ -1013,10 +1331,10 @@ int ext4_htree_fill_tree(struct file *di
1668         }
1669         hinfo.hash = start_hash;
1670         hinfo.minor_hash = 0;
1671 -       frame = dx_probe(NULL, dir, &hinfo, frames, &err);
1672 +       /* assume the whole htree is PR locked by the caller */
1673 +       frame = dx_probe(NULL, dir, &hinfo, frames, NULL, &err);
1674         if (!frame)
1675                 return err;
1676 -
1677         /* Add '.' and '..' from the htree header */
1678         if (!start_hash && !start_minor_hash) {
1679                 de = (struct ext4_dir_entry_2 *) frames[0].bh->b_data;
1680 @@ -1043,7 +1361,7 @@ int ext4_htree_fill_tree(struct file *di
1681                 count += ret;
1682                 hashval = ~0;
1683                 ret = ext4_htree_next_block(dir, HASH_NB_ALWAYS,
1684 -                                           frame, frames, &hashval);
1685 +                                           frame, frames, &hashval, NULL);
1686                 *next_hash = hashval;
1687                 if (ret < 0) {
1688                         err = ret;
1689 @@ -1236,10 +1554,10 @@ static int is_dx_internal_node(struct in
1690   * The returned buffer_head has ->b_count elevated.  The caller is expected
1691   * to brelse() it when appropriate.
1692   */
1693 -static struct buffer_head * ext4_find_entry (struct inode *dir,
1694 +struct buffer_head *__ext4_find_entry(struct inode *dir,
1695                                         const struct qstr *d_name,
1696                                         struct ext4_dir_entry_2 **res_dir,
1697 -                                       int *inlined)
1698 +                                       int *inlined, struct htree_lock *lck)
1699  {
1700         struct super_block *sb;
1701         struct buffer_head *bh_use[NAMEI_RA_SIZE];
1702 @@ -1283,7 +1601,7 @@ static struct buffer_head * ext4_find_en
1703                 goto restart;
1704         }
1705         if (is_dx(dir)) {
1706 -               bh = ext4_dx_find_entry(dir, d_name, res_dir, &err);
1707 +               bh = ext4_dx_find_entry(dir, d_name, res_dir, lck, &err);
1708                 /*
1709                  * On success, or if the error was file not found,
1710                  * return.  Otherwise, fall back to doing a search the
1711 @@ -1297,6 +1615,7 @@ static struct buffer_head * ext4_find_en
1712                         return bh;
1713                 dxtrace(printk(KERN_DEBUG "ext4_find_entry: dx failed, "
1714                                "falling back\n"));
1715 +               ext4_htree_safe_relock(lck);
1716         }
1717         nblocks = dir->i_size >> EXT4_BLOCK_SIZE_BITS(sb);
1718         start = EXT4_I(dir)->i_dir_start_lookup;
1719 @@ -1389,9 +1708,12 @@ cleanup_and_exit:
1720                 brelse(bh_use[ra_ptr]);
1721         return ret;
1722  }
1723 +EXPORT_SYMBOL(__ext4_find_entry);
1724  
1725 -static struct buffer_head * ext4_dx_find_entry(struct inode *dir, const struct qstr *d_name,
1726 -                      struct ext4_dir_entry_2 **res_dir, int *err)
1727 +static struct buffer_head *ext4_dx_find_entry(struct inode *dir,
1728 +                               const struct qstr *d_name,
1729 +                               struct ext4_dir_entry_2 **res_dir,
1730 +                               struct htree_lock *lck, int *err)
1731  {
1732         struct super_block * sb = dir->i_sb;
1733         struct dx_hash_info     hinfo;
1734 @@ -1400,7 +1722,7 @@ static struct buffer_head * ext4_dx_find
1735         ext4_lblk_t block;
1736         int retval;
1737  
1738 -       if (!(frame = dx_probe(d_name, dir, &hinfo, frames, err)))
1739 +       if (!(frame = dx_probe(d_name, dir, &hinfo, frames, lck, err)))
1740                 return NULL;
1741         do {
1742                 block = dx_get_block(frame->at);
1743 @@ -1424,7 +1746,7 @@ static struct buffer_head * ext4_dx_find
1744  
1745                 /* Check to see if we should continue to search */
1746                 retval = ext4_htree_next_block(dir, hinfo.hash, frame,
1747 -                                              frames, NULL);
1748 +                                              frames, NULL, lck);
1749                 if (retval < 0) {
1750                         ext4_warning(sb,
1751                              "error reading index page in directory #%lu",
1752 @@ -1583,8 +1905,9 @@ static struct ext4_dir_entry_2* dx_pack_
1753   * Returns pointer to de in block into which the new entry will be inserted.
1754   */
1755  static struct ext4_dir_entry_2 *do_split(handle_t *handle, struct inode *dir,
1756 -                       struct buffer_head **bh,struct dx_frame *frame,
1757 -                       struct dx_hash_info *hinfo, int *error)
1758 +                       struct buffer_head **bh, struct dx_frame *frames,
1759 +                       struct dx_frame *frame, struct dx_hash_info *hinfo,
1760 +                       struct htree_lock *lck, int *error)
1761  {
1762         unsigned blocksize = dir->i_sb->s_blocksize;
1763         unsigned count, continued;
1764 @@ -1647,7 +1970,14 @@ static struct ext4_dir_entry_2 *do_split
1765                                         hash2, split, count-split));
1766  
1767         /* Fancy dance to stay within two buffers */
1768 -       de2 = dx_move_dirents(data1, data2, map + split, count - split, blocksize);
1769 +       if (hinfo->hash < hash2) {
1770 +               de2 = dx_move_dirents(data1, data2, map + split,
1771 +                                     count - split, blocksize);
1772 +       } else {
1773 +               /* make sure we will add the entry to the same block
1774 +                * that we have already locked */
1775 +               de2 = dx_move_dirents(data1, data2, map, split, blocksize);
1776 +       }
1777         de = dx_pack_dirents(data1, blocksize);
1778         de->rec_len = ext4_rec_len_to_disk(data1 + (blocksize - csum_size) -
1779                                            (char *) de,
1780 @@ -1666,13 +1996,21 @@ static struct ext4_dir_entry_2 *do_split
1781         dxtrace(dx_show_leaf (hinfo, (struct ext4_dir_entry_2 *) data1, blocksize, 1));
1782         dxtrace(dx_show_leaf (hinfo, (struct ext4_dir_entry_2 *) data2, blocksize, 1));
1783  
1784 -       /* Which block gets the new entry? */
1785 -       if (hinfo->hash >= hash2)
1786 -       {
1787 -               swap(*bh, bh2);
1788 -               de = de2;
1789 +       ext4_htree_spin_lock(lck, frame > frames ? (frame - 1)->at : NULL,
1790 +                            frame->at); /* notify that the block is being split */
1791 +       if (hinfo->hash < hash2) {
1792 +               dx_insert_block(frame, hash2 + continued, newblock);
1793 +
1794 +       } else {
1795 +               /* switch block number */
1796 +               dx_insert_block(frame, hash2 + continued,
1797 +                               dx_get_block(frame->at));
1798 +               dx_set_block(frame->at, newblock);
1799 +               (frame->at)++;
1800         }
1801 -       dx_insert_block(frame, hash2 + continued, newblock);
1802 +       ext4_htree_spin_unlock(lck);
1803 +       ext4_htree_dx_unlock(lck);
1804 +
1805         err = ext4_handle_dirty_dirent_node(handle, dir, bh2);
1806         if (err)
1807                 goto journal_error;
1808 @@ -1945,7 +2283,7 @@ static int make_indexed_dir(handle_t *ha
1809         ext4_handle_dirty_dx_node(handle, dir, frame->bh);
1810         ext4_handle_dirty_dirent_node(handle, dir, bh);
1811  
1812 -       de = do_split(handle,dir, &bh, frame, &hinfo, &retval);
1813 +       de = do_split(handle, dir, &bh, frames, frame, &hinfo, NULL, &retval);
1814         if (!de) {
1815                 /*
1816                  * Even if the block split failed, we have to properly write
1817 @@ -2051,8 +2389,8 @@ out:
1818   * may not sleep between calling this and putting something into
1819   * the entry, as someone else might have used it while you slept.
1820   */
1821 -static int ext4_add_entry(handle_t *handle, struct dentry *dentry,
1822 -                         struct inode *inode)
1823 +int __ext4_add_entry(handle_t *handle, struct dentry *dentry,
1824 +                     struct inode *inode, struct htree_lock *lck)
1825  {
1826         struct inode *dir = dentry->d_parent->d_inode;
1827         struct buffer_head *bh;
1828 @@ -2087,9 +2425,10 @@ static int ext4_add_entry(handle_t *hand
1829                 if (dentry->d_name.len == 2 &&
1830                     memcmp(dentry->d_name.name, "..", 2) == 0)
1831                         return ext4_update_dotdot(handle, dentry, inode);
1832 -               retval = ext4_dx_add_entry(handle, dentry, inode);
1833 +               retval = ext4_dx_add_entry(handle, dentry, inode, lck);
1834                 if (!retval || (retval != ERR_BAD_DX_DIR))
1835                         goto out;
1836 +               ext4_htree_safe_relock(lck);
1837                 ext4_clear_inode_flag(dir, EXT4_INODE_INDEX);
1838                 dx_fallback++;
1839                 ext4_mark_inode_dirty(handle, dir);
1840 @@ -2129,12 +2468,13 @@ static int ext4_add_entry(handle_t *hand
1841                 ext4_set_inode_state(inode, EXT4_STATE_NEWENTRY);
1842         return retval;
1843  }
1844 +EXPORT_SYMBOL(__ext4_add_entry);
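Correspondingly, a hedged sketch (again editorial, not part of the patch) of the create path: the caller takes the htree lock in EXT4_HLOCK_ADD (CW) mode so concurrent creators can proceed in parallel, while __ext4_add_entry()/ext4_dx_add_entry() take the DE/DX block locks internally and relock in a safe mode themselves when the tree has to be restructured:

/* Sketch only: pdirop-aware create; htree_unlock()/htree_lock_free() are
 * assumed htree_lock.h counterparts, not defined in this hunk. */
static int pdo_add_entry_example(handle_t *handle, struct dentry *dentry,
				 struct inode *inode,
				 struct htree_lock_head *lhead)
{
	struct inode *dir = dentry->d_parent->d_inode;
	struct htree_lock *lck = ext4_htree_lock_alloc();
	int rc;

	if (lck == NULL)
		return -ENOMEM;
	ext4_htree_lock(lck, lhead, dir, EXT4_HLOCK_ADD);
	rc = __ext4_add_entry(handle, dentry, inode, lck);
	htree_unlock(lck);	/* assumed counterpart from htree_lock.h */
	htree_lock_free(lck);	/* assumed */
	return rc;
}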
1845  
1846  /*
1847   * Returns 0 for success, or a negative error value
1848   */
1849  static int ext4_dx_add_entry(handle_t *handle, struct dentry *dentry,
1850 -                            struct inode *inode)
1851 +                            struct inode *inode, struct htree_lock *lck)
1852  {
1853         struct dx_frame frames[EXT4_HTREE_LEVEL], *frame;
1854         struct dx_entry *entries, *at;
1855 @@ -2148,7 +2488,7 @@ static int ext4_dx_add_entry(handle_t *h
1856  
1857  again:
1858         restart = 0;
1859 -       frame = dx_probe(&dentry->d_name, dir, &hinfo, frames, &err);
1860 +       frame = dx_probe(&dentry->d_name, dir, &hinfo, frames, lck, &err);
1861         if (!frame)
1862                 return err;
1863         entries = frame->entries;
1864 @@ -2178,6 +2518,11 @@ again:
1865                 struct dx_node *node2;
1866                 struct buffer_head *bh2;
1867  
1868 +               if (!ext4_htree_safe_locked(lck)) { /* retry with EX lock */
1869 +                       ext4_htree_safe_relock(lck);
1870 +                       restart = 1;
1871 +                       goto cleanup;
1872 +               }
1873                 while (frame > frames) {
1874                         if (dx_get_count((frame - 1)->entries) <
1875                             dx_get_limit((frame - 1)->entries)) {
1876 @@ -2277,16 +2622,43 @@ again:
1877                         restart = 1;
1878                         goto cleanup;
1879                 }
1880 +       } else if (!ext4_htree_dx_locked(lck)) {
1881 +               struct ext4_dir_lock_data *ld = ext4_htree_lock_data(lck);
1882 +
1883 +               /* not well protected, we need the DX lock */
1884 +               ext4_htree_dx_need_lock(lck);
1885 +               at = frame > frames ? (frame - 1)->at : NULL;
1886 +
1887 +               /* NB: no risk of deadlock because it's just a try.
1888 +                *
1889 +                * NB: we check ld_count twice, the first time before
1890 +                * taking the DX lock, the second time while holding it.
1891 +                *
1892 +                * NB: we never free directory blocks so far, which
1893 +                * means the value returned by dx_get_count() should equal
1894 +                * ld->ld_count if nobody split any DE-block under @at,
1895 +                * and ld->ld_at still points to a valid dx_entry. */
1896 +               if ((ld->ld_count != dx_get_count(entries)) ||
1897 +                   !ext4_htree_dx_lock_try(lck, at) ||
1898 +                   (ld->ld_count != dx_get_count(entries))) {
1899 +                       restart = 1;
1900 +                       goto cleanup;
1901 +               }
1902 +               /* OK, we've got the DX lock and nothing changed */
1903 +               frame->at = ld->ld_at;
1904         }
1905 -       de = do_split(handle, dir, &bh, frame, &hinfo, &err);
1906 +       de = do_split(handle, dir, &bh, frames, frame, &hinfo, lck, &err);
1907         if (!de)
1908                 goto cleanup;
1909 +
1910         err = add_dirent_to_buf(handle, dentry, inode, de, bh);
1911         goto cleanup;
1912  
1913  journal_error:
1914         ext4_std_error(dir->i_sb, err);
1915  cleanup:
1916 +       ext4_htree_dx_unlock(lck);
1917 +       ext4_htree_de_unlock(lck);
1918         brelse(bh);
1919         dx_release(frames);
1920         /* @restart is true means htree-path has been changed, we need to
1921 Index: linux-3.10.0-229.1.2.fc21.x86_64/fs/ext4/super.c
1922 ===================================================================
1923 --- linux-3.10.0-229.1.2.fc21.x86_64.orig/fs/ext4/super.c
1924 +++ linux-3.10.0-229.1.2.fc21.x86_64/fs/ext4/super.c
1925 @@ -875,5 +875,6 @@ static struct inode *ext4_alloc_inode(st
1926  
1927         ei->vfs_inode.i_version = 1;
1928 +       sema_init(&ei->i_append_sem, 1);
1929         INIT_LIST_HEAD(&ei->i_prealloc_list);
1930         spin_lock_init(&ei->i_prealloc_lock);
1931         ext4_es_init_tree(&ei->i_es_tree);