[fs/lustre-release.git] / ldiskfs / kernel_patches / patches / rhel6.9 / ext4-pdirop.patch
1 Index: linux-2.6.32-504.3.3.el6.x86_64/include/linux/htree_lock.h
2 ===================================================================
3 --- /dev/null
4 +++ linux-2.6.32-504.3.3.el6.x86_64/include/linux/htree_lock.h
5 @@ -0,0 +1,187 @@
6 +/*
7 + * include/linux/htree_lock.h
8 + *
9 + * Copyright (c) 2011, 2012, Intel Corporation.
10 + *
11 + * Author: Liang Zhen <liang@whamcloud.com>
12 + */
13 +
14 +/*
15 + * htree lock
16 + *
17 + * htree_lock is an advanced sleeping lock; it supports five lock modes (the
18 + * concept is taken from DLM).
19 + *
20 + * The most common use case is:
21 + * - create a htree_lock_head for the data
22 + * - each thread (contender) creates its own htree_lock
23 + * - a contender calls htree_lock(lock_node, mode) to protect the data and
24 + *   calls htree_unlock to release the lock
25 + *
26 + * There is also a more complex, advanced use case: a user can take a PW/PR
27 + * lock on a particular key; this is mostly done while holding a shared
28 + * lock on the htree (CW, CR)
29 + *
30 + * htree_lock(lock_node, HTREE_LOCK_CR); lock the htree with CR
31 + * htree_node_lock(lock_node, HTREE_LOCK_PR, key...); lock @key with PR
32 + * ...
33 + * htree_node_unlock(lock_node); unlock the key
34 + *
35 + * Another tip: we can have N levels of such keys; all we need to do is
36 + * specify N levels while creating the htree_lock_head, then we can
37 + * lock/unlock a specific level with:
38 + * htree_node_lock(lock_node, mode1, key1, level1...);
39 + * do something;
40 + * htree_node_lock(lock_node, mode1, key2, level2...);
41 + * do something;
42 + * htree_node_unlock(lock_node, level2);
43 + * htree_node_unlock(lock_node, level1);
44 + *
45 + * NB: with multiple levels, be careful about locking order to avoid deadlock
46 + */
47 +
48 +#ifndef _LINUX_HTREE_LOCK_H
49 +#define _LINUX_HTREE_LOCK_H
50 +
51 +#include <linux/list.h>
52 +#include <linux/spinlock.h>
53 +#include <linux/sched.h>
54 +
55 +/*
56 + * Lock Modes
57 + * more details can be found here:
58 + * http://en.wikipedia.org/wiki/Distributed_lock_manager
59 + */
60 +typedef enum {
61 +       HTREE_LOCK_EX   = 0, /* exclusive lock: incompatible with all others */
62 +       HTREE_LOCK_PW,       /* protected write: allows only CR users */
63 +       HTREE_LOCK_PR,       /* protected read: allow PR, CR users */
64 +       HTREE_LOCK_CW,       /* concurrent write: allow CR, CW users */
65 +       HTREE_LOCK_CR,       /* concurrent read: allow all but EX users */
66 +       HTREE_LOCK_MAX,      /* number of lock modes */
67 +} htree_lock_mode_t;
68 +
69 +#define HTREE_LOCK_NL          HTREE_LOCK_MAX
70 +#define HTREE_LOCK_INVAL       0xdead10c
71 +
72 +enum {
73 +       HTREE_HBITS_MIN         = 2,
74 +       HTREE_HBITS_DEF         = 14,
75 +       HTREE_HBITS_MAX         = 32,
76 +};
77 +
78 +enum {
79 +       HTREE_EVENT_DISABLE     = (0),
80 +       HTREE_EVENT_RD          = (1 << HTREE_LOCK_PR),
81 +       HTREE_EVENT_WR          = (1 << HTREE_LOCK_PW),
82 +       HTREE_EVENT_RDWR        = (HTREE_EVENT_RD | HTREE_EVENT_WR),
83 +};
84 +
85 +struct htree_lock;
86 +
87 +typedef void (*htree_event_cb_t)(void *target, void *event);
88 +
89 +struct htree_lock_child {
90 +       struct list_head        lc_list;        /* granted list */
91 +       htree_event_cb_t        lc_callback;    /* event callback */
92 +       unsigned                lc_events;      /* event types */
93 +};
94 +
95 +struct htree_lock_head {
96 +       unsigned long           lh_lock;        /* bits lock */
97 +       /* blocked lock list (htree_lock) */
98 +       struct list_head        lh_blocked_list;
99 +       /* # key levels */
100 +       u16                     lh_depth;
101 +       /* hash bits for key and limit number of locks */
102 +       u16                     lh_hbits;
103 +       /* counters for blocked locks */
104 +       u16                     lh_nblocked[HTREE_LOCK_MAX];
105 +       /* counters for granted locks */
106 +       u16                     lh_ngranted[HTREE_LOCK_MAX];
107 +       /* private data */
108 +       void                    *lh_private;
109 +       /* array of children locks */
110 +       struct htree_lock_child lh_children[0];
111 +};
112 +
113 +/* htree_lock_node_t is child-lock for a specific key (ln_value) */
114 +struct htree_lock_node {
115 +       htree_lock_mode_t       ln_mode;
116 +       /* major hash key */
117 +       u16                     ln_major_key;
118 +       /* minor hash key */
119 +       u16                     ln_minor_key;
120 +       struct list_head        ln_major_list;
121 +       struct list_head        ln_minor_list;
122 +       /* alive list, all locks (granted, blocked, listening) are on it */
123 +       struct list_head        ln_alive_list;
124 +       /* blocked list */
125 +       struct list_head        ln_blocked_list;
126 +       /* granted list */
127 +       struct list_head        ln_granted_list;
128 +       void                    *ln_ev_target;
129 +};
130 +
131 +struct htree_lock {
132 +       struct task_struct      *lk_task;
133 +       struct htree_lock_head  *lk_head;
134 +       void                    *lk_private;
135 +       unsigned                lk_depth;
136 +       htree_lock_mode_t       lk_mode;
137 +       struct list_head        lk_blocked_list;
138 +       struct htree_lock_node  lk_nodes[0];
139 +};
140 +
141 +/* create a lock head, which stands for a resource */
142 +struct htree_lock_head *htree_lock_head_alloc(unsigned depth,
143 +                                             unsigned hbits, unsigned priv);
144 +/* free a lock head */
145 +void htree_lock_head_free(struct htree_lock_head *lhead);
146 +/* register event callback for child lock at level @depth */
147 +void htree_lock_event_attach(struct htree_lock_head *lhead, unsigned depth,
148 +                            unsigned events, htree_event_cb_t callback);
149 +/* create a lock handle, which stands for a thread */
150 +struct htree_lock *htree_lock_alloc(unsigned depth, unsigned pbytes);
151 +/* free a lock handle */
152 +void htree_lock_free(struct htree_lock *lck);
153 +/* lock htree; when @wait is false, 0 is returned if the lock can't
154 + * be granted immediately */
155 +int htree_lock_try(struct htree_lock *lck, struct htree_lock_head *lhead,
156 +                  htree_lock_mode_t mode, int wait);
157 +/* unlock htree */
158 +void htree_unlock(struct htree_lock *lck);
159 +/* unlock and relock htree with @new_mode */
160 +int htree_change_lock_try(struct htree_lock *lck,
161 +                         htree_lock_mode_t new_mode, int wait);
162 +void htree_change_mode(struct htree_lock *lck, htree_lock_mode_t mode);
163 +/* acquire child lock (key) of the htree at level @dep; @event will be sent to
164 + * all listeners on this @key when the lock is granted */
165 +int htree_node_lock_try(struct htree_lock *lck, htree_lock_mode_t mode,
166 +                       u32 key, unsigned dep, int wait, void *event);
167 +/* release child lock at level @dep; this lock will listen on its key
168 + * if @event isn't NULL, and event_cb will be called against @lck when granting
169 + * any other lock at level @dep with the same key */
170 +void htree_node_unlock(struct htree_lock *lck, unsigned dep, void *event);
171 +/* stop listening on child lock at level @dep */
172 +void htree_node_stop_listen(struct htree_lock *lck, unsigned dep);
173 +/* for debug */
174 +void htree_lock_stat_print(int depth);
175 +void htree_lock_stat_reset(void);
176 +
177 +#define htree_lock(lck, lh, mode)      htree_lock_try(lck, lh, mode, 1)
178 +#define htree_change_lock(lck, mode)   htree_change_lock_try(lck, mode, 1)
179 +
180 +#define htree_lock_mode(lck)           ((lck)->lk_mode)
181 +
182 +#define htree_node_lock(lck, mode, key, dep)   \
183 +       htree_node_lock_try(lck, mode, key, dep, 1, NULL)
184 +/* this is only safe in thread context of lock owner */
185 +#define htree_node_is_granted(lck, dep)                \
186 +       ((lck)->lk_nodes[dep].ln_mode != HTREE_LOCK_INVAL && \
187 +        (lck)->lk_nodes[dep].ln_mode != HTREE_LOCK_NL)
188 +/* this is only safe in thread context of lock owner */
189 +#define htree_node_is_listening(lck, dep)      \
190 +       ((lck)->lk_nodes[dep].ln_mode == HTREE_LOCK_NL)
191 +
192 +#endif
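A minimal usage sketch of the interface declared above (an editorial illustration, not part of the patch: kernel context and error handling are omitted, and the key value is invented):

        /* one lock head per shared resource, one htree_lock per thread */
        struct htree_lock_head *lhead = htree_lock_head_alloc(1, HTREE_HBITS_DEF, 0);
        struct htree_lock *lck = htree_lock_alloc(1, 0); /* depth 1, no private bytes */
        u32 key = 42;                                    /* hypothetical key */

        htree_lock(lck, lhead, HTREE_LOCK_CW);       /* shared (CW) lock on the whole tree */
        htree_node_lock(lck, HTREE_LOCK_PW, key, 0); /* PW on @key at level 0 */
        /* ... modify the data guarded by @key ... */
        htree_node_unlock(lck, 0, NULL);             /* release the key */
        htree_unlock(lck);                           /* also drops any remaining child locks */

        htree_lock_free(lck);
        htree_lock_head_free(lhead);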
193 Index: linux-2.6.32-504.3.3.el6.x86_64/fs/ext4/htree_lock.c
194 ===================================================================
195 --- /dev/null
196 +++ linux-2.6.32-504.3.3.el6.x86_64/fs/ext4/htree_lock.c
197 @@ -0,0 +1,880 @@
198 +/*
199 + * fs/ext4/htree_lock.c
200 + *
201 + * Copyright (c) 2011, 2012, Intel Corporation.
202 + *
203 + * Author: Liang Zhen <liang@whamcloud.com>
204 + */
205 +#include <linux/jbd2.h>
206 +#include <linux/hash.h>
207 +#include <linux/module.h>
208 +#include <linux/htree_lock.h>
209 +
210 +enum {
211 +       HTREE_LOCK_BIT_EX       = (1 << HTREE_LOCK_EX),
212 +       HTREE_LOCK_BIT_PW       = (1 << HTREE_LOCK_PW),
213 +       HTREE_LOCK_BIT_PR       = (1 << HTREE_LOCK_PR),
214 +       HTREE_LOCK_BIT_CW       = (1 << HTREE_LOCK_CW),
215 +       HTREE_LOCK_BIT_CR       = (1 << HTREE_LOCK_CR),
216 +};
217 +
218 +enum {
219 +       HTREE_LOCK_COMPAT_EX    = 0,
220 +       HTREE_LOCK_COMPAT_PW    = HTREE_LOCK_COMPAT_EX | HTREE_LOCK_BIT_CR,
221 +       HTREE_LOCK_COMPAT_PR    = HTREE_LOCK_COMPAT_PW | HTREE_LOCK_BIT_PR,
222 +       HTREE_LOCK_COMPAT_CW    = HTREE_LOCK_COMPAT_PW | HTREE_LOCK_BIT_CW,
223 +       HTREE_LOCK_COMPAT_CR    = HTREE_LOCK_COMPAT_CW | HTREE_LOCK_BIT_PR |
224 +                                 HTREE_LOCK_BIT_PW,
225 +};
226 +
227 +static int htree_lock_compat[] = {
228 +       [HTREE_LOCK_EX]         HTREE_LOCK_COMPAT_EX,
229 +       [HTREE_LOCK_PW]         HTREE_LOCK_COMPAT_PW,
230 +       [HTREE_LOCK_PR]         HTREE_LOCK_COMPAT_PR,
231 +       [HTREE_LOCK_CW]         HTREE_LOCK_COMPAT_CW,
232 +       [HTREE_LOCK_CR]         HTREE_LOCK_COMPAT_CR,
233 +};
234 +
235 +/* max allowed htree-lock depth.
236 + * We only need depth=3 for ext4, although users can specify a higher value. */
237 +#define HTREE_LOCK_DEP_MAX     16
238 +
239 +#ifdef HTREE_LOCK_DEBUG
240 +
241 +static char *hl_name[] = {
242 +       [HTREE_LOCK_EX]         "EX",
243 +       [HTREE_LOCK_PW]         "PW",
244 +       [HTREE_LOCK_PR]         "PR",
245 +       [HTREE_LOCK_CW]         "CW",
246 +       [HTREE_LOCK_CR]         "CR",
247 +};
248 +
249 +/* lock stats */
250 +struct htree_lock_node_stats {
251 +       unsigned long long      blocked[HTREE_LOCK_MAX];
252 +       unsigned long long      granted[HTREE_LOCK_MAX];
253 +       unsigned long long      retried[HTREE_LOCK_MAX];
254 +       unsigned long long      events;
255 +};
256 +
257 +struct htree_lock_stats {
258 +       struct htree_lock_node_stats    nodes[HTREE_LOCK_DEP_MAX];
259 +       unsigned long long      granted[HTREE_LOCK_MAX];
260 +       unsigned long long      blocked[HTREE_LOCK_MAX];
261 +};
262 +
263 +static struct htree_lock_stats hl_stats;
264 +
265 +void htree_lock_stat_reset(void)
266 +{
267 +       memset(&hl_stats, 0, sizeof(hl_stats));
268 +}
269 +
270 +void htree_lock_stat_print(int depth)
271 +{
272 +       int     i;
273 +       int     j;
274 +
275 +       printk(KERN_DEBUG "HTREE LOCK STATS:\n");
276 +       for (i = 0; i < HTREE_LOCK_MAX; i++) {
277 +               printk(KERN_DEBUG "[%s]: G [%10llu], B [%10llu]\n",
278 +                      hl_name[i], hl_stats.granted[i], hl_stats.blocked[i]);
279 +       }
280 +       for (i = 0; i < depth; i++) {
281 +               printk(KERN_DEBUG "HTREE CHILD [%d] STATS:\n", i);
282 +               for (j = 0; j < HTREE_LOCK_MAX; j++) {
283 +                       printk(KERN_DEBUG
284 +                               "[%s]: G [%10llu], B [%10llu], R [%10llu]\n",
285 +                               hl_name[j], hl_stats.nodes[i].granted[j],
286 +                               hl_stats.nodes[i].blocked[j],
287 +                               hl_stats.nodes[i].retried[j]);
288 +               }
289 +       }
290 +}
291 +
292 +#define lk_grant_inc(m)       do { hl_stats.granted[m]++; } while (0)
293 +#define lk_block_inc(m)       do { hl_stats.blocked[m]++; } while (0)
294 +#define ln_grant_inc(d, m)    do { hl_stats.nodes[d].granted[m]++; } while (0)
295 +#define ln_block_inc(d, m)    do { hl_stats.nodes[d].blocked[m]++; } while (0)
296 +#define ln_retry_inc(d, m)    do { hl_stats.nodes[d].retried[m]++; } while (0)
297 +#define ln_event_inc(d)       do { hl_stats.nodes[d].events++; } while (0)
298 +
299 +#else /* !DEBUG */
300 +
301 +void htree_lock_stat_reset(void) {}
302 +void htree_lock_stat_print(int depth) {}
303 +
304 +#define lk_grant_inc(m)              do {} while (0)
305 +#define lk_block_inc(m)              do {} while (0)
306 +#define ln_grant_inc(d, m)    do {} while (0)
307 +#define ln_block_inc(d, m)    do {} while (0)
308 +#define ln_retry_inc(d, m)    do {} while (0)
309 +#define ln_event_inc(d)              do {} while (0)
310 +
311 +#endif /* DEBUG */
312 +
313 +EXPORT_SYMBOL(htree_lock_stat_reset);
314 +EXPORT_SYMBOL(htree_lock_stat_print);
315 +
316 +#define HTREE_DEP_ROOT           (-1)
317 +
318 +#define htree_spin_lock(lhead, dep)                            \
319 +       bit_spin_lock((dep) + 1, &(lhead)->lh_lock)
320 +#define htree_spin_unlock(lhead, dep)                          \
321 +       bit_spin_unlock((dep) + 1, &(lhead)->lh_lock)
322 +
323 +#define htree_key_event_ignore(child, ln)                      \
324 +       (!((child)->lc_events & (1 << (ln)->ln_mode)))
325 +
326 +static int
327 +htree_key_list_empty(struct htree_lock_node *ln)
328 +{
329 +       return list_empty(&ln->ln_major_list) && list_empty(&ln->ln_minor_list);
330 +}
331 +
332 +static void
333 +htree_key_list_del_init(struct htree_lock_node *ln)
334 +{
335 +       struct htree_lock_node *tmp = NULL;
336 +
337 +       if (!list_empty(&ln->ln_minor_list)) {
338 +               tmp = list_entry(ln->ln_minor_list.next,
339 +                                struct htree_lock_node, ln_minor_list);
340 +               list_del_init(&ln->ln_minor_list);
341 +       }
342 +
343 +       if (list_empty(&ln->ln_major_list))
344 +               return;
345 +
346 +       if (tmp == NULL) { /* not on minor key list */
347 +               list_del_init(&ln->ln_major_list);
348 +       } else {
349 +               BUG_ON(!list_empty(&tmp->ln_major_list));
350 +               list_replace_init(&ln->ln_major_list, &tmp->ln_major_list);
351 +       }
352 +}
353 +
354 +static void
355 +htree_key_list_replace_init(struct htree_lock_node *old,
356 +                           struct htree_lock_node *new)
357 +{
358 +       if (!list_empty(&old->ln_major_list))
359 +               list_replace_init(&old->ln_major_list, &new->ln_major_list);
360 +
361 +       if (!list_empty(&old->ln_minor_list))
362 +               list_replace_init(&old->ln_minor_list, &new->ln_minor_list);
363 +}
364 +
365 +static void
366 +htree_key_event_enqueue(struct htree_lock_child *child,
367 +                       struct htree_lock_node *ln, int dep, void *event)
368 +{
369 +       struct htree_lock_node *tmp;
370 +
371 +       /* NB: ALWAYS called holding lhead::lh_lock(dep) */
372 +       BUG_ON(ln->ln_mode == HTREE_LOCK_NL);
373 +       if (event == NULL || htree_key_event_ignore(child, ln))
374 +               return;
375 +
376 +       /* shouldn't be a very long list */
377 +       list_for_each_entry(tmp, &ln->ln_alive_list, ln_alive_list) {
378 +               if (tmp->ln_mode == HTREE_LOCK_NL) {
379 +                       ln_event_inc(dep);
380 +                       if (child->lc_callback != NULL)
381 +                               child->lc_callback(tmp->ln_ev_target, event);
382 +               }
383 +       }
384 +}
385 +
386 +static int
387 +htree_node_lock_enqueue(struct htree_lock *newlk, struct htree_lock *curlk,
388 +                       unsigned dep, int wait, void *event)
389 +{
390 +       struct htree_lock_child *child = &newlk->lk_head->lh_children[dep];
391 +       struct htree_lock_node *newln = &newlk->lk_nodes[dep];
392 +       struct htree_lock_node *curln = &curlk->lk_nodes[dep];
393 +
394 +       /* NB: ALWAYS called holding lhead::lh_lock(dep) */
395 +       /* NB: we only expect PR/PW lock modes here; only these two modes are
396 +        * allowed for htree_node_lock (asserted in htree_node_lock_internal).
397 +        * NL is only used for listeners; users can't directly request NL mode */
398 +       if ((curln->ln_mode == HTREE_LOCK_NL) ||
399 +           (curln->ln_mode != HTREE_LOCK_PW &&
400 +            newln->ln_mode != HTREE_LOCK_PW)) {
401 +               /* no conflict, attach it on granted list of @curlk */
402 +               if (curln->ln_mode != HTREE_LOCK_NL) {
403 +                       list_add(&newln->ln_granted_list,
404 +                                &curln->ln_granted_list);
405 +               } else {
406 +                       /* replace key owner */
407 +                       htree_key_list_replace_init(curln, newln);
408 +               }
409 +
410 +               list_add(&newln->ln_alive_list, &curln->ln_alive_list);
411 +               htree_key_event_enqueue(child, newln, dep, event);
412 +               ln_grant_inc(dep, newln->ln_mode);
413 +               return 1; /* still hold lh_lock */
414 +       }
415 +
416 +       if (!wait) { /* can't grant and don't want to wait */
417 +               ln_retry_inc(dep, newln->ln_mode);
418 +               newln->ln_mode = HTREE_LOCK_INVAL;
419 +               return -1; /* don't wait and just return -1 */
420 +       }
421 +
422 +       newlk->lk_task = current;
423 +       set_current_state(TASK_UNINTERRUPTIBLE);
424 +       /* conflict, attach it on blocked list of curlk */
425 +       list_add_tail(&newln->ln_blocked_list, &curln->ln_blocked_list);
426 +       list_add(&newln->ln_alive_list, &curln->ln_alive_list);
427 +       ln_block_inc(dep, newln->ln_mode);
428 +
429 +       htree_spin_unlock(newlk->lk_head, dep);
430 +       /* wait to be given the lock */
431 +       if (newlk->lk_task != NULL)
432 +               schedule();
433 +       /* granted, no doubt, wake up will set me RUNNING */
434 +       if (event == NULL || htree_key_event_ignore(child, newln))
435 +               return 0; /* granted without lh_lock */
436 +
437 +       htree_spin_lock(newlk->lk_head, dep);
438 +       htree_key_event_enqueue(child, newln, dep, event);
439 +       return 1; /* still hold lh_lock */
440 +}
441 +
442 +/*
443 + * get PR/PW access to particular tree-node according to @dep and @key,
444 + * it will return -1 if @wait is false and can't immediately grant this lock.
445 + * All listeners(HTREE_LOCK_NL) on @dep and with the same @key will get
446 + * @event if it's not NULL.
447 + * NB: ALWAYS called holding lhead::lh_lock
448 + */
449 +static int
450 +htree_node_lock_internal(struct htree_lock_head *lhead, struct htree_lock *lck,
451 +                        htree_lock_mode_t mode, u32 key, unsigned dep,
452 +                        int wait, void *event)
453 +{
454 +       LIST_HEAD               (list);
455 +       struct htree_lock       *tmp;
456 +       struct htree_lock       *tmp2;
457 +       u16                     major;
458 +       u16                     minor;
459 +       u8                      reverse;
460 +       u8                      ma_bits;
461 +       u8                      mi_bits;
462 +
463 +       BUG_ON(mode != HTREE_LOCK_PW && mode != HTREE_LOCK_PR);
464 +       BUG_ON(htree_node_is_granted(lck, dep));
465 +
466 +       key = hash_long(key, lhead->lh_hbits);
467 +
468 +       mi_bits = lhead->lh_hbits >> 1;
469 +       ma_bits = lhead->lh_hbits - mi_bits;
470 +
471 +       lck->lk_nodes[dep].ln_major_key = major = key & ((1U << ma_bits) - 1);
472 +       lck->lk_nodes[dep].ln_minor_key = minor = key >> ma_bits;
473 +       lck->lk_nodes[dep].ln_mode = mode;
474 +
475 +       /*
476 +        * The major key list is an ordered list, so searches are started
477 +        * at the end of the list that is numerically closer to major_key,
478 +        * so at most half of the list will be walked (for well-distributed
479 +        * keys). The list traversal aborts early if the expected key
480 +        * location is passed.
481 +        */
482 +       reverse = (major >= (1 << (ma_bits - 1)));
483 +
484 +       if (reverse) {
485 +               list_for_each_entry_reverse(tmp,
486 +                                       &lhead->lh_children[dep].lc_list,
487 +                                       lk_nodes[dep].ln_major_list) {
488 +                       if (tmp->lk_nodes[dep].ln_major_key == major) {
489 +                               goto search_minor;
490 +
491 +                       } else if (tmp->lk_nodes[dep].ln_major_key < major) {
492 +                               /* attach _after_ @tmp */
493 +                               list_add(&lck->lk_nodes[dep].ln_major_list,
494 +                                        &tmp->lk_nodes[dep].ln_major_list);
495 +                               goto out_grant_major;
496 +                       }
497 +               }
498 +
499 +               list_add(&lck->lk_nodes[dep].ln_major_list,
500 +                        &lhead->lh_children[dep].lc_list);
501 +               goto out_grant_major;
502 +
503 +       } else {
504 +               list_for_each_entry(tmp, &lhead->lh_children[dep].lc_list,
505 +                                   lk_nodes[dep].ln_major_list) {
506 +                       if (tmp->lk_nodes[dep].ln_major_key == major) {
507 +                               goto search_minor;
508 +
509 +                       } else if (tmp->lk_nodes[dep].ln_major_key > major) {
510 +                               /* insert _before_ @tmp */
511 +                               list_add_tail(&lck->lk_nodes[dep].ln_major_list,
512 +                                       &tmp->lk_nodes[dep].ln_major_list);
513 +                               goto out_grant_major;
514 +                       }
515 +               }
516 +
517 +               list_add_tail(&lck->lk_nodes[dep].ln_major_list,
518 +                             &lhead->lh_children[dep].lc_list);
519 +               goto out_grant_major;
520 +       }
521 +
522 + search_minor:
523 +       /*
524 +        * NB: minor_key list doesn't have a "head", @list is just a
525 +        * temporary stub for helping list searching, make sure it's removed
526 +        * after searching.
527 +        * minor_key list is an ordered list too.
528 +        */
529 +       list_add_tail(&list, &tmp->lk_nodes[dep].ln_minor_list);
530 +
531 +       reverse = (minor >= (1 << (mi_bits - 1)));
532 +
533 +       if (reverse) {
534 +               list_for_each_entry_reverse(tmp2, &list,
535 +                                           lk_nodes[dep].ln_minor_list) {
536 +                       if (tmp2->lk_nodes[dep].ln_minor_key == minor) {
537 +                               goto out_enqueue;
538 +
539 +                       } else if (tmp2->lk_nodes[dep].ln_minor_key < minor) {
540 +                               /* attach _after_ @tmp2 */
541 +                               list_add(&lck->lk_nodes[dep].ln_minor_list,
542 +                                        &tmp2->lk_nodes[dep].ln_minor_list);
543 +                               goto out_grant_minor;
544 +                       }
545 +               }
546 +
547 +               list_add(&lck->lk_nodes[dep].ln_minor_list, &list);
548 +
549 +       } else {
550 +               list_for_each_entry(tmp2, &list,
551 +                                   lk_nodes[dep].ln_minor_list) {
552 +                       if (tmp2->lk_nodes[dep].ln_minor_key == minor) {
553 +                               goto out_enqueue;
554 +
555 +                       } else if (tmp2->lk_nodes[dep].ln_minor_key > minor) {
556 +                               /* insert _before_ @tmp2 */
557 +                               list_add_tail(&lck->lk_nodes[dep].ln_minor_list,
558 +                                       &tmp2->lk_nodes[dep].ln_minor_list);
559 +                               goto out_grant_minor;
560 +                       }
561 +               }
562 +
563 +               list_add_tail(&lck->lk_nodes[dep].ln_minor_list, &list);
564 +       }
565 +
566 + out_grant_minor:
567 +       if (list.next == &lck->lk_nodes[dep].ln_minor_list) {
568 +               /* new lock @lck is the first one on minor_key list, which
569 +                * means it has the smallest minor_key and it should
570 +                * replace @tmp as minor_key owner */
571 +               list_replace_init(&tmp->lk_nodes[dep].ln_major_list,
572 +                                 &lck->lk_nodes[dep].ln_major_list);
573 +       }
574 +       /* remove the temporary head */
575 +       list_del(&list);
576 +
577 + out_grant_major:
578 +       ln_grant_inc(dep, lck->lk_nodes[dep].ln_mode);
579 +       return 1; /* granted with holding lh_lock */
580 +
581 + out_enqueue:
582 +       list_del(&list); /* remove temporary head */
583 +       return htree_node_lock_enqueue(lck, tmp2, dep, wait, event);
584 +}
585 +
586 +/*
587 + * release the key of @lck at level @dep, and grant any blocked locks.
588 + * caller will still listen on @key if @event is not NULL, which means
589 + * caller can see a event (by event_cb) while granting any lock with
590 + * the same key at level @dep.
591 + * NB: ALWAYS called holding lhead::lh_lock
592 + * NB: listener will not block anyone because listening mode is HTREE_LOCK_NL
593 + */
594 +static void
595 +htree_node_unlock_internal(struct htree_lock_head *lhead,
596 +                          struct htree_lock *curlk, unsigned dep, void *event)
597 +{
598 +       struct htree_lock_node  *curln = &curlk->lk_nodes[dep];
599 +       struct htree_lock       *grtlk = NULL;
600 +       struct htree_lock_node  *grtln;
601 +       struct htree_lock       *poslk;
602 +       struct htree_lock       *tmplk;
603 +
604 +       if (!htree_node_is_granted(curlk, dep))
605 +               return;
606 +
607 +       if (!list_empty(&curln->ln_granted_list)) {
608 +               /* there is another granted lock */
609 +               grtlk = list_entry(curln->ln_granted_list.next,
610 +                                  struct htree_lock,
611 +                                  lk_nodes[dep].ln_granted_list);
612 +               list_del_init(&curln->ln_granted_list);
613 +       }
614 +
615 +       if (grtlk == NULL && !list_empty(&curln->ln_blocked_list)) {
616 +               /*
617 +                * @curlk is the only granted lock, so we confirmed:
618 +                * a) curln is key owner (attached on major/minor_list),
619 +                *    so if there is any blocked lock, it should be attached
620 +                *    on curln->ln_blocked_list
621 +                * b) we always can grant the first blocked lock
622 +                */
623 +               grtlk = list_entry(curln->ln_blocked_list.next,
624 +                                  struct htree_lock,
625 +                                  lk_nodes[dep].ln_blocked_list);
626 +               BUG_ON(grtlk->lk_task == NULL);
627 +               wake_up_process(grtlk->lk_task);
628 +       }
629 +
630 +       if (event != NULL &&
631 +           lhead->lh_children[dep].lc_events != HTREE_EVENT_DISABLE) {
632 +               curln->ln_ev_target = event;
633 +               curln->ln_mode = HTREE_LOCK_NL; /* listen! */
634 +       } else {
635 +               curln->ln_mode = HTREE_LOCK_INVAL;
636 +       }
637 +
638 +       if (grtlk == NULL) { /* I must be the only one locking this key */
639 +               struct htree_lock_node *tmpln;
640 +
641 +               BUG_ON(htree_key_list_empty(curln));
642 +
643 +               if (curln->ln_mode == HTREE_LOCK_NL) /* listening */
644 +                       return;
645 +
646 +               /* not listening */
647 +               if (list_empty(&curln->ln_alive_list)) { /* no more listener */
648 +                       htree_key_list_del_init(curln);
649 +                       return;
650 +               }
651 +
652 +               tmpln = list_entry(curln->ln_alive_list.next,
653 +                                  struct htree_lock_node, ln_alive_list);
654 +
655 +               BUG_ON(tmpln->ln_mode != HTREE_LOCK_NL);
656 +
657 +               htree_key_list_replace_init(curln, tmpln);
658 +               list_del_init(&curln->ln_alive_list);
659 +
660 +               return;
661 +       }
662 +
663 +       /* have a granted lock */
664 +       grtln = &grtlk->lk_nodes[dep];
665 +       if (!list_empty(&curln->ln_blocked_list)) {
666 +               /* only key owner can be on both lists */
667 +               BUG_ON(htree_key_list_empty(curln));
668 +
669 +               if (list_empty(&grtln->ln_blocked_list)) {
670 +                       list_add(&grtln->ln_blocked_list,
671 +                                &curln->ln_blocked_list);
672 +               }
673 +               list_del_init(&curln->ln_blocked_list);
674 +       }
675 +       /*
676 +        * NB: this is the tricky part:
677 +        * We have only two modes for child-lock (PR and PW), also,
678 +        * only owner of the key (attached on major/minor_list) can be on
679 +        * both blocked_list and granted_list, so @grtlk must be one
680 +        * of these two cases:
681 +        *
682 +        * a) @grtlk is taken from granted_list, which means we've granted
683 +        *    more than one lock so @grtlk has to be PR, the first blocked
684 +        *    lock must be PW and we can't grant it at all.
685 +        *    So even if @grtlk is not the owner of the key (empty blocked_list),
686 +        *    we don't care because we can't grant any lock.
687 +        * b) we just grant a new lock which is taken from head of blocked
688 +        *    list, and it should be the first granted lock, and it should
689 +        *    be the first one linked on blocked_list.
690 +        *
691 +        * Either way, we get the correct result by iterating the blocked_list
692 +        * of @grtlk, and don't have to bother finding out the
693 +        * owner of the current key.
694 +        */
695 +       list_for_each_entry_safe(poslk, tmplk, &grtln->ln_blocked_list,
696 +                                lk_nodes[dep].ln_blocked_list) {
697 +               if (grtlk->lk_nodes[dep].ln_mode == HTREE_LOCK_PW ||
698 +                   poslk->lk_nodes[dep].ln_mode == HTREE_LOCK_PW)
699 +                       break;
700 +               /* grant all readers */
701 +               list_del_init(&poslk->lk_nodes[dep].ln_blocked_list);
702 +               list_add(&poslk->lk_nodes[dep].ln_granted_list,
703 +                        &grtln->ln_granted_list);
704 +
705 +               BUG_ON(poslk->lk_task == NULL);
706 +               wake_up_process(poslk->lk_task);
707 +       }
708 +
709 +       /* if @curln is the owner of this key, replace it with @grtln */
710 +       if (!htree_key_list_empty(curln))
711 +               htree_key_list_replace_init(curln, grtln);
712 +
713 +       if (curln->ln_mode == HTREE_LOCK_INVAL)
714 +               list_del_init(&curln->ln_alive_list);
715 +}
716 +
717 +/*
718 + * This is just a wrapper of htree_node_lock_internal; it returns 1 when granted
719 + * and 0 only if @wait is false and the lock can't be granted immediately
720 + */
721 +int
722 +htree_node_lock_try(struct htree_lock *lck, htree_lock_mode_t mode,
723 +                   u32 key, unsigned dep, int wait, void *event)
724 +{
725 +       struct htree_lock_head *lhead = lck->lk_head;
726 +       int rc;
727 +
728 +       BUG_ON(dep >= lck->lk_depth);
729 +       BUG_ON(lck->lk_mode == HTREE_LOCK_INVAL);
730 +
731 +       htree_spin_lock(lhead, dep);
732 +       rc = htree_node_lock_internal(lhead, lck, mode, key, dep, wait, event);
733 +       if (rc != 0)
734 +               htree_spin_unlock(lhead, dep);
735 +       return rc >= 0;
736 +}
737 +EXPORT_SYMBOL(htree_node_lock_try);
738 +
739 +/* it's a wrapper of htree_node_unlock_internal */
740 +void
741 +htree_node_unlock(struct htree_lock *lck, unsigned dep, void *event)
742 +{
743 +       struct htree_lock_head *lhead = lck->lk_head;
744 +
745 +       BUG_ON(dep >= lck->lk_depth);
746 +       BUG_ON(lck->lk_mode == HTREE_LOCK_INVAL);
747 +
748 +       htree_spin_lock(lhead, dep);
749 +       htree_node_unlock_internal(lhead, lck, dep, event);
750 +       htree_spin_unlock(lhead, dep);
751 +}
752 +EXPORT_SYMBOL(htree_node_unlock);
753 +
754 +/* stop listening on child-lock level @dep */
755 +void
756 +htree_node_stop_listen(struct htree_lock *lck, unsigned dep)
757 +{
758 +       struct htree_lock_node *ln = &lck->lk_nodes[dep];
759 +       struct htree_lock_node *tmp;
760 +
761 +       BUG_ON(htree_node_is_granted(lck, dep));
762 +       BUG_ON(!list_empty(&ln->ln_blocked_list));
763 +       BUG_ON(!list_empty(&ln->ln_granted_list));
764 +
765 +       if (!htree_node_is_listening(lck, dep))
766 +               return;
767 +
768 +       htree_spin_lock(lck->lk_head, dep);
769 +       ln->ln_mode = HTREE_LOCK_INVAL;
770 +       ln->ln_ev_target = NULL;
771 +
772 +       if (htree_key_list_empty(ln)) { /* not owner */
773 +               list_del_init(&ln->ln_alive_list);
774 +               goto out;
775 +       }
776 +
777 +       /* I'm the owner... */
778 +       if (list_empty(&ln->ln_alive_list)) { /* no more listener */
779 +               htree_key_list_del_init(ln);
780 +               goto out;
781 +       }
782 +
783 +       tmp = list_entry(ln->ln_alive_list.next,
784 +                        struct htree_lock_node, ln_alive_list);
785 +
786 +       BUG_ON(tmp->ln_mode != HTREE_LOCK_NL);
787 +       htree_key_list_replace_init(ln, tmp);
788 +       list_del_init(&ln->ln_alive_list);
789 + out:
790 +       htree_spin_unlock(lck->lk_head, dep);
791 +}
792 +EXPORT_SYMBOL(htree_node_stop_listen);
793 +
794 +/* release all child-locks if we have any */
795 +static void
796 +htree_node_release_all(struct htree_lock *lck)
797 +{
798 +       int     i;
799 +
800 +       for (i = 0; i < lck->lk_depth; i++) {
801 +               if (htree_node_is_granted(lck, i))
802 +                       htree_node_unlock(lck, i, NULL);
803 +               else if (htree_node_is_listening(lck, i))
804 +                       htree_node_stop_listen(lck, i);
805 +       }
806 +}
807 +
808 +/*
809 + * obtain htree lock, it could be blocked inside if there's conflict
810 + * with any granted or blocked lock and @wait is true.
811 + * NB: ALWAYS called holding lhead::lh_lock
812 + */
813 +static int
814 +htree_lock_internal(struct htree_lock *lck, int wait)
815 +{
816 +       struct htree_lock_head *lhead = lck->lk_head;
817 +       int     granted = 0;
818 +       int     blocked = 0;
819 +       int     i;
820 +
821 +       for (i = 0; i < HTREE_LOCK_MAX; i++) {
822 +               if (lhead->lh_ngranted[i] != 0)
823 +                       granted |= 1 << i;
824 +               if (lhead->lh_nblocked[i] != 0)
825 +                       blocked |= 1 << i;
826 +       }
827 +       if ((htree_lock_compat[lck->lk_mode] & granted) != granted ||
828 +           (htree_lock_compat[lck->lk_mode] & blocked) != blocked) {
829 +               /* will block the current lock even if it just conflicts with any
830 +                * other blocked lock, so a lock like EX won't starve */
831 +               if (!wait)
832 +                       return -1;
833 +               lhead->lh_nblocked[lck->lk_mode]++;
834 +               lk_block_inc(lck->lk_mode);
835 +
836 +               lck->lk_task = current;
837 +               list_add_tail(&lck->lk_blocked_list, &lhead->lh_blocked_list);
838 +
839 +               set_current_state(TASK_UNINTERRUPTIBLE);
840 +               htree_spin_unlock(lhead, HTREE_DEP_ROOT);
841 +               /* wait to be given the lock */
842 +               if (lck->lk_task != NULL)
843 +                       schedule();
844 +               /* granted, no doubt. wake up will set me RUNNING */
845 +               return 0; /* without lh_lock */
846 +       }
847 +       lhead->lh_ngranted[lck->lk_mode]++;
848 +       lk_grant_inc(lck->lk_mode);
849 +       return 1;
850 +}
851 +
852 +/* release htree lock. NB: ALWAYS called holding lhead::lh_lock */
853 +static void
854 +htree_unlock_internal(struct htree_lock *lck)
855 +{
856 +       struct htree_lock_head *lhead = lck->lk_head;
857 +       struct htree_lock *tmp;
858 +       struct htree_lock *tmp2;
859 +       int granted = 0;
860 +       int i;
861 +
862 +       BUG_ON(lhead->lh_ngranted[lck->lk_mode] == 0);
863 +
864 +       lhead->lh_ngranted[lck->lk_mode]--;
865 +       lck->lk_mode = HTREE_LOCK_INVAL;
866 +
867 +       for (i = 0; i < HTREE_LOCK_MAX; i++) {
868 +               if (lhead->lh_ngranted[i] != 0)
869 +                       granted |= 1 << i;
870 +       }
871 +       list_for_each_entry_safe(tmp, tmp2,
872 +                                &lhead->lh_blocked_list, lk_blocked_list) {
873 +               /* conflict with any granted lock? */
874 +               if ((htree_lock_compat[tmp->lk_mode] & granted) != granted)
875 +                       break;
876 +
877 +               list_del_init(&tmp->lk_blocked_list);
878 +
879 +               BUG_ON(lhead->lh_nblocked[tmp->lk_mode] == 0);
880 +
881 +               lhead->lh_nblocked[tmp->lk_mode]--;
882 +               lhead->lh_ngranted[tmp->lk_mode]++;
883 +               granted |= 1 << tmp->lk_mode;
884 +
885 +               BUG_ON(tmp->lk_task == NULL);
886 +               wake_up_process(tmp->lk_task);
887 +       }
888 +}
889 +
890 +/* it's a wrapper of htree_lock_internal and the exported interface.
891 + * It always returns 1 with the lock granted if @wait is true; it can return 0
892 + * if @wait is false and the locking request can't be granted immediately */
893 +int
894 +htree_lock_try(struct htree_lock *lck, struct htree_lock_head *lhead,
895 +              htree_lock_mode_t mode, int wait)
896 +{
897 +       int     rc;
898 +
899 +       BUG_ON(lck->lk_depth > lhead->lh_depth);
900 +       BUG_ON(lck->lk_head != NULL);
901 +       BUG_ON(lck->lk_task != NULL);
902 +
903 +       lck->lk_head = lhead;
904 +       lck->lk_mode = mode;
905 +
906 +       htree_spin_lock(lhead, HTREE_DEP_ROOT);
907 +       rc = htree_lock_internal(lck, wait);
908 +       if (rc != 0)
909 +               htree_spin_unlock(lhead, HTREE_DEP_ROOT);
910 +       return rc >= 0;
911 +}
912 +EXPORT_SYMBOL(htree_lock_try);
913 +
914 +/* it's a wrapper of htree_unlock_internal and the exported interface.
915 + * It will release all htree_node_locks and then the htree_lock itself */
916 +void
917 +htree_unlock(struct htree_lock *lck)
918 +{
919 +       BUG_ON(lck->lk_head == NULL);
920 +       BUG_ON(lck->lk_mode == HTREE_LOCK_INVAL);
921 +
922 +       htree_node_release_all(lck);
923 +
924 +       htree_spin_lock(lck->lk_head, HTREE_DEP_ROOT);
925 +       htree_unlock_internal(lck);
926 +       htree_spin_unlock(lck->lk_head, HTREE_DEP_ROOT);
927 +       lck->lk_head = NULL;
928 +       lck->lk_task = NULL;
929 +}
930 +EXPORT_SYMBOL(htree_unlock);
931 +
932 +/* change lock mode */
933 +void
934 +htree_change_mode(struct htree_lock *lck, htree_lock_mode_t mode)
935 +{
936 +       BUG_ON(lck->lk_mode == HTREE_LOCK_INVAL);
937 +       lck->lk_mode = mode;
938 +}
939 +EXPORT_SYMBOL(htree_change_mode);
940 +
941 +/* release the htree lock, and lock it again with a new mode.
942 + * This function will first release all htree_node_locks and the htree_lock,
943 + * then try to acquire the htree_lock with the new @mode.
944 + * It always returns 1 with the lock granted if @wait is true; it can return 0
945 + * if @wait is false and the locking request can't be granted immediately */
946 +int
947 +htree_change_lock_try(struct htree_lock *lck, htree_lock_mode_t mode, int wait)
948 +{
949 +       struct htree_lock_head *lhead = lck->lk_head;
950 +       int rc;
951 +
952 +       BUG_ON(lhead == NULL);
953 +       BUG_ON(lck->lk_mode == mode);
954 +       BUG_ON(lck->lk_mode == HTREE_LOCK_INVAL || mode == HTREE_LOCK_INVAL);
955 +
956 +       htree_node_release_all(lck);
957 +
958 +       htree_spin_lock(lhead, HTREE_DEP_ROOT);
959 +       htree_unlock_internal(lck);
960 +       lck->lk_mode = mode;
961 +       rc = htree_lock_internal(lck, wait);
962 +       if (rc != 0)
963 +               htree_spin_unlock(lhead, HTREE_DEP_ROOT);
964 +       return rc >= 0;
965 +}
966 +EXPORT_SYMBOL(htree_change_lock_try);
967 +
968 +/* create a htree_lock head with @depth levels (number of child-locks),
969 + * it is a per-resource structure */
970 +struct htree_lock_head *
971 +htree_lock_head_alloc(unsigned depth, unsigned hbits, unsigned priv)
972 +{
973 +       struct htree_lock_head *lhead;
974 +       int  i;
975 +
976 +       if (depth > HTREE_LOCK_DEP_MAX) {
977 +               printk(KERN_ERR "%d is larger than max htree_lock depth %d\n",
978 +                       depth, HTREE_LOCK_DEP_MAX);
979 +               return NULL;
980 +       }
981 +
982 +       lhead = kzalloc(offsetof(struct htree_lock_head,
983 +                                lh_children[depth]) + priv, GFP_NOFS);
984 +       if (lhead == NULL)
985 +               return NULL;
986 +
987 +       if (hbits < HTREE_HBITS_MIN)
988 +               lhead->lh_hbits = HTREE_HBITS_MIN;
989 +       else if (hbits > HTREE_HBITS_MAX)
990 +               lhead->lh_hbits = HTREE_HBITS_MAX;
    +       else
    +               lhead->lh_hbits = hbits;
991 +
992 +       lhead->lh_lock = 0;
993 +       lhead->lh_depth = depth;
994 +       INIT_LIST_HEAD(&lhead->lh_blocked_list);
995 +       if (priv > 0) {
996 +               lhead->lh_private = (void *)lhead +
997 +                       offsetof(struct htree_lock_head, lh_children[depth]);
998 +       }
999 +
1000 +       for (i = 0; i < depth; i++) {
1001 +               INIT_LIST_HEAD(&lhead->lh_children[i].lc_list);
1002 +               lhead->lh_children[i].lc_events = HTREE_EVENT_DISABLE;
1003 +       }
1004 +       return lhead;
1005 +}
1006 +EXPORT_SYMBOL(htree_lock_head_alloc);
1007 +
1008 +/* free the htree_lock head */
1009 +void
1010 +htree_lock_head_free(struct htree_lock_head *lhead)
1011 +{
1012 +       int     i;
1013 +
1014 +       BUG_ON(!list_empty(&lhead->lh_blocked_list));
1015 +       for (i = 0; i < lhead->lh_depth; i++)
1016 +               BUG_ON(!list_empty(&lhead->lh_children[i].lc_list));
1017 +       kfree(lhead);
1018 +}
1019 +EXPORT_SYMBOL(htree_lock_head_free);
1020 +
1021 +/* register event callback for @events of child-lock at level @dep */
1022 +void
1023 +htree_lock_event_attach(struct htree_lock_head *lhead, unsigned dep,
1024 +                       unsigned events, htree_event_cb_t callback)
1025 +{
1026 +       BUG_ON(lhead->lh_depth <= dep);
1027 +       lhead->lh_children[dep].lc_events = events;
1028 +       lhead->lh_children[dep].lc_callback = callback;
1029 +}
1030 +EXPORT_SYMBOL(htree_lock_event_attach);
1031 +
1032 +/* allocate a htree_lock, which is a per-thread structure; @pbytes is some
1033 + * extra bytes of private data for the caller */
1034 +struct htree_lock *
1035 +htree_lock_alloc(unsigned depth, unsigned pbytes)
1036 +{
1037 +       struct htree_lock *lck;
1038 +       int i = offsetof(struct htree_lock, lk_nodes[depth]);
1039 +
1040 +       if (depth > HTREE_LOCK_DEP_MAX) {
1041 +               printk(KERN_ERR "%d is larger than max htree_lock depth %d\n",
1042 +                       depth, HTREE_LOCK_DEP_MAX);
1043 +               return NULL;
1044 +       }
1045 +       lck = kzalloc(i + pbytes, GFP_NOFS);
1046 +       if (lck == NULL)
1047 +               return NULL;
1048 +
1049 +       if (pbytes != 0)
1050 +               lck->lk_private = (void *)lck + i;
1051 +       lck->lk_mode = HTREE_LOCK_INVAL;
1052 +       lck->lk_depth = depth;
1053 +       INIT_LIST_HEAD(&lck->lk_blocked_list);
1054 +
1055 +       for (i = 0; i < depth; i++) {
1056 +               struct htree_lock_node *node = &lck->lk_nodes[i];
1057 +
1058 +               node->ln_mode = HTREE_LOCK_INVAL;
1059 +               INIT_LIST_HEAD(&node->ln_major_list);
1060 +               INIT_LIST_HEAD(&node->ln_minor_list);
1061 +               INIT_LIST_HEAD(&node->ln_alive_list);
1062 +               INIT_LIST_HEAD(&node->ln_blocked_list);
1063 +               INIT_LIST_HEAD(&node->ln_granted_list);
1064 +       }
1065 +
1066 +       return lck;
1067 +}
1068 +EXPORT_SYMBOL(htree_lock_alloc);
1069 +
1070 +/* free a htree_lock handle */
1071 +void
1072 +htree_lock_free(struct htree_lock *lck)
1073 +{
1074 +       BUG_ON(lck->lk_mode != HTREE_LOCK_INVAL);
1075 +       kfree(lck);
1076 +}
1077 +EXPORT_SYMBOL(htree_lock_free);
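To make the DLM-style compatibility table at the top of this file concrete, here is a small editorial sketch (not part of the patch) of the test that htree_lock_internal() applies to the granted-mode bitmask, with a worked example for a single granted PR lock:

        /* sketch: the compatibility check performed by htree_lock_internal() */
        static inline int htree_mode_compatible(htree_lock_mode_t mode, int granted)
        {
                return (htree_lock_compat[mode] & granted) == granted;
        }

        /* with one PR holder, granted == HTREE_LOCK_BIT_PR, so:
         *   htree_mode_compatible(HTREE_LOCK_PR, granted) -> 1  (another PR is granted)
         *   htree_mode_compatible(HTREE_LOCK_CR, granted) -> 1  (CR is granted)
         *   htree_mode_compatible(HTREE_LOCK_CW, granted) -> 0  (CW must block)
         *   htree_mode_compatible(HTREE_LOCK_EX, granted) -> 0  (EX must block)
         */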
1078 Index: linux-2.6.32-504.3.3.el6.x86_64/fs/ext4/ext4.h
1079 ===================================================================
1080 --- linux-2.6.32-504.3.3.el6.x86_64.orig/fs/ext4/ext4.h
1081 +++ linux-2.6.32-504.3.3.el6.x86_64/fs/ext4/ext4.h
1082 @@ -27,6 +27,7 @@
1083  #include <linux/mutex.h>
1084  #include <linux/timer.h>
1085  #include <linux/wait.h>
1086 +#include <linux/htree_lock.h>
1087  #include <linux/blockgroup_lock.h>
1088  #include <linux/percpu_counter.h>
1089  #ifdef __KERNEL__
1090 @@ -1625,6 +1626,71 @@ ext4_dir_htree_level(struct super_block
1091                 EXT4_HTREE_LEVEL : EXT4_HTREE_LEVEL_COMPAT;
1092  }
1093  
1094 +/* assume name-hash is protected by upper layer */
1095 +#define EXT4_HTREE_LOCK_HASH   0
1096 +
1097 +enum ext4_pdo_lk_types {
1098 +#if EXT4_HTREE_LOCK_HASH
1099 +       EXT4_LK_HASH,
1100 +#endif
1101 +       EXT4_LK_DX,             /* index block */
1102 +       EXT4_LK_DE,             /* directory entry block */
1103 +       EXT4_LK_SPIN,           /* spinlock */
1104 +       EXT4_LK_MAX,
1105 +};
1106 +
1107 +/* read-only bit */
1108 +#define EXT4_LB_RO(b)          (1 << (b))
1109 +/* read + write, high bits for writer */
1110 +#define EXT4_LB_RW(b)          ((1 << (b)) | (1 << (EXT4_LK_MAX + (b))))
1111 +
1112 +enum ext4_pdo_lock_bits {
1113 +       /* DX lock bits */
1114 +       EXT4_LB_DX_RO           = EXT4_LB_RO(EXT4_LK_DX),
1115 +       EXT4_LB_DX              = EXT4_LB_RW(EXT4_LK_DX),
1116 +       /* DE lock bits */
1117 +       EXT4_LB_DE_RO           = EXT4_LB_RO(EXT4_LK_DE),
1118 +       EXT4_LB_DE              = EXT4_LB_RW(EXT4_LK_DE),
1119 +       /* DX spinlock bits */
1120 +       EXT4_LB_SPIN_RO         = EXT4_LB_RO(EXT4_LK_SPIN),
1121 +       EXT4_LB_SPIN            = EXT4_LB_RW(EXT4_LK_SPIN),
1122 +       /* accurate searching */
1123 +       EXT4_LB_EXACT           = EXT4_LB_RO(EXT4_LK_MAX << 1),
1124 +};
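/* Editorial worked example (not part of the patch): with EXT4_HTREE_LOCK_HASH
 * set to 0 above, EXT4_LK_DX == 0 and EXT4_LK_MAX == 3, so the masks come out as
 *   EXT4_LB_DX_RO == (1 << 0)             == 0x01   (reader bit only)
 *   EXT4_LB_DX    == (1 << 0) | (1 << 3)  == 0x09   (reader + writer bits)
 * A writer is recognised by testing that both bits of a lock type are set,
 * e.g. (flags & EXT4_LB_DE) == EXT4_LB_DE in ext4_htree_safe_mode() in namei.c. */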
1125 +
1126 +enum ext4_pdo_lock_opc {
1127 +       /* external */
1128 +       EXT4_HLOCK_READDIR      = (EXT4_LB_DE_RO | EXT4_LB_DX_RO),
1129 +       EXT4_HLOCK_LOOKUP       = (EXT4_LB_DE_RO | EXT4_LB_SPIN_RO |
1130 +                                  EXT4_LB_EXACT),
1131 +       EXT4_HLOCK_DEL          = (EXT4_LB_DE | EXT4_LB_SPIN_RO |
1132 +                                  EXT4_LB_EXACT),
1133 +       EXT4_HLOCK_ADD          = (EXT4_LB_DE | EXT4_LB_SPIN_RO),
1134 +
1135 +       /* internal */
1136 +       EXT4_HLOCK_LOOKUP_SAFE  = (EXT4_LB_DE_RO | EXT4_LB_DX_RO |
1137 +                                  EXT4_LB_EXACT),
1138 +       EXT4_HLOCK_DEL_SAFE     = (EXT4_LB_DE | EXT4_LB_DX_RO | EXT4_LB_EXACT),
1139 +       EXT4_HLOCK_SPLIT        = (EXT4_LB_DE | EXT4_LB_DX | EXT4_LB_SPIN),
1140 +};
1141 +
1142 +extern struct htree_lock_head *ext4_htree_lock_head_alloc(unsigned hbits);
1143 +#define ext4_htree_lock_head_free(lhead)       htree_lock_head_free(lhead)
1144 +
1145 +extern struct htree_lock *ext4_htree_lock_alloc(void);
1146 +#define ext4_htree_lock_free(lck)              htree_lock_free(lck)
1147 +
1148 +extern void ext4_htree_lock(struct htree_lock *lck,
1149 +                           struct htree_lock_head *lhead,
1150 +                           struct inode *dir, unsigned flags);
1151 +#define ext4_htree_unlock(lck)                  htree_unlock(lck)
1152 +
1153 +extern struct buffer_head * __ext4_find_entry(struct inode *dir,
1154 +                                       const struct qstr *d_name,
1155 +                                       struct ext4_dir_entry_2 **res_dir,
1156 +                                       struct htree_lock *lck);
1157 +extern int __ext4_add_entry(handle_t *handle, struct dentry *dentry,
1158 +                     struct inode *inode, struct htree_lock *lck);
1159  void ext4_get_group_no_and_offset(struct super_block *sb, ext4_fsblk_t blocknr,
1160                         ext4_group_t *blockgrpp, ext4_grpblk_t *offsetp);
1161  
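A sketch of how a PDO-aware caller (for example Lustre's osd-ldiskfs layer) might drive the hooks declared above; this is an editorial illustration, not part of the patch. It assumes @dir and @d_name are in scope, omits error handling, and allocates the lock head locally, whereas in practice the head would be cached per directory:

        struct htree_lock_head *lhead = ext4_htree_lock_head_alloc(HTREE_HBITS_DEF);
        struct htree_lock *lck = ext4_htree_lock_alloc();
        struct ext4_dir_entry_2 *de;
        struct buffer_head *bh;

        ext4_htree_lock(lck, lhead, dir, EXT4_HLOCK_LOOKUP); /* shared tree lock */
        bh = __ext4_find_entry(dir, d_name, &de, lck);       /* child locks taken inside */
        ext4_htree_unlock(lck);

        if (bh)
                brelse(bh);
        ext4_htree_lock_free(lck);
        ext4_htree_lock_head_free(lhead);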
1162 Index: linux-2.6.32-504.3.3.el6.x86_64/fs/ext4/namei.c
1163 ===================================================================
1164 --- linux-2.6.32-504.3.3.el6.x86_64.orig/fs/ext4/namei.c
1165 +++ linux-2.6.32-504.3.3.el6.x86_64/fs/ext4/namei.c
1166 @@ -176,7 +176,7 @@ static struct dx_frame *dx_probe(const s
1167                                  struct inode *dir,
1168                                  struct dx_hash_info *hinfo,
1169                                  struct dx_frame *frame,
1170 -                                int *err);
1171 +                                struct htree_lock *lck, int *err);
1172  static void dx_release(struct dx_frame *frames);
1173  static int dx_make_map(struct ext4_dir_entry_2 *de, unsigned blocksize,
1174                        struct dx_hash_info *hinfo, struct dx_map_entry map[]);
1175 @@ -189,13 +189,13 @@ static void dx_insert_block(struct dx_fr
1176  static int ext4_htree_next_block(struct inode *dir, __u32 hash,
1177                                  struct dx_frame *frame,
1178                                  struct dx_frame *frames,
1179 -                                __u32 *start_hash);
1180 +                                __u32 *start_hash, struct htree_lock *lck);
1181  static struct buffer_head * ext4_dx_find_entry(struct inode *dir,
1182                 const struct qstr *d_name,
1183                 struct ext4_dir_entry_2 **res_dir,
1184 -               int *err);
1185 +               struct htree_lock *lck, int *err);
1186  static int ext4_dx_add_entry(handle_t *handle, struct dentry *dentry,
1187 -                            struct inode *inode);
1188 +                            struct inode *inode, struct htree_lock *lck);
1189  
1190  /*
1191   * p is at least 6 bytes before the end of page
1192 @@ -368,6 +368,225 @@ struct stats dx_show_entries(struct dx_h
1193  }
1194  #endif /* DX_DEBUG */
1195  
1196 +/* private data for htree_lock */
1197 +struct ext4_dir_lock_data {
1198 +       unsigned                ld_flags;  /* bits-map for lock types */
1199 +       unsigned                ld_count;  /* # entries of the last DX block */
1200 +       struct dx_entry         ld_at_entry; /* copy of leaf dx_entry */
1201 +       struct dx_entry         *ld_at;    /* position of leaf dx_entry */
1202 +};
1203 +
1204 +#define ext4_htree_lock_data(l) ((struct ext4_dir_lock_data *)(l)->lk_private)
1205 +#define ext4_find_entry(dir, name, dirent) __ext4_find_entry(dir, name, dirent, NULL)
1206 +#define ext4_add_entry(handle, dentry, inode) __ext4_add_entry(handle, dentry, inode, NULL)
1207 +
1208 +/* NB: ext4_lblk_t is 32 bits so we use high bits to identify invalid blk */
1209 +#define EXT4_HTREE_NODE_CHANGED        (0xcafeULL << 32)
1210 +
1211 +static void ext4_htree_event_cb(void *target, void *event)
1212 +{
1213 +       u64 *block = (u64 *)target;
1214 +
1215 +       if (*block == dx_get_block((struct dx_entry *)event))
1216 +               *block = EXT4_HTREE_NODE_CHANGED;
1217 +}
1218 +
1219 +struct htree_lock_head *ext4_htree_lock_head_alloc(unsigned hbits)
1220 +{
1221 +       struct htree_lock_head *lhead;
1222 +
1223 +       lhead = htree_lock_head_alloc(EXT4_LK_MAX, hbits, 0);
1224 +       if (lhead != NULL) {
1225 +               htree_lock_event_attach(lhead, EXT4_LK_SPIN, HTREE_EVENT_WR,
1226 +                                       ext4_htree_event_cb);
1227 +       }
1228 +       return lhead;
1229 +}
1230 +EXPORT_SYMBOL(ext4_htree_lock_head_alloc);
1231 +
1232 +struct htree_lock *ext4_htree_lock_alloc(void)
1233 +{
1234 +       return htree_lock_alloc(EXT4_LK_MAX,
1235 +                               sizeof(struct ext4_dir_lock_data));
1236 +}
1237 +EXPORT_SYMBOL(ext4_htree_lock_alloc);
1238 +
1239 +static htree_lock_mode_t ext4_htree_mode(unsigned flags)
1240 +{
1241 +       switch (flags) {
1242 +       default: /* 0 or unknown flags require EX lock */
1243 +               return HTREE_LOCK_EX;
1244 +       case EXT4_HLOCK_READDIR:
1245 +               return HTREE_LOCK_PR;
1246 +       case EXT4_HLOCK_LOOKUP:
1247 +               return HTREE_LOCK_CR;
1248 +       case EXT4_HLOCK_DEL:
1249 +       case EXT4_HLOCK_ADD:
1250 +               return HTREE_LOCK_CW;
1251 +       }
1252 +}
1253 +
1254 +/* return PR for read-only operations, otherwise return EX */
1255 +static inline htree_lock_mode_t ext4_htree_safe_mode(unsigned flags)
1256 +{
1257 +       int writer = (flags & EXT4_LB_DE) == EXT4_LB_DE;
1258 +
1259 +       /* 0 requires EX lock */
1260 +       return (flags == 0 || writer) ? HTREE_LOCK_EX : HTREE_LOCK_PR;
1261 +}
1262 +
1263 +static int ext4_htree_safe_locked(struct htree_lock *lck)
1264 +{
1265 +       int writer;
1266 +
1267 +       if (lck == NULL || lck->lk_mode == HTREE_LOCK_EX)
1268 +               return 1;
1269 +
1270 +       writer = (ext4_htree_lock_data(lck)->ld_flags & EXT4_LB_DE) ==
1271 +                EXT4_LB_DE;
1272 +       if (writer) /* all readers & writers are excluded? */
1273 +               return lck->lk_mode == HTREE_LOCK_EX;
1274 +
1275 +       /* all writers are excluded? */
1276 +       return lck->lk_mode == HTREE_LOCK_PR ||
1277 +              lck->lk_mode == HTREE_LOCK_PW ||
1278 +              lck->lk_mode == HTREE_LOCK_EX;
1279 +}
1280 +
1281 +/* relock htree_lock with EX mode if it's a change operation, otherwise
1282 + * relock it with PR mode. It's a noop if PDO is disabled. */
1283 +static void ext4_htree_safe_relock(struct htree_lock *lck)
1284 +{
1285 +       if (!ext4_htree_safe_locked(lck)) {
1286 +               unsigned flags = ext4_htree_lock_data(lck)->ld_flags;
1287 +
1288 +               htree_change_lock(lck, ext4_htree_safe_mode(flags));
1289 +       }
1290 +}
1291 +
1292 +void ext4_htree_lock(struct htree_lock *lck, struct htree_lock_head *lhead,
1293 +                    struct inode *dir, unsigned flags)
1294 +{
1295 +       htree_lock_mode_t mode = is_dx(dir) ? ext4_htree_mode(flags) :
1296 +                                             ext4_htree_safe_mode(flags);
1297 +
1298 +       ext4_htree_lock_data(lck)->ld_flags = flags;
1299 +       htree_lock(lck, lhead, mode);
1300 +       if (!is_dx(dir))
1301 +               ext4_htree_safe_relock(lck); /* make sure it's safe locked */
1302 +}
1303 +EXPORT_SYMBOL(ext4_htree_lock);
1304 +
1305 +static int ext4_htree_node_lock(struct htree_lock *lck, struct dx_entry *at,
1306 +                               unsigned lmask, int wait, void *ev)
1307 +{
1308 +       u32     key = (at == NULL) ? 0 : dx_get_block(at);
1309 +       u32     mode;
1310 +
1311 +       /* NOOP if htree is well protected or caller doesn't require the lock */
1312 +       if (ext4_htree_safe_locked(lck) ||
1313 +          !(ext4_htree_lock_data(lck)->ld_flags & lmask))
1314 +               return 1;
1315 +
1316 +       mode = (ext4_htree_lock_data(lck)->ld_flags & lmask) == lmask ?
1317 +               HTREE_LOCK_PW : HTREE_LOCK_PR;
1318 +       while (1) {
1319 +               if (htree_node_lock_try(lck, mode, key, ffz(~lmask), wait, ev))
1320 +                       return 1;
1321 +               if (!(lmask & EXT4_LB_SPIN)) /* not a spinlock */
1322 +                       return 0;
1323 +               cpu_relax(); /* spin until granted */
1324 +       }
1325 +}
1326 +
1327 +static int ext4_htree_node_locked(struct htree_lock *lck, unsigned lmask)
1328 +{
1329 +       return ext4_htree_safe_locked(lck) ||
1330 +              htree_node_is_granted(lck, ffz(~lmask));
1331 +}
1332 +
1333 +static void ext4_htree_node_unlock(struct htree_lock *lck,
1334 +                                  unsigned lmask, void *buf)
1335 +{
1336 +       /* NB: it's safe to call multiple times, even if it's not locked */
1337 +       if (!ext4_htree_safe_locked(lck) &&
1338 +            htree_node_is_granted(lck, ffz(~lmask)))
1339 +               htree_node_unlock(lck, ffz(~lmask), buf);
1340 +}
1341 +
1342 +#define ext4_htree_dx_lock(lck, key)           \
1343 +       ext4_htree_node_lock(lck, key, EXT4_LB_DX, 1, NULL)
1344 +#define ext4_htree_dx_lock_try(lck, key)       \
1345 +       ext4_htree_node_lock(lck, key, EXT4_LB_DX, 0, NULL)
1346 +#define ext4_htree_dx_unlock(lck)              \
1347 +       ext4_htree_node_unlock(lck, EXT4_LB_DX, NULL)
1348 +#define ext4_htree_dx_locked(lck)              \
1349 +       ext4_htree_node_locked(lck, EXT4_LB_DX)
1350 +
1351 +static void ext4_htree_dx_need_lock(struct htree_lock *lck)
1352 +{
1353 +       struct ext4_dir_lock_data *ld;
1354 +
1355 +       if (ext4_htree_safe_locked(lck))
1356 +               return;
1357 +
1358 +       ld = ext4_htree_lock_data(lck);
1359 +       switch (ld->ld_flags) {
1360 +       default:
1361 +               return;
1362 +       case EXT4_HLOCK_LOOKUP:
1363 +               ld->ld_flags = EXT4_HLOCK_LOOKUP_SAFE;
1364 +               return;
1365 +       case EXT4_HLOCK_DEL:
1366 +               ld->ld_flags = EXT4_HLOCK_DEL_SAFE;
1367 +               return;
1368 +       case EXT4_HLOCK_ADD:
1369 +               ld->ld_flags = EXT4_HLOCK_SPLIT;
1370 +               return;
1371 +       }
1372 +}
1373 +
1374 +#define ext4_htree_de_lock(lck, key)           \
1375 +       ext4_htree_node_lock(lck, key, EXT4_LB_DE, 1, NULL)
1376 +#define ext4_htree_de_unlock(lck)              \
1377 +       ext4_htree_node_unlock(lck, EXT4_LB_DE, NULL)
1378 +
1379 +#define ext4_htree_spin_lock(lck, key, event)  \
1380 +       ext4_htree_node_lock(lck, key, EXT4_LB_SPIN, 0, event)
1381 +#define ext4_htree_spin_unlock(lck)            \
1382 +       ext4_htree_node_unlock(lck, EXT4_LB_SPIN, NULL)
1383 +#define ext4_htree_spin_unlock_listen(lck, p)  \
1384 +       ext4_htree_node_unlock(lck, EXT4_LB_SPIN, p)
1385 +
1386 +static void ext4_htree_spin_stop_listen(struct htree_lock *lck)
1387 +{
1388 +       if (!ext4_htree_safe_locked(lck) &&
1389 +           htree_node_is_listening(lck, ffz(~EXT4_LB_SPIN)))
1390 +               htree_node_stop_listen(lck, ffz(~EXT4_LB_SPIN));
1391 +}
1392 +
1393 +enum {
1394 +       DX_HASH_COL_IGNORE,     /* ignore collision while probing frames */
1395 +       DX_HASH_COL_YES,        /* there is collision and it does matter */
1396 +       DX_HASH_COL_NO,         /* there is no collision */
1397 +};
1398 +
1399 +static int dx_probe_hash_collision(struct htree_lock *lck,
1400 +                                  struct dx_entry *entries,
1401 +                                  struct dx_entry *at, u32 hash)
1402 +{
1403 +       if (!(lck && ext4_htree_lock_data(lck)->ld_flags & EXT4_LB_EXACT)) {
1404 +               return DX_HASH_COL_IGNORE; /* don't care about collision */
1405 +
1406 +       } else if (at == entries + dx_get_count(entries) - 1) {
1407 +               return DX_HASH_COL_IGNORE; /* not in any leaf of this DX */
1408 +
1409 +       } else { /* hash collision? */
1410 +               return ((dx_get_hash(at + 1) & ~1) == hash) ?
1411 +                       DX_HASH_COL_YES : DX_HASH_COL_NO;
1412 +       }
1413 +}
1414 +
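The collision test above can be sketched in plain userspace C. This is an illustration only, not part of the patch; the names are invented, and the convention that the low bit of a stored index hash marks a continuation into the next leaf is assumed here (it is consistent with the "hash2 + continued" arithmetic used later in do_split()). A lookup only needs the last-level DX lock when the looked-up hash can legitimately continue into the next leaf.

/*
 * Userspace model of the dx_probe_hash_collision() decision; illustration only.
 * Build: cc -o col col.c
 */
#include <stdio.h>

enum { COL_IGNORE, COL_YES, COL_NO };

struct entry { unsigned hash; unsigned block; };   /* one index entry */

static int probe_collision(const struct entry *entries, int count,
                           int at, unsigned hash, int exact)
{
        if (!exact)
                return COL_IGNORE;  /* caller doesn't care about collisions */
        if (at == count - 1)
                return COL_IGNORE;  /* last entry: no next leaf in this block */
        /* collision iff the next leaf starts at the same (masked) hash */
        return ((entries[at + 1].hash & ~1u) == hash) ? COL_YES : COL_NO;
}

int main(void)
{
        /* hash 0x30 continues into the next leaf (0x31 = 0x30 | low bit) */
        struct entry e[] = { {0x00, 10}, {0x20, 11}, {0x31, 12} };

        printf("%d\n", probe_collision(e, 3, 1, 0x30, 1)); /* COL_YES    */
        printf("%d\n", probe_collision(e, 3, 1, 0x20, 1)); /* COL_NO     */
        printf("%d\n", probe_collision(e, 3, 2, 0x40, 1)); /* COL_IGNORE */
        return 0;
}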
1415  /*
1416   * Probe for a directory leaf block to search.
1417   *
1418 @@ -379,10 +598,11 @@ struct stats dx_show_entries(struct dx_h
1419   */
1420  static struct dx_frame *
1421  dx_probe(const struct qstr *d_name, struct inode *dir,
1422 -        struct dx_hash_info *hinfo, struct dx_frame *frame_in, int *err)
1423 +        struct dx_hash_info *hinfo, struct dx_frame *frame_in,
1424 +        struct htree_lock *lck, int *err)
1425  {
1426         unsigned count, indirect;
1427 -       struct dx_entry *at, *entries, *p, *q, *m;
1428 +       struct dx_entry *at, *entries, *p, *q, *m, *dx = NULL;
1429         struct dx_root_info * info;
1430         struct buffer_head *bh;
1431         struct dx_frame *frame = frame_in;
1432 @@ -447,8 +667,15 @@ dx_probe(const struct qstr *d_name, stru
1433         dxtrace(printk("Look up %x", hash));
1434         while (1)
1435         {
1436 +               if (indirect == 0) { /* the last index level */
1437 +                       /* NB: ext4_htree_dx_lock() could be a noop if the
1438 +                        * DX-lock flag is not set for the current operation */
1439 +                       ext4_htree_dx_lock(lck, dx);
1440 +                       ext4_htree_spin_lock(lck, dx, NULL);
1441 +               }
1442                 count = dx_get_count(entries);
1443 -               if (!count || count > dx_get_limit(entries)) {
1444 +               if (count == 0 || count > dx_get_limit(entries)) {
1445 +                       ext4_htree_spin_unlock(lck); /* release spin */
1446                         ext4_warning(dir->i_sb,
1447                                      "dx entry: no count or count > limit");
1448                         brelse(bh);
1449 @@ -489,9 +716,73 @@ dx_probe(const struct qstr *d_name, stru
1450                 frame->bh = bh;
1451                 frame->entries = entries;
1452                 frame->at = at;
1453 -               if (!indirect--) return frame;
1454 +
1455 +               if (indirect == 0) { /* the last index level */
1456 +                       struct ext4_dir_lock_data *ld;
1457 +                       u64 myblock;
1458 +
1459 +                       /* By default we only lock the DE-block; however, we
1460 +                        * also lock the last level DX-block if:
1461 +                        * a) there is a hash collision
1462 +                        *    we set the DX-lock flag (a few lines below)
1463 +                        *    and retry to lock the DX-block,
1464 +                        *    see details in dx_probe_hash_collision()
1465 +                        * b) it's a retry from a split
1466 +                        *    we need to lock the last level DX-block so nobody
1467 +                        *    else can split any leaf blocks under the same
1468 +                        *    DX-block, see details in ext4_dx_add_entry()
1469 +                        */
1470 +                       if (ext4_htree_dx_locked(lck)) {
1471 +                               /* DX-block is locked, just lock DE-block
1472 +                                * and return */
1473 +                               ext4_htree_spin_unlock(lck);
1474 +                               if (!ext4_htree_safe_locked(lck))
1475 +                                       ext4_htree_de_lock(lck, frame->at);
1476 +                               return frame;
1477 +                       }
1478 +                       /* it's pdirop and no DX lock */
1479 +                       if (dx_probe_hash_collision(lck, entries, at, hash) ==
1480 +                           DX_HASH_COL_YES) {
1481 +                               /* found a hash collision, set the DX-lock
1482 +                                * flag and retry to obtain the DX-lock */
1483 +                               ext4_htree_spin_unlock(lck);
1484 +                               ext4_htree_dx_need_lock(lck);
1485 +                               continue;
1486 +                       }
1487 +                       ld = ext4_htree_lock_data(lck);
1488 +                       /* because I don't lock DX, @at can't be trusted
1489 +                        * after I release the spinlock, so I have to save it */
1490 +                       ld->ld_at = at;
1491 +                       ld->ld_at_entry = *at;
1492 +                       ld->ld_count = dx_get_count(entries);
1493 +
1494 +                       frame->at = &ld->ld_at_entry;
1495 +                       myblock = dx_get_block(at);
1496 +
1497 +                       /* NB: the ordering of the locking calls below matters */
1498 +                       ext4_htree_spin_unlock_listen(lck, &myblock);
1499 +                       /* another thread can split this DE-block because:
1500 +                        * a) I don't have the lock for the DE-block yet
1501 +                        * b) I released the spinlock on the DX-block
1502 +                        * if that happens I can detect it by listening for
1503 +                        * the split event on this DE-block */
1504 +                       ext4_htree_de_lock(lck, frame->at);
1505 +                       ext4_htree_spin_stop_listen(lck);
1506 +
1507 +                       if (myblock == EXT4_HTREE_NODE_CHANGED) {
1508 +                               /* someone split this DE-block before
1509 +                                * I locked it; I need to retry and lock
1510 +                                * the valid DE-block */
1511 +                               ext4_htree_de_unlock(lck);
1512 +                               continue;
1513 +                       }
1514 +                       return frame;
1515 +               }
1516 +               dx = at;
1517 +               indirect--;
1518                 if (!(bh = ext4_bread (NULL,dir, dx_get_block(at), 0, err)))
1519                         goto fail2;
1520 +
1521                 at = entries = ((struct dx_node *) bh->b_data)->entries;
1522                 if (dx_get_limit(entries) != dx_node_limit (dir)) {
1523                         ext4_warning(dir->i_sb,
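The unlock/listen/relock retry used above for the DE-block can be modelled outside the kernel with two mutexes standing in for the index spinlock and the sleeping DE lock. This sketch is not part of the patch; all names are invented, and a real split would of course race in from another thread rather than being called inline.

/*
 * Userspace model of the "save block, listen, take DE lock, check, retry"
 * protocol in dx_probe(); illustration only.  Build: cc -pthread -o probe probe.c
 */
#include <pthread.h>
#include <stdint.h>
#include <stdio.h>

#define NODE_CHANGED ((uint64_t)-1)

static pthread_mutex_t index_lock = PTHREAD_MUTEX_INITIALIZER; /* ~ DX spinlock */
static pthread_mutex_t leaf_lock  = PTHREAD_MUTEX_INITIALIZER; /* ~ DE lock     */
static uint64_t leaf_block = 100;   /* block currently holding the hash range   */
static uint64_t *listen_slot;       /* registered by a prober, or NULL          */

static void split_leaf(void)        /* what a concurrent splitter would do      */
{
        pthread_mutex_lock(&index_lock);
        leaf_block = 200;                    /* entries moved to a new block     */
        if (listen_slot)
                *listen_slot = NODE_CHANGED; /* tell the listener it went stale  */
        pthread_mutex_unlock(&index_lock);
}

static uint64_t probe_leaf(void)    /* returns with leaf_lock held              */
{
        uint64_t saved;

        for (;;) {
                pthread_mutex_lock(&index_lock);
                saved = leaf_block;
                listen_slot = &saved;           /* ~ spin_unlock_listen()        */
                pthread_mutex_unlock(&index_lock);

                pthread_mutex_lock(&leaf_lock); /* ~ ext4_htree_de_lock()        */

                pthread_mutex_lock(&index_lock);
                listen_slot = NULL;             /* ~ spin_stop_listen()          */
                pthread_mutex_unlock(&index_lock);

                if (saved != NODE_CHANGED)
                        return saved;           /* locked a still-valid leaf     */

                pthread_mutex_unlock(&leaf_lock); /* stale: retry the walk       */
        }
}

int main(void)
{
        printf("probed leaf %llu\n", (unsigned long long)probe_leaf());
        pthread_mutex_unlock(&leaf_lock);  /* probe_leaf() returns with it held  */

        split_leaf();                      /* simulate a split between probes    */

        printf("probed leaf %llu\n", (unsigned long long)probe_leaf());
        pthread_mutex_unlock(&leaf_lock);
        return 0;
}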
1524 @@ -553,7 +844,7 @@ static void dx_release (struct dx_frame
1525  static int ext4_htree_next_block(struct inode *dir, __u32 hash,
1526                                  struct dx_frame *frame,
1527                                  struct dx_frame *frames,
1528 -                                __u32 *start_hash)
1529 +                                __u32 *start_hash, struct htree_lock *lck)
1530  {
1531         struct dx_frame *p;
1532         struct buffer_head *bh;
1533 @@ -568,12 +859,22 @@ static int ext4_htree_next_block(struct
1534          * this loop, num_frames indicates the number of interior
1535          * nodes need to be read.
1536          */
1537 +       ext4_htree_de_unlock(lck);
1538         while (1) {
1539 -               if (++(p->at) < p->entries + dx_get_count(p->entries))
1540 -                       break;
1541 +               if (num_frames > 0 || ext4_htree_dx_locked(lck)) {
1542 +                       /* num_frames > 0 :
1543 +                        *   this is a DX block
1544 +                        * ext4_htree_dx_locked:
1545 +                        *   frame->at is a reliable pointer returned by dx_probe,
1546 +                        *   otherwise dx_probe already found no collision */
1547 +                       if (++(p->at) < p->entries + dx_get_count(p->entries))
1548 +                               break;
1549 +               }
1550                 if (p == frames)
1551                         return 0;
1552                 num_frames++;
1553 +               if (num_frames == 1)
1554 +                       ext4_htree_dx_unlock(lck);
1555                 p--;
1556         }
1557  
1558 @@ -596,6 +897,13 @@ static int ext4_htree_next_block(struct
1559          * block so no check is necessary
1560          */
1561         while (num_frames--) {
1562 +               if (num_frames == 0) {
1563 +                       /* this is not always necessary; we just don't want
1564 +                        * to detect the hash collision again */
1565 +                       ext4_htree_dx_need_lock(lck);
1566 +                       ext4_htree_dx_lock(lck, p->at);
1567 +               }
1568 +
1569                 if (!(bh = ext4_bread(NULL, dir, dx_get_block(p->at),
1570                                       0, &err)))
1571                         return err; /* Failure */
1572 @@ -604,6 +912,7 @@ static int ext4_htree_next_block(struct
1573                 p->bh = bh;
1574                 p->at = p->entries = ((struct dx_node *) bh->b_data)->entries;
1575         }
1576 +       ext4_htree_de_lock(lck, p->at);
1577         return 1;
1578  }
1579  
1580 @@ -696,10 +1005,10 @@ int ext4_htree_fill_tree(struct file *di
1581         }
1582         hinfo.hash = start_hash;
1583         hinfo.minor_hash = 0;
1584 -       frame = dx_probe(NULL, dir, &hinfo, frames, &err);
1585 +       /* assume it's PR locked */
1586 +       frame = dx_probe(NULL, dir, &hinfo, frames, NULL, &err);
1587         if (!frame)
1588                 return err;
1589 -
1590         /* Add '.' and '..' from the htree header */
1591         if (!start_hash && !start_minor_hash) {
1592                 de = (struct ext4_dir_entry_2 *) frames[0].bh->b_data;
1593 @@ -726,7 +1035,7 @@ int ext4_htree_fill_tree(struct file *di
1594                 count += ret;
1595                 hashval = ~0;
1596                 ret = ext4_htree_next_block(dir, HASH_NB_ALWAYS,
1597 -                                           frame, frames, &hashval);
1598 +                                           frame, frames, &hashval, NULL);
1599                 *next_hash = hashval;
1600                 if (ret < 0) {
1601                         err = ret;
1602 @@ -826,9 +1135,17 @@ static void dx_insert_block(struct dx_fr
1603  
1604  static void ext4_update_dx_flag(struct inode *inode)
1605  {
1606 +       /* Disable this for ldiskfs, because going from a DX directory to
1607 +        * a non-DX directory while it is in use would completely break
1608 +        * the htree-locking.
1609 +        * If we really want to support this operation in the future,
1610 +        * we would need to exclusively lock the directory here, which
1611 +        * would increase the complexity of the code */
1612 +#if 0
1613         if (!EXT4_HAS_COMPAT_FEATURE(inode->i_sb,
1614                                      EXT4_FEATURE_COMPAT_DIR_INDEX))
1615                 ext4_clear_inode_flag(inode, EXT4_INODE_INDEX);
1616 +#endif
1617  }
1618  
1619  /*
1620 @@ -900,9 +1217,10 @@ static inline int search_dirblock(struct
1621   * The returned buffer_head has ->b_count elevated.  The caller is expected
1622   * to brelse() it when appropriate.
1623   */
1624 -static struct buffer_head * ext4_find_entry (struct inode *dir,
1625 +struct buffer_head * __ext4_find_entry(struct inode *dir,
1626                                         const struct qstr *d_name,
1627 -                                       struct ext4_dir_entry_2 ** res_dir)
1628 +                                       struct ext4_dir_entry_2 **res_dir,
1629 +                                       struct htree_lock *lck)
1630  {
1631         struct super_block *sb;
1632         struct buffer_head *bh_use[NAMEI_RA_SIZE];
1633 @@ -923,7 +1241,7 @@ static struct buffer_head * ext4_find_en
1634         if (namelen > EXT4_NAME_LEN)
1635                 return NULL;
1636         if (is_dx(dir)) {
1637 -               bh = ext4_dx_find_entry(dir, d_name, res_dir, &err);
1638 +               bh = ext4_dx_find_entry(dir, d_name, res_dir, lck, &err);
1639                 /*
1640                  * On success, or if the error was file not found,
1641                  * return.  Otherwise, fall back to doing a search the
1642 @@ -933,6 +1251,7 @@ static struct buffer_head * ext4_find_en
1643                         return bh;
1644                 dxtrace(printk(KERN_DEBUG "ext4_find_entry: dx failed, "
1645                                "falling back\n"));
1646 +               ext4_htree_safe_relock(lck);
1647         }
1648         nblocks = dir->i_size >> EXT4_BLOCK_SIZE_BITS(sb);
1649         start = EXT4_I(dir)->i_dir_start_lookup;
1650 @@ -1008,9 +1327,12 @@ cleanup_and_exit:
1651                 brelse(bh_use[ra_ptr]);
1652         return ret;
1653  }
1654 +EXPORT_SYMBOL(__ext4_find_entry);
1655  
1656 -static struct buffer_head * ext4_dx_find_entry(struct inode *dir, const struct qstr *d_name,
1657 -                      struct ext4_dir_entry_2 **res_dir, int *err)
1658 +static struct buffer_head * ext4_dx_find_entry(struct inode *dir,
1659 +                               const struct qstr *d_name,
1660 +                               struct ext4_dir_entry_2 **res_dir,
1661 +                               struct htree_lock *lck, int *err)
1662  {
1663         struct super_block * sb;
1664         struct dx_hash_info     hinfo;
1665 @@ -1026,13 +1348,16 @@ static struct buffer_head * ext4_dx_find
1666         sb = dir->i_sb;
1667         /* NFS may look up ".." - look at dx_root directory block */
1668         if (namelen > 2 || name[0] != '.'||(name[1] != '.' && name[1] != '\0')){
1669 -               if (!(frame = dx_probe(d_name, dir, &hinfo, frames, err)))
1670 +               if (!(frame = dx_probe(d_name, dir, &hinfo, frames, lck, err)))
1671                         return NULL;
1672         } else {
1673                 frame = frames;
1674                 frame->bh = NULL;                       /* for dx_release() */
1675                 frame->at = (struct dx_entry *)frames;  /* hack for zero entry*/
1676                 dx_set_block(frame->at, 0);             /* dx_root block is 0 */
1677 +               /* "." and ".." are stored in root DX lock */
1678 +               ext4_htree_dx_need_lock(lck);
1679 +               ext4_htree_dx_lock(lck, NULL);
1680         }
1681         hash = hinfo.hash;
1682         do {
1683 @@ -1061,7 +1386,7 @@ static struct buffer_head * ext4_dx_find
1684                 brelse(bh);
1685                 /* Check to see if we should continue to search */
1686                 retval = ext4_htree_next_block(dir, hash, frame,
1687 -                                              frames, NULL);
1688 +                                              frames, NULL, lck);
1689                 if (retval < 0) {
1690                         ext4_warning(sb,
1691                              "error reading index page in directory #%lu",
1692 @@ -1244,8 +1569,9 @@ static struct ext4_dir_entry_2* dx_pack_
1693   * Returns pointer to de in block into which the new entry will be inserted.
1694   */
1695  static struct ext4_dir_entry_2 *do_split(handle_t *handle, struct inode *dir,
1696 -                       struct buffer_head **bh,struct dx_frame *frame,
1697 -                       struct dx_hash_info *hinfo, int *error)
1698 +                       struct buffer_head **bh, struct dx_frame *frames,
1699 +                       struct dx_frame *frame, struct dx_hash_info *hinfo,
1700 +                       struct htree_lock *lck, int *error)
1701  {
1702         unsigned blocksize = dir->i_sb->s_blocksize;
1703         unsigned count, continued;
1704 @@ -1302,7 +1628,14 @@ static struct ext4_dir_entry_2 *do_split
1705                                         hash2, split, count-split));
1706  
1707         /* Fancy dance to stay within two buffers */
1708 -       de2 = dx_move_dirents(data1, data2, map + split, count - split, blocksize);
1709 +       if (hinfo->hash < hash2) {
1710 +               de2 = dx_move_dirents(data1, data2, map + split,
1711 +                                     count - split, blocksize);
1712 +       } else {
1713 +               /* make sure we will add the entry to the block which
1714 +                * we have already locked */
1715 +               de2 = dx_move_dirents(data1, data2, map, split, blocksize);
1716 +       }
1717         de = dx_pack_dirents(data1, blocksize);
1718         de->rec_len = ext4_rec_len_to_disk(data1 + blocksize - (char *) de,
1719                                            blocksize);
1720 @@ -1311,13 +1644,21 @@ static struct ext4_dir_entry_2 *do_split
1721         dxtrace(dx_show_leaf (hinfo, (struct ext4_dir_entry_2 *) data1, blocksize, 1));
1722         dxtrace(dx_show_leaf (hinfo, (struct ext4_dir_entry_2 *) data2, blocksize, 1));
1723  
1724 -       /* Which block gets the new entry? */
1725 -       if (hinfo->hash >= hash2)
1726 -       {
1727 -               swap(*bh, bh2);
1728 -               de = de2;
1729 +       ext4_htree_spin_lock(lck, frame > frames ? (frame - 1)->at : NULL,
1730 +                            frame->at); /* notify block is being split */
1731 +       if (hinfo->hash < hash2) {
1732 +               dx_insert_block(frame, hash2 + continued, newblock);
1733 +
1734 +       } else {
1735 +               /* switch block number */
1736 +               dx_insert_block(frame, hash2 + continued,
1737 +                               dx_get_block(frame->at));
1738 +               dx_set_block(frame->at, newblock);
1739 +               (frame->at)++;
1740         }
1741 -       dx_insert_block(frame, hash2 + continued, newblock);
1742 +       ext4_htree_spin_unlock(lck);
1743 +       ext4_htree_dx_unlock(lck);
1744 +
1745         err = ext4_handle_dirty_metadata(handle, dir, bh2);
1746         if (err)
1747                 goto journal_error;
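The effect of the new split policy on the parent index can be shown with a few lines of userspace C: the leaf we already hold locked keeps the half that contains the hash being inserted, and when that is the upper half the existing index slot is redirected to the freshly allocated block while the new slot keeps the locked block, mirroring the dx_insert_block()/dx_set_block() sequence above. This is a sketch only; the structure and names below are invented.

/*
 * Userspace model of the index update done after a leaf split; illustration
 * only.  Build: cc -o split split.c
 */
#include <stdio.h>

struct dx { unsigned hash; unsigned block; };   /* one parent index entry */

static void split_index(struct dx *at, struct dx *next,
                        unsigned hash2, unsigned target, unsigned fresh)
{
        if (target < hash2) {
                /* lower half stays in the locked block: new slot -> fresh */
                next->hash  = hash2;
                next->block = fresh;
        } else {
                /* upper half stays in the locked block: redirect the old
                 * slot to the fresh block, keep the locked block for the
                 * upper range that will receive the new entry */
                next->hash  = hash2;
                next->block = at->block;        /* upper range -> locked block */
                at->block   = fresh;            /* lower range -> fresh block  */
        }
}

int main(void)
{
        struct dx at = { 0x20, 7 }, next;       /* leaf block 7 is the locked one */

        split_index(&at, &next, 0x40, 0x45, 9); /* target 0x45 is in the upper half */
        printf("[%#x -> blk %u] [%#x -> blk %u]\n",
               at.hash, at.block, next.hash, next.block);
        /* prints: [0x20 -> blk 9] [0x40 -> blk 7]: target range stays on blk 7 */
        return 0;
}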
1748 @@ -1558,8 +1899,8 @@ static int make_indexed_dir(handle_t *ha
1749         retval = ext4_handle_dirty_metadata(handle, dir, bh2);
1750         if (retval)
1751                 goto out_frames;
1752  
1753 -       de = do_split(handle,dir, &bh2, frame, &hinfo, &retval);
1754 +       de = do_split(handle,dir, &bh2, frames, frame, &hinfo, NULL, &retval);
1755         if (!de)
1756                 goto out_frames;
1757  
1758 @@ -1664,8 +2005,8 @@ out:
1759   * may not sleep between calling this and putting something into
1760   * the entry, as someone else might have used it while you slept.
1761   */
1762 -static int ext4_add_entry(handle_t *handle, struct dentry *dentry,
1763 -                         struct inode *inode)
1764 +int __ext4_add_entry(handle_t *handle, struct dentry *dentry,
1765 +                     struct inode *inode, struct htree_lock *lck)
1766  {
1767         struct inode *dir = dentry->d_parent->d_inode;
1768         struct buffer_head *bh;
1769 @@ -1684,9 +2025,10 @@ static int ext4_add_entry(handle_t *hand
1770                 if (dentry->d_name.len == 2 &&
1771                     memcmp(dentry->d_name.name, "..", 2) == 0)
1772                         return ext4_update_dotdot(handle, dentry, inode);
1773 -               retval = ext4_dx_add_entry(handle, dentry, inode);
1774 +               retval = ext4_dx_add_entry(handle, dentry, inode, lck);
1775                 if (!retval || (retval != ERR_BAD_DX_DIR))
1776                         return retval;
1777 +               ext4_htree_safe_relock(lck);
1778                 ext4_clear_inode_flag(dir, EXT4_INODE_INDEX);
1779                 dx_fallback++;
1780                 ext4_mark_inode_dirty(handle, dir);
1781 @@ -1717,12 +2059,13 @@ static int ext4_add_entry(handle_t *hand
1782         brelse(bh);
1783         return retval;
1784  }
1785 +EXPORT_SYMBOL(__ext4_add_entry);
1786  
1787  /*
1788   * Returns 0 for success, or a negative error value
1789   */
1790  static int ext4_dx_add_entry(handle_t *handle, struct dentry *dentry,
1791 -                            struct inode *inode)
1792 +                            struct inode *inode, struct htree_lock *lck)
1793  {
1794         struct dx_frame frames[EXT4_HTREE_LEVEL], *frame;
1795         struct dx_entry *entries, *at;
1796 @@ -1736,7 +2079,7 @@ static int ext4_dx_add_entry(handle_t *h
1797  
1798  again:
1799         restart = 0;
1800 -       frame = dx_probe(&dentry->d_name, dir, &hinfo, frames, &err);
1801 +       frame = dx_probe(&dentry->d_name, dir, &hinfo, frames, lck, &err);
1802         if (!frame)
1803                 return err;
1804         entries = frame->entries;
1805 @@ -1763,6 +2106,11 @@ again:
1806                 struct dx_node *node2;
1807                 struct buffer_head *bh2;
1808  
1809 +               if (!ext4_htree_safe_locked(lck)) { /* retry with EX lock */
1810 +                       ext4_htree_safe_relock(lck);
1811 +                       restart = 1;
1812 +                       goto cleanup;
1813 +               }
1814                 while (frame > frames) {
1815                         if (dx_get_count((frame - 1)->entries) <
1816                             dx_get_limit((frame - 1)->entries)) {
1817 @@ -1860,16 +2208,43 @@ again:
1818                         restart = 1;
1819                         goto cleanup;
1820                 }
1821 +       } else if (!ext4_htree_dx_locked(lck)) {
1822 +               struct ext4_dir_lock_data *ld = ext4_htree_lock_data(lck);
1823 +
1824 +               /* not well protected, require DX lock */
1825 +               ext4_htree_dx_need_lock(lck);
1826 +               at = frame > frames ? (frame - 1)->at : NULL;
1827 +
1828 +               /* NB: no risk of deadlock because it's just a try-lock.
1829 +                *
1830 +                * NB: we check ld_count twice, the first time before
1831 +                * taking the DX lock, the second time while holding it.
1832 +                *
1833 +                * NB: we never free directory blocks so far, which
1834 +                * means the value returned by dx_get_count() should equal
1835 +                * ld->ld_count if nobody split any DE-block under @at,
1836 +                * and ld->ld_at still points to a valid dx_entry. */
1837 +               if ((ld->ld_count != dx_get_count(entries)) ||
1838 +                   !ext4_htree_dx_lock_try(lck, at) ||
1839 +                   (ld->ld_count != dx_get_count(entries))) {
1840 +                       restart = 1;
1841 +                       goto cleanup;
1842 +               }
1843 +               /* OK, I've got the DX lock and nothing changed */
1844 +               frame->at = ld->ld_at;
1845         }
1846 -       de = do_split(handle, dir, &bh, frame, &hinfo, &err);
1847 +       de = do_split(handle, dir, &bh, frames, frame, &hinfo, lck, &err);
1848         if (!de)
1849                 goto cleanup;
1850 +
1851         err = add_dirent_to_buf(handle, dentry, inode, de, bh);
1852         goto cleanup;
1853  
1854  journal_error:
1855         ext4_std_error(dir->i_sb, err);
1856  cleanup:
1857 +       ext4_htree_dx_unlock(lck);
1858 +       ext4_htree_de_unlock(lck);
1859         if (bh)
1860                 brelse(bh);
1861         dx_release(frames);
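The check / try-lock / re-check step in ext4_dx_add_entry() above is an instance of a common pattern that is easy to show in isolation: validate a cached value, try-lock without blocking, and validate again under the lock, restarting on any failure. The sketch below is not part of the patch and uses invented names.

/*
 * Userspace model of the ld_count revalidation in ext4_dx_add_entry();
 * illustration only.  Build: cc -pthread -o recheck recheck.c
 */
#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t dx_lock = PTHREAD_MUTEX_INITIALIZER;
static int live_count = 4;              /* ~ dx_get_count(entries) in the patch */

/* returns 1 with dx_lock held if the cached view is still valid,
 * 0 (caller must restart the walk) otherwise */
static int revalidate(int cached_count)
{
        if (cached_count != live_count)
                return 0;                        /* changed before we even tried */
        if (pthread_mutex_trylock(&dx_lock) != 0)
                return 0;                        /* contended: restart, don't block */
        if (cached_count != live_count) {        /* re-check under the lock */
                pthread_mutex_unlock(&dx_lock);
                return 0;
        }
        return 1;                                /* nothing changed, lock is held */
}

int main(void)
{
        int cached = live_count;                 /* what the earlier probe saved */

        if (revalidate(cached)) {
                printf("cached frame still valid, proceeding with split\n");
                pthread_mutex_unlock(&dx_lock);
        } else {
                printf("index changed or lock busy, restarting\n");
        }
        return 0;
}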
1862 Index: linux-2.6.32-504.3.3.el6.x86_64/fs/ext4/Makefile
1863 ===================================================================
1864 --- linux-2.6.32-504.3.3.el6.x86_64.orig/fs/ext4/Makefile
1865 +++ linux-2.6.32-504.3.3.el6.x86_64/fs/ext4/Makefile
1866 @@ -6,6 +6,7 @@ obj-$(CONFIG_EXT4_FS) += ext4.o
1867  
1868  ext4-y := balloc.o bitmap.o dir.o file.o fsync.o ialloc.o inode.o \
1869                 ioctl.o namei.o super.o symlink.o hash.o resize.o extents.o \
1870 +               htree_lock.o \
1871                 ext4_jbd2.o migrate.o mballoc.o block_validity.o move_extent.o \
1872                 mmp.o
1873