1 Index: linux-2.6.32-504.3.3.el6.x86_64/include/linux/htree_lock.h
2 ===================================================================
4 +++ linux-2.6.32-504.3.3.el6.x86_64/include/linux/htree_lock.h
7 + * include/linux/htree_lock.h
9 + * Copyright (c) 2011, 2012, Intel Corporation.
11 + * Author: Liang Zhen <liang@whamcloud.com>
17 + * htree_lock is an advanced lock: it supports five lock modes (a concept
18 + * taken from DLM) and it is a sleeping lock.
20 + * The most common use case is:
21 + * - create a htree_lock_head for the data
22 + * - each thread (contender) creates its own htree_lock
23 + * - a contender calls htree_lock(lock_node, mode) to protect the data and
24 + *   calls htree_unlock to release the lock
26 + * There is also a more complex, advanced use case: a user can take a PW/PR
27 + * lock on a particular key. This is mostly used while the user is holding a
28 + * shared lock on the htree (CW, CR):
30 + * htree_lock(lock_node, HTREE_LOCK_CR); lock the htree with CR
31 + * htree_node_lock(lock_node, HTREE_LOCK_PR, key...); lock @key with PR
33 + * htree_node_unlock(lock_node); unlock the key
35 + * We can also have N levels of such keys; all we need to do is specify N
36 + * levels while creating the htree_lock_head, then we can lock/unlock a
37 + * specific level by:
38 + * htree_node_lock(lock_node, mode1, key1, level1...);
40 + * htree_node_lock(lock_node, mode1, key2, level2...);
42 + * htree_node_unlock(lock_node, level2);
43 + * htree_node_unlock(lock_node, level1);
45 + * NB: for multi-level locking, be careful about the locking order to avoid deadlocks
48 +#ifndef _LINUX_HTREE_LOCK_H
49 +#define _LINUX_HTREE_LOCK_H
51 +#include <linux/list.h>
52 +#include <linux/spinlock.h>
53 +#include <linux/sched.h>
57 + * more details can be found here:
58 + * http://en.wikipedia.org/wiki/Distributed_lock_manager
61 + HTREE_LOCK_EX = 0, /* exclusive lock: incompatible with all others */
62 + HTREE_LOCK_PW, /* protected write: allows only CR users */
63 +	HTREE_LOCK_PR,	 /* protected read: allows PR, CR users */
64 +	HTREE_LOCK_CW,	 /* concurrent write: allows CR, CW users */
65 +	HTREE_LOCK_CR,	 /* concurrent read: allows all but EX users */
66 + HTREE_LOCK_MAX, /* number of lock modes */
69 +#define HTREE_LOCK_NL HTREE_LOCK_MAX
70 +#define HTREE_LOCK_INVAL 0xdead10c
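As a reading aid, the mode comments above imply the compatibility matrix below
("+" means the two modes can be granted together). It matches the
htree_lock_compat[] table defined later in fs/ext4/htree_lock.c; the tail of
HTREE_LOCK_COMPAT_CR is truncated in this excerpt, so the PW/CR entries in the
CR row follow the "all but EX" comment:

	       EX  PW  PR  CW  CR
	  EX    -   -   -   -   -
	  PW    -   -   -   -   +
	  PR    -   -   +   -   +
	  CW    -   -   -   +   +
	  CR    -   +   +   +   +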
73 + HTREE_HBITS_MIN = 2,
74 + HTREE_HBITS_DEF = 14,
75 + HTREE_HBITS_MAX = 32,
79 + HTREE_EVENT_DISABLE = (0),
80 + HTREE_EVENT_RD = (1 << HTREE_LOCK_PR),
81 + HTREE_EVENT_WR = (1 << HTREE_LOCK_PW),
82 + HTREE_EVENT_RDWR = (HTREE_EVENT_RD | HTREE_EVENT_WR),
87 +typedef void (*htree_event_cb_t)(void *target, void *event);
89 +struct htree_lock_child {
90 + struct list_head lc_list; /* granted list */
91 + htree_event_cb_t lc_callback; /* event callback */
92 + unsigned lc_events; /* event types */
95 +struct htree_lock_head {
96 + unsigned long lh_lock; /* bits lock */
97 + /* blocked lock list (htree_lock) */
98 + struct list_head lh_blocked_list;
101 + /* hash bits for key and limit number of locks */
103 + /* counters for blocked locks */
104 + u16 lh_nblocked[HTREE_LOCK_MAX];
105 + /* counters for granted locks */
106 + u16 lh_ngranted[HTREE_LOCK_MAX];
109 + /* array of children locks */
110 + struct htree_lock_child lh_children[0];
113 +/* htree_lock_node is the child-lock for a specific key (ln_major/minor_key) */
114 +struct htree_lock_node {
115 + htree_lock_mode_t ln_mode;
116 + /* major hash key */
118 + /* minor hash key */
120 + struct list_head ln_major_list;
121 + struct list_head ln_minor_list;
122 + /* alive list, all locks (granted, blocked, listening) are on it */
123 + struct list_head ln_alive_list;
125 + struct list_head ln_blocked_list;
127 + struct list_head ln_granted_list;
128 + void *ln_ev_target;
132 + struct task_struct *lk_task;
133 + struct htree_lock_head *lk_head;
136 + htree_lock_mode_t lk_mode;
137 + struct list_head lk_blocked_list;
138 + struct htree_lock_node lk_nodes[0];
141 +/* create a lock head, which stands for a resource */
142 +struct htree_lock_head *htree_lock_head_alloc(unsigned depth,
143 + unsigned hbits, unsigned priv);
144 +/* free a lock head */
145 +void htree_lock_head_free(struct htree_lock_head *lhead);
146 +/* register event callback for child lock at level @depth */
147 +void htree_lock_event_attach(struct htree_lock_head *lhead, unsigned depth,
148 + unsigned events, htree_event_cb_t callback);
149 +/* create a lock handle, which stands for a thread */
150 +struct htree_lock *htree_lock_alloc(unsigned depth, unsigned pbytes);
151 +/* free a lock handle */
152 +void htree_lock_free(struct htree_lock *lck);
153 +/* lock htree; when @wait is false, 0 is returned if the lock can't
154 + * be granted immediately */
155 +int htree_lock_try(struct htree_lock *lck, struct htree_lock_head *lhead,
156 + htree_lock_mode_t mode, int wait);
158 +void htree_unlock(struct htree_lock *lck);
159 +/* unlock and relock htree with @new_mode */
160 +int htree_change_lock_try(struct htree_lock *lck,
161 + htree_lock_mode_t new_mode, int wait);
162 +void htree_change_mode(struct htree_lock *lck, htree_lock_mode_t mode);
163 +/* acquire child lock (key) of htree at level @dep, @event will be sent to
164 + * all listeners on this @key while the lock is being granted */
165 +int htree_node_lock_try(struct htree_lock *lck, htree_lock_mode_t mode,
166 + u32 key, unsigned dep, int wait, void *event);
167 +/* release child lock at level @dep; this lock will listen on its key
168 + * if @event isn't NULL, and event_cb will be called against @lck while
169 + * granting any other lock at level @dep with the same key */
170 +void htree_node_unlock(struct htree_lock *lck, unsigned dep, void *event);
171 +/* stop listening on child lock at level @dep */
172 +void htree_node_stop_listen(struct htree_lock *lck, unsigned dep);
174 +void htree_lock_stat_print(int depth);
175 +void htree_lock_stat_reset(void);
177 +#define htree_lock(lck, lh, mode) htree_lock_try(lck, lh, mode, 1)
178 +#define htree_change_lock(lck, mode) htree_change_lock_try(lck, mode, 1)
180 +#define htree_lock_mode(lck) ((lck)->lk_mode)
182 +#define htree_node_lock(lck, mode, key, dep) \
183 + htree_node_lock_try(lck, mode, key, dep, 1, NULL)
184 +/* this is only safe in thread context of lock owner */
185 +#define htree_node_is_granted(lck, dep) \
186 + ((lck)->lk_nodes[dep].ln_mode != HTREE_LOCK_INVAL && \
187 + (lck)->lk_nodes[dep].ln_mode != HTREE_LOCK_NL)
188 +/* this is only safe in thread context of lock owner */
189 +#define htree_node_is_listening(lck, dep) \
190 + ((lck)->lk_nodes[dep].ln_mode == HTREE_LOCK_NL)
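Before moving on to the implementation, here is a minimal usage sketch of the
single-level API declared above. It follows the "advanced use case" from the
header comment (a PW key lock under a shared CW tree lock); the surrounding
function names and error handling are illustrative, not part of the patch:

	#include <linux/errno.h>
	#include <linux/htree_lock.h>

	static struct htree_lock_head *lhead; /* one head per shared resource */

	static int my_init(void)
	{
		lhead = htree_lock_head_alloc(1, HTREE_HBITS_DEF, 0);
		return lhead == NULL ? -ENOMEM : 0;
	}

	static void my_update(u32 key)	/* one htree_lock per thread */
	{
		struct htree_lock *lck = htree_lock_alloc(1, 0);

		if (lck == NULL)
			return;
		htree_lock(lck, lhead, HTREE_LOCK_CW);	/* shared tree lock */
		htree_node_lock(lck, HTREE_LOCK_PW, key, 0); /* exclusive @key */
		/* ... modify the data identified by @key ... */
		htree_node_unlock(lck, 0, NULL); /* NULL: don't keep listening */
		htree_unlock(lck);	/* also drops any leftover node locks */
		htree_lock_free(lck);
	}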
193 Index: linux-2.6.32-504.3.3.el6.x86_64/fs/ext4/htree_lock.c
194 ===================================================================
196 +++ linux-2.6.32-504.3.3.el6.x86_64/fs/ext4/htree_lock.c
199 + * fs/ext4/htree_lock.c
201 + * Copyright (c) 2011, 2012, Intel Corporation.
203 + * Author: Liang Zhen <liang@whamcloud.com>
205 +#include <linux/jbd2.h>
206 +#include <linux/hash.h>
207 +#include <linux/module.h>
208 +#include <linux/htree_lock.h>
211 + HTREE_LOCK_BIT_EX = (1 << HTREE_LOCK_EX),
212 + HTREE_LOCK_BIT_PW = (1 << HTREE_LOCK_PW),
213 + HTREE_LOCK_BIT_PR = (1 << HTREE_LOCK_PR),
214 + HTREE_LOCK_BIT_CW = (1 << HTREE_LOCK_CW),
215 + HTREE_LOCK_BIT_CR = (1 << HTREE_LOCK_CR),
219 + HTREE_LOCK_COMPAT_EX = 0,
220 + HTREE_LOCK_COMPAT_PW = HTREE_LOCK_COMPAT_EX | HTREE_LOCK_BIT_CR,
221 + HTREE_LOCK_COMPAT_PR = HTREE_LOCK_COMPAT_PW | HTREE_LOCK_BIT_PR,
222 + HTREE_LOCK_COMPAT_CW = HTREE_LOCK_COMPAT_PW | HTREE_LOCK_BIT_CW,
223 + HTREE_LOCK_COMPAT_CR = HTREE_LOCK_COMPAT_CW | HTREE_LOCK_BIT_PR |
227 +static int htree_lock_compat[] = {
228 + [HTREE_LOCK_EX] HTREE_LOCK_COMPAT_EX,
229 + [HTREE_LOCK_PW] HTREE_LOCK_COMPAT_PW,
230 + [HTREE_LOCK_PR] HTREE_LOCK_COMPAT_PR,
231 + [HTREE_LOCK_CW] HTREE_LOCK_COMPAT_CW,
232 + [HTREE_LOCK_CR] HTREE_LOCK_COMPAT_CR,
235 +/* max allowed htree-lock depth.
236 + * We only need depth=3 for ext4, although users can choose a higher value. */
237 +#define HTREE_LOCK_DEP_MAX 16
239 +#ifdef HTREE_LOCK_DEBUG
241 +static char *hl_name[] = {
242 + [HTREE_LOCK_EX] "EX",
243 + [HTREE_LOCK_PW] "PW",
244 + [HTREE_LOCK_PR] "PR",
245 + [HTREE_LOCK_CW] "CW",
246 + [HTREE_LOCK_CR] "CR",
250 +struct htree_lock_node_stats {
251 + unsigned long long blocked[HTREE_LOCK_MAX];
252 + unsigned long long granted[HTREE_LOCK_MAX];
253 + unsigned long long retried[HTREE_LOCK_MAX];
254 + unsigned long long events;
257 +struct htree_lock_stats {
258 + struct htree_lock_node_stats nodes[HTREE_LOCK_DEP_MAX];
259 + unsigned long long granted[HTREE_LOCK_MAX];
260 + unsigned long long blocked[HTREE_LOCK_MAX];
263 +static struct htree_lock_stats hl_stats;
265 +void htree_lock_stat_reset(void)
267 + memset(&hl_stats, 0, sizeof(hl_stats));
270 +void htree_lock_stat_print(int depth)
275 + printk(KERN_DEBUG "HTREE LOCK STATS:\n");
276 + for (i = 0; i < HTREE_LOCK_MAX; i++) {
277 + printk(KERN_DEBUG "[%s]: G [%10llu], B [%10llu]\n",
278 + hl_name[i], hl_stats.granted[i], hl_stats.blocked[i]);
280 + for (i = 0; i < depth; i++) {
281 + printk(KERN_DEBUG "HTREE CHILD [%d] STATS:\n", i);
282 + for (j = 0; j < HTREE_LOCK_MAX; j++) {
284 + "[%s]: G [%10llu], B [%10llu], R [%10llu]\n",
285 + hl_name[j], hl_stats.nodes[i].granted[j],
286 + hl_stats.nodes[i].blocked[j],
287 + hl_stats.nodes[i].retried[j]);
292 +#define lk_grant_inc(m) do { hl_stats.granted[m]++; } while (0)
293 +#define lk_block_inc(m) do { hl_stats.blocked[m]++; } while (0)
294 +#define ln_grant_inc(d, m) do { hl_stats.nodes[d].granted[m]++; } while (0)
295 +#define ln_block_inc(d, m) do { hl_stats.nodes[d].blocked[m]++; } while (0)
296 +#define ln_retry_inc(d, m) do { hl_stats.nodes[d].retried[m]++; } while (0)
297 +#define ln_event_inc(d) do { hl_stats.nodes[d].events++; } while (0)
301 +void htree_lock_stat_reset(void) {}
302 +void htree_lock_stat_print(int depth) {}
304 +#define lk_grant_inc(m) do {} while (0)
305 +#define lk_block_inc(m) do {} while (0)
306 +#define ln_grant_inc(d, m) do {} while (0)
307 +#define ln_block_inc(d, m) do {} while (0)
308 +#define ln_retry_inc(d, m) do {} while (0)
309 +#define ln_event_inc(d) do {} while (0)
313 +EXPORT_SYMBOL(htree_lock_stat_reset);
314 +EXPORT_SYMBOL(htree_lock_stat_print);
316 +#define HTREE_DEP_ROOT (-1)
318 +#define htree_spin_lock(lhead, dep) \
319 + bit_spin_lock((dep) + 1, &(lhead)->lh_lock)
320 +#define htree_spin_unlock(lhead, dep) \
321 + bit_spin_unlock((dep) + 1, &(lhead)->lh_lock)
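A note on the bit-lock encoding above: lh_lock is a single unsigned long used
as an array of independent bit spinlocks, where HTREE_DEP_ROOT (-1) maps to
bit 0 and child level @dep maps to bit dep + 1. For example:

	htree_spin_lock(lhead, HTREE_DEP_ROOT); /* bit_spin_lock(0, &lh_lock) */
	htree_spin_lock(lhead, 2);              /* bit_spin_lock(3, &lh_lock) */

So the root lock and each level's lock can be taken and released without
contending with one another.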
323 +#define htree_key_event_ignore(child, ln) \
324 + (!((child)->lc_events & (1 << (ln)->ln_mode)))
327 +htree_key_list_empty(struct htree_lock_node *ln)
329 + return list_empty(&ln->ln_major_list) && list_empty(&ln->ln_minor_list);
333 +htree_key_list_del_init(struct htree_lock_node *ln)
335 + struct htree_lock_node *tmp = NULL;
337 + if (!list_empty(&ln->ln_minor_list)) {
338 + tmp = list_entry(ln->ln_minor_list.next,
339 + struct htree_lock_node, ln_minor_list);
340 + list_del_init(&ln->ln_minor_list);
343 + if (list_empty(&ln->ln_major_list))
346 + if (tmp == NULL) { /* not on minor key list */
347 + list_del_init(&ln->ln_major_list);
349 + BUG_ON(!list_empty(&tmp->ln_major_list));
350 + list_replace_init(&ln->ln_major_list, &tmp->ln_major_list);
355 +htree_key_list_replace_init(struct htree_lock_node *old,
356 + struct htree_lock_node *new)
358 + if (!list_empty(&old->ln_major_list))
359 + list_replace_init(&old->ln_major_list, &new->ln_major_list);
361 + if (!list_empty(&old->ln_minor_list))
362 + list_replace_init(&old->ln_minor_list, &new->ln_minor_list);
366 +htree_key_event_enqueue(struct htree_lock_child *child,
367 + struct htree_lock_node *ln, int dep, void *event)
369 + struct htree_lock_node *tmp;
371 + /* NB: ALWAYS called holding lhead::lh_lock(dep) */
372 + BUG_ON(ln->ln_mode == HTREE_LOCK_NL);
373 + if (event == NULL || htree_key_event_ignore(child, ln))
376 + /* shouldn't be a very long list */
377 + list_for_each_entry(tmp, &ln->ln_alive_list, ln_alive_list) {
378 + if (tmp->ln_mode == HTREE_LOCK_NL) {
380 + if (child->lc_callback != NULL)
381 + child->lc_callback(tmp->ln_ev_target, event);
387 +htree_node_lock_enqueue(struct htree_lock *newlk, struct htree_lock *curlk,
388 + unsigned dep, int wait, void *event)
390 + struct htree_lock_child *child = &newlk->lk_head->lh_children[dep];
391 + struct htree_lock_node *newln = &newlk->lk_nodes[dep];
392 + struct htree_lock_node *curln = &curlk->lk_nodes[dep];
394 + /* NB: ALWAYS called holding lhead::lh_lock(dep) */
395 +	/* NB: we only expect PR/PW lock modes here; only these two modes are
396 +	 * allowed for htree_node_lock (asserted in htree_node_lock_internal).
397 +	 * NL is only used for listeners; users can't directly request NL mode */
398 + if ((curln->ln_mode == HTREE_LOCK_NL) ||
399 + (curln->ln_mode != HTREE_LOCK_PW &&
400 + newln->ln_mode != HTREE_LOCK_PW)) {
401 + /* no conflict, attach it on granted list of @curlk */
402 + if (curln->ln_mode != HTREE_LOCK_NL) {
403 + list_add(&newln->ln_granted_list,
404 + &curln->ln_granted_list);
406 + /* replace key owner */
407 + htree_key_list_replace_init(curln, newln);
410 + list_add(&newln->ln_alive_list, &curln->ln_alive_list);
411 + htree_key_event_enqueue(child, newln, dep, event);
412 + ln_grant_inc(dep, newln->ln_mode);
413 + return 1; /* still hold lh_lock */
416 + if (!wait) { /* can't grant and don't want to wait */
417 + ln_retry_inc(dep, newln->ln_mode);
418 + newln->ln_mode = HTREE_LOCK_INVAL;
419 + return -1; /* don't wait and just return -1 */
422 + newlk->lk_task = current;
423 + set_current_state(TASK_UNINTERRUPTIBLE);
424 + /* conflict, attach it on blocked list of curlk */
425 + list_add_tail(&newln->ln_blocked_list, &curln->ln_blocked_list);
426 + list_add(&newln->ln_alive_list, &curln->ln_alive_list);
427 + ln_block_inc(dep, newln->ln_mode);
429 + htree_spin_unlock(newlk->lk_head, dep);
430 + /* wait to be given the lock */
431 + if (newlk->lk_task != NULL)
433 + /* granted, no doubt, wake up will set me RUNNING */
434 + if (event == NULL || htree_key_event_ignore(child, newln))
435 + return 0; /* granted without lh_lock */
437 + htree_spin_lock(newlk->lk_head, dep);
438 + htree_key_event_enqueue(child, newln, dep, event);
439 + return 1; /* still hold lh_lock */
443 + * get PR/PW access to a particular tree-node according to @dep and @key,
444 + * it will return -1 if @wait is false and the lock can't be granted at once.
445 + * All listeners (HTREE_LOCK_NL) on @dep with the same @key will get
446 + * @event if it's not NULL.
447 + * NB: ALWAYS called holding lhead::lh_lock
450 +htree_node_lock_internal(struct htree_lock_head *lhead, struct htree_lock *lck,
451 + htree_lock_mode_t mode, u32 key, unsigned dep,
452 + int wait, void *event)
455 + struct htree_lock *tmp;
456 + struct htree_lock *tmp2;
463 + BUG_ON(mode != HTREE_LOCK_PW && mode != HTREE_LOCK_PR);
464 + BUG_ON(htree_node_is_granted(lck, dep));
466 + key = hash_long(key, lhead->lh_hbits);
468 + mi_bits = lhead->lh_hbits >> 1;
469 + ma_bits = lhead->lh_hbits - mi_bits;
471 + lck->lk_nodes[dep].ln_major_key = major = key & ((1U << ma_bits) - 1);
472 + lck->lk_nodes[dep].ln_minor_key = minor = key >> ma_bits;
473 + lck->lk_nodes[dep].ln_mode = mode;
476 + * The major key list is an ordered list, so searches are started
477 + * at the end of the list that is numerically closer to major_key,
478 + * so at most half of the list will be walked (for well-distributed
479 + * keys). The list traversal aborts early if the expected key
480 + * location is passed.
482 + reverse = (major >= (1 << (ma_bits - 1)));
485 + list_for_each_entry_reverse(tmp,
486 + &lhead->lh_children[dep].lc_list,
487 + lk_nodes[dep].ln_major_list) {
488 + if (tmp->lk_nodes[dep].ln_major_key == major) {
491 + } else if (tmp->lk_nodes[dep].ln_major_key < major) {
492 + /* attach _after_ @tmp */
493 + list_add(&lck->lk_nodes[dep].ln_major_list,
494 + &tmp->lk_nodes[dep].ln_major_list);
495 + goto out_grant_major;
499 + list_add(&lck->lk_nodes[dep].ln_major_list,
500 + &lhead->lh_children[dep].lc_list);
501 + goto out_grant_major;
504 + list_for_each_entry(tmp, &lhead->lh_children[dep].lc_list,
505 + lk_nodes[dep].ln_major_list) {
506 + if (tmp->lk_nodes[dep].ln_major_key == major) {
509 + } else if (tmp->lk_nodes[dep].ln_major_key > major) {
510 + /* insert _before_ @tmp */
511 + list_add_tail(&lck->lk_nodes[dep].ln_major_list,
512 + &tmp->lk_nodes[dep].ln_major_list);
513 + goto out_grant_major;
517 + list_add_tail(&lck->lk_nodes[dep].ln_major_list,
518 + &lhead->lh_children[dep].lc_list);
519 + goto out_grant_major;
524 + * NB: minor_key list doesn't have a "head", @list is just a
525 + * temporary stub for helping list searching, make sure it's removed
527 + * minor_key list is an ordered list too.
529 + list_add_tail(&list, &tmp->lk_nodes[dep].ln_minor_list);
531 + reverse = (minor >= (1 << (mi_bits - 1)));
534 + list_for_each_entry_reverse(tmp2, &list,
535 + lk_nodes[dep].ln_minor_list) {
536 + if (tmp2->lk_nodes[dep].ln_minor_key == minor) {
539 + } else if (tmp2->lk_nodes[dep].ln_minor_key < minor) {
540 + /* attach _after_ @tmp2 */
541 + list_add(&lck->lk_nodes[dep].ln_minor_list,
542 + &tmp2->lk_nodes[dep].ln_minor_list);
543 + goto out_grant_minor;
547 + list_add(&lck->lk_nodes[dep].ln_minor_list, &list);
550 + list_for_each_entry(tmp2, &list,
551 + lk_nodes[dep].ln_minor_list) {
552 + if (tmp2->lk_nodes[dep].ln_minor_key == minor) {
555 + } else if (tmp2->lk_nodes[dep].ln_minor_key > minor) {
556 + /* insert _before_ @tmp2 */
557 + list_add_tail(&lck->lk_nodes[dep].ln_minor_list,
558 + &tmp2->lk_nodes[dep].ln_minor_list);
559 + goto out_grant_minor;
563 + list_add_tail(&lck->lk_nodes[dep].ln_minor_list, &list);
567 + if (list.next == &lck->lk_nodes[dep].ln_minor_list) {
568 + /* new lock @lck is the first one on minor_key list, which
569 + * means it has the smallest minor_key and it should
570 + * replace @tmp as minor_key owner */
571 + list_replace_init(&tmp->lk_nodes[dep].ln_major_list,
572 + &lck->lk_nodes[dep].ln_major_list);
574 + /* remove the temporary head */
578 + ln_grant_inc(dep, lck->lk_nodes[dep].ln_mode);
579 + return 1; /* granted with holding lh_lock */
582 +	list_del(&list); /* remove temporary head */
583 + return htree_node_lock_enqueue(lck, tmp2, dep, wait, event);
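As a worked example of the major/minor split above (a reading aid, not part of
the patch): with the default lh_hbits = 14, mi_bits = 7 and ma_bits = 7, so for
a hashed key k = 0x2a95 we get major = k & 0x7f = 0x15 and minor = k >> 7 =
0x55. Locks with equal major keys share one node on the major list and are
ordered among themselves on the minor list; both lists are searched from
whichever end is numerically closer, so well-distributed keys walk at most half
of each list.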
587 + * release the key of @lck at level @dep, and grant any blocked locks.
588 + * the caller will still listen on @key if @event is not NULL, which means
589 + * the caller can see an event (via event_cb) while granting any lock with
590 + * the same key at level @dep.
591 + * NB: ALWAYS called holding lhead::lh_lock
592 + * NB: listener will not block anyone because listening mode is HTREE_LOCK_NL
595 +htree_node_unlock_internal(struct htree_lock_head *lhead,
596 + struct htree_lock *curlk, unsigned dep, void *event)
598 + struct htree_lock_node *curln = &curlk->lk_nodes[dep];
599 + struct htree_lock *grtlk = NULL;
600 + struct htree_lock_node *grtln;
601 + struct htree_lock *poslk;
602 + struct htree_lock *tmplk;
604 + if (!htree_node_is_granted(curlk, dep))
607 + if (!list_empty(&curln->ln_granted_list)) {
608 + /* there is another granted lock */
609 + grtlk = list_entry(curln->ln_granted_list.next,
611 + lk_nodes[dep].ln_granted_list);
612 + list_del_init(&curln->ln_granted_list);
615 + if (grtlk == NULL && !list_empty(&curln->ln_blocked_list)) {
617 + * @curlk is the only granted lock, so we confirmed:
618 + * a) curln is key owner (attached on major/minor_list),
619 + * so if there is any blocked lock, it should be attached
620 + * on curln->ln_blocked_list
621 + * b) we always can grant the first blocked lock
623 + grtlk = list_entry(curln->ln_blocked_list.next,
625 + lk_nodes[dep].ln_blocked_list);
626 + BUG_ON(grtlk->lk_task == NULL);
627 + wake_up_process(grtlk->lk_task);
630 + if (event != NULL &&
631 + lhead->lh_children[dep].lc_events != HTREE_EVENT_DISABLE) {
632 + curln->ln_ev_target = event;
633 + curln->ln_mode = HTREE_LOCK_NL; /* listen! */
635 + curln->ln_mode = HTREE_LOCK_INVAL;
638 + if (grtlk == NULL) { /* I must be the only one locking this key */
639 + struct htree_lock_node *tmpln;
641 + BUG_ON(htree_key_list_empty(curln));
643 + if (curln->ln_mode == HTREE_LOCK_NL) /* listening */
646 + /* not listening */
647 + if (list_empty(&curln->ln_alive_list)) { /* no more listener */
648 + htree_key_list_del_init(curln);
652 + tmpln = list_entry(curln->ln_alive_list.next,
653 + struct htree_lock_node, ln_alive_list);
655 + BUG_ON(tmpln->ln_mode != HTREE_LOCK_NL);
657 + htree_key_list_replace_init(curln, tmpln);
658 + list_del_init(&curln->ln_alive_list);
663 + /* have a granted lock */
664 + grtln = &grtlk->lk_nodes[dep];
665 + if (!list_empty(&curln->ln_blocked_list)) {
666 + /* only key owner can be on both lists */
667 + BUG_ON(htree_key_list_empty(curln));
669 + if (list_empty(&grtln->ln_blocked_list)) {
670 + list_add(&grtln->ln_blocked_list,
671 + &curln->ln_blocked_list);
673 + list_del_init(&curln->ln_blocked_list);
676 + * NB: this is the tricky part:
677 + * We have only two modes for child-lock (PR and PW), also,
678 + * only owner of the key (attached on major/minor_list) can be on
679 + * both blocked_list and granted_list, so @grtlk must be one
680 + * of these two cases:
682 + * a) @grtlk is taken from granted_list, which means we've granted
683 + * more than one lock so @grtlk has to be PR, the first blocked
684 + * lock must be PW and we can't grant it at all.
685 + *    So even if @grtlk is not the owner of the key (empty blocked_list),
686 + *    we don't care because we can't grant any lock.
687 + * b) we just grant a new lock which is taken from head of blocked
688 + * list, and it should be the first granted lock, and it should
689 + * be the first one linked on blocked_list.
691 + * Either way, we can get the correct result by iterating the
692 + * blocked_list of @grtlk, and we don't have to bother finding
693 + * the owner of the current key.
695 + list_for_each_entry_safe(poslk, tmplk, &grtln->ln_blocked_list,
696 + lk_nodes[dep].ln_blocked_list) {
697 + if (grtlk->lk_nodes[dep].ln_mode == HTREE_LOCK_PW ||
698 + poslk->lk_nodes[dep].ln_mode == HTREE_LOCK_PW)
700 + /* grant all readers */
701 + list_del_init(&poslk->lk_nodes[dep].ln_blocked_list);
702 + list_add(&poslk->lk_nodes[dep].ln_granted_list,
703 + &grtln->ln_granted_list);
705 + BUG_ON(poslk->lk_task == NULL);
706 + wake_up_process(poslk->lk_task);
709 + /* if @curln is the owner of this key, replace it with @grtln */
710 + if (!htree_key_list_empty(curln))
711 + htree_key_list_replace_init(curln, grtln);
713 + if (curln->ln_mode == HTREE_LOCK_INVAL)
714 + list_del_init(&curln->ln_alive_list);
718 + * it's just a wrapper of htree_node_lock_internal; it returns 1 when granted
719 + * and 0 only if @wait is false and the lock can't be granted immediately
722 +htree_node_lock_try(struct htree_lock *lck, htree_lock_mode_t mode,
723 + u32 key, unsigned dep, int wait, void *event)
725 + struct htree_lock_head *lhead = lck->lk_head;
728 + BUG_ON(dep >= lck->lk_depth);
729 + BUG_ON(lck->lk_mode == HTREE_LOCK_INVAL);
731 + htree_spin_lock(lhead, dep);
732 + rc = htree_node_lock_internal(lhead, lck, mode, key, dep, wait, event);
734 + htree_spin_unlock(lhead, dep);
737 +EXPORT_SYMBOL(htree_node_lock_try);
739 +/* it's a wrapper of htree_node_unlock_internal */
741 +htree_node_unlock(struct htree_lock *lck, unsigned dep, void *event)
743 + struct htree_lock_head *lhead = lck->lk_head;
745 + BUG_ON(dep >= lck->lk_depth);
746 + BUG_ON(lck->lk_mode == HTREE_LOCK_INVAL);
748 + htree_spin_lock(lhead, dep);
749 + htree_node_unlock_internal(lhead, lck, dep, event);
750 + htree_spin_unlock(lhead, dep);
752 +EXPORT_SYMBOL(htree_node_unlock);
754 +/* stop listening on child-lock level @dep */
756 +htree_node_stop_listen(struct htree_lock *lck, unsigned dep)
758 + struct htree_lock_node *ln = &lck->lk_nodes[dep];
759 + struct htree_lock_node *tmp;
761 + BUG_ON(htree_node_is_granted(lck, dep));
762 + BUG_ON(!list_empty(&ln->ln_blocked_list));
763 + BUG_ON(!list_empty(&ln->ln_granted_list));
765 + if (!htree_node_is_listening(lck, dep))
768 + htree_spin_lock(lck->lk_head, dep);
769 + ln->ln_mode = HTREE_LOCK_INVAL;
770 + ln->ln_ev_target = NULL;
772 + if (htree_key_list_empty(ln)) { /* not owner */
773 + list_del_init(&ln->ln_alive_list);
777 + /* I'm the owner... */
778 + if (list_empty(&ln->ln_alive_list)) { /* no more listener */
779 + htree_key_list_del_init(ln);
783 + tmp = list_entry(ln->ln_alive_list.next,
784 + struct htree_lock_node, ln_alive_list);
786 + BUG_ON(tmp->ln_mode != HTREE_LOCK_NL);
787 + htree_key_list_replace_init(ln, tmp);
788 + list_del_init(&ln->ln_alive_list);
790 + htree_spin_unlock(lck->lk_head, dep);
792 +EXPORT_SYMBOL(htree_node_stop_listen);
794 +/* release all child-locks if we have any */
796 +htree_node_release_all(struct htree_lock *lck)
800 + for (i = 0; i < lck->lk_depth; i++) {
801 + if (htree_node_is_granted(lck, i))
802 + htree_node_unlock(lck, i, NULL);
803 + else if (htree_node_is_listening(lck, i))
804 + htree_node_stop_listen(lck, i);
809 + * obtain htree lock; it may block if there's a conflict with any
810 + * granted or blocked lock and @wait is true.
811 + * NB: ALWAYS called holding lhead::lh_lock
814 +htree_lock_internal(struct htree_lock *lck, int wait)
816 + struct htree_lock_head *lhead = lck->lk_head;
821 + for (i = 0; i < HTREE_LOCK_MAX; i++) {
822 + if (lhead->lh_ngranted[i] != 0)
824 + if (lhead->lh_nblocked[i] != 0)
827 + if ((htree_lock_compat[lck->lk_mode] & granted) != granted ||
828 + (htree_lock_compat[lck->lk_mode] & blocked) != blocked) {
829 +		/* block the current lock even if it only conflicts with another
830 +		 * blocked lock, so locks like EX won't starve */
833 + lhead->lh_nblocked[lck->lk_mode]++;
834 + lk_block_inc(lck->lk_mode);
836 + lck->lk_task = current;
837 + list_add_tail(&lck->lk_blocked_list, &lhead->lh_blocked_list);
840 + set_current_state(TASK_UNINTERRUPTIBLE);
841 + htree_spin_unlock(lhead, HTREE_DEP_ROOT);
842 + /* wait to be given the lock */
843 + if (lck->lk_task != NULL)
845 +	/* granted, no doubt. wake up will set me RUNNING.
846 +	 * Since the thread could have been woken up accidentally,
847 +	 * we need to check again whether the lock was granted. */
848 + if (!list_empty(&lck->lk_blocked_list)) {
849 + htree_spin_lock(lhead, HTREE_DEP_ROOT);
850 + if (list_empty(&lck->lk_blocked_list)) {
851 + htree_spin_unlock(lhead, HTREE_DEP_ROOT);
856 + return 0; /* without lh_lock */
858 + lhead->lh_ngranted[lck->lk_mode]++;
859 + lk_grant_inc(lck->lk_mode);
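A worked example of the compatibility test above, using the htree_lock_compat[]
table from the top of this file (a reading aid, not part of the patch):

	/* one EX lock is granted, a CR request arrives:
	 *   granted = HTREE_LOCK_BIT_EX;
	 *   htree_lock_compat[HTREE_LOCK_CR] has no EX bit,
	 *   so (compat & granted) != granted  => the CR blocks.
	 * one CR lock is granted, a PW request arrives:
	 *   htree_lock_compat[HTREE_LOCK_PW] == HTREE_LOCK_BIT_CR,
	 *   so (compat & granted) == granted  => the PW is granted.
	 */

The same test is applied to the blocked bitmask, which is what keeps a late CR
from overtaking an already-blocked EX.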
863 +/* release htree lock. NB: ALWAYS called holding lhead::lh_lock */
865 +htree_unlock_internal(struct htree_lock *lck)
867 + struct htree_lock_head *lhead = lck->lk_head;
868 + struct htree_lock *tmp;
869 + struct htree_lock *tmp2;
873 + BUG_ON(lhead->lh_ngranted[lck->lk_mode] == 0);
875 + lhead->lh_ngranted[lck->lk_mode]--;
876 + lck->lk_mode = HTREE_LOCK_INVAL;
878 + for (i = 0; i < HTREE_LOCK_MAX; i++) {
879 + if (lhead->lh_ngranted[i] != 0)
882 + list_for_each_entry_safe(tmp, tmp2,
883 + &lhead->lh_blocked_list, lk_blocked_list) {
884 + /* conflict with any granted lock? */
885 + if ((htree_lock_compat[tmp->lk_mode] & granted) != granted)
888 + list_del_init(&tmp->lk_blocked_list);
890 + BUG_ON(lhead->lh_nblocked[tmp->lk_mode] == 0);
892 + lhead->lh_nblocked[tmp->lk_mode]--;
893 + lhead->lh_ngranted[tmp->lk_mode]++;
894 + granted |= 1 << tmp->lk_mode;
896 + BUG_ON(tmp->lk_task == NULL);
897 + wake_up_process(tmp->lk_task);
901 +/* it's a wrapper of htree_lock_internal and an exported interface.
902 + * It always returns 1 with the lock granted if @wait is true; it can return
903 + * 0 if @wait is false and the locking request can't be granted immediately */
905 +htree_lock_try(struct htree_lock *lck, struct htree_lock_head *lhead,
906 + htree_lock_mode_t mode, int wait)
910 + BUG_ON(lck->lk_depth > lhead->lh_depth);
911 + BUG_ON(lck->lk_head != NULL);
912 + BUG_ON(lck->lk_task != NULL);
914 + lck->lk_head = lhead;
915 + lck->lk_mode = mode;
917 + htree_spin_lock(lhead, HTREE_DEP_ROOT);
918 + rc = htree_lock_internal(lck, wait);
920 + htree_spin_unlock(lhead, HTREE_DEP_ROOT);
923 +EXPORT_SYMBOL(htree_lock_try);
925 +/* it's a wrapper of htree_unlock_internal and an exported interface.
926 + * It will release all htree_node_locks and the htree_lock */
928 +htree_unlock(struct htree_lock *lck)
930 + BUG_ON(lck->lk_head == NULL);
931 + BUG_ON(lck->lk_mode == HTREE_LOCK_INVAL);
933 + htree_node_release_all(lck);
935 + htree_spin_lock(lck->lk_head, HTREE_DEP_ROOT);
936 + htree_unlock_internal(lck);
937 + htree_spin_unlock(lck->lk_head, HTREE_DEP_ROOT);
938 + lck->lk_head = NULL;
939 + lck->lk_task = NULL;
941 +EXPORT_SYMBOL(htree_unlock);
943 +/* change lock mode */
945 +htree_change_mode(struct htree_lock *lck, htree_lock_mode_t mode)
947 + BUG_ON(lck->lk_mode == HTREE_LOCK_INVAL);
948 + lck->lk_mode = mode;
950 +EXPORT_SYMBOL(htree_change_mode);
952 +/* release htree lock, and lock it again with a new mode.
953 + * This function will first release all htree_node_locks and the htree_lock,
954 + * then try to gain the htree_lock with the new @mode.
955 + * It always returns 1 with the lock granted if @wait is true; it can return
956 + * 0 if @wait is false and the locking request can't be granted immediately */
958 +htree_change_lock_try(struct htree_lock *lck, htree_lock_mode_t mode, int wait)
960 + struct htree_lock_head *lhead = lck->lk_head;
963 + BUG_ON(lhead == NULL);
964 + BUG_ON(lck->lk_mode == mode);
965 + BUG_ON(lck->lk_mode == HTREE_LOCK_INVAL || mode == HTREE_LOCK_INVAL);
967 + htree_node_release_all(lck);
969 + htree_spin_lock(lhead, HTREE_DEP_ROOT);
970 + htree_unlock_internal(lck);
971 + lck->lk_mode = mode;
972 + rc = htree_lock_internal(lck, wait);
974 + htree_spin_unlock(lhead, HTREE_DEP_ROOT);
977 +EXPORT_SYMBOL(htree_change_lock_try);
979 +/* create a htree_lock head with @depth levels (number of child-locks),
980 + * it is a per-resource structure */
981 +struct htree_lock_head *
982 +htree_lock_head_alloc(unsigned depth, unsigned hbits, unsigned priv)
984 + struct htree_lock_head *lhead;
987 + if (depth > HTREE_LOCK_DEP_MAX) {
988 + printk(KERN_ERR "%d is larger than max htree_lock depth %d\n",
989 + depth, HTREE_LOCK_DEP_MAX);
993 + lhead = kzalloc(offsetof(struct htree_lock_head,
994 + lh_children[depth]) + priv, GFP_NOFS);
998 + if (hbits < HTREE_HBITS_MIN)
999 + lhead->lh_hbits = HTREE_HBITS_MIN;
1000 + else if (hbits > HTREE_HBITS_MAX)
1001 + lhead->lh_hbits = HTREE_HBITS_MAX;
1003 + lhead->lh_lock = 0;
1004 + lhead->lh_depth = depth;
1005 + INIT_LIST_HEAD(&lhead->lh_blocked_list);
1007 + lhead->lh_private = (void *)lhead +
1008 + offsetof(struct htree_lock_head, lh_children[depth]);
1011 + for (i = 0; i < depth; i++) {
1012 + INIT_LIST_HEAD(&lhead->lh_children[i].lc_list);
1013 + lhead->lh_children[i].lc_events = HTREE_EVENT_DISABLE;
1017 +EXPORT_SYMBOL(htree_lock_head_alloc);
1019 +/* free the htree_lock head */
1021 +htree_lock_head_free(struct htree_lock_head *lhead)
1025 + BUG_ON(!list_empty(&lhead->lh_blocked_list));
1026 + for (i = 0; i < lhead->lh_depth; i++)
1027 + BUG_ON(!list_empty(&lhead->lh_children[i].lc_list));
1030 +EXPORT_SYMBOL(htree_lock_head_free);
1032 +/* register event callback for @events of child-lock at level @dep */
1034 +htree_lock_event_attach(struct htree_lock_head *lhead, unsigned dep,
1035 + unsigned events, htree_event_cb_t callback)
1037 + BUG_ON(lhead->lh_depth <= dep);
1038 + lhead->lh_children[dep].lc_events = events;
1039 + lhead->lh_children[dep].lc_callback = callback;
1041 +EXPORT_SYMBOL(htree_lock_event_attach);
1043 +/* allocate a htree_lock, which is a per-thread structure; @pbytes is the
1044 + * number of extra bytes of private data for the caller */
1045 +struct htree_lock *
1046 +htree_lock_alloc(unsigned depth, unsigned pbytes)
1048 + struct htree_lock *lck;
1049 + int i = offsetof(struct htree_lock, lk_nodes[depth]);
1051 + if (depth > HTREE_LOCK_DEP_MAX) {
1052 + printk(KERN_ERR "%d is larger than max htree_lock depth %d\n",
1053 + depth, HTREE_LOCK_DEP_MAX);
1056 + lck = kzalloc(i + pbytes, GFP_NOFS);
1061 + lck->lk_private = (void *)lck + i;
1062 + lck->lk_mode = HTREE_LOCK_INVAL;
1063 + lck->lk_depth = depth;
1064 + INIT_LIST_HEAD(&lck->lk_blocked_list);
1066 + for (i = 0; i < depth; i++) {
1067 + struct htree_lock_node *node = &lck->lk_nodes[i];
1069 + node->ln_mode = HTREE_LOCK_INVAL;
1070 + INIT_LIST_HEAD(&node->ln_major_list);
1071 + INIT_LIST_HEAD(&node->ln_minor_list);
1072 + INIT_LIST_HEAD(&node->ln_alive_list);
1073 + INIT_LIST_HEAD(&node->ln_blocked_list);
1074 + INIT_LIST_HEAD(&node->ln_granted_list);
1079 +EXPORT_SYMBOL(htree_lock_alloc);
1081 +/* free a htree_lock */
1083 +htree_lock_free(struct htree_lock *lck)
1085 + BUG_ON(lck->lk_mode != HTREE_LOCK_INVAL);
1088 +EXPORT_SYMBOL(htree_lock_free);
1089 Index: linux-2.6.32-504.3.3.el6.x86_64/fs/ext4/ext4.h
1090 ===================================================================
1091 --- linux-2.6.32-504.3.3.el6.x86_64.orig/fs/ext4/ext4.h
1092 +++ linux-2.6.32-504.3.3.el6.x86_64/fs/ext4/ext4.h
1094 #include <linux/mutex.h>
1095 #include <linux/timer.h>
1096 #include <linux/wait.h>
1097 +#include <linux/htree_lock.h>
1098 #include <linux/blockgroup_lock.h>
1099 #include <linux/percpu_counter.h>
1101 @@ -1625,6 +1626,71 @@ ext4_dir_htree_level(struct super_block
1102 EXT4_HTREE_LEVEL : EXT4_HTREE_LEVEL_COMPAT;
1105 +/* assume name-hash is protected by upper layer */
1106 +#define EXT4_HTREE_LOCK_HASH 0
1108 +enum ext4_pdo_lk_types {
1109 +#if EXT4_HTREE_LOCK_HASH
1112 + EXT4_LK_DX, /* index block */
1113 + EXT4_LK_DE, /* directory entry block */
1114 + EXT4_LK_SPIN, /* spinlock */
1118 +/* read-only bit */
1119 +#define EXT4_LB_RO(b) (1 << (b))
1120 +/* read + write, high bits for writer */
1121 +#define EXT4_LB_RW(b) ((1 << (b)) | (1 << (EXT4_LK_MAX + (b))))
1123 +enum ext4_pdo_lock_bits {
1124 + /* DX lock bits */
1125 + EXT4_LB_DX_RO = EXT4_LB_RO(EXT4_LK_DX),
1126 + EXT4_LB_DX = EXT4_LB_RW(EXT4_LK_DX),
1127 + /* DE lock bits */
1128 + EXT4_LB_DE_RO = EXT4_LB_RO(EXT4_LK_DE),
1129 + EXT4_LB_DE = EXT4_LB_RW(EXT4_LK_DE),
1130 + /* DX spinlock bits */
1131 + EXT4_LB_SPIN_RO = EXT4_LB_RO(EXT4_LK_SPIN),
1132 + EXT4_LB_SPIN = EXT4_LB_RW(EXT4_LK_SPIN),
1133 + /* accurate searching */
1134 + EXT4_LB_EXACT = EXT4_LB_RO(EXT4_LK_MAX << 1),
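As a reading aid for the bit layout above (assuming, as in the full patch, that
EXT4_LK_MAX directly follows EXT4_LK_SPIN in ext4_pdo_lk_types, i.e. with the
hash disabled EXT4_LK_DX = 0, EXT4_LK_DE = 1, EXT4_LK_SPIN = 2, EXT4_LK_MAX = 3):

	/* EXT4_LB_DX_RO == 0x01   -- 1 << 0
	 * EXT4_LB_DX    == 0x09   -- (1 << 0) | (1 << 3)
	 * EXT4_LB_DE_RO == 0x02   -- 1 << 1
	 * EXT4_LB_DE    == 0x12   -- (1 << 1) | (1 << 4)
	 * EXT4_LB_EXACT == 0x40   -- 1 << (3 << 1)
	 */

The low bit selects the child-lock level and the high bit marks write intent,
which is how ext4_htree_node_lock() in namei.c later recovers both from a
single mask.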
1137 +enum ext4_pdo_lock_opc {
1139 + EXT4_HLOCK_READDIR = (EXT4_LB_DE_RO | EXT4_LB_DX_RO),
1140 + EXT4_HLOCK_LOOKUP = (EXT4_LB_DE_RO | EXT4_LB_SPIN_RO |
1142 + EXT4_HLOCK_DEL = (EXT4_LB_DE | EXT4_LB_SPIN_RO |
1144 + EXT4_HLOCK_ADD = (EXT4_LB_DE | EXT4_LB_SPIN_RO),
1147 + EXT4_HLOCK_LOOKUP_SAFE = (EXT4_LB_DE_RO | EXT4_LB_DX_RO |
1149 + EXT4_HLOCK_DEL_SAFE = (EXT4_LB_DE | EXT4_LB_DX_RO | EXT4_LB_EXACT),
1150 + EXT4_HLOCK_SPLIT = (EXT4_LB_DE | EXT4_LB_DX | EXT4_LB_SPIN),
1153 +extern struct htree_lock_head *ext4_htree_lock_head_alloc(unsigned hbits);
1154 +#define ext4_htree_lock_head_free(lhead) htree_lock_head_free(lhead)
1156 +extern struct htree_lock *ext4_htree_lock_alloc(void);
1157 +#define ext4_htree_lock_free(lck) htree_lock_free(lck)
1159 +extern void ext4_htree_lock(struct htree_lock *lck,
1160 + struct htree_lock_head *lhead,
1161 + struct inode *dir, unsigned flags);
1162 +#define ext4_htree_unlock(lck) htree_unlock(lck)
1164 +extern struct buffer_head * __ext4_find_entry(struct inode *dir,
1165 + const struct qstr *d_name,
1166 + struct ext4_dir_entry_2 **res_dir,
1167 + struct htree_lock *lck);
1168 +extern int __ext4_add_entry(handle_t *handle, struct dentry *dentry,
1169 + struct inode *inode, struct htree_lock *lck);
1170 void ext4_get_group_no_and_offset(struct super_block *sb, ext4_fsblk_t blocknr,
1171 ext4_group_t *blockgrpp, ext4_grpblk_t *offsetp);
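A minimal sketch of how a caller outside ext4 (Lustre's osd layer, in the
pdirops use case) is expected to drive the API declared above. The
htree_lock_head is assumed to be allocated once per directory by the caller via
ext4_htree_lock_head_alloc(); pdo_lookup and its arguments are illustrative,
only the ext4_htree_* helpers come from this patch:

	static struct buffer_head *
	pdo_lookup(struct inode *dir, const struct qstr *name,
		   struct ext4_dir_entry_2 **de, struct htree_lock_head *lhead)
	{
		struct buffer_head *bh;
		struct htree_lock *lck = ext4_htree_lock_alloc();

		if (lck == NULL)
			return NULL;
		/* shared lock on the directory; the per-block PDO locks are
		 * taken inside __ext4_find_entry() as needed */
		ext4_htree_lock(lck, lhead, dir, EXT4_HLOCK_LOOKUP);
		bh = __ext4_find_entry(dir, name, de, lck);
		ext4_htree_unlock(lck);
		ext4_htree_lock_free(lck);
		return bh;
	}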
1173 Index: linux-2.6.32-504.3.3.el6.x86_64/fs/ext4/namei.c
1174 ===================================================================
1175 --- linux-2.6.32-504.3.3.el6.x86_64.orig/fs/ext4/namei.c
1176 +++ linux-2.6.32-504.3.3.el6.x86_64/fs/ext4/namei.c
1177 @@ -176,7 +176,7 @@ static struct dx_frame *dx_probe(const s
1179 struct dx_hash_info *hinfo,
1180 struct dx_frame *frame,
1182 + struct htree_lock *lck, int *err);
1183 static void dx_release(struct dx_frame *frames);
1184 static int dx_make_map(struct ext4_dir_entry_2 *de, unsigned blocksize,
1185 struct dx_hash_info *hinfo, struct dx_map_entry map[]);
1186 @@ -189,13 +189,13 @@ static void dx_insert_block(struct dx_fr
1187 static int ext4_htree_next_block(struct inode *dir, __u32 hash,
1188 struct dx_frame *frame,
1189 struct dx_frame *frames,
1190 - __u32 *start_hash);
1191 + __u32 *start_hash, struct htree_lock *lck);
1192 static struct buffer_head * ext4_dx_find_entry(struct inode *dir,
1193 const struct qstr *d_name,
1194 struct ext4_dir_entry_2 **res_dir,
1196 + struct htree_lock *lck, int *err);
1197 static int ext4_dx_add_entry(handle_t *handle, struct dentry *dentry,
1198 - struct inode *inode);
1199 + struct inode *inode, struct htree_lock *lck);
1202 * p is at least 6 bytes before the end of page
1203 @@ -368,6 +368,225 @@ struct stats dx_show_entries(struct dx_h
1205 #endif /* DX_DEBUG */
1207 +/* private data for htree_lock */
1208 +struct ext4_dir_lock_data {
1209 + unsigned ld_flags; /* bits-map for lock types */
1210 + unsigned ld_count; /* # entries of the last DX block */
1211 + struct dx_entry ld_at_entry; /* copy of leaf dx_entry */
1212 + struct dx_entry *ld_at; /* position of leaf dx_entry */
1215 +#define ext4_htree_lock_data(l) ((struct ext4_dir_lock_data *)(l)->lk_private)
1216 +#define ext4_find_entry(dir, name, dirent) __ext4_find_entry(dir, name, dirent, NULL)
1217 +#define ext4_add_entry(handle, dentry, inode) __ext4_add_entry(handle, dentry, inode, NULL)
1219 +/* NB: ext4_lblk_t is 32 bits so we use high bits to identify invalid blk */
1220 +#define EXT4_HTREE_NODE_CHANGED (0xcafeULL << 32)
1222 +static void ext4_htree_event_cb(void *target, void *event)
1224 + u64 *block = (u64 *)target;
1226 + if (*block == dx_get_block((struct dx_entry *)event))
1227 + *block = EXT4_HTREE_NODE_CHANGED;
1230 +struct htree_lock_head *ext4_htree_lock_head_alloc(unsigned hbits)
1232 + struct htree_lock_head *lhead;
1234 + lhead = htree_lock_head_alloc(EXT4_LK_MAX, hbits, 0);
1235 + if (lhead != NULL) {
1236 + htree_lock_event_attach(lhead, EXT4_LK_SPIN, HTREE_EVENT_WR,
1237 + ext4_htree_event_cb);
1241 +EXPORT_SYMBOL(ext4_htree_lock_head_alloc);
1243 +struct htree_lock *ext4_htree_lock_alloc(void)
1245 + return htree_lock_alloc(EXT4_LK_MAX,
1246 + sizeof(struct ext4_dir_lock_data));
1248 +EXPORT_SYMBOL(ext4_htree_lock_alloc);
1250 +static htree_lock_mode_t ext4_htree_mode(unsigned flags)
1253 + default: /* 0 or unknown flags require EX lock */
1254 + return HTREE_LOCK_EX;
1255 + case EXT4_HLOCK_READDIR:
1256 + return HTREE_LOCK_PR;
1257 + case EXT4_HLOCK_LOOKUP:
1258 + return HTREE_LOCK_CR;
1259 + case EXT4_HLOCK_DEL:
1260 + case EXT4_HLOCK_ADD:
1261 + return HTREE_LOCK_CW;
1265 +/* return PR for read-only operations, otherwise return EX */
1266 +static inline htree_lock_mode_t ext4_htree_safe_mode(unsigned flags)
1268 + int writer = (flags & EXT4_LB_DE) == EXT4_LB_DE;
1270 + /* 0 requires EX lock */
1271 + return (flags == 0 || writer) ? HTREE_LOCK_EX : HTREE_LOCK_PR;
1274 +static int ext4_htree_safe_locked(struct htree_lock *lck)
1278 + if (lck == NULL || lck->lk_mode == HTREE_LOCK_EX)
1281 + writer = (ext4_htree_lock_data(lck)->ld_flags & EXT4_LB_DE) ==
1283 + if (writer) /* all readers & writers are excluded? */
1284 + return lck->lk_mode == HTREE_LOCK_EX;
1286 + /* all writers are excluded? */
1287 + return lck->lk_mode == HTREE_LOCK_PR ||
1288 + lck->lk_mode == HTREE_LOCK_PW ||
1289 + lck->lk_mode == HTREE_LOCK_EX;
1292 +/* relock htree_lock with EX mode if it's a change operation, otherwise
1293 + * relock it with PR mode. It's a noop if PDO is disabled. */
1294 +static void ext4_htree_safe_relock(struct htree_lock *lck)
1296 + if (!ext4_htree_safe_locked(lck)) {
1297 + unsigned flags = ext4_htree_lock_data(lck)->ld_flags;
1299 + htree_change_lock(lck, ext4_htree_safe_mode(flags));
1303 +void ext4_htree_lock(struct htree_lock *lck, struct htree_lock_head *lhead,
1304 + struct inode *dir, unsigned flags)
1306 + htree_lock_mode_t mode = is_dx(dir) ? ext4_htree_mode(flags) :
1307 + ext4_htree_safe_mode(flags);
1309 + ext4_htree_lock_data(lck)->ld_flags = flags;
1310 + htree_lock(lck, lhead, mode);
1312 +	ext4_htree_safe_relock(lck); /* make sure it's safely locked */
1314 +EXPORT_SYMBOL(ext4_htree_lock);
1316 +static int ext4_htree_node_lock(struct htree_lock *lck, struct dx_entry *at,
1317 + unsigned lmask, int wait, void *ev)
1319 + u32 key = (at == NULL) ? 0 : dx_get_block(at);
1322 + /* NOOP if htree is well protected or caller doesn't require the lock */
1323 + if (ext4_htree_safe_locked(lck) ||
1324 + !(ext4_htree_lock_data(lck)->ld_flags & lmask))
1327 + mode = (ext4_htree_lock_data(lck)->ld_flags & lmask) == lmask ?
1328 + HTREE_LOCK_PW : HTREE_LOCK_PR;
1330 + if (htree_node_lock_try(lck, mode, key, ffz(~lmask), wait, ev))
1332 + if (!(lmask & EXT4_LB_SPIN)) /* not a spinlock */
1334 + cpu_relax(); /* spin until granted */
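Two details of the helper above are worth spelling out (a reading aid):
ffz(~lmask) returns the first zero bit of ~lmask, i.e. the lowest set bit of
lmask, which by construction of EXT4_LB_RO() is the child-lock level; and the
(ld_flags & lmask) == lmask test picks PW only when the caller holds both the
read and the write bit:

	/* e.g. lmask == EXT4_LB_DE == (1 << 1) | (1 << (EXT4_LK_MAX + 1)):
	 *   ffz(~lmask) == 1 == EXT4_LK_DE          -> lock at level 1
	 * (ld_flags & lmask) == lmask -> RO + RW bits -> HTREE_LOCK_PW
	 * only the RO bit set                        -> HTREE_LOCK_PR
	 */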
1338 +static int ext4_htree_node_locked(struct htree_lock *lck, unsigned lmask)
1340 + return ext4_htree_safe_locked(lck) ||
1341 + htree_node_is_granted(lck, ffz(~lmask));
1344 +static void ext4_htree_node_unlock(struct htree_lock *lck,
1345 + unsigned lmask, void *buf)
1347 +	/* NB: it's safe to call multiple times, even if it's not locked */
1348 + if (!ext4_htree_safe_locked(lck) &&
1349 + htree_node_is_granted(lck, ffz(~lmask)))
1350 + htree_node_unlock(lck, ffz(~lmask), buf);
1353 +#define ext4_htree_dx_lock(lck, key) \
1354 + ext4_htree_node_lock(lck, key, EXT4_LB_DX, 1, NULL)
1355 +#define ext4_htree_dx_lock_try(lck, key) \
1356 + ext4_htree_node_lock(lck, key, EXT4_LB_DX, 0, NULL)
1357 +#define ext4_htree_dx_unlock(lck) \
1358 + ext4_htree_node_unlock(lck, EXT4_LB_DX, NULL)
1359 +#define ext4_htree_dx_locked(lck) \
1360 + ext4_htree_node_locked(lck, EXT4_LB_DX)
1362 +static void ext4_htree_dx_need_lock(struct htree_lock *lck)
1364 + struct ext4_dir_lock_data *ld;
1366 + if (ext4_htree_safe_locked(lck))
1369 + ld = ext4_htree_lock_data(lck);
1370 + switch (ld->ld_flags) {
1373 + case EXT4_HLOCK_LOOKUP:
1374 + ld->ld_flags = EXT4_HLOCK_LOOKUP_SAFE;
1376 + case EXT4_HLOCK_DEL:
1377 + ld->ld_flags = EXT4_HLOCK_DEL_SAFE;
1379 + case EXT4_HLOCK_ADD:
1380 + ld->ld_flags = EXT4_HLOCK_SPLIT;
1385 +#define ext4_htree_de_lock(lck, key) \
1386 + ext4_htree_node_lock(lck, key, EXT4_LB_DE, 1, NULL)
1387 +#define ext4_htree_de_unlock(lck) \
1388 + ext4_htree_node_unlock(lck, EXT4_LB_DE, NULL)
1390 +#define ext4_htree_spin_lock(lck, key, event) \
1391 + ext4_htree_node_lock(lck, key, EXT4_LB_SPIN, 0, event)
1392 +#define ext4_htree_spin_unlock(lck) \
1393 + ext4_htree_node_unlock(lck, EXT4_LB_SPIN, NULL)
1394 +#define ext4_htree_spin_unlock_listen(lck, p) \
1395 + ext4_htree_node_unlock(lck, EXT4_LB_SPIN, p)
1397 +static void ext4_htree_spin_stop_listen(struct htree_lock *lck)
1399 + if (!ext4_htree_safe_locked(lck) &&
1400 + htree_node_is_listening(lck, ffz(~EXT4_LB_SPIN)))
1401 + htree_node_stop_listen(lck, ffz(~EXT4_LB_SPIN));
1405 + DX_HASH_COL_IGNORE, /* ignore collision while probing frames */
1406 + DX_HASH_COL_YES, /* there is collision and it does matter */
1407 + DX_HASH_COL_NO, /* there is no collision */
1410 +static int dx_probe_hash_collision(struct htree_lock *lck,
1411 + struct dx_entry *entries,
1412 + struct dx_entry *at, u32 hash)
1414 + if (!(lck && ext4_htree_lock_data(lck)->ld_flags & EXT4_LB_EXACT)) {
1415 + return DX_HASH_COL_IGNORE; /* don't care about collision */
1417 + } else if (at == entries + dx_get_count(entries) - 1) {
1418 + return DX_HASH_COL_IGNORE; /* not in any leaf of this DX */
1420 + } else { /* hash collision? */
1421 + return ((dx_get_hash(at + 1) & ~1) == hash) ?
1422 + DX_HASH_COL_YES : DX_HASH_COL_NO;
1427 * Probe for a directory leaf block to search.
1429 @@ -379,10 +598,11 @@ struct stats dx_show_entries(struct dx_h
1431 static struct dx_frame *
1432 dx_probe(const struct qstr *d_name, struct inode *dir,
1433 - struct dx_hash_info *hinfo, struct dx_frame *frame_in, int *err)
1434 + struct dx_hash_info *hinfo, struct dx_frame *frame_in,
1435 + struct htree_lock *lck, int *err)
1437 unsigned count, indirect;
1438 - struct dx_entry *at, *entries, *p, *q, *m;
1439 + struct dx_entry *at, *entries, *p, *q, *m, *dx = NULL;
1440 struct dx_root_info * info;
1441 struct buffer_head *bh;
1442 struct dx_frame *frame = frame_in;
1443 @@ -447,8 +667,15 @@ dx_probe(const struct qstr *d_name, stru
1444 dxtrace(printk("Look up %x", hash));
1447 + if (indirect == 0) { /* the last index level */
1448 +		/* NB: ext4_htree_dx_lock() could be a noop if the
1449 +		 * DX-lock flag is not set for the current operation */
1450 + ext4_htree_dx_lock(lck, dx);
1451 + ext4_htree_spin_lock(lck, dx, NULL);
1453 count = dx_get_count(entries);
1454 - if (!count || count > dx_get_limit(entries)) {
1455 + if (count == 0 || count > dx_get_limit(entries)) {
1456 + ext4_htree_spin_unlock(lck); /* release spin */
1457 ext4_warning(dir->i_sb,
1458 "dx entry: no count or count > limit");
1460 @@ -489,9 +716,73 @@ dx_probe(const struct qstr *d_name, stru
1462 frame->entries = entries;
1464 - if (!indirect--) return frame;
1466 + if (indirect == 0) { /* the last index level */
1467 + struct ext4_dir_lock_data *ld;
1470 + /* By default we only lock DE-block, however, we will
1471 + * also lock the last level DX-block if:
1472 + * a) there is hash collision
1473 + * we will set DX-lock flag (a few lines below)
1474 + * and redo to lock DX-block
1475 + * see detail in dx_probe_hash_collision()
1476 + * b) it's a retry from splitting
1477 + * we need to lock the last level DX-block so nobody
1478 + * else can split any leaf blocks under the same
1479 + * DX-block, see detail in ext4_dx_add_entry()
1481 + if (ext4_htree_dx_locked(lck)) {
1482 + /* DX-block is locked, just lock DE-block
1484 + ext4_htree_spin_unlock(lck);
1485 + if (!ext4_htree_safe_locked(lck))
1486 + ext4_htree_de_lock(lck, frame->at);
1489 + /* it's pdirop and no DX lock */
1490 + if (dx_probe_hash_collision(lck, entries, at, hash) ==
1491 + DX_HASH_COL_YES) {
1492 +			/* found hash collision, set DX-lock flag
1493 +			 * and retry to obtain DX-lock */
1494 + ext4_htree_spin_unlock(lck);
1495 + ext4_htree_dx_need_lock(lck);
1498 + ld = ext4_htree_lock_data(lck);
1499 +		/* because I don't lock DX, @at can't be trusted
1500 +		 * after I release the spinlock, so I have to save it */
1502 + ld->ld_at_entry = *at;
1503 + ld->ld_count = dx_get_count(entries);
1505 + frame->at = &ld->ld_at_entry;
1506 + myblock = dx_get_block(at);
1508 +		/* NB: mind the lock ordering */
1509 + ext4_htree_spin_unlock_listen(lck, &myblock);
1510 +		/* another thread can split this DE-block because:
1511 +		 * a) I don't have a lock for the DE-block yet
1512 +		 * b) I released the spinlock on the DX-block
1513 +		 * if it happens I can detect it by listening for a
1514 +		 * split event on this DE-block */
1515 + ext4_htree_de_lock(lck, frame->at);
1516 + ext4_htree_spin_stop_listen(lck);
1518 + if (myblock == EXT4_HTREE_NODE_CHANGED) {
1519 + /* someone split this DE-block before
1520 + * I locked it, I need to retry and lock
1521 + * valid DE-block */
1522 + ext4_htree_de_unlock(lck);
1529 if (!(bh = ext4_bread (NULL,dir, dx_get_block(at), 0, err)))
1532 at = entries = ((struct dx_node *) bh->b_data)->entries;
1533 if (dx_get_limit(entries) != dx_node_limit (dir)) {
1534 ext4_warning(dir->i_sb,
1535 @@ -553,7 +844,7 @@ static void dx_release (struct dx_frame
1536 static int ext4_htree_next_block(struct inode *dir, __u32 hash,
1537 struct dx_frame *frame,
1538 struct dx_frame *frames,
1539 - __u32 *start_hash)
1540 + __u32 *start_hash, struct htree_lock *lck)
1543 struct buffer_head *bh;
1544 @@ -568,12 +859,22 @@ static int ext4_htree_next_block(struct
1545 * this loop, num_frames indicates the number of interior
1546 * nodes need to be read.
1548 + ext4_htree_de_unlock(lck);
1550 - if (++(p->at) < p->entries + dx_get_count(p->entries))
1552 + if (num_frames > 0 || ext4_htree_dx_locked(lck)) {
1553 + /* num_frames > 0 :
1555 + * ext4_htree_dx_locked:
1556 +		 * frame->at is a reliable pointer returned by dx_probe;
1557 +		 * otherwise dx_probe already knew there is no collision */
1558 + if (++(p->at) < p->entries + dx_get_count(p->entries))
1564 + if (num_frames == 1)
1565 + ext4_htree_dx_unlock(lck);
1569 @@ -596,6 +897,13 @@ static int ext4_htree_next_block(struct
1570 * block so no check is necessary
1572 while (num_frames--) {
1573 + if (num_frames == 0) {
1574 + /* it's not always necessary, we just don't want to
1575 + * detect hash collision again */
1576 + ext4_htree_dx_need_lock(lck);
1577 + ext4_htree_dx_lock(lck, p->at);
1580 if (!(bh = ext4_bread(NULL, dir, dx_get_block(p->at),
1582 return err; /* Failure */
1583 @@ -604,6 +912,7 @@ static int ext4_htree_next_block(struct
1585 p->at = p->entries = ((struct dx_node *) bh->b_data)->entries;
1587 + ext4_htree_de_lock(lck, p->at);
1591 @@ -696,10 +1005,10 @@ int ext4_htree_fill_tree(struct file *di
1593 hinfo.hash = start_hash;
1594 hinfo.minor_hash = 0;
1595 - frame = dx_probe(NULL, dir, &hinfo, frames, &err);
1596 + /* assume it's PR locked */
1597 + frame = dx_probe(NULL, dir, &hinfo, frames, NULL, &err);
1601 /* Add '.' and '..' from the htree header */
1602 if (!start_hash && !start_minor_hash) {
1603 de = (struct ext4_dir_entry_2 *) frames[0].bh->b_data;
1604 @@ -726,7 +1035,7 @@ int ext4_htree_fill_tree(struct file *di
1607 ret = ext4_htree_next_block(dir, HASH_NB_ALWAYS,
1608 - frame, frames, &hashval);
1609 + frame, frames, &hashval, NULL);
1610 *next_hash = hashval;
1613 @@ -826,9 +1135,17 @@ static void dx_insert_block(struct dx_fr
1615 static void ext4_update_dx_flag(struct inode *inode)
1617 + /* Disable it for ldiskfs, because going from a DX directory to
1618 + * a non-DX directory while it is in use will completely break
1619 + * the htree-locking.
1620 + * If we really want to support this operation in the future,
1621 +	 * we need to exclusively lock the directory here, which will
1622 +	 * increase the complexity of the code */
1624 if (!EXT4_HAS_COMPAT_FEATURE(inode->i_sb,
1625 EXT4_FEATURE_COMPAT_DIR_INDEX))
1626 ext4_clear_inode_flag(inode, EXT4_INODE_INDEX);
1631 @@ -900,9 +1217,10 @@ static inline int search_dirblock(struct
1632 * The returned buffer_head has ->b_count elevated. The caller is expected
1633 * to brelse() it when appropriate.
1635 -static struct buffer_head * ext4_find_entry (struct inode *dir,
1636 +struct buffer_head * __ext4_find_entry(struct inode *dir,
1637 const struct qstr *d_name,
1638 - struct ext4_dir_entry_2 ** res_dir)
1639 + struct ext4_dir_entry_2 **res_dir,
1640 + struct htree_lock *lck)
1642 struct super_block *sb;
1643 struct buffer_head *bh_use[NAMEI_RA_SIZE];
1644 @@ -923,7 +1241,7 @@ static struct buffer_head * ext4_find_en
1645 if (namelen > EXT4_NAME_LEN)
1648 - bh = ext4_dx_find_entry(dir, d_name, res_dir, &err);
1649 + bh = ext4_dx_find_entry(dir, d_name, res_dir, lck, &err);
1651 * On success, or if the error was file not found,
1652 * return. Otherwise, fall back to doing a search the
1653 @@ -933,6 +1251,7 @@ static struct buffer_head * ext4_find_en
1655 dxtrace(printk(KERN_DEBUG "ext4_find_entry: dx failed, "
1657 + ext4_htree_safe_relock(lck);
1659 nblocks = dir->i_size >> EXT4_BLOCK_SIZE_BITS(sb);
1660 start = EXT4_I(dir)->i_dir_start_lookup;
1661 @@ -1008,9 +1327,12 @@ cleanup_and_exit:
1662 brelse(bh_use[ra_ptr]);
1665 +EXPORT_SYMBOL(__ext4_find_entry);
1667 -static struct buffer_head * ext4_dx_find_entry(struct inode *dir, const struct qstr *d_name,
1668 - struct ext4_dir_entry_2 **res_dir, int *err)
1669 +static struct buffer_head * ext4_dx_find_entry(struct inode *dir,
1670 + const struct qstr *d_name,
1671 + struct ext4_dir_entry_2 **res_dir,
1672 + struct htree_lock *lck, int *err)
1674 struct super_block * sb;
1675 struct dx_hash_info hinfo;
1676 @@ -1026,13 +1348,16 @@ static struct buffer_head * ext4_dx_find
1678 /* NFS may look up ".." - look at dx_root directory block */
1679 if (namelen > 2 || name[0] != '.'||(name[1] != '.' && name[1] != '\0')){
1680 - if (!(frame = dx_probe(d_name, dir, &hinfo, frames, err)))
1681 + if (!(frame = dx_probe(d_name, dir, &hinfo, frames, lck, err)))
1685 frame->bh = NULL; /* for dx_release() */
1686 frame->at = (struct dx_entry *)frames; /* hack for zero entry*/
1687 dx_set_block(frame->at, 0); /* dx_root block is 0 */
1688 +		/* "." and ".." are protected by the root DX lock */
1689 + ext4_htree_dx_need_lock(lck);
1690 + ext4_htree_dx_lock(lck, NULL);
1694 @@ -1061,7 +1386,7 @@ static struct buffer_head * ext4_dx_find
1696 /* Check to see if we should continue to search */
1697 retval = ext4_htree_next_block(dir, hash, frame,
1699 + frames, NULL, lck);
1702 "error reading index page in directory #%lu",
1703 @@ -1244,8 +1569,9 @@ static struct ext4_dir_entry_2* dx_pack_
1704 * Returns pointer to de in block into which the new entry will be inserted.
1706 static struct ext4_dir_entry_2 *do_split(handle_t *handle, struct inode *dir,
1707 - struct buffer_head **bh,struct dx_frame *frame,
1708 - struct dx_hash_info *hinfo, int *error)
1709 + struct buffer_head **bh, struct dx_frame *frames,
1710 + struct dx_frame *frame, struct dx_hash_info *hinfo,
1711 + struct htree_lock *lck, int *error)
1713 unsigned blocksize = dir->i_sb->s_blocksize;
1714 unsigned count, continued;
1715 @@ -1302,7 +1628,14 @@ static struct ext4_dir_entry_2 *do_split
1716 hash2, split, count-split));
1718 /* Fancy dance to stay within two buffers */
1719 - de2 = dx_move_dirents(data1, data2, map + split, count - split, blocksize);
1720 + if (hinfo->hash < hash2) {
1721 + de2 = dx_move_dirents(data1, data2, map + split,
1722 + count - split, blocksize);
1724 +		/* make sure we will add the entry to the same block
1725 +		 * that we have already locked */
1726 + de2 = dx_move_dirents(data1, data2, map, split, blocksize);
1728 de = dx_pack_dirents(data1, blocksize);
1729 de->rec_len = ext4_rec_len_to_disk(data1 + blocksize - (char *) de,
1731 @@ -1311,13 +1644,21 @@ static struct ext4_dir_entry_2 *do_split
1732 dxtrace(dx_show_leaf (hinfo, (struct ext4_dir_entry_2 *) data1, blocksize, 1));
1733 dxtrace(dx_show_leaf (hinfo, (struct ext4_dir_entry_2 *) data2, blocksize, 1));
1735 - /* Which block gets the new entry? */
1736 - if (hinfo->hash >= hash2)
1740 + ext4_htree_spin_lock(lck, frame > frames ? (frame - 1)->at : NULL,
1741 + frame->at); /* notify block is being split */
1742 + if (hinfo->hash < hash2) {
1743 + dx_insert_block(frame, hash2 + continued, newblock);
1746 + /* switch block number */
1747 + dx_insert_block(frame, hash2 + continued,
1748 + dx_get_block(frame->at));
1749 + dx_set_block(frame->at, newblock);
1752 - dx_insert_block(frame, hash2 + continued, newblock);
1753 + ext4_htree_spin_unlock(lck);
1754 + ext4_htree_dx_unlock(lck);
1756 err = ext4_handle_dirty_metadata(handle, dir, bh2);
1759 @@ -1558,8 +1899,8 @@ static int make_indexed_dir(handle_t *ha
1760 retval = ext4_handle_dirty_metadata(handle, dir, bh2);
1764 - de = do_split(handle,dir, &bh2, frame, &hinfo, &retval);
1765 + de = do_split(handle,dir, &bh2, frames, frame, &hinfo, NULL, &retval);
1769 @@ -1664,8 +2005,8 @@ out:
1770 * may not sleep between calling this and putting something into
1771 * the entry, as someone else might have used it while you slept.
1773 -static int ext4_add_entry(handle_t *handle, struct dentry *dentry,
1774 - struct inode *inode)
1775 +int __ext4_add_entry(handle_t *handle, struct dentry *dentry,
1776 + struct inode *inode, struct htree_lock *lck)
1778 struct inode *dir = dentry->d_parent->d_inode;
1779 struct buffer_head *bh;
1780 @@ -1684,9 +2025,10 @@ static int ext4_add_entry(handle_t *hand
1781 if (dentry->d_name.len == 2 &&
1782 memcmp(dentry->d_name.name, "..", 2) == 0)
1783 return ext4_update_dotdot(handle, dentry, inode);
1784 - retval = ext4_dx_add_entry(handle, dentry, inode);
1785 + retval = ext4_dx_add_entry(handle, dentry, inode, lck);
1786 if (!retval || (retval != ERR_BAD_DX_DIR))
1788 + ext4_htree_safe_relock(lck);
1789 ext4_clear_inode_flag(dir, EXT4_INODE_INDEX);
1791 ext4_mark_inode_dirty(handle, dir);
1792 @@ -1717,12 +2059,13 @@ static int ext4_add_entry(handle_t *hand
1796 +EXPORT_SYMBOL(__ext4_add_entry);
1799 * Returns 0 for success, or a negative error value
1801 static int ext4_dx_add_entry(handle_t *handle, struct dentry *dentry,
1802 - struct inode *inode)
1803 + struct inode *inode, struct htree_lock *lck)
1805 struct dx_frame frames[EXT4_HTREE_LEVEL], *frame;
1806 struct dx_entry *entries, *at;
1807 @@ -1736,7 +2079,7 @@ static int ext4_dx_add_entry(handle_t *h
1811 - frame = dx_probe(&dentry->d_name, dir, &hinfo, frames, &err);
1812 + frame = dx_probe(&dentry->d_name, dir, &hinfo, frames, lck, &err);
1815 entries = frame->entries;
1816 @@ -1763,6 +2106,11 @@ again:
1817 struct dx_node *node2;
1818 struct buffer_head *bh2;
1820 + if (!ext4_htree_safe_locked(lck)) { /* retry with EX lock */
1821 + ext4_htree_safe_relock(lck);
1825 while (frame > frames) {
1826 if (dx_get_count((frame - 1)->entries) <
1827 dx_get_limit((frame - 1)->entries)) {
1828 @@ -1860,16 +2208,43 @@ again:
1832 + } else if (!ext4_htree_dx_locked(lck)) {
1833 + struct ext4_dir_lock_data *ld = ext4_htree_lock_data(lck);
1835 + /* not well protected, require DX lock */
1836 + ext4_htree_dx_need_lock(lck);
1837 + at = frame > frames ? (frame - 1)->at : NULL;
1839 + /* NB: no risk of deadlock because it's just a try.
1841 +		 * NB: we check ld_count twice, the first time before
1842 +		 * taking the DX lock, the second time after holding it.
1844 +		 * NB: we never free directory blocks so far, which
1845 +		 * means the value returned by dx_get_count() should equal
1846 +		 * ld->ld_count if nobody split any DE-block under @at,
1847 +		 * and ld->ld_at still points to a valid dx_entry. */
1848 + if ((ld->ld_count != dx_get_count(entries)) ||
1849 + !ext4_htree_dx_lock_try(lck, at) ||
1850 + (ld->ld_count != dx_get_count(entries))) {
1854 + /* OK, I've got DX lock and nothing changed */
1855 + frame->at = ld->ld_at;
1857 - de = do_split(handle, dir, &bh, frame, &hinfo, &err);
1858 + de = do_split(handle, dir, &bh, frames, frame, &hinfo, lck, &err);
1862 err = add_dirent_to_buf(handle, dentry, inode, de, bh);
1866 ext4_std_error(dir->i_sb, err);
1868 + ext4_htree_dx_unlock(lck);
1869 + ext4_htree_de_unlock(lck);
1873 Index: linux-2.6.32-504.3.3.el6.x86_64/fs/ext4/Makefile
1874 ===================================================================
1875 --- linux-2.6.32-504.3.3.el6.x86_64.orig/fs/ext4/Makefile
1876 +++ linux-2.6.32-504.3.3.el6.x86_64/fs/ext4/Makefile
1877 @@ -6,6 +6,7 @@ obj-$(CONFIG_EXT4_FS) += ext4.o
1879 ext4-y := balloc.o bitmap.o dir.o file.o fsync.o ialloc.o inode.o \
1880 ioctl.o namei.o super.o symlink.o hash.o resize.o extents.o \
1882 ext4_jbd2.o migrate.o mballoc.o block_validity.o move_extent.o \