1 --- /dev/null 2011-12-08 11:16:52.000000000 +0800
2 +++ linux-2.6.32-131.6.1-pdo/include/linux/htree_lock.h 2011-12-02 17:09:34.000000000 +0800
5 + * include/linux/htree_lock.h
7 + * Copyright (c) 2011 Whamcloud, Inc.
9 + * Author: Liang Zhen <liang@whamcloud.com>
15 + * htree_lock is an advanced sleeping lock; it supports five lock modes
16 + * (the concept is taken from the DLM).
18 + * The most common use case is:
19 + * - create a htree_lock_head for data
20 + * - each thread (contender) creates its own htree_lock
21 + * - a contender calls htree_lock(lock_node, mode) to protect the data and
22 + *   calls htree_unlock to release the lock
24 + * There is also a more advanced use-case: a user can take a PW/PR lock on
25 + * a particular key; this is mostly used while the user is holding a shared
26 + * lock on the htree (CW, CR):
28 + * htree_lock(lock_node, HTREE_LOCK_CR); lock the htree with CR
29 + * htree_node_lock(lock_node, HTREE_LOCK_PR, key...); lock @key with PR
31 + *   htree_node_unlock(lock_node); unlock the key
33 + * We can also have N levels of such keys; all we need to do is specify the
34 + * number of levels while creating the htree_lock_head, then we can
35 + * lock/unlock a specific level with:
36 + * htree_node_lock(lock_node, mode1, key1, level1...);
38 + * htree_node_lock(lock_node, mode1, key2, level2...);
40 + * htree_node_unlock(lock_node, level2);
41 + * htree_node_unlock(lock_node, level1);
43 + * NB: with multiple levels, be careful about locking order to avoid deadlocks
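+ *
+ * A rough usage sketch (editorial illustration only, based on the API below;
+ * a single level of keys and no error handling are assumed):
+ *
+ *	struct htree_lock_head *lhead;
+ *	struct htree_lock *lck;
+ *	u32 key = 42;				(any caller-chosen key)
+ *
+ *	lhead = htree_lock_head_alloc(1, HTREE_HBITS_DEF, 0);
+ *	lck = htree_lock_alloc(1, 0);
+ *
+ *	htree_lock(lck, lhead, HTREE_LOCK_CR);		(shared tree lock)
+ *	htree_node_lock(lck, HTREE_LOCK_PR, key, 0);	(PR on @key, level 0)
+ *	... read the data protected by @key ...
+ *	htree_node_unlock(lck, 0, NULL);
+ *	htree_unlock(lck);
+ *
+ *	htree_lock_free(lck);
+ *	htree_lock_head_free(lhead);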
46 +#ifndef _LINUX_HTREE_LOCK_H
47 +#define _LINUX_HTREE_LOCK_H
49 +#include <linux/list.h>
50 +#include <linux/spinlock.h>
51 +#include <linux/sched.h>
55 + * more details can be found here:
56 + * http://en.wikipedia.org/wiki/Distributed_lock_manager
59 + HTREE_LOCK_EX = 0, /* exclusive lock: incompatible with all others */
60 + HTREE_LOCK_PW, /* protected write: allows only CR users */
61 + HTREE_LOCK_PR, /* protected read: allow PR, CR users */
62 + HTREE_LOCK_CW, /* concurrent write: allow CR, CW users */
63 + HTREE_LOCK_CR, /* concurrent read: allow all but EX users */
64 + HTREE_LOCK_MAX, /* number of lock modes */
67 +#define HTREE_LOCK_NL HTREE_LOCK_MAX
68 +#define HTREE_LOCK_INVAL 0xdead10c
71 + HTREE_HBITS_MIN = 2,
72 + HTREE_HBITS_DEF = 14,
73 + HTREE_HBITS_MAX = 32,
77 + HTREE_EVENT_DISABLE = (0),
78 + HTREE_EVENT_RD = (1 << HTREE_LOCK_PR),
79 + HTREE_EVENT_WR = (1 << HTREE_LOCK_PW),
80 + HTREE_EVENT_RDWR = (HTREE_EVENT_RD | HTREE_EVENT_WR),
85 +typedef void (*htree_event_cb_t)(void *target, void *event);
87 +struct htree_lock_child {
88 + struct list_head lc_list; /* granted list */
89 + htree_event_cb_t lc_callback; /* event callback */
90 + unsigned lc_events; /* event types */
93 +struct htree_lock_head {
94 + unsigned long lh_lock; /* bits lock */
95 + /* blocked lock list (htree_lock) */
96 + struct list_head lh_blocked_list;
99 + /* hash bits for key and limit number of locks */
101 + /* counters for blocked locks */
102 + u16 lh_nblocked[HTREE_LOCK_MAX];
103 + /* counters for granted locks */
104 + u16 lh_ngranted[HTREE_LOCK_MAX];
107 + /* array of children locks */
108 + struct htree_lock_child lh_children[0];
111 +/* htree_lock_node is a child-lock for a specific key (ln_major/minor_key) */
112 +struct htree_lock_node {
113 + htree_lock_mode_t ln_mode;
114 + /* major hash key */
116 + /* minor hash key */
118 + struct list_head ln_major_list;
119 + struct list_head ln_minor_list;
120 + /* alive list, all locks (granted, blocked, listening) are on it */
121 + struct list_head ln_alive_list;
123 + struct list_head ln_blocked_list;
125 + struct list_head ln_granted_list;
126 + void *ln_ev_target;
130 + struct task_struct *lk_task;
131 + struct htree_lock_head *lk_head;
134 + htree_lock_mode_t lk_mode;
135 + struct list_head lk_blocked_list;
136 + struct htree_lock_node lk_nodes[0];
139 +/* create a lock head, which stands for a resource */
140 +struct htree_lock_head *htree_lock_head_alloc(unsigned depth,
141 + unsigned hbits, unsigned priv);
142 +/* free a lock head */
143 +void htree_lock_head_free(struct htree_lock_head *lhead);
144 +/* register event callback for child lock at level @depth */
145 +void htree_lock_event_attach(struct htree_lock_head *lhead, unsigned depth,
146 + unsigned events, htree_event_cb_t callback);
147 +/* create a lock handle, which stands for a thread */
148 +struct htree_lock *htree_lock_alloc(unsigned depth, unsigned pbytes);
149 +/* free a lock handle */
150 +void htree_lock_free(struct htree_lock *lck);
151 +/* lock htree; if @wait is false, 0 is returned when the lock can't
152 + * be granted immediately */
153 +int htree_lock_try(struct htree_lock *lck, struct htree_lock_head *lhead,
154 + htree_lock_mode_t mode, int wait);
156 +void htree_unlock(struct htree_lock *lck);
157 +/* unlock and relock htree with @new_mode */
158 +int htree_change_lock_try(struct htree_lock *lck,
159 + htree_lock_mode_t new_mode, int wait);
160 +void htree_change_mode(struct htree_lock *lck, htree_lock_mode_t mode);
161 +/* acquire child lock (key) of the htree at level @dep; @event will be sent
162 + * to all listeners on this @key while the lock is being granted */
163 +int htree_node_lock_try(struct htree_lock *lck, htree_lock_mode_t mode,
164 + u32 key, unsigned dep, int wait, void *event);
165 +/* release child lock at level @dep; this lock will keep listening on its key
166 + * if @event isn't NULL, and event_cb will be called against @lck while
167 + * granting any other lock at level @dep with the same key */
168 +void htree_node_unlock(struct htree_lock *lck, unsigned dep, void *event);
169 +/* stop listening on child lock at level @dep */
170 +void htree_node_stop_listen(struct htree_lock *lck, unsigned dep);
172 +void htree_lock_stat_print(int depth);
173 +void htree_lock_stat_reset(void);
175 +#define htree_lock(lck, lh, mode) htree_lock_try(lck, lh, mode, 1)
176 +#define htree_change_lock(lck, mode) htree_change_lock_try(lck, mode, 1)
178 +#define htree_lock_mode(lck) ((lck)->lk_mode)
180 +#define htree_node_lock(lck, mode, key, dep) \
181 + htree_node_lock_try(lck, mode, key, dep, 1, NULL)
182 +/* this is only safe in the thread context of the lock owner */
183 +#define htree_node_is_granted(lck, dep) \
184 + ((lck)->lk_nodes[dep].ln_mode != HTREE_LOCK_INVAL && \
185 + (lck)->lk_nodes[dep].ln_mode != HTREE_LOCK_NL)
186 +/* this is only safe in the thread context of the lock owner */
187 +#define htree_node_is_listening(lck, dep) \
188 + ((lck)->lk_nodes[dep].ln_mode == HTREE_LOCK_NL)
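+
+/*
+ * Editorial sketch of the non-blocking and re-locking paths declared above;
+ * it assumes @lck and @lhead were set up as in the example at the top of
+ * this file:
+ *
+ *	if (!htree_lock_try(lck, lhead, HTREE_LOCK_PW, 0))
+ *		return -EBUSY;	(the lock would block and we chose not to wait)
+ *
+ *	htree_change_lock(lck, HTREE_LOCK_CR);
+ *	(releases all child locks, then re-locks the htree with CR)
+ *
+ *	htree_unlock(lck);
+ */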
191 --- /dev/null 2011-12-08 11:16:52.000000000 +0800
192 +++ linux-2.6.32-131.6.1-pdo/fs/ext4/htree_lock.c 2011-12-08 18:18:18.000000000 +0800
195 + * fs/ext4/htree_lock.c
197 + * Copyright (c) 2011 Whamcloud, Inc.
199 + * Author: Liang Zhen <liang@whamcloud.com>
201 +#include <linux/jbd2.h>
202 +#include <linux/hash.h>
203 +#include <linux/module.h>
204 +#include <linux/htree_lock.h>
207 + HTREE_LOCK_BIT_EX = (1 << HTREE_LOCK_EX),
208 + HTREE_LOCK_BIT_PW = (1 << HTREE_LOCK_PW),
209 + HTREE_LOCK_BIT_PR = (1 << HTREE_LOCK_PR),
210 + HTREE_LOCK_BIT_CW = (1 << HTREE_LOCK_CW),
211 + HTREE_LOCK_BIT_CR = (1 << HTREE_LOCK_CR),
215 + HTREE_LOCK_COMPAT_EX = 0,
216 + HTREE_LOCK_COMPAT_PW = HTREE_LOCK_COMPAT_EX | HTREE_LOCK_BIT_CR,
217 + HTREE_LOCK_COMPAT_PR = HTREE_LOCK_COMPAT_PW | HTREE_LOCK_BIT_PR,
218 + HTREE_LOCK_COMPAT_CW = HTREE_LOCK_COMPAT_PW | HTREE_LOCK_BIT_CW,
219 + HTREE_LOCK_COMPAT_CR = HTREE_LOCK_COMPAT_CW | HTREE_LOCK_BIT_PR |
223 +static int htree_lock_compat[] = {
224 + [HTREE_LOCK_EX] HTREE_LOCK_COMPAT_EX,
225 + [HTREE_LOCK_PW] HTREE_LOCK_COMPAT_PW,
226 + [HTREE_LOCK_PR] HTREE_LOCK_COMPAT_PR,
227 + [HTREE_LOCK_CW] HTREE_LOCK_COMPAT_CW,
228 + [HTREE_LOCK_CR] HTREE_LOCK_COMPAT_CR,
231 +/* max allowed htree-lock depth.
232 + * We only need depth=3 for ext4, although users can use a higher value. */
233 +#define HTREE_LOCK_DEP_MAX 16
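+
+/*
+ * Editorial note on how the table above is consulted (see
+ * htree_lock_internal() below): a requested mode is compatible with the
+ * currently granted modes only if every granted mode's bit is present in the
+ * requested mode's mask, for example:
+ *
+ *	granted = (1 << HTREE_LOCK_CR) | (1 << HTREE_LOCK_CW);
+ *	(htree_lock_compat[HTREE_LOCK_CW] & granted) == granted
+ *		-> true, CW allows CR and CW users
+ *	(htree_lock_compat[HTREE_LOCK_PR] & granted) == granted
+ *		-> false, PR does not allow CW users
+ */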
235 +#ifdef HTREE_LOCK_DEBUG
237 +static char *hl_name[] = {
238 + [HTREE_LOCK_EX] "EX",
239 + [HTREE_LOCK_PW] "PW",
240 + [HTREE_LOCK_PR] "PR",
241 + [HTREE_LOCK_CW] "CW",
242 + [HTREE_LOCK_CR] "CR",
246 +struct htree_lock_node_stats {
247 + unsigned long long blocked[HTREE_LOCK_MAX];
248 + unsigned long long granted[HTREE_LOCK_MAX];
249 + unsigned long long retried[HTREE_LOCK_MAX];
250 + unsigned long long events;
253 +struct htree_lock_stats {
254 + struct htree_lock_node_stats nodes[HTREE_LOCK_DEP_MAX];
255 + unsigned long long granted[HTREE_LOCK_MAX];
256 + unsigned long long blocked[HTREE_LOCK_MAX];
259 +static struct htree_lock_stats hl_stats;
261 +void htree_lock_stat_reset(void)
263 + memset(&hl_stats, 0, sizeof(hl_stats));
266 +void htree_lock_stat_print(int depth)
271 + printk(KERN_DEBUG "HTREE LOCK STATS:\n");
272 + for (i = 0; i < HTREE_LOCK_MAX; i++) {
273 + printk(KERN_DEBUG "[%s]: G [%10llu], B [%10llu]\n",
274 + hl_name[i], hl_stats.granted[i], hl_stats.blocked[i]);
276 + for (i = 0; i < depth; i++) {
277 + printk(KERN_DEBUG "HTREE CHILD [%d] STATS:\n", i);
278 + for (j = 0; j < HTREE_LOCK_MAX; j++) {
280 + "[%s]: G [%10llu], B [%10llu], R [%10llu]\n",
281 + hl_name[j], hl_stats.nodes[i].granted[j],
282 + hl_stats.nodes[i].blocked[j],
283 + hl_stats.nodes[i].retried[j]);
288 +#define lk_grant_inc(m) do { hl_stats.granted[m]++; } while (0)
289 +#define lk_block_inc(m) do { hl_stats.blocked[m]++; } while (0)
290 +#define ln_grant_inc(d, m) do { hl_stats.nodes[d].granted[m]++; } while (0)
291 +#define ln_block_inc(d, m) do { hl_stats.nodes[d].blocked[m]++; } while (0)
292 +#define ln_retry_inc(d, m) do { hl_stats.nodes[d].retried[m]++; } while (0)
293 +#define ln_event_inc(d) do { hl_stats.nodes[d].events++; } while (0)
297 +void htree_lock_stat_reset(void) {}
298 +void htree_lock_stat_print(int depth) {}
300 +#define lk_grant_inc(m) do {} while (0)
301 +#define lk_block_inc(m) do {} while (0)
302 +#define ln_grant_inc(d, m) do {} while (0)
303 +#define ln_block_inc(d, m) do {} while (0)
304 +#define ln_retry_inc(d, m) do {} while (0)
305 +#define ln_event_inc(d) do {} while (0)
309 +EXPORT_SYMBOL(htree_lock_stat_reset);
310 +EXPORT_SYMBOL(htree_lock_stat_print);
312 +#define HTREE_DEP_ROOT (-1)
314 +#define htree_spin_lock(lhead, dep) \
315 + bit_spin_lock((dep) + 1, &(lhead)->lh_lock)
316 +#define htree_spin_unlock(lhead, dep) \
317 + bit_spin_unlock((dep) + 1, &(lhead)->lh_lock)
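+
+/*
+ * Editorial note: lh_lock is used as an array of bit spinlocks, one bit per
+ * level: bit 0 (HTREE_DEP_ROOT == -1, so dep + 1 == 0) serializes the head
+ * itself, and bit dep + 1 serializes child level @dep.  With
+ * HTREE_LOCK_DEP_MAX == 16 this fits in a single unsigned long.
+ */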
319 +#define htree_key_event_ignore(child, ln) \
320 + (!((child)->lc_events & (1 << (ln)->ln_mode)))
323 +htree_key_list_empty(struct htree_lock_node *ln)
325 + return list_empty(&ln->ln_major_list) && list_empty(&ln->ln_minor_list);
329 +htree_key_list_del_init(struct htree_lock_node *ln)
331 + struct htree_lock_node *tmp = NULL;
333 + if (!list_empty(&ln->ln_minor_list)) {
334 + tmp = list_entry(ln->ln_minor_list.next,
335 + struct htree_lock_node, ln_minor_list);
336 + list_del_init(&ln->ln_minor_list);
339 + if (list_empty(&ln->ln_major_list))
342 + if (tmp == NULL) { /* not on minor key list */
343 + list_del_init(&ln->ln_major_list);
345 + BUG_ON(!list_empty(&tmp->ln_major_list));
346 + list_replace_init(&ln->ln_major_list, &tmp->ln_major_list);
351 +htree_key_list_replace_init(struct htree_lock_node *old,
352 + struct htree_lock_node *new)
354 + if (!list_empty(&old->ln_major_list))
355 + list_replace_init(&old->ln_major_list, &new->ln_major_list);
357 + if (!list_empty(&old->ln_minor_list))
358 + list_replace_init(&old->ln_minor_list, &new->ln_minor_list);
362 +htree_key_event_enqueue(struct htree_lock_child *child,
363 + struct htree_lock_node *ln, int dep, void *event)
365 + struct htree_lock_node *tmp;
367 + /* NB: ALWAYS called holding lhead::lh_lock(dep) */
368 + BUG_ON(ln->ln_mode == HTREE_LOCK_NL);
369 + if (event == NULL || htree_key_event_ignore(child, ln))
372 + /* shouldn't be a very long list */
373 + list_for_each_entry(tmp, &ln->ln_alive_list, ln_alive_list) {
374 + if (tmp->ln_mode == HTREE_LOCK_NL) {
376 + if (child->lc_callback != NULL)
377 + child->lc_callback(tmp->ln_ev_target, event);
383 +htree_node_lock_enqueue(struct htree_lock *newlk, struct htree_lock *curlk,
384 + unsigned dep, int wait, void *event)
386 + struct htree_lock_child *child = &newlk->lk_head->lh_children[dep];
387 + struct htree_lock_node *newln = &newlk->lk_nodes[dep];
388 + struct htree_lock_node *curln = &curlk->lk_nodes[dep];
390 + /* NB: ALWAYS called holding lhead::lh_lock(dep) */
391 +	/* NB: we only expect PR/PW lock modes here; only these two modes are
392 +	 * allowed for htree_node_lock (asserted in htree_node_lock_internal),
393 +	 * NL is only used for listeners, users can't directly request NL mode */
394 + if ((curln->ln_mode == HTREE_LOCK_NL) ||
395 + (curln->ln_mode != HTREE_LOCK_PW &&
396 + newln->ln_mode != HTREE_LOCK_PW)) {
397 + /* no conflict, attach it on granted list of @curlk */
398 + if (curln->ln_mode != HTREE_LOCK_NL) {
399 + list_add(&newln->ln_granted_list,
400 + &curln->ln_granted_list);
402 + /* replace key owner */
403 + htree_key_list_replace_init(curln, newln);
406 + list_add(&newln->ln_alive_list, &curln->ln_alive_list);
407 + htree_key_event_enqueue(child, newln, dep, event);
408 + ln_grant_inc(dep, newln->ln_mode);
409 + return 1; /* still hold lh_lock */
412 + if (!wait) { /* can't grant and don't want to wait */
413 + ln_retry_inc(dep, newln->ln_mode);
414 + newln->ln_mode = HTREE_LOCK_INVAL;
415 + return -1; /* don't wait and just return -1 */
418 + newlk->lk_task = current;
419 + set_current_state(TASK_UNINTERRUPTIBLE);
420 + /* conflict, attach it on blocked list of curlk */
421 + list_add_tail(&newln->ln_blocked_list, &curln->ln_blocked_list);
422 + list_add(&newln->ln_alive_list, &curln->ln_alive_list);
423 + ln_block_inc(dep, newln->ln_mode);
425 + htree_spin_unlock(newlk->lk_head, dep);
426 + /* wait to be given the lock */
427 + if (newlk->lk_task != NULL)
429 + /* granted, no doubt, wake up will set me RUNNING */
430 + if (event == NULL || htree_key_event_ignore(child, newln))
431 + return 0; /* granted without lh_lock */
433 + htree_spin_lock(newlk->lk_head, dep);
434 + htree_key_event_enqueue(child, newln, dep, event);
435 + return 1; /* still hold lh_lock */
439 + * get PR/PW access to a particular tree-node according to @dep and @key,
440 + * it will return -1 if @wait is false and the lock can't be granted immediately.
441 + * All listeners (HTREE_LOCK_NL) on @dep with the same @key will get
442 + * @event if it's not NULL.
443 + * NB: ALWAYS called holding lhead::lh_lock
446 +htree_node_lock_internal(struct htree_lock_head *lhead, struct htree_lock *lck,
447 + htree_lock_mode_t mode, u32 key, unsigned dep,
448 + int wait, void *event)
451 + struct htree_lock *tmp;
452 + struct htree_lock *tmp2;
459 + BUG_ON(mode != HTREE_LOCK_PW && mode != HTREE_LOCK_PR);
460 + BUG_ON(htree_node_is_granted(lck, dep));
462 + key = hash_long(key, lhead->lh_hbits);
464 + mi_bits = lhead->lh_hbits >> 1;
465 + ma_bits = lhead->lh_hbits - mi_bits;
467 + lck->lk_nodes[dep].ln_major_key = major = key & ((1U << ma_bits) - 1);
468 + lck->lk_nodes[dep].ln_minor_key = minor = key >> ma_bits;
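+	/*
+	 * Editorial example: with the default lh_hbits == HTREE_HBITS_DEF (14),
+	 * mi_bits == 7 and ma_bits == 7, so for a hashed key of 0x2a95:
+	 * major == (0x2a95 & 0x7f) == 0x15, minor == (0x2a95 >> 7) == 0x55.
+	 */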
469 + lck->lk_nodes[dep].ln_mode = mode;
472 + * The major key list is an ordered list, so searches are started
473 + * at the end of the list that is numerically closer to major_key,
474 + * so at most half of the list will be walked (for well-distributed
475 + * keys). The list traversal aborts early if the expected key
476 + * location is passed.
478 + reverse = (major >= (1 << (ma_bits - 1)));
481 + list_for_each_entry_reverse(tmp,
482 + &lhead->lh_children[dep].lc_list,
483 + lk_nodes[dep].ln_major_list) {
484 + if (tmp->lk_nodes[dep].ln_major_key == major) {
487 + } else if (tmp->lk_nodes[dep].ln_major_key < major) {
488 + /* attach _after_ @tmp */
489 + list_add(&lck->lk_nodes[dep].ln_major_list,
490 + &tmp->lk_nodes[dep].ln_major_list);
491 + goto out_grant_major;
495 + list_add(&lck->lk_nodes[dep].ln_major_list,
496 + &lhead->lh_children[dep].lc_list);
497 + goto out_grant_major;
500 + list_for_each_entry(tmp, &lhead->lh_children[dep].lc_list,
501 + lk_nodes[dep].ln_major_list) {
502 + if (tmp->lk_nodes[dep].ln_major_key == major) {
505 + } else if (tmp->lk_nodes[dep].ln_major_key > major) {
506 + /* insert _before_ @tmp */
507 + list_add_tail(&lck->lk_nodes[dep].ln_major_list,
508 + &tmp->lk_nodes[dep].ln_major_list);
509 + goto out_grant_major;
513 + list_add_tail(&lck->lk_nodes[dep].ln_major_list,
514 + &lhead->lh_children[dep].lc_list);
515 + goto out_grant_major;
520 + * NB: minor_key list doesn't have a "head", @list is just a
521 + * temporary stub for helping list searching, make sure it's removed
523 + * minor_key list is an ordered list too.
525 + list_add_tail(&list, &tmp->lk_nodes[dep].ln_minor_list);
527 + reverse = (minor >= (1 << (mi_bits - 1)));
530 + list_for_each_entry_reverse(tmp2, &list,
531 + lk_nodes[dep].ln_minor_list) {
532 + if (tmp2->lk_nodes[dep].ln_minor_key == minor) {
535 + } else if (tmp2->lk_nodes[dep].ln_minor_key < minor) {
536 + /* attach _after_ @tmp2 */
537 + list_add(&lck->lk_nodes[dep].ln_minor_list,
538 + &tmp2->lk_nodes[dep].ln_minor_list);
539 + goto out_grant_minor;
543 + list_add(&lck->lk_nodes[dep].ln_minor_list, &list);
546 + list_for_each_entry(tmp2, &list,
547 + lk_nodes[dep].ln_minor_list) {
548 + if (tmp2->lk_nodes[dep].ln_minor_key == minor) {
551 + } else if (tmp2->lk_nodes[dep].ln_minor_key > minor) {
552 + /* insert _before_ @tmp2 */
553 + list_add_tail(&lck->lk_nodes[dep].ln_minor_list,
554 + &tmp2->lk_nodes[dep].ln_minor_list);
555 + goto out_grant_minor;
559 + list_add_tail(&lck->lk_nodes[dep].ln_minor_list, &list);
563 + if (list.next == &lck->lk_nodes[dep].ln_minor_list) {
564 + /* new lock @lck is the first one on minor_key list, which
565 + * means it has the smallest minor_key and it should
566 + * replace @tmp as minor_key owner */
567 + list_replace_init(&tmp->lk_nodes[dep].ln_major_list,
568 + &lck->lk_nodes[dep].ln_major_list);
570 + /* remove the temporary head */
574 + ln_grant_inc(dep, lck->lk_nodes[dep].ln_mode);
575 + return 1; /* granted with holding lh_lock */
578 +	list_del(&list); /* remove temporary head */
579 + return htree_node_lock_enqueue(lck, tmp2, dep, wait, event);
583 + * release the key of @lck at level @dep, and grant any blocked locks.
584 + * the caller will still listen on @key if @event is not NULL, which means
585 + * the caller can see an event (via event_cb) while granting any lock with
586 + * the same key at level @dep.
587 + * NB: ALWAYS called holding lhead::lh_lock
588 + * NB: listener will not block anyone because listening mode is HTREE_LOCK_NL
591 +htree_node_unlock_internal(struct htree_lock_head *lhead,
592 + struct htree_lock *curlk, unsigned dep, void *event)
594 + struct htree_lock_node *curln = &curlk->lk_nodes[dep];
595 + struct htree_lock *grtlk = NULL;
596 + struct htree_lock_node *grtln;
597 + struct htree_lock *poslk;
598 + struct htree_lock *tmplk;
600 + if (!htree_node_is_granted(curlk, dep))
603 + if (!list_empty(&curln->ln_granted_list)) {
604 + /* there is another granted lock */
605 + grtlk = list_entry(curln->ln_granted_list.next,
607 + lk_nodes[dep].ln_granted_list);
608 + list_del_init(&curln->ln_granted_list);
611 + if (grtlk == NULL && !list_empty(&curln->ln_blocked_list)) {
613 + * @curlk is the only granted lock, so we confirmed:
614 + * a) curln is key owner (attached on major/minor_list),
615 + * so if there is any blocked lock, it should be attached
616 + * on curln->ln_blocked_list
617 +	 * b) we can always grant the first blocked lock
619 + grtlk = list_entry(curln->ln_blocked_list.next,
621 + lk_nodes[dep].ln_blocked_list);
622 + BUG_ON(grtlk->lk_task == NULL);
623 + wake_up_process(grtlk->lk_task);
626 + if (event != NULL &&
627 + lhead->lh_children[dep].lc_events != HTREE_EVENT_DISABLE) {
628 + curln->ln_ev_target = event;
629 + curln->ln_mode = HTREE_LOCK_NL; /* listen! */
631 + curln->ln_mode = HTREE_LOCK_INVAL;
634 + if (grtlk == NULL) { /* I must be the only one locking this key */
635 + struct htree_lock_node *tmpln;
637 + BUG_ON(htree_key_list_empty(curln));
639 + if (curln->ln_mode == HTREE_LOCK_NL) /* listening */
642 + /* not listening */
643 + if (list_empty(&curln->ln_alive_list)) { /* no more listener */
644 + htree_key_list_del_init(curln);
648 + tmpln = list_entry(curln->ln_alive_list.next,
649 + struct htree_lock_node, ln_alive_list);
651 + BUG_ON(tmpln->ln_mode != HTREE_LOCK_NL);
653 + htree_key_list_replace_init(curln, tmpln);
654 + list_del_init(&curln->ln_alive_list);
659 + /* have a granted lock */
660 + grtln = &grtlk->lk_nodes[dep];
661 + if (!list_empty(&curln->ln_blocked_list)) {
662 +		/* only the key owner can be on both lists */
663 + BUG_ON(htree_key_list_empty(curln));
665 + if (list_empty(&grtln->ln_blocked_list)) {
666 + list_add(&grtln->ln_blocked_list,
667 + &curln->ln_blocked_list);
669 + list_del_init(&curln->ln_blocked_list);
672 + * NB: this is the tricky part:
673 +	 * We have only two modes for child-locks (PR and PW); also,
674 +	 * only the owner of the key (attached on major/minor_list) can be on
675 +	 * both blocked_list and granted_list, so @grtlk must be one
676 +	 * of these two cases:
678 +	 * a) @grtlk is taken from granted_list, which means we've granted
679 +	 *    more than one lock, so @grtlk has to be PR; the first blocked
680 +	 *    lock must be PW and we can't grant it at all.
681 +	 *    So even if @grtlk is not the owner of the key (empty blocked_list),
682 +	 *    we don't care because we can't grant any lock anyway.
683 +	 * b) we just granted a new lock taken from the head of the blocked
684 +	 *    list; it should be the first granted lock, and it should
685 +	 *    also be the first one linked on blocked_list.
687 +	 * Either way, we get the correct result by iterating the blocked_list
688 +	 * of @grtlk, and we don't have to bother finding the
689 +	 * owner of the current key.
691 + list_for_each_entry_safe(poslk, tmplk, &grtln->ln_blocked_list,
692 + lk_nodes[dep].ln_blocked_list) {
693 + if (grtlk->lk_nodes[dep].ln_mode == HTREE_LOCK_PW ||
694 + poslk->lk_nodes[dep].ln_mode == HTREE_LOCK_PW)
696 + /* grant all readers */
697 + list_del_init(&poslk->lk_nodes[dep].ln_blocked_list);
698 + list_add(&poslk->lk_nodes[dep].ln_granted_list,
699 + &grtln->ln_granted_list);
701 + BUG_ON(poslk->lk_task == NULL);
702 + wake_up_process(poslk->lk_task);
705 + /* if @curln is the owner of this key, replace it with @grtln */
706 + if (!htree_key_list_empty(curln))
707 + htree_key_list_replace_init(curln, grtln);
709 + if (curln->ln_mode == HTREE_LOCK_INVAL)
710 + list_del_init(&curln->ln_alive_list);
714 + * it's just a wrapper of htree_node_lock_internal; it returns 1 when granted
715 + * and 0 only if @wait is false and the lock can't be granted immediately
718 +htree_node_lock_try(struct htree_lock *lck, htree_lock_mode_t mode,
719 + u32 key, unsigned dep, int wait, void *event)
721 + struct htree_lock_head *lhead = lck->lk_head;
724 + BUG_ON(dep >= lck->lk_depth);
725 + BUG_ON(lck->lk_mode == HTREE_LOCK_INVAL);
727 + htree_spin_lock(lhead, dep);
728 + rc = htree_node_lock_internal(lhead, lck, mode, key, dep, wait, event);
730 + htree_spin_unlock(lhead, dep);
733 +EXPORT_SYMBOL(htree_node_lock_try);
735 +/* it's a wrapper of htree_node_unlock_internal */
737 +htree_node_unlock(struct htree_lock *lck, unsigned dep, void *event)
739 + struct htree_lock_head *lhead = lck->lk_head;
741 + BUG_ON(dep >= lck->lk_depth);
742 + BUG_ON(lck->lk_mode == HTREE_LOCK_INVAL);
744 + htree_spin_lock(lhead, dep);
745 + htree_node_unlock_internal(lhead, lck, dep, event);
746 + htree_spin_unlock(lhead, dep);
748 +EXPORT_SYMBOL(htree_node_unlock);
750 +/* stop listening on child-lock level @dep */
752 +htree_node_stop_listen(struct htree_lock *lck, unsigned dep)
754 + struct htree_lock_node *ln = &lck->lk_nodes[dep];
755 + struct htree_lock_node *tmp;
757 + BUG_ON(htree_node_is_granted(lck, dep));
758 + BUG_ON(!list_empty(&ln->ln_blocked_list));
759 + BUG_ON(!list_empty(&ln->ln_granted_list));
761 + if (!htree_node_is_listening(lck, dep))
764 + htree_spin_lock(lck->lk_head, dep);
765 + ln->ln_mode = HTREE_LOCK_INVAL;
766 + ln->ln_ev_target = NULL;
768 + if (htree_key_list_empty(ln)) { /* not owner */
769 + list_del_init(&ln->ln_alive_list);
773 + /* I'm the owner... */
774 + if (list_empty(&ln->ln_alive_list)) { /* no more listener */
775 + htree_key_list_del_init(ln);
779 + tmp = list_entry(ln->ln_alive_list.next,
780 + struct htree_lock_node, ln_alive_list);
782 + BUG_ON(tmp->ln_mode != HTREE_LOCK_NL);
783 + htree_key_list_replace_init(ln, tmp);
784 + list_del_init(&ln->ln_alive_list);
786 + htree_spin_unlock(lck->lk_head, dep);
788 +EXPORT_SYMBOL(htree_node_stop_listen);
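+
+/*
+ * Editorial sketch of the event/listen pattern implemented above; everything
+ * except the htree_lock API names is illustrative:
+ *
+ *	(setup, once per lock head)
+ *	htree_lock_event_attach(lhead, dep, HTREE_EVENT_WR, my_callback);
+ *
+ *	(thread A: release the key but keep listening on it)
+ *	htree_node_unlock(lck_a, dep, &my_cookie);
+ *
+ *	(thread B: when a PW lock on the same key is granted, A's callback
+ *	 is invoked as my_callback(&my_cookie, ev))
+ *	htree_node_lock_try(lck_b, HTREE_LOCK_PW, key, dep, 1, ev);
+ *
+ *	(thread A: stop listening once it no longer cares)
+ *	htree_node_stop_listen(lck_a, dep);
+ */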
790 +/* release all child-locks if we have any */
792 +htree_node_release_all(struct htree_lock *lck)
796 + for (i = 0; i < lck->lk_depth; i++) {
797 + if (htree_node_is_granted(lck, i))
798 + htree_node_unlock(lck, i, NULL);
799 + else if (htree_node_is_listening(lck, i))
800 + htree_node_stop_listen(lck, i);
805 + * obtain the htree lock; it can block inside if there's a conflict
806 + * with any granted or blocked lock and @wait is true.
807 + * NB: ALWAYS called holding lhead::lh_lock
810 +htree_lock_internal(struct htree_lock *lck, int wait)
812 + struct htree_lock_head *lhead = lck->lk_head;
817 + for (i = 0; i < HTREE_LOCK_MAX; i++) {
818 + if (lhead->lh_ngranted[i] != 0)
820 + if (lhead->lh_nblocked[i] != 0)
823 + if ((htree_lock_compat[lck->lk_mode] & granted) != granted ||
824 + (htree_lock_compat[lck->lk_mode] & blocked) != blocked) {
825 +		/* will block the current lock even if it only conflicts with
826 +		 * another blocked lock, so locks like EX won't starve */
829 + lhead->lh_nblocked[lck->lk_mode]++;
830 + lk_block_inc(lck->lk_mode);
832 + lck->lk_task = current;
833 + list_add_tail(&lck->lk_blocked_list, &lhead->lh_blocked_list);
835 + set_current_state(TASK_UNINTERRUPTIBLE);
836 + htree_spin_unlock(lhead, HTREE_DEP_ROOT);
837 + /* wait to be given the lock */
838 + if (lck->lk_task != NULL)
840 + /* granted, no doubt. wake up will set me RUNNING */
841 + return 0; /* without lh_lock */
843 + lhead->lh_ngranted[lck->lk_mode]++;
844 + lk_grant_inc(lck->lk_mode);
848 +/* release htree lock. NB: ALWAYS called holding lhead::lh_lock */
850 +htree_unlock_internal(struct htree_lock *lck)
852 + struct htree_lock_head *lhead = lck->lk_head;
853 + struct htree_lock *tmp;
854 + struct htree_lock *tmp2;
858 + BUG_ON(lhead->lh_ngranted[lck->lk_mode] == 0);
860 + lhead->lh_ngranted[lck->lk_mode]--;
861 + lck->lk_mode = HTREE_LOCK_INVAL;
863 + for (i = 0; i < HTREE_LOCK_MAX; i++) {
864 + if (lhead->lh_ngranted[i] != 0)
867 + list_for_each_entry_safe(tmp, tmp2,
868 + &lhead->lh_blocked_list, lk_blocked_list) {
869 + /* conflict with any granted lock? */
870 + if ((htree_lock_compat[tmp->lk_mode] & granted) != granted)
873 + list_del_init(&tmp->lk_blocked_list);
875 + BUG_ON(lhead->lh_nblocked[tmp->lk_mode] == 0);
877 + lhead->lh_nblocked[tmp->lk_mode]--;
878 + lhead->lh_ngranted[tmp->lk_mode]++;
879 + granted |= 1 << tmp->lk_mode;
881 + BUG_ON(tmp->lk_task == NULL);
882 + wake_up_process(tmp->lk_task);
886 +/* it's a wrapper of htree_lock_internal and an exported interface.
887 + * It always returns 1 with the lock granted if @wait is true; it can return 0
888 + * if @wait is false and the locking request can't be granted immediately */
890 +htree_lock_try(struct htree_lock *lck, struct htree_lock_head *lhead,
891 + htree_lock_mode_t mode, int wait)
895 + BUG_ON(lck->lk_depth > lhead->lh_depth);
896 + BUG_ON(lck->lk_head != NULL);
897 + BUG_ON(lck->lk_task != NULL);
899 + lck->lk_head = lhead;
900 + lck->lk_mode = mode;
902 + htree_spin_lock(lhead, HTREE_DEP_ROOT);
903 + rc = htree_lock_internal(lck, wait);
905 + htree_spin_unlock(lhead, HTREE_DEP_ROOT);
908 +EXPORT_SYMBOL(htree_lock_try);
910 +/* it's a wrapper of htree_unlock_internal and an exported interface.
911 + * It will release all htree_node_locks and the htree_lock */
913 +htree_unlock(struct htree_lock *lck)
915 + BUG_ON(lck->lk_head == NULL);
916 + BUG_ON(lck->lk_mode == HTREE_LOCK_INVAL);
918 + htree_node_release_all(lck);
920 + htree_spin_lock(lck->lk_head, HTREE_DEP_ROOT);
921 + htree_unlock_internal(lck);
922 + htree_spin_unlock(lck->lk_head, HTREE_DEP_ROOT);
923 + lck->lk_head = NULL;
924 + lck->lk_task = NULL;
926 +EXPORT_SYMBOL(htree_unlock);
928 +/* change lock mode */
930 +htree_change_mode(struct htree_lock *lck, htree_lock_mode_t mode)
932 + BUG_ON(lck->lk_mode == HTREE_LOCK_INVAL);
933 + lck->lk_mode = mode;
935 +EXPORT_SYMBOL(htree_change_mode);
937 +/* release the htree lock, and lock it again with a new mode.
938 + * This function will first release all htree_node_locks and the htree_lock,
939 + * then try to re-acquire the htree_lock with the new @mode.
940 + * It always returns 1 with the lock granted if @wait is true; it can return 0
941 + * if @wait is false and the locking request can't be granted immediately */
943 +htree_change_lock_try(struct htree_lock *lck, htree_lock_mode_t mode, int wait)
945 + struct htree_lock_head *lhead = lck->lk_head;
948 + BUG_ON(lhead == NULL);
949 + BUG_ON(lck->lk_mode == mode);
950 + BUG_ON(lck->lk_mode == HTREE_LOCK_INVAL || mode == HTREE_LOCK_INVAL);
952 + htree_node_release_all(lck);
954 + htree_spin_lock(lhead, HTREE_DEP_ROOT);
955 + htree_unlock_internal(lck);
956 + lck->lk_mode = mode;
957 + rc = htree_lock_internal(lck, wait);
959 + htree_spin_unlock(lhead, HTREE_DEP_ROOT);
962 +EXPORT_SYMBOL(htree_change_lock_try);
964 +/* create an htree_lock head with @depth levels (number of child-locks);
965 + * it is a per-resource structure */
966 +struct htree_lock_head *
967 +htree_lock_head_alloc(unsigned depth, unsigned hbits, unsigned priv)
969 + struct htree_lock_head *lhead;
972 + if (depth > HTREE_LOCK_DEP_MAX) {
973 + printk(KERN_ERR "%d is larger than max htree_lock depth %d\n",
974 + depth, HTREE_LOCK_DEP_MAX);
978 + lhead = kzalloc(offsetof(struct htree_lock_head,
979 + lh_children[depth]) + priv, GFP_NOFS);
983 + if (hbits < HTREE_HBITS_MIN)
984 + lhead->lh_hbits = HTREE_HBITS_MIN;
985 + else if (hbits > HTREE_HBITS_MAX)
986 + lhead->lh_hbits = HTREE_HBITS_MAX;
988 + lhead->lh_lock = 0;
989 + lhead->lh_depth = depth;
990 + INIT_LIST_HEAD(&lhead->lh_blocked_list);
992 + lhead->lh_private = (void *)lhead +
993 + offsetof(struct htree_lock_head, lh_children[depth]);
996 + for (i = 0; i < depth; i++) {
997 + INIT_LIST_HEAD(&lhead->lh_children[i].lc_list);
998 + lhead->lh_children[i].lc_events = HTREE_EVENT_DISABLE;
1002 +EXPORT_SYMBOL(htree_lock_head_alloc);
1004 +/* free the htree_lock head */
1006 +htree_lock_head_free(struct htree_lock_head *lhead)
1010 + BUG_ON(!list_empty(&lhead->lh_blocked_list));
1011 + for (i = 0; i < lhead->lh_depth; i++)
1012 + BUG_ON(!list_empty(&lhead->lh_children[i].lc_list));
1015 +EXPORT_SYMBOL(htree_lock_head_free);
1017 +/* register event callback for @events of child-lock at level @dep */
1019 +htree_lock_event_attach(struct htree_lock_head *lhead, unsigned dep,
1020 + unsigned events, htree_event_cb_t callback)
1022 + BUG_ON(lhead->lh_depth <= dep);
1023 + lhead->lh_children[dep].lc_events = events;
1024 + lhead->lh_children[dep].lc_callback = callback;
1026 +EXPORT_SYMBOL(htree_lock_event_attach);
1028 +/* allocate an htree_lock, which is a per-thread structure; @pbytes is extra
1029 + * bytes reserved as private data for the caller */
1030 +struct htree_lock *
1031 +htree_lock_alloc(unsigned depth, unsigned pbytes)
1033 + struct htree_lock *lck;
1034 + int i = offsetof(struct htree_lock, lk_nodes[depth]);
1036 + if (depth > HTREE_LOCK_DEP_MAX) {
1037 + printk(KERN_ERR "%d is larger than max htree_lock depth %d\n",
1038 + depth, HTREE_LOCK_DEP_MAX);
1041 + lck = kzalloc(i + pbytes, GFP_NOFS);
1046 + lck->lk_private = (void *)lck + i;
1047 + lck->lk_mode = HTREE_LOCK_INVAL;
1048 + lck->lk_depth = depth;
1049 + INIT_LIST_HEAD(&lck->lk_blocked_list);
1051 + for (i = 0; i < depth; i++) {
1052 + struct htree_lock_node *node = &lck->lk_nodes[i];
1054 + node->ln_mode = HTREE_LOCK_INVAL;
1055 + INIT_LIST_HEAD(&node->ln_major_list);
1056 + INIT_LIST_HEAD(&node->ln_minor_list);
1057 + INIT_LIST_HEAD(&node->ln_alive_list);
1058 + INIT_LIST_HEAD(&node->ln_blocked_list);
1059 + INIT_LIST_HEAD(&node->ln_granted_list);
1064 +EXPORT_SYMBOL(htree_lock_alloc);
1066 +/* free the htree_lock handle */
1068 +htree_lock_free(struct htree_lock *lck)
1070 + BUG_ON(lck->lk_mode != HTREE_LOCK_INVAL);
1073 +EXPORT_SYMBOL(htree_lock_free);
1074 --- linux-2.6.32-131.6.1/fs/ext4/ext4.h 2011-10-06 20:10:49.000000000 +0800
1075 +++ linux-2.6.32-131.6.1-pdo/fs/ext4/ext4.h 2011-12-08 18:25:00.000000000 +0800
1077 #include <linux/mutex.h>
1078 #include <linux/timer.h>
1079 #include <linux/wait.h>
1080 +#include <linux/htree_lock.h>
1081 #include <linux/blockgroup_lock.h>
1082 #include <linux/percpu_counter.h>
1084 @@ -1277,6 +1278,7 @@ EXT4_INODE_BIT_FNS(state, state_flags)
1085 #define EXT4_FEATURE_INCOMPAT_MMP 0x0100
1086 #define EXT4_FEATURE_INCOMPAT_FLEX_BG 0x0200
1087 #define EXT4_FEATURE_INCOMPAT_DIRDATA 0x1000
1088 +#define EXT4_FEATURE_INCOMPAT_LARGEDIR 0x4000
1090 #define EXT4_FEATURE_COMPAT_SUPP EXT2_FEATURE_COMPAT_EXT_ATTR
1091 #define EXT4_FEATURE_INCOMPAT_SUPP (EXT4_FEATURE_INCOMPAT_FILETYPE| \
1092 @@ -1286,7 +1288,8 @@ EXT4_INODE_BIT_FNS(state, state_flags)
1093 EXT4_FEATURE_INCOMPAT_64BIT| \
1094 EXT4_FEATURE_INCOMPAT_FLEX_BG| \
1095 EXT4_FEATURE_INCOMPAT_MMP| \
1096 - EXT4_FEATURE_INCOMPAT_DIRDATA)
1097 + EXT4_FEATURE_INCOMPAT_DIRDATA| \
1098 + EXT4_FEATURE_INCOMPAT_LARGEDIR)
1100 #define EXT4_FEATURE_RO_COMPAT_SUPP (EXT4_FEATURE_RO_COMPAT_SPARSE_SUPER| \
1101 EXT4_FEATURE_RO_COMPAT_LARGE_FILE| \
1102 @@ -1536,6 +1539,76 @@ ext4_group_first_block_no(struct super_b
1104 #define ERR_BAD_DX_DIR -75000
1106 +/* htree levels for ext4 */
1107 +#define EXT4_HTREE_LEVEL_COMPAT 2
1108 +#define EXT4_HTREE_LEVEL 3
1111 +ext4_dir_htree_level(struct super_block *sb)
1113 + return EXT4_HAS_INCOMPAT_FEATURE(sb, EXT4_FEATURE_INCOMPAT_LARGEDIR) ?
1114 + EXT4_HTREE_LEVEL : EXT4_HTREE_LEVEL_COMPAT;
1117 +/* assume name-hash is protected by upper layer */
1118 +#define EXT4_HTREE_LOCK_HASH 0
1120 +enum ext4_pdo_lk_types {
1121 +#if EXT4_HTREE_LOCK_HASH
1124 + EXT4_LK_DX, /* index block */
1125 + EXT4_LK_DE, /* directory entry block */
1126 + EXT4_LK_SPIN, /* spinlock */
1130 +/* read-only bit */
1131 +#define EXT4_LB_RO(b) (1 << (b))
1132 +/* read + write, high bits for writer */
1133 +#define EXT4_LB_RW(b) ((1 << (b)) | (1 << (EXT4_LK_MAX + (b))))
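+
+/*
+ * Editorial note (assuming EXT4_HTREE_LOCK_HASH stays 0 and EXT4_LK_MAX
+ * directly follows EXT4_LK_SPIN): EXT4_LK_DX == 0, EXT4_LK_DE == 1,
+ * EXT4_LK_SPIN == 2, EXT4_LK_MAX == 3, so e.g. EXT4_LB_DE_RO == 0x02
+ * (reader bit only) and EXT4_LB_DE == 0x12 (reader bit plus writer bit
+ * 1 << (EXT4_LK_MAX + EXT4_LK_DE)).  The lowest set bit of a lock-bit mask
+ * is the htree child-lock level, which the ffz(~lmask) calls in namei.c
+ * recover.
+ */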
1135 +enum ext4_pdo_lock_bits {
1136 + /* DX lock bits */
1137 + EXT4_LB_DX_RO = EXT4_LB_RO(EXT4_LK_DX),
1138 + EXT4_LB_DX = EXT4_LB_RW(EXT4_LK_DX),
1139 + /* DE lock bits */
1140 + EXT4_LB_DE_RO = EXT4_LB_RO(EXT4_LK_DE),
1141 + EXT4_LB_DE = EXT4_LB_RW(EXT4_LK_DE),
1142 + /* DX spinlock bits */
1143 + EXT4_LB_SPIN_RO = EXT4_LB_RO(EXT4_LK_SPIN),
1144 + EXT4_LB_SPIN = EXT4_LB_RW(EXT4_LK_SPIN),
1145 + /* accurate searching */
1146 + EXT4_LB_EXACT = EXT4_LB_RO(EXT4_LK_MAX << 1),
1149 +enum ext4_pdo_lock_opc {
1151 + EXT4_HLOCK_READDIR = (EXT4_LB_DE_RO | EXT4_LB_DX_RO),
1152 + EXT4_HLOCK_LOOKUP = (EXT4_LB_DE_RO | EXT4_LB_SPIN_RO |
1154 + EXT4_HLOCK_DEL = (EXT4_LB_DE | EXT4_LB_SPIN_RO |
1156 + EXT4_HLOCK_ADD = (EXT4_LB_DE | EXT4_LB_SPIN_RO),
1159 + EXT4_HLOCK_LOOKUP_SAFE = (EXT4_LB_DE_RO | EXT4_LB_DX_RO |
1161 + EXT4_HLOCK_DEL_SAFE = (EXT4_LB_DE | EXT4_LB_DX_RO | EXT4_LB_EXACT),
1162 + EXT4_HLOCK_SPLIT = (EXT4_LB_DE | EXT4_LB_DX | EXT4_LB_SPIN),
1165 +extern struct htree_lock_head *ext4_htree_lock_head_alloc(unsigned hbits);
1166 +#define ext4_htree_lock_head_free(lhead) htree_lock_head_free(lhead)
1168 +extern struct htree_lock *ext4_htree_lock_alloc(void);
1169 +#define ext4_htree_lock_free(lck) htree_lock_free(lck)
1171 +extern void ext4_htree_lock(struct htree_lock *lck,
1172 + struct htree_lock_head *lhead,
1173 + struct inode *dir, unsigned flags);
1174 +#define ext4_htree_unlock(lck) htree_unlock(lck)
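+
+/*
+ * Editorial sketch of how a caller is expected to drive the pdirop API;
+ * where the per-directory lock head lives is up to the caller and is only
+ * illustrated here:
+ *
+ *	struct htree_lock_head *lhead;	(one per directory inode)
+ *	struct htree_lock *lck;		(one per thread/operation)
+ *
+ *	lhead = ext4_htree_lock_head_alloc(HTREE_HBITS_DEF);
+ *	lck = ext4_htree_lock_alloc();
+ *
+ *	ext4_htree_lock(lck, lhead, dir, EXT4_HLOCK_LOOKUP);
+ *	bh = ext4_find_entry(dir, &dentry->d_name, &de, lck);
+ *	ext4_htree_unlock(lck);
+ *
+ *	ext4_htree_lock_free(lck);
+ */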
1176 void ext4_get_group_no_and_offset(struct super_block *sb, ext4_fsblk_t blocknr,
1177 ext4_group_t *blockgrpp, ext4_grpblk_t *offsetp);
1179 @@ -1769,14 +1842,16 @@ extern int ext4_htree_fill_tree(struct f
1180 extern struct inode *ext4_create_inode(handle_t *handle,
1181 struct inode * dir, int mode);
1182 extern int ext4_add_entry(handle_t *handle, struct dentry *dentry,
1183 - struct inode *inode);
1184 + struct inode *inode, struct htree_lock *lck);
1185 extern int ext4_delete_entry(handle_t *handle, struct inode * dir,
1186 struct ext4_dir_entry_2 * de_del,
1187 struct buffer_head * bh);
1188 extern struct buffer_head * ext4_find_entry(struct inode *dir,
1189 const struct qstr *d_name,
1190 - struct ext4_dir_entry_2 ** res_dir);
1191 -#define ll_ext4_find_entry(inode, dentry, res_dir) ext4_find_entry(inode, &(dentry)->d_name, res_dir)
1192 + struct ext4_dir_entry_2 **res_dir,
1193 + struct htree_lock *lck);
1194 +#define ll_ext4_find_entry(inode, dentry, res_dir, lck) \
1195 + ext4_find_entry(inode, &(dentry)->d_name, res_dir, lck)
1196 extern int ext4_add_dot_dotdot(handle_t *handle, struct inode *dir,
1197 struct inode *inode, const void *, const void *);
1198 extern struct buffer_head *ext4_append(handle_t *handle,
1199 @@ -1893,13 +1968,15 @@ static inline void ext4_r_blocks_count_s
1200 es->s_r_blocks_count_hi = cpu_to_le32(blk >> 32);
1203 -static inline loff_t ext4_isize(struct ext4_inode *raw_inode)
1204 +static inline loff_t ext4_isize(struct super_block *sb,
1205 + struct ext4_inode *raw_inode)
1207 - if (S_ISREG(le16_to_cpu(raw_inode->i_mode)))
1208 + if (EXT4_HAS_INCOMPAT_FEATURE(sb, EXT4_FEATURE_INCOMPAT_LARGEDIR) ||
1209 + S_ISREG(le16_to_cpu(raw_inode->i_mode)))
1210 return ((loff_t)le32_to_cpu(raw_inode->i_size_high) << 32) |
1211 le32_to_cpu(raw_inode->i_size_lo);
1213 - return (loff_t) le32_to_cpu(raw_inode->i_size_lo);
1215 + return (loff_t) le32_to_cpu(raw_inode->i_size_lo);
1218 static inline void ext4_isize_set(struct ext4_inode *raw_inode, loff_t i_size)
1219 --- linux-2.6.32-131.6.1/fs/ext4/namei.c 2011-10-06 20:10:49.000000000 +0800
1220 +++ linux-2.6.32-131.6.1-pdo/fs/ext4/namei.c 2011-12-08 15:40:39.000000000 +0800
1221 @@ -176,7 +176,7 @@ static struct dx_frame *dx_probe(const s
1223 struct dx_hash_info *hinfo,
1224 struct dx_frame *frame,
1226 + struct htree_lock *lck, int *err);
1227 static void dx_release(struct dx_frame *frames);
1228 static int dx_make_map(struct ext4_dir_entry_2 *de, unsigned blocksize,
1229 struct dx_hash_info *hinfo, struct dx_map_entry map[]);
1230 @@ -189,13 +189,13 @@ static void dx_insert_block(struct dx_fr
1231 static int ext4_htree_next_block(struct inode *dir, __u32 hash,
1232 struct dx_frame *frame,
1233 struct dx_frame *frames,
1234 - __u32 *start_hash);
1235 + __u32 *start_hash, struct htree_lock *lck);
1236 static struct buffer_head * ext4_dx_find_entry(struct inode *dir,
1237 const struct qstr *d_name,
1238 struct ext4_dir_entry_2 **res_dir,
1240 + struct htree_lock *lck, int *err);
1241 static int ext4_dx_add_entry(handle_t *handle, struct dentry *dentry,
1242 - struct inode *inode);
1243 + struct inode *inode, struct htree_lock *lck);
1246 * p is at least 6 bytes before the end of page
1247 @@ -225,7 +225,7 @@ struct dx_root_info * dx_get_dx_info(str
1249 static inline ext4_lblk_t dx_get_block(struct dx_entry *entry)
1251 - return le32_to_cpu(entry->block) & 0x00ffffff;
1252 + return le32_to_cpu(entry->block) & 0x0fffffff;
1255 static inline void dx_set_block(struct dx_entry *entry, ext4_lblk_t value)
1256 @@ -298,7 +298,7 @@ static void dx_show_index(char * label,
1265 @@ -368,6 +368,223 @@ struct stats dx_show_entries(struct dx_h
1267 #endif /* DX_DEBUG */
1269 +/* private data for htree_lock */
1270 +struct ext4_dir_lock_data {
1271 + unsigned ld_flags; /* bits-map for lock types */
1272 + unsigned ld_count; /* # entries of the last DX block */
1273 + struct dx_entry ld_at_entry; /* copy of leaf dx_entry */
1274 + struct dx_entry *ld_at; /* position of leaf dx_entry */
1277 +#define ext4_htree_lock_data(l) ((struct ext4_dir_lock_data *)(l)->lk_private)
1279 +/* NB: ext4_lblk_t is 32 bits so we use high bits to identify invalid blk */
1280 +#define EXT4_HTREE_NODE_CHANGED (0xcafeULL << 32)
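+
+/*
+ * Editorial note: dx_probe() below keeps the DE block number in a u64 on its
+ * stack and registers its address as the listener target while it waits for
+ * the DE-block lock; if that block is split in the meantime, the splitter
+ * passes the corresponding dx_entry as the event and ext4_htree_event_cb()
+ * overwrites the value with EXT4_HTREE_NODE_CHANGED, so dx_probe() knows it
+ * must retry.
+ */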
1282 +static void ext4_htree_event_cb(void *target, void *event)
1284 + u64 *block = (u64 *)target;
1286 + if (*block == dx_get_block((struct dx_entry *)event))
1287 + *block = EXT4_HTREE_NODE_CHANGED;
1290 +struct htree_lock_head *ext4_htree_lock_head_alloc(unsigned hbits)
1292 + struct htree_lock_head *lhead;
1294 + lhead = htree_lock_head_alloc(EXT4_LK_MAX, hbits, 0);
1295 + if (lhead != NULL) {
1296 + htree_lock_event_attach(lhead, EXT4_LK_SPIN, HTREE_EVENT_WR,
1297 + ext4_htree_event_cb);
1301 +EXPORT_SYMBOL(ext4_htree_lock_head_alloc);
1303 +struct htree_lock *ext4_htree_lock_alloc(void)
1305 + return htree_lock_alloc(EXT4_LK_MAX,
1306 + sizeof(struct ext4_dir_lock_data));
1308 +EXPORT_SYMBOL(ext4_htree_lock_alloc);
1310 +static htree_lock_mode_t ext4_htree_mode(unsigned flags)
1313 + default: /* 0 or unknown flags require EX lock */
1314 + return HTREE_LOCK_EX;
1315 + case EXT4_HLOCK_READDIR:
1316 + return HTREE_LOCK_PR;
1317 + case EXT4_HLOCK_LOOKUP:
1318 + return HTREE_LOCK_CR;
1319 + case EXT4_HLOCK_DEL:
1320 + case EXT4_HLOCK_ADD:
1321 + return HTREE_LOCK_CW;
1325 +/* return PR for read-only operations, otherwise return EX */
1326 +static inline htree_lock_mode_t ext4_htree_safe_mode(unsigned flags)
1328 + int writer = (flags & EXT4_LB_DE) == EXT4_LB_DE;
1330 + /* 0 requires EX lock */
1331 + return (flags == 0 || writer) ? HTREE_LOCK_EX : HTREE_LOCK_PR;
1334 +static int ext4_htree_safe_locked(struct htree_lock *lck)
1338 + if (lck == NULL || lck->lk_mode == HTREE_LOCK_EX)
1341 + writer = (ext4_htree_lock_data(lck)->ld_flags & EXT4_LB_DE) ==
1343 + if (writer) /* all readers & writers are excluded? */
1344 + return lck->lk_mode == HTREE_LOCK_EX;
1346 + /* all writers are excluded? */
1347 + return lck->lk_mode == HTREE_LOCK_PR ||
1348 + lck->lk_mode == HTREE_LOCK_PW ||
1349 + lck->lk_mode == HTREE_LOCK_EX;
1352 +/* relock htree_lock with EX mode if it's a change operation, otherwise
1353 + * relock it with PR mode. It's a noop if PDO is disabled. */
1354 +static void ext4_htree_safe_relock(struct htree_lock *lck)
1356 + if (!ext4_htree_safe_locked(lck)) {
1357 + unsigned flags = ext4_htree_lock_data(lck)->ld_flags;
1359 + htree_change_lock(lck, ext4_htree_safe_mode(flags));
1363 +void ext4_htree_lock(struct htree_lock *lck, struct htree_lock_head *lhead,
1364 + struct inode *dir, unsigned flags)
1366 + htree_lock_mode_t mode = is_dx(dir) ? ext4_htree_mode(flags) :
1367 + ext4_htree_safe_mode(flags);
1369 + ext4_htree_lock_data(lck)->ld_flags = flags;
1370 + htree_lock(lck, lhead, mode);
1372 + ext4_htree_safe_relock(lck); /* make sure it's safe locked */
1374 +EXPORT_SYMBOL(ext4_htree_lock);
1376 +static int ext4_htree_node_lock(struct htree_lock *lck, struct dx_entry *at,
1377 + unsigned lmask, int wait, void *ev)
1379 + u32 key = (at == NULL) ? 0 : dx_get_block(at);
1382 + /* NOOP if htree is well protected or caller doesn't require the lock */
1383 + if (ext4_htree_safe_locked(lck) ||
1384 + !(ext4_htree_lock_data(lck)->ld_flags & lmask))
1387 + mode = (ext4_htree_lock_data(lck)->ld_flags & lmask) == lmask ?
1388 + HTREE_LOCK_PW : HTREE_LOCK_PR;
1390 + if (htree_node_lock_try(lck, mode, key, ffz(~lmask), wait, ev))
1392 + if (!(lmask & EXT4_LB_SPIN)) /* not a spinlock */
1394 + cpu_relax(); /* spin until granted */
1398 +static int ext4_htree_node_locked(struct htree_lock *lck, unsigned lmask)
1400 + return ext4_htree_safe_locked(lck) ||
1401 + htree_node_is_granted(lck, ffz(~lmask));
1404 +static void ext4_htree_node_unlock(struct htree_lock *lck,
1405 + unsigned lmask, void *buf)
1407 +	/* NB: it's safe to call this multiple times, even if it's not locked */
1408 + if (!ext4_htree_safe_locked(lck) &&
1409 + htree_node_is_granted(lck, ffz(~lmask)))
1410 + htree_node_unlock(lck, ffz(~lmask), buf);
1413 +#define ext4_htree_dx_lock(lck, key) \
1414 + ext4_htree_node_lock(lck, key, EXT4_LB_DX, 1, NULL)
1415 +#define ext4_htree_dx_lock_try(lck, key) \
1416 + ext4_htree_node_lock(lck, key, EXT4_LB_DX, 0, NULL)
1417 +#define ext4_htree_dx_unlock(lck) \
1418 + ext4_htree_node_unlock(lck, EXT4_LB_DX, NULL)
1419 +#define ext4_htree_dx_locked(lck) \
1420 + ext4_htree_node_locked(lck, EXT4_LB_DX)
1422 +static void ext4_htree_dx_need_lock(struct htree_lock *lck)
1424 + struct ext4_dir_lock_data *ld;
1426 + if (ext4_htree_safe_locked(lck))
1429 + ld = ext4_htree_lock_data(lck);
1430 + switch (ld->ld_flags) {
1433 + case EXT4_HLOCK_LOOKUP:
1434 + ld->ld_flags = EXT4_HLOCK_LOOKUP_SAFE;
1436 + case EXT4_HLOCK_DEL:
1437 + ld->ld_flags = EXT4_HLOCK_DEL_SAFE;
1439 + case EXT4_HLOCK_ADD:
1440 + ld->ld_flags = EXT4_HLOCK_SPLIT;
1445 +#define ext4_htree_de_lock(lck, key) \
1446 + ext4_htree_node_lock(lck, key, EXT4_LB_DE, 1, NULL)
1447 +#define ext4_htree_de_unlock(lck) \
1448 + ext4_htree_node_unlock(lck, EXT4_LB_DE, NULL)
1450 +#define ext4_htree_spin_lock(lck, key, event) \
1451 + ext4_htree_node_lock(lck, key, EXT4_LB_SPIN, 0, event)
1452 +#define ext4_htree_spin_unlock(lck) \
1453 + ext4_htree_node_unlock(lck, EXT4_LB_SPIN, NULL)
1454 +#define ext4_htree_spin_unlock_listen(lck, p) \
1455 + ext4_htree_node_unlock(lck, EXT4_LB_SPIN, p)
1457 +static void ext4_htree_spin_stop_listen(struct htree_lock *lck)
1459 + if (!ext4_htree_safe_locked(lck) &&
1460 + htree_node_is_listening(lck, ffz(~EXT4_LB_SPIN)))
1461 + htree_node_stop_listen(lck, ffz(~EXT4_LB_SPIN));
1465 + DX_HASH_COL_IGNORE, /* ignore collision while probing frames */
1466 + DX_HASH_COL_YES, /* there is collision and it does matter */
1467 + DX_HASH_COL_NO, /* there is no collision */
1470 +static int dx_probe_hash_collision(struct htree_lock *lck,
1471 + struct dx_entry *entries,
1472 + struct dx_entry *at, u32 hash)
1474 + if (!(ext4_htree_lock_data(lck)->ld_flags & EXT4_LB_EXACT)) {
1475 + return DX_HASH_COL_IGNORE; /* don't care about collision */
1477 + } else if (at == entries + dx_get_count(entries) - 1) {
1478 + return DX_HASH_COL_IGNORE; /* not in any leaf of this DX */
1480 + } else { /* hash collision? */
1481 + return ((dx_get_hash(at + 1) & ~1) == hash) ?
1482 + DX_HASH_COL_YES : DX_HASH_COL_NO;
1487 * Probe for a directory leaf block to search.
1489 @@ -379,16 +596,17 @@ struct stats dx_show_entries(struct dx_h
1491 static struct dx_frame *
1492 dx_probe(const struct qstr *d_name, struct inode *dir,
1493 - struct dx_hash_info *hinfo, struct dx_frame *frame_in, int *err)
1494 + struct dx_hash_info *hinfo, struct dx_frame *frame_in,
1495 + struct htree_lock *lck, int *err)
1497 unsigned count, indirect;
1498 - struct dx_entry *at, *entries, *p, *q, *m;
1499 + struct dx_entry *at, *entries, *p, *q, *m, *dx = NULL;
1500 struct dx_root_info * info;
1501 struct buffer_head *bh;
1502 struct dx_frame *frame = frame_in;
1506 + memset(frame_in, 0, EXT4_HTREE_LEVEL * sizeof(frame_in[0]));
1507 if (!(bh = ext4_bread (NULL,dir, 0, 0, err)))
1510 @@ -418,9 +636,16 @@ dx_probe(const struct qstr *d_name, stru
1514 - if ((indirect = info->indirect_levels) > 1) {
1515 - ext4_warning(dir->i_sb, "Unimplemented inode hash depth: %#06x",
1516 - info->indirect_levels);
1517 + indirect = info->indirect_levels;
1518 + if (indirect >= ext4_dir_htree_level(dir->i_sb)) {
1519 + ext4_warning(dir->i_sb,
1520 +			     "Directory (ino: %lu) htree depth %#06x exceeds "
1521 + "supported value", dir->i_ino,
1522 + ext4_dir_htree_level(dir->i_sb));
1523 + if (ext4_dir_htree_level(dir->i_sb) < EXT4_HTREE_LEVEL) {
1524 + ext4_warning(dir->i_sb, "Enable large directory "
1525 + "feature to access it");
1528 *err = ERR_BAD_DX_DIR;
1530 @@ -440,8 +665,15 @@ dx_probe(const struct qstr *d_name, stru
1531 dxtrace(printk("Look up %x", hash));
1534 + if (indirect == 0) { /* the last index level */
1535 +		/* NB: ext4_htree_dx_lock() could be a noop if the
1536 +		 * DX-lock flag is not set for the current operation */
1537 + ext4_htree_dx_lock(lck, dx);
1538 + ext4_htree_spin_lock(lck, dx, NULL);
1540 count = dx_get_count(entries);
1541 - if (!count || count > dx_get_limit(entries)) {
1542 + if (count == 0 || count > dx_get_limit(entries)) {
1543 + ext4_htree_spin_unlock(lck); /* release spin */
1544 ext4_warning(dir->i_sb,
1545 "dx entry: no count or count > limit");
1547 @@ -482,9 +714,73 @@ dx_probe(const struct qstr *d_name, stru
1549 frame->entries = entries;
1551 - if (!indirect--) return frame;
1553 + if (indirect == 0) { /* the last index level */
1554 + struct ext4_dir_lock_data *ld;
1557 +		/* By default we only lock the DE-block; however, we will
1558 +		 * also lock the last level DX-block if:
1559 +		 * a) there is a hash collision
1560 +		 *    we will set the DX-lock flag (a few lines below)
1561 +		 *    and retry to lock the DX-block,
1562 +		 *    see details in dx_probe_hash_collision()
1563 +		 * b) it's a retry after splitting
1564 +		 *    we need to lock the last level DX-block so nobody
1565 +		 *    else can split any leaf blocks under the same
1566 +		 *    DX-block, see details in ext4_dx_add_entry()
1568 + if (ext4_htree_dx_locked(lck)) {
1569 + /* DX-block is locked, just lock DE-block
1571 + ext4_htree_spin_unlock(lck);
1572 + if (!ext4_htree_safe_locked(lck))
1573 + ext4_htree_de_lock(lck, frame->at);
1576 + /* it's pdirop and no DX lock */
1577 + if (dx_probe_hash_collision(lck, entries, at, hash) ==
1578 + DX_HASH_COL_YES) {
1579 +			/* found a hash collision, set the DX-lock flag
1580 +			 * and retry to obtain the DX-lock */
1581 + ext4_htree_spin_unlock(lck);
1582 + ext4_htree_dx_need_lock(lck);
1585 + ld = ext4_htree_lock_data(lck);
1586 +		/* because I don't hold the DX lock, @at can't be trusted
1587 +		 * after I release the spinlock, so I have to save it */
1589 + ld->ld_at_entry = *at;
1590 + ld->ld_count = dx_get_count(entries);
1592 + frame->at = &ld->ld_at_entry;
1593 + myblock = dx_get_block(at);
1595 +		/* NB: lock ordering matters here */
1596 + ext4_htree_spin_unlock_listen(lck, &myblock);
1597 +		/* another thread can split this DE-block because:
1598 +		 * a) I don't have the lock for the DE-block yet
1599 +		 * b) I released the spinlock on the DX-block
1600 +		 * if that happens I can detect it by listening for the
1601 +		 * splitting event on this DE-block */
1602 + ext4_htree_de_lock(lck, frame->at);
1603 + ext4_htree_spin_stop_listen(lck);
1605 + if (myblock == EXT4_HTREE_NODE_CHANGED) {
1606 + /* someone split this DE-block before
1607 + * I locked it, I need to retry and lock
1608 + * valid DE-block */
1609 + ext4_htree_de_unlock(lck);
1616 if (!(bh = ext4_bread (NULL,dir, dx_get_block(at), 0, err)))
1619 at = entries = ((struct dx_node *) bh->b_data)->entries;
1620 if (dx_get_limit(entries) != dx_node_limit (dir)) {
1621 ext4_warning(dir->i_sb,
1622 @@ -512,13 +808,18 @@ fail:
1623 static void dx_release (struct dx_frame *frames)
1625 struct dx_root_info *info;
1628 if (frames[0].bh == NULL)
1631 info = dx_get_dx_info((struct ext4_dir_entry_2*)frames[0].bh->b_data);
1632 - if (info->indirect_levels)
1633 - brelse(frames[1].bh);
1634 - brelse(frames[0].bh);
1635 + for (i = 0; i <= info->indirect_levels; i++) {
1636 + if (frames[i].bh == NULL)
1638 + brelse(frames[i].bh);
1639 + frames[i].bh = NULL;
1644 @@ -541,7 +842,7 @@ static void dx_release (struct dx_frame
1645 static int ext4_htree_next_block(struct inode *dir, __u32 hash,
1646 struct dx_frame *frame,
1647 struct dx_frame *frames,
1648 - __u32 *start_hash)
1649 + __u32 *start_hash, struct htree_lock *lck)
1652 struct buffer_head *bh;
1653 @@ -556,12 +857,22 @@ static int ext4_htree_next_block(struct
1654 * this loop, num_frames indicates the number of interior
1655 * nodes need to be read.
1657 + ext4_htree_de_unlock(lck);
1659 - if (++(p->at) < p->entries + dx_get_count(p->entries))
1661 + if (num_frames > 0 || ext4_htree_dx_locked(lck)) {
1662 + /* num_frames > 0 :
1664 + * ext4_htree_dx_locked:
1665 +			 * frame->at is a reliable pointer returned by dx_probe,
1666 +			 * otherwise dx_probe already knew there was no collision */
1667 + if (++(p->at) < p->entries + dx_get_count(p->entries))
1673 + if (num_frames == 1)
1674 + ext4_htree_dx_unlock(lck);
1678 @@ -584,6 +895,13 @@ static int ext4_htree_next_block(struct
1679 * block so no check is necessary
1681 while (num_frames--) {
1682 + if (num_frames == 0) {
1683 + /* it's not always necessary, we just don't want to
1684 + * detect hash collision again */
1685 + ext4_htree_dx_need_lock(lck);
1686 + ext4_htree_dx_lock(lck, p->at);
1689 if (!(bh = ext4_bread(NULL, dir, dx_get_block(p->at),
1691 return err; /* Failure */
1692 @@ -592,6 +910,7 @@ static int ext4_htree_next_block(struct
1694 p->at = p->entries = ((struct dx_node *) bh->b_data)->entries;
1696 + ext4_htree_de_lock(lck, p->at);
1700 @@ -661,7 +980,7 @@ int ext4_htree_fill_tree(struct file *di
1702 struct dx_hash_info hinfo;
1703 struct ext4_dir_entry_2 *de;
1704 - struct dx_frame frames[2], *frame;
1705 + struct dx_frame frames[EXT4_HTREE_LEVEL], *frame;
1709 @@ -684,10 +1003,10 @@ int ext4_htree_fill_tree(struct file *di
1711 hinfo.hash = start_hash;
1712 hinfo.minor_hash = 0;
1713 - frame = dx_probe(NULL, dir, &hinfo, frames, &err);
1714 + /* assume it's PR locked */
1715 + frame = dx_probe(NULL, dir, &hinfo, frames, NULL, &err);
1719 /* Add '.' and '..' from the htree header */
1720 if (!start_hash && !start_minor_hash) {
1721 de = (struct ext4_dir_entry_2 *) frames[0].bh->b_data;
1722 @@ -714,7 +1033,7 @@ int ext4_htree_fill_tree(struct file *di
1725 ret = ext4_htree_next_block(dir, HASH_NB_ALWAYS,
1726 - frame, frames, &hashval);
1727 + frame, frames, &hashval, NULL);
1728 *next_hash = hashval;
1731 @@ -814,9 +1133,17 @@ static void dx_insert_block(struct dx_fr
1733 static void ext4_update_dx_flag(struct inode *inode)
1735 + /* Disable it for ldiskfs, because going from a DX directory to
1736 + * a non-DX directory while it is in use will completely break
1737 + * the htree-locking.
1738 + * If we really want to support this operation in the future,
1739 +	 * we need to exclusively lock the directory here, which will
1740 +	 * increase the complexity of the code */
1742 if (!EXT4_HAS_COMPAT_FEATURE(inode->i_sb,
1743 EXT4_FEATURE_COMPAT_DIR_INDEX))
1744 ext4_clear_inode_flag(inode, EXT4_INODE_INDEX);
1749 @@ -889,8 +1216,9 @@ static inline int search_dirblock(struct
1750 * to brelse() it when appropriate.
1752 struct buffer_head * ext4_find_entry(struct inode *dir,
1753 - const struct qstr *d_name,
1754 - struct ext4_dir_entry_2 ** res_dir)
1755 + const struct qstr *d_name,
1756 + struct ext4_dir_entry_2 **res_dir,
1757 + struct htree_lock *lck)
1759 struct super_block *sb;
1760 struct buffer_head *bh_use[NAMEI_RA_SIZE];
1761 @@ -911,7 +1239,7 @@ struct buffer_head * ext4_find_entry(str
1762 if (namelen > EXT4_NAME_LEN)
1765 - bh = ext4_dx_find_entry(dir, d_name, res_dir, &err);
1766 + bh = ext4_dx_find_entry(dir, d_name, res_dir, lck, &err);
1768 * On success, or if the error was file not found,
1769 * return. Otherwise, fall back to doing a search the
1770 @@ -921,6 +1249,7 @@ struct buffer_head * ext4_find_entry(str
1772 dxtrace(printk(KERN_DEBUG "ext4_find_entry: dx failed, "
1774 + ext4_htree_safe_relock(lck);
1776 nblocks = dir->i_size >> EXT4_BLOCK_SIZE_BITS(sb);
1777 start = EXT4_I(dir)->i_dir_start_lookup;
1778 @@ -998,13 +1327,15 @@ cleanup_and_exit:
1780 EXPORT_SYMBOL(ext4_find_entry);
1782 -static struct buffer_head * ext4_dx_find_entry(struct inode *dir, const struct qstr *d_name,
1783 - struct ext4_dir_entry_2 **res_dir, int *err)
1784 +static struct buffer_head * ext4_dx_find_entry(struct inode *dir,
1785 + const struct qstr *d_name,
1786 + struct ext4_dir_entry_2 **res_dir,
1787 + struct htree_lock *lck, int *err)
1789 struct super_block * sb;
1790 struct dx_hash_info hinfo;
1792 - struct dx_frame frames[2], *frame;
1793 + struct dx_frame frames[EXT4_HTREE_LEVEL], *frame;
1794 struct ext4_dir_entry_2 *de, *top;
1795 struct buffer_head *bh;
1797 @@ -1015,13 +1346,16 @@ static struct buffer_head * ext4_dx_find
1799 /* NFS may look up ".." - look at dx_root directory block */
1800 if (namelen > 2 || name[0] != '.'||(name[1] != '.' && name[1] != '\0')){
1801 - if (!(frame = dx_probe(d_name, dir, &hinfo, frames, err)))
1802 + if (!(frame = dx_probe(d_name, dir, &hinfo, frames, lck, err)))
1806 frame->bh = NULL; /* for dx_release() */
1807 frame->at = (struct dx_entry *)frames; /* hack for zero entry*/
1808 dx_set_block(frame->at, 0); /* dx_root block is 0 */
1809 + /* "." and ".." are stored in root DX lock */
1810 + ext4_htree_dx_need_lock(lck);
1811 + ext4_htree_dx_lock(lck, NULL);
1815 @@ -1050,7 +1384,7 @@ static struct buffer_head * ext4_dx_find
1817 /* Check to see if we should continue to search */
1818 retval = ext4_htree_next_block(dir, hash, frame,
1820 + frames, NULL, lck);
1823 "error reading index page in directory #%lu",
1824 @@ -1076,7 +1410,7 @@ static struct dentry *ext4_lookup(struct
1825 if (dentry->d_name.len > EXT4_NAME_LEN)
1826 return ERR_PTR(-ENAMETOOLONG);
1828 - bh = ext4_find_entry(dir, &dentry->d_name, &de);
1829 + bh = ext4_find_entry(dir, &dentry->d_name, &de, NULL);
1832 __u32 ino = le32_to_cpu(de->inode);
1833 @@ -1144,7 +1478,7 @@ struct dentry *ext4_get_parent(struct de
1834 struct ext4_dir_entry_2 * de;
1835 struct buffer_head *bh;
1837 - bh = ext4_find_entry(child->d_inode, &dotdot, &de);
1838 + bh = ext4_find_entry(child->d_inode, &dotdot, &de, NULL);
1841 return ERR_PTR(-ENOENT);
1842 @@ -1233,8 +1567,9 @@ static struct ext4_dir_entry_2* dx_pack_
1843 * Returns pointer to de in block into which the new entry will be inserted.
1845 static struct ext4_dir_entry_2 *do_split(handle_t *handle, struct inode *dir,
1846 - struct buffer_head **bh,struct dx_frame *frame,
1847 - struct dx_hash_info *hinfo, int *error)
1848 + struct buffer_head **bh, struct dx_frame *frames,
1849 + struct dx_frame *frame, struct dx_hash_info *hinfo,
1850 + struct htree_lock *lck, int *error)
1852 unsigned blocksize = dir->i_sb->s_blocksize;
1853 unsigned count, continued;
1854 @@ -1291,7 +1626,14 @@ static struct ext4_dir_entry_2 *do_split
1855 hash2, split, count-split));
1857 /* Fancy dance to stay within two buffers */
1858 - de2 = dx_move_dirents(data1, data2, map + split, count - split, blocksize);
1859 + if (hinfo->hash < hash2) {
1860 + de2 = dx_move_dirents(data1, data2, map + split,
1861 + count - split, blocksize);
1863 +		/* make sure we add the entry to the same block that
1864 +		 * we have already locked */
1865 + de2 = dx_move_dirents(data1, data2, map, split, blocksize);
1867 de = dx_pack_dirents(data1, blocksize);
1868 de->rec_len = ext4_rec_len_to_disk(data1 + blocksize - (char *) de,
1870 @@ -1300,13 +1642,21 @@ static struct ext4_dir_entry_2 *do_split
1871 dxtrace(dx_show_leaf (hinfo, (struct ext4_dir_entry_2 *) data1, blocksize, 1));
1872 dxtrace(dx_show_leaf (hinfo, (struct ext4_dir_entry_2 *) data2, blocksize, 1));
1874 - /* Which block gets the new entry? */
1875 - if (hinfo->hash >= hash2)
1879 + ext4_htree_spin_lock(lck, frame > frames ? (frame - 1)->at : NULL,
1880 +			     frame->at); /* notify that the block is being split */
1881 + if (hinfo->hash < hash2) {
1882 + dx_insert_block(frame, hash2 + continued, newblock);
1885 +		/* swap the block numbers so the locked block keeps the target half */
1886 + dx_insert_block(frame, hash2 + continued,
1887 + dx_get_block(frame->at));
1888 + dx_set_block(frame->at, newblock);
1891 - dx_insert_block(frame, hash2 + continued, newblock);
1892 + ext4_htree_spin_unlock(lck);
1893 + ext4_htree_dx_unlock(lck);
1895 err = ext4_handle_dirty_metadata(handle, dir, bh2);
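
For reference, a minimal standalone sketch of the split policy do_split() now follows: the caller only holds a DE-lock on the original block, so the half of the hash range that contains the new entry must stay in (or be swapped into) that locked block. All values below are made up for illustration and are not part of the patch; compile with cc.

	#include <stdio.h>

	int main(void)
	{
		unsigned hashes[8] = { 10, 20, 30, 40, 50, 60, 70, 80 };
		unsigned split     = 4;              /* midpoint, as in do_split() */
		unsigned hash2     = hashes[split];  /* first hash of the upper half */
		unsigned new_hash  = 65;             /* hash of the entry being added */

		if (new_hash < hash2)
			/* lower half stays in the locked block; move the upper half */
			printf("move entries >= %u to the new block\n", hash2);
		else
			/* upper half must stay under the lock: move the lower half
			 * out and swap the logical block numbers in the parent */
			printf("move entries < %u out, then swap block numbers\n",
			       hash2);
		return 0;
	}
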
1898 @@ -1418,7 +1768,7 @@ static int add_dirent_to_buf(handle_t *h
1899 if (!IS_NOCMTIME(dir))
1900 dir->i_mtime = dir->i_ctime = ext4_current_time(dir);
1901 ext4_update_dx_flag(dir);
1903 + inode_inc_iversion(dir);
1904 ext4_mark_inode_dirty(handle, dir);
1905 BUFFER_TRACE(bh, "call ext4_handle_dirty_metadata");
1906 err = ext4_handle_dirty_metadata(handle, dir, bh);
1907 @@ -1438,7 +1788,7 @@ static int make_indexed_dir(handle_t *ha
1908 const char *name = dentry->d_name.name;
1909 int namelen = dentry->d_name.len;
1910 struct buffer_head *bh2;
1911 - struct dx_frame frames[2], *frame;
1912 + struct dx_frame frames[EXT4_HTREE_LEVEL], *frame;
1913 struct dx_entry *entries;
1914 struct ext4_dir_entry_2 *de, *de2, *dot_de, *dotdot_de;
1916 @@ -1517,7 +1867,7 @@ static int make_indexed_dir(handle_t *ha
1917 frame->at = entries;
1920 - de = do_split(handle,dir, &bh, frame, &hinfo, &retval);
1921 + de = do_split(handle,dir, &bh, frames, frame, &hinfo, NULL, &retval);
1922 dx_release (frames);
1925 @@ -1616,7 +1966,7 @@ out:
1926 * the entry, as someone else might have used it while you slept.
1928 int ext4_add_entry(handle_t *handle, struct dentry *dentry,
1929 - struct inode *inode)
1930 + struct inode *inode, struct htree_lock *lck)
1932 struct inode *dir = dentry->d_parent->d_inode;
1933 struct buffer_head *bh;
1934 @@ -1635,9 +1985,10 @@ int ext4_add_entry(handle_t *handle, str
1935 if (dentry->d_name.len == 2 &&
1936 memcmp(dentry->d_name.name, "..", 2) == 0)
1937 return ext4_update_dotdot(handle, dentry, inode);
1938 - retval = ext4_dx_add_entry(handle, dentry, inode);
1939 + retval = ext4_dx_add_entry(handle, dentry, inode, lck);
1940 if (!retval || (retval != ERR_BAD_DX_DIR))
1942 + ext4_htree_safe_relock(lck);
1943 ext4_clear_inode_flag(dir, EXT4_INODE_INDEX);
1945 ext4_mark_inode_dirty(handle, dir);
1946 @@ -1674,18 +2025,21 @@ EXPORT_SYMBOL(ext4_add_entry);
1947 * Returns 0 for success, or a negative error value
1949 static int ext4_dx_add_entry(handle_t *handle, struct dentry *dentry,
1950 - struct inode *inode)
1951 + struct inode *inode, struct htree_lock *lck)
1953 - struct dx_frame frames[2], *frame;
1954 + struct dx_frame frames[EXT4_HTREE_LEVEL], *frame;
1955 struct dx_entry *entries, *at;
1956 struct dx_hash_info hinfo;
1957 struct buffer_head *bh;
1958 struct inode *dir = dentry->d_parent->d_inode;
1959 struct super_block *sb = dir->i_sb;
1960 struct ext4_dir_entry_2 *de;
1964 - frame = dx_probe(&dentry->d_name, dir, &hinfo, frames, &err);
1967 + frame = dx_probe(&dentry->d_name, dir, &hinfo, frames, lck, &err);
1970 entries = frame->entries;
1971 @@ -1694,33 +2048,53 @@ static int ext4_dx_add_entry(handle_t *h
1972 if (!(bh = ext4_bread(handle,dir, dx_get_block(frame->at), 0, &err)))
1975 - BUFFER_TRACE(bh, "get_write_access");
1976 - err = ext4_journal_get_write_access(handle, bh);
1978 - goto journal_error;
1980 err = add_dirent_to_buf(handle, dentry, inode, NULL, bh);
1985 /* Block full, should compress but for now just split */
1986 dxtrace(printk(KERN_DEBUG "using %u of %u node entries\n",
1987 dx_get_count(entries), dx_get_limit(entries)));
1988 /* Need to split index? */
1989 if (dx_get_count(entries) == dx_get_limit(entries)) {
1990 ext4_lblk_t newblock;
1991 - unsigned icount = dx_get_count(entries);
1992 - int levels = frame - frames;
1993 + int levels = frame - frames + 1;
1995 + int add_level = 1;
1996 struct dx_entry *entries2;
1997 struct dx_node *node2;
1998 struct buffer_head *bh2;
2000 - if (levels && (dx_get_count(frames->entries) ==
2001 - dx_get_limit(frames->entries))) {
2002 - ext4_warning(sb, "Directory index full!");
2003 + if (!ext4_htree_safe_locked(lck)) { /* retry with EX lock */
2004 + ext4_htree_safe_relock(lck);
2008 + while (frame > frames) {
2009 + if (dx_get_count((frame - 1)->entries) <
2010 + dx_get_limit((frame - 1)->entries)) {
2014 + frame--; /* split higher index block */
2016 + entries = frame->entries;
2019 + if (add_level && levels == ext4_dir_htree_level(sb)) {
2020 + ext4_warning(sb, "Directory (ino: %lu) index full, "
2021 +					 "reached max htree level: %d",
2022 + dir->i_ino, levels);
2023 + if (ext4_dir_htree_level(sb) < EXT4_HTREE_LEVEL) {
2024 +				ext4_warning(sb, "Large directory feature is "
2025 + "not enabled on this "
2031 + icount = dx_get_count(entries);
2032 bh2 = ext4_append (handle, dir, &newblock, &err);
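
The loop above grows the index upward one level at a time until ext4_dir_htree_level(sb) is reached, and only warns once the tree is already at its maximum depth. A rough, standalone illustration of why one extra index level matters; the per-block limits are assumptions for 4KB blocks, the real values come from dx_root_limit()/dx_node_limit():

	#include <stdio.h>

	int main(void)
	{
		const unsigned long long idx_per_block = 500; /* assumed dx_node_limit */
		const unsigned long long de_per_leaf   = 100; /* assumed entries per leaf */

		for (int levels = 1; levels <= 3; levels++) {
			unsigned long long leaves = 1;

			for (int i = 0; i < levels; i++)
				leaves *= idx_per_block;
			printf("%d index level(s): ~%llu leaf blocks, ~%llu entries\n",
			       levels, leaves, leaves * de_per_leaf);
		}
		return 0;
	}
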
2035 @@ -1733,7 +2107,7 @@ static int ext4_dx_add_entry(handle_t *h
2036 err = ext4_journal_get_write_access(handle, frame->bh);
2041 unsigned icount1 = icount/2, icount2 = icount - icount1;
2042 unsigned hash2 = dx_get_hash(entries + icount1);
2043 dxtrace(printk(KERN_DEBUG "Split index %i/%i\n",
2044 @@ -1741,7 +2115,7 @@ static int ext4_dx_add_entry(handle_t *h
2046 BUFFER_TRACE(frame->bh, "get_write_access"); /* index root */
2047 err = ext4_journal_get_write_access(handle,
2053 @@ -1757,18 +2131,24 @@ static int ext4_dx_add_entry(handle_t *h
2054 frame->entries = entries = entries2;
2055 swap(frame->bh, bh2);
2057 - dx_insert_block(frames + 0, hash2, newblock);
2058 - dxtrace(dx_show_index("node", frames[1].entries));
2059 + dx_insert_block((frame - 1), hash2, newblock);
2060 + dxtrace(dx_show_index("node", frame->entries));
2061 dxtrace(dx_show_index("node",
2062 ((struct dx_node *) bh2->b_data)->entries));
2063 err = ext4_handle_dirty_metadata(handle, inode, bh2);
2067 + ext4_handle_dirty_metadata(handle, inode,
2070 + ext4_handle_dirty_metadata(handle, inode,
2075 struct dx_root_info * info;
2076 - dxtrace(printk(KERN_DEBUG
2077 - "Creating second level index...\n"));
2079 memcpy((char *) entries2, (char *) entries,
2080 icount * sizeof(struct dx_entry));
2081 dx_set_limit(entries2, dx_node_limit(dir));
2082 @@ -1778,32 +2158,60 @@ static int ext4_dx_add_entry(handle_t *h
2083 dx_set_block(entries + 0, newblock);
2084 info = dx_get_dx_info((struct ext4_dir_entry_2*)
2085 frames[0].bh->b_data);
2086 - info->indirect_levels = 1;
2087 + info->indirect_levels += 1;
2088 + dxtrace(printk(KERN_DEBUG
2089 + "Creating %d level index...\n",
2090 + info->indirect_levels));
2091 + ext4_handle_dirty_metadata(handle, inode, frame->bh);
2092 + ext4_handle_dirty_metadata(handle, inode, bh2);
2097 + } else if (!ext4_htree_dx_locked(lck)) {
2098 + struct ext4_dir_lock_data *ld = ext4_htree_lock_data(lck);
2100 - /* Add new access path frame */
2101 - frame = frames + 1;
2102 - frame->at = at = at - entries + entries2;
2103 - frame->entries = entries = entries2;
2105 - err = ext4_journal_get_write_access(handle,
2108 - goto journal_error;
2109 + /* not well protected, require DX lock */
2110 + ext4_htree_dx_need_lock(lck);
2111 + at = frame > frames ? (frame - 1)->at : NULL;
2113 + /* NB: no risk of deadlock because it's just a try.
2115 +		 * NB: we check ld_count twice, the first time before
2116 +		 * taking the DX lock, the second time while holding it.
2118 +		 * NB: we never free directory blocks so far, so the value
2119 +		 * returned by dx_get_count() should equal ld->ld_count if
2120 +		 * nobody has split any DE-block under @at, and ld->ld_at
2121 +		 * still points to a valid dx_entry. */
2122 + if ((ld->ld_count != dx_get_count(entries)) ||
2123 + !ext4_htree_dx_lock_try(lck, at) ||
2124 + (ld->ld_count != dx_get_count(entries))) {
2128 - ext4_handle_dirty_metadata(handle, inode, frames[0].bh);
2129 + /* OK, I've got DX lock and nothing changed */
2130 + frame->at = ld->ld_at;
2132 - de = do_split(handle, dir, &bh, frame, &hinfo, &err);
2133 + de = do_split(handle, dir, &bh, frames, frame, &hinfo, lck, &err);
2137 err = add_dirent_to_buf(handle, dentry, inode, de, bh);
2141 ext4_std_error(dir->i_sb, err);
2143 + ext4_htree_dx_unlock(lck);
2144 + ext4_htree_de_unlock(lck);
2148 +	/* @restart being true means the htree-path has changed; we need to
2149 +	 * repeat dx_probe() to find a valid htree-path */
2150 + if (restart && err == 0)
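
The ld_count double-check above is a classic optimistic pattern: sample a change counter without the lock, try-lock, then re-check under the lock, and restart on any mismatch. A minimal standalone sketch of the same idea, not taken from the patch; the names are hypothetical and pthreads is used only for illustration (compile with cc demo.c -lpthread):

	#include <pthread.h>
	#include <stdio.h>

	static pthread_mutex_t dx_lock = PTHREAD_MUTEX_INITIALIZER;
	static unsigned split_count;            /* grows every time a block splits */

	static int try_fast_path(unsigned seen_count)
	{
		if (seen_count != split_count)          /* cheap check, no lock    */
			return 0;
		if (pthread_mutex_trylock(&dx_lock))    /* never blocks: no deadlock */
			return 0;
		if (seen_count != split_count) {        /* re-check under the lock */
			pthread_mutex_unlock(&dx_lock);
			return 0;
		}
		/* ... safe to reuse the cached state here ... */
		pthread_mutex_unlock(&dx_lock);
		return 1;
	}

	int main(void)
	{
		unsigned seen = split_count;

		printf("fast path %s\n", try_fast_path(seen) ? "taken" : "restart");
		return 0;
	}
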
2155 @@ -1838,7 +2246,7 @@ int ext4_delete_entry(handle_t *handle,
2160 + inode_inc_iversion(dir);
2161 BUFFER_TRACE(bh, "call ext4_handle_dirty_metadata");
2162 ext4_handle_dirty_metadata(handle, dir, bh);
2164 @@ -1882,7 +2290,7 @@ static void ext4_dec_count(handle_t *han
2165 static int ext4_add_nondir(handle_t *handle,
2166 struct dentry *dentry, struct inode *inode)
2168 - int err = ext4_add_entry(handle, dentry, inode);
2169 + int err = ext4_add_entry(handle, dentry, inode, NULL);
2171 ext4_mark_inode_dirty(handle, inode);
2172 d_instantiate(dentry, inode);
2173 @@ -2112,7 +2520,7 @@ retry:
2177 - err = ext4_add_entry(handle, dentry, inode);
2178 + err = ext4_add_entry(handle, dentry, inode, NULL);
2181 unlock_new_inode(inode);
2182 @@ -2381,7 +2789,7 @@ static int ext4_rmdir(struct inode *dir,
2183 return PTR_ERR(handle);
2186 - bh = ext4_find_entry(dir, &dentry->d_name, &de);
2187 + bh = ext4_find_entry(dir, &dentry->d_name, &de, NULL);
2191 @@ -2443,7 +2851,7 @@ static int ext4_unlink(struct inode *dir
2192 ext4_handle_sync(handle);
2195 - bh = ext4_find_entry(dir, &dentry->d_name, &de);
2196 + bh = ext4_find_entry(dir, &dentry->d_name, &de, NULL);
2200 @@ -2567,7 +2975,7 @@ retry:
2201 ext4_inc_count(handle, inode);
2202 atomic_inc(&inode->i_count);
2204 - err = ext4_add_entry(handle, dentry, inode);
2205 + err = ext4_add_entry(handle, dentry, inode, NULL);
2207 ext4_mark_inode_dirty(handle, inode);
2208 d_instantiate(dentry, inode);
2209 @@ -2612,7 +3020,7 @@ static int ext4_rename(struct inode *old
2210 if (IS_DIRSYNC(old_dir) || IS_DIRSYNC(new_dir))
2211 ext4_handle_sync(handle);
2213 - old_bh = ext4_find_entry(old_dir, &old_dentry->d_name, &old_de);
2214 + old_bh = ext4_find_entry(old_dir, &old_dentry->d_name, &old_de, NULL);
2216 * Check for inode number is _not_ due to possible IO errors.
2217 * We might rmdir the source, keep it as pwd of some process
2218 @@ -2625,7 +3033,7 @@ static int ext4_rename(struct inode *old
2221 new_inode = new_dentry->d_inode;
2222 - new_bh = ext4_find_entry(new_dir, &new_dentry->d_name, &new_de);
2223 + new_bh = ext4_find_entry(new_dir, &new_dentry->d_name, &new_de, NULL);
2227 @@ -2651,7 +3059,7 @@ static int ext4_rename(struct inode *old
2231 - retval = ext4_add_entry(handle, new_dentry, old_inode);
2232 + retval = ext4_add_entry(handle, new_dentry, old_inode, NULL);
2236 @@ -2693,7 +3101,8 @@ static int ext4_rename(struct inode *old
2237 struct buffer_head *old_bh2;
2238 struct ext4_dir_entry_2 *old_de2;
2240 - old_bh2 = ext4_find_entry(old_dir, &old_dentry->d_name, &old_de2);
2241 + old_bh2 = ext4_find_entry(old_dir, &old_dentry->d_name,
2244 retval = ext4_delete_entry(handle, old_dir,
2246 --- linux-2.6.32-131.6.1/fs/ext4/inode.c 2011-10-06 20:10:49.000000000 +0800
2247 +++ linux-2.6.32-131.6.1-pdo/fs/ext4/inode.c 2011-12-01 22:02:11.000000000 +0800
2248 @@ -5112,7 +5112,7 @@ struct inode *ext4_iget(struct super_blo
2249 if (EXT4_HAS_INCOMPAT_FEATURE(sb, EXT4_FEATURE_INCOMPAT_64BIT))
2251 ((__u64)le16_to_cpu(raw_inode->i_file_acl_high)) << 32;
2252 - inode->i_size = ext4_isize(raw_inode);
2253 + inode->i_size = ext4_isize(sb, raw_inode);
2254 ei->i_disksize = inode->i_size;
2256 ei->i_reserved_quota = 0;
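
The extra superblock argument to ext4_isize() presumably lets it honour i_size_high for directories as well, once the large-directory feature allows them to exceed 4GB; without it only regular files use the high 32 bits. A standalone sketch of the 64-bit size reconstruction (the feature condition itself is an assumption, not quoted from the patch):

	#include <stdio.h>
	#include <stdint.h>

	/* combine the on-disk low/high words; use_high mirrors the "regular file,
	 * or directory on a large-dir filesystem" condition (assumption) */
	static uint64_t isize(uint32_t size_lo, uint32_t size_high, int use_high)
	{
		return use_high ? (((uint64_t)size_high << 32) | size_lo)
				: (uint64_t)size_lo;
	}

	int main(void)
	{
		/* a directory just past 4GB: high word 1, low word 0x10000 */
		printf("%llu bytes\n", (unsigned long long)isize(0x10000, 1, 1));
		return 0;
	}
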
2257 --- linux-2.6.32-131.6.1/fs/ext4/Makefile 2011-10-06 20:10:49.000000000 +0800
2258 +++ linux-2.6.32-131.6.1-pdo/fs/ext4/Makefile 2011-10-06 12:21:30.000000000 +0800
2259 @@ -7,7 +7,7 @@ obj-$(CONFIG_EXT4_FS) += ext4.o
2260 ext4-y := balloc.o bitmap.o dir.o file.o fsync.o ialloc.o inode.o \
2261 ioctl.o namei.o super.o symlink.o hash.o resize.o extents.o \
2262 ext4_jbd2.o migrate.o mballoc.o block_validity.o move_extent.o \
2264 + htree_lock.o mmp.o dynlocks.o
2266 ext4-$(CONFIG_EXT4_FS_XATTR) += xattr.o xattr_user.o xattr_trusted.o
2267 ext4-$(CONFIG_EXT4_FS_POSIX_ACL) += acl.o