1 Single directory performance is critical for HPC workloads. In a
2 typical use case, an application creates a separate output file for
3 each node and task in a job. As nodes and tasks increase, hundreds
4 of thousands of files may be created in a single directory within
5 a short window of time.
6 Today, both filename lookup and file-system-modifying operations
7 (such as create and unlink) are protected by a single lock for
8 an entire ldiskfs directory. The PDO project will remove this
9 bottleneck by introducing a parallel locking mechanism for
10 ldiskfs directories. This work will enable multiple application
11 threads to look up, create and unlink entries in parallel.
14 - pdirops support for ldiskfs
15 - integrate with osd-ldiskfs
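
As an illustration, the basic life cycle of the new lock is sketched
below (a minimal sketch based on the htree_lock API added by this
patch; error handling omitted):

    /* one lock head per shared resource, e.g. per directory */
    struct htree_lock_head *lhead;
    /* one lock handle per contending thread */
    struct htree_lock *lck;

    lhead = htree_lock_head_alloc(1, HTREE_HBITS_DEF, 0);
    lck = htree_lock_alloc(1, 0);

    htree_lock(lck, lhead, HTREE_LOCK_CW);  /* sleeps until granted */
    /* ... look up / modify the shared data ... */
    htree_unlock(lck);

    htree_lock_free(lck);
    htree_lock_head_free(lhead);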
17 Index: linux-3.10.0-229.1.2.fc21.x86_64/include/linux/htree_lock.h
18 ===================================================================
20 +++ linux-3.10.0-229.1.2.fc21.x86_64/include/linux/htree_lock.h
23 + * include/linux/htree_lock.h
25 + * Copyright (c) 2011, 2012, Intel Corporation.
27 + * Author: Liang Zhen <liang@whamcloud.com>
33 + * htree_lock is an advanced lock that supports five lock modes (a concept
34 + * taken from DLM), and it is a sleeping lock.
36 + * The most common use case is:
37 + * - create a htree_lock_head for the data
38 + * - each thread (contender) creates its own htree_lock
39 + * - a contender calls htree_lock(lock_node, mode) to protect the data and
40 + *   calls htree_unlock to release the lock
42 + * There is also a more complex, advanced use case: a user can take a
43 + * PW/PR lock on a particular key; this is mostly used while the user is
44 + * holding a shared lock on the htree (CW, CR)
46 + * htree_lock(lock_node, HTREE_LOCK_CR); lock the htree with CR
47 + * htree_node_lock(lock_node, HTREE_LOCK_PR, key...); lock @key with PR
49 + * htree_node_unlock(lock_node); unlock the key
51 + * Another tip: we can have N levels of this kind of key; all we need to
52 + * do is specify N levels when creating the htree_lock_head, then we can
53 + * lock/unlock a specific level by:
54 + * htree_node_lock(lock_node, mode1, key1, level1...);
56 + * htree_node_lock(lock_node, mode1, key2, level2...);
58 + * htree_node_unlock(lock_node, level2);
59 + * htree_node_unlock(lock_node, level1);
61 + * NB: with multiple levels, be careful about lock ordering to avoid deadlocks
64 +#ifndef _LINUX_HTREE_LOCK_H
65 +#define _LINUX_HTREE_LOCK_H
67 +#include <linux/list.h>
68 +#include <linux/spinlock.h>
69 +#include <linux/sched.h>
73 + * more details can be found here:
74 + * http://en.wikipedia.org/wiki/Distributed_lock_manager
77 + HTREE_LOCK_EX = 0, /* exclusive lock: incompatible with all others */
78 + HTREE_LOCK_PW, /* protected write: allows only CR users */
79 + HTREE_LOCK_PR, /* protected read: allow PR, CR users */
80 + HTREE_LOCK_CW, /* concurrent write: allow CR, CW users */
81 + HTREE_LOCK_CR, /* concurrent read: allow all but EX users */
82 + HTREE_LOCK_MAX, /* number of lock modes */
85 +#define HTREE_LOCK_NL HTREE_LOCK_MAX
86 +#define HTREE_LOCK_INVAL 0xdead10c
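+
+/* For reference, the pairwise compatibility implied by the mode comments
+ * above (illustrative summary; '+' means the two modes can be granted
+ * concurrently):
+ *
+ *        EX  PW  PR  CW  CR
+ *   EX    -   -   -   -   -
+ *   PW    -   -   -   -   +
+ *   PR    -   -   +   -   +
+ *   CW    -   -   -   +   +
+ *   CR    -   +   +   +   +
+ */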
89 + HTREE_HBITS_MIN = 2,
90 + HTREE_HBITS_DEF = 14,
91 + HTREE_HBITS_MAX = 32,
95 + HTREE_EVENT_DISABLE = (0),
96 + HTREE_EVENT_RD = (1 << HTREE_LOCK_PR),
97 + HTREE_EVENT_WR = (1 << HTREE_LOCK_PW),
98 + HTREE_EVENT_RDWR = (HTREE_EVENT_RD | HTREE_EVENT_WR),
103 +typedef void (*htree_event_cb_t)(void *target, void *event);
105 +struct htree_lock_child {
106 + struct list_head lc_list; /* granted list */
107 + htree_event_cb_t lc_callback; /* event callback */
108 + unsigned lc_events; /* event types */
111 +struct htree_lock_head {
112 + unsigned long lh_lock; /* bits lock */
113 + /* blocked lock list (htree_lock) */
114 + struct list_head lh_blocked_list;
117 + /* hash bits for key and limit number of locks */
119 + /* counters for blocked locks */
120 + u16 lh_nblocked[HTREE_LOCK_MAX];
121 + /* counters for granted locks */
122 + u16 lh_ngranted[HTREE_LOCK_MAX];
125 + /* array of children locks */
126 + struct htree_lock_child lh_children[0];
129 +/* htree_lock_node_t is child-lock for a specific key (ln_value) */
130 +struct htree_lock_node {
131 + htree_lock_mode_t ln_mode;
132 + /* major hash key */
134 + /* minor hash key */
136 + struct list_head ln_major_list;
137 + struct list_head ln_minor_list;
138 + /* alive list, all locks (granted, blocked, listening) are on it */
139 + struct list_head ln_alive_list;
141 + struct list_head ln_blocked_list;
143 + struct list_head ln_granted_list;
144 + void *ln_ev_target;
148 + struct task_struct *lk_task;
149 + struct htree_lock_head *lk_head;
152 + htree_lock_mode_t lk_mode;
153 + struct list_head lk_blocked_list;
154 + struct htree_lock_node lk_nodes[0];
157 +/* create a lock head, which stands for a resource */
158 +struct htree_lock_head *htree_lock_head_alloc(unsigned depth,
159 + unsigned hbits, unsigned priv);
160 +/* free a lock head */
161 +void htree_lock_head_free(struct htree_lock_head *lhead);
162 +/* register event callback for child lock at level @depth */
163 +void htree_lock_event_attach(struct htree_lock_head *lhead, unsigned depth,
164 + unsigned events, htree_event_cb_t callback);
165 +/* create a lock handle, which stands for a thread */
166 +struct htree_lock *htree_lock_alloc(unsigned depth, unsigned pbytes);
167 +/* free a lock handle */
168 +void htree_lock_free(struct htree_lock *lck);
169 +/* lock htree; 0 is returned only if @wait is false and the lock can't
170 + * be granted immediately */
171 +int htree_lock_try(struct htree_lock *lck, struct htree_lock_head *lhead,
172 + htree_lock_mode_t mode, int wait);
174 +void htree_unlock(struct htree_lock *lck);
175 +/* unlock and relock htree with @new_mode */
176 +int htree_change_lock_try(struct htree_lock *lck,
177 + htree_lock_mode_t new_mode, int wait);
178 +void htree_change_mode(struct htree_lock *lck, htree_lock_mode_t mode);
179 +/* acquire child lock (key) of htree at level @dep; @event will be sent to all
180 + * listeners on this @key while the lock is being granted */
181 +int htree_node_lock_try(struct htree_lock *lck, htree_lock_mode_t mode,
182 + u32 key, unsigned dep, int wait, void *event);
183 +/* release child lock at level @dep; this lock will listen on its key:
184 + * if @event isn't NULL, event_cb will be called against @lck while granting
185 + * any other lock at level @dep with the same key */
186 +void htree_node_unlock(struct htree_lock *lck, unsigned dep, void *event);
187 +/* stop listening on child lock at level @dep */
188 +void htree_node_stop_listen(struct htree_lock *lck, unsigned dep);
190 +void htree_lock_stat_print(int depth);
191 +void htree_lock_stat_reset(void);
193 +#define htree_lock(lck, lh, mode) htree_lock_try(lck, lh, mode, 1)
194 +#define htree_change_lock(lck, mode) htree_change_lock_try(lck, mode, 1)
196 +#define htree_lock_mode(lck) ((lck)->lk_mode)
198 +#define htree_node_lock(lck, mode, key, dep) \
199 + htree_node_lock_try(lck, mode, key, dep, 1, NULL)
200 +/* this is only safe in the thread context of the lock owner */
201 +#define htree_node_is_granted(lck, dep) \
202 + ((lck)->lk_nodes[dep].ln_mode != HTREE_LOCK_INVAL && \
203 + (lck)->lk_nodes[dep].ln_mode != HTREE_LOCK_NL)
204 +/* this is only safe in the thread context of the lock owner */
205 +#define htree_node_is_listening(lck, dep) \
206 + ((lck)->lk_nodes[dep].ln_mode == HTREE_LOCK_NL)
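+
+/*
+ * Illustrative sketch of the child-lock (node) usage described at the top of
+ * this file, assuming a lock head created with depth == 1:
+ *
+ *	htree_lock(lck, lhead, HTREE_LOCK_CR);		share the whole tree
+ *	htree_node_lock(lck, HTREE_LOCK_PW, key, 0);	write under one key
+ *	... modify data under @key ...
+ *	htree_node_unlock(lck, 0, NULL);		release the key
+ *	htree_unlock(lck);
+ */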
209 Index: linux-3.10.0-229.1.2.fc21.x86_64/fs/ext4/htree_lock.c
210 ===================================================================
212 +++ linux-3.10.0-229.1.2.fc21.x86_64/fs/ext4/htree_lock.c
215 + * fs/ext4/htree_lock.c
217 + * Copyright (c) 2011, 2012, Intel Corporation.
219 + * Author: Liang Zhen <liang@whamcloud.com>
221 +#include <linux/jbd2.h>
222 +#include <linux/hash.h>
223 +#include <linux/module.h>
224 +#include <linux/htree_lock.h>
227 + HTREE_LOCK_BIT_EX = (1 << HTREE_LOCK_EX),
228 + HTREE_LOCK_BIT_PW = (1 << HTREE_LOCK_PW),
229 + HTREE_LOCK_BIT_PR = (1 << HTREE_LOCK_PR),
230 + HTREE_LOCK_BIT_CW = (1 << HTREE_LOCK_CW),
231 + HTREE_LOCK_BIT_CR = (1 << HTREE_LOCK_CR),
235 + HTREE_LOCK_COMPAT_EX = 0,
236 + HTREE_LOCK_COMPAT_PW = HTREE_LOCK_COMPAT_EX | HTREE_LOCK_BIT_CR,
237 + HTREE_LOCK_COMPAT_PR = HTREE_LOCK_COMPAT_PW | HTREE_LOCK_BIT_PR,
238 + HTREE_LOCK_COMPAT_CW = HTREE_LOCK_COMPAT_PW | HTREE_LOCK_BIT_CW,
239 + HTREE_LOCK_COMPAT_CR = HTREE_LOCK_COMPAT_CW | HTREE_LOCK_BIT_PR |
243 +static int htree_lock_compat[] = {
244 + [HTREE_LOCK_EX] HTREE_LOCK_COMPAT_EX,
245 + [HTREE_LOCK_PW] HTREE_LOCK_COMPAT_PW,
246 + [HTREE_LOCK_PR] HTREE_LOCK_COMPAT_PR,
247 + [HTREE_LOCK_CW] HTREE_LOCK_COMPAT_CW,
248 + [HTREE_LOCK_CR] HTREE_LOCK_COMPAT_CR,
251 +/* max allowed htree-lock depth.
252 + * We only need depth=3 for ext4, although users can use a higher value. */
253 +#define HTREE_LOCK_DEP_MAX 16
255 +#ifdef HTREE_LOCK_DEBUG
257 +static char *hl_name[] = {
258 + [HTREE_LOCK_EX] "EX",
259 + [HTREE_LOCK_PW] "PW",
260 + [HTREE_LOCK_PR] "PR",
261 + [HTREE_LOCK_CW] "CW",
262 + [HTREE_LOCK_CR] "CR",
266 +struct htree_lock_node_stats {
267 + unsigned long long blocked[HTREE_LOCK_MAX];
268 + unsigned long long granted[HTREE_LOCK_MAX];
269 + unsigned long long retried[HTREE_LOCK_MAX];
270 + unsigned long long events;
273 +struct htree_lock_stats {
274 + struct htree_lock_node_stats nodes[HTREE_LOCK_DEP_MAX];
275 + unsigned long long granted[HTREE_LOCK_MAX];
276 + unsigned long long blocked[HTREE_LOCK_MAX];
279 +static struct htree_lock_stats hl_stats;
281 +void htree_lock_stat_reset(void)
283 + memset(&hl_stats, 0, sizeof(hl_stats));
286 +void htree_lock_stat_print(int depth)
291 + printk(KERN_DEBUG "HTREE LOCK STATS:\n");
292 + for (i = 0; i < HTREE_LOCK_MAX; i++) {
293 + printk(KERN_DEBUG "[%s]: G [%10llu], B [%10llu]\n",
294 + hl_name[i], hl_stats.granted[i], hl_stats.blocked[i]);
296 + for (i = 0; i < depth; i++) {
297 + printk(KERN_DEBUG "HTREE CHILD [%d] STATS:\n", i);
298 + for (j = 0; j < HTREE_LOCK_MAX; j++) {
300 + "[%s]: G [%10llu], B [%10llu], R [%10llu]\n",
301 + hl_name[j], hl_stats.nodes[i].granted[j],
302 + hl_stats.nodes[i].blocked[j],
303 + hl_stats.nodes[i].retried[j]);
308 +#define lk_grant_inc(m) do { hl_stats.granted[m]++; } while (0)
309 +#define lk_block_inc(m) do { hl_stats.blocked[m]++; } while (0)
310 +#define ln_grant_inc(d, m) do { hl_stats.nodes[d].granted[m]++; } while (0)
311 +#define ln_block_inc(d, m) do { hl_stats.nodes[d].blocked[m]++; } while (0)
312 +#define ln_retry_inc(d, m) do { hl_stats.nodes[d].retried[m]++; } while (0)
313 +#define ln_event_inc(d) do { hl_stats.nodes[d].events++; } while (0)
317 +void htree_lock_stat_reset(void) {}
318 +void htree_lock_stat_print(int depth) {}
320 +#define lk_grant_inc(m) do {} while (0)
321 +#define lk_block_inc(m) do {} while (0)
322 +#define ln_grant_inc(d, m) do {} while (0)
323 +#define ln_block_inc(d, m) do {} while (0)
324 +#define ln_retry_inc(d, m) do {} while (0)
325 +#define ln_event_inc(d) do {} while (0)
329 +EXPORT_SYMBOL(htree_lock_stat_reset);
330 +EXPORT_SYMBOL(htree_lock_stat_print);
332 +#define HTREE_DEP_ROOT (-1)
334 +#define htree_spin_lock(lhead, dep) \
335 + bit_spin_lock((dep) + 1, &(lhead)->lh_lock)
336 +#define htree_spin_unlock(lhead, dep) \
337 + bit_spin_unlock((dep) + 1, &(lhead)->lh_lock)
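+
+/* NB: bit 0 of lh_lock thus serializes the root level (HTREE_DEP_ROOT == -1)
+ * and bit @dep + 1 serializes child level @dep */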
339 +#define htree_key_event_ignore(child, ln) \
340 + (!((child)->lc_events & (1 << (ln)->ln_mode)))
343 +htree_key_list_empty(struct htree_lock_node *ln)
345 + return list_empty(&ln->ln_major_list) && list_empty(&ln->ln_minor_list);
349 +htree_key_list_del_init(struct htree_lock_node *ln)
351 + struct htree_lock_node *tmp = NULL;
353 + if (!list_empty(&ln->ln_minor_list)) {
354 + tmp = list_entry(ln->ln_minor_list.next,
355 + struct htree_lock_node, ln_minor_list);
356 + list_del_init(&ln->ln_minor_list);
359 + if (list_empty(&ln->ln_major_list))
362 + if (tmp == NULL) { /* not on minor key list */
363 + list_del_init(&ln->ln_major_list);
365 + BUG_ON(!list_empty(&tmp->ln_major_list));
366 + list_replace_init(&ln->ln_major_list, &tmp->ln_major_list);
371 +htree_key_list_replace_init(struct htree_lock_node *old,
372 + struct htree_lock_node *new)
374 + if (!list_empty(&old->ln_major_list))
375 + list_replace_init(&old->ln_major_list, &new->ln_major_list);
377 + if (!list_empty(&old->ln_minor_list))
378 + list_replace_init(&old->ln_minor_list, &new->ln_minor_list);
382 +htree_key_event_enqueue(struct htree_lock_child *child,
383 + struct htree_lock_node *ln, int dep, void *event)
385 + struct htree_lock_node *tmp;
387 + /* NB: ALWAYS called holding lhead::lh_lock(dep) */
388 + BUG_ON(ln->ln_mode == HTREE_LOCK_NL);
389 + if (event == NULL || htree_key_event_ignore(child, ln))
392 + /* shouldn't be a very long list */
393 + list_for_each_entry(tmp, &ln->ln_alive_list, ln_alive_list) {
394 + if (tmp->ln_mode == HTREE_LOCK_NL) {
396 + if (child->lc_callback != NULL)
397 + child->lc_callback(tmp->ln_ev_target, event);
403 +htree_node_lock_enqueue(struct htree_lock *newlk, struct htree_lock *curlk,
404 + unsigned dep, int wait, void *event)
406 + struct htree_lock_child *child = &newlk->lk_head->lh_children[dep];
407 + struct htree_lock_node *newln = &newlk->lk_nodes[dep];
408 + struct htree_lock_node *curln = &curlk->lk_nodes[dep];
410 + /* NB: ALWAYS called holding lhead::lh_lock(dep) */
411 +	/* NB: we only expect PR/PW lock modes here; only these two modes are
412 +	 * allowed for htree_node_lock (asserted in htree_node_lock_internal).
413 +	 * NL is only used for listeners; users can't directly request NL mode */
414 + if ((curln->ln_mode == HTREE_LOCK_NL) ||
415 + (curln->ln_mode != HTREE_LOCK_PW &&
416 + newln->ln_mode != HTREE_LOCK_PW)) {
417 + /* no conflict, attach it on granted list of @curlk */
418 + if (curln->ln_mode != HTREE_LOCK_NL) {
419 + list_add(&newln->ln_granted_list,
420 + &curln->ln_granted_list);
422 + /* replace key owner */
423 + htree_key_list_replace_init(curln, newln);
426 + list_add(&newln->ln_alive_list, &curln->ln_alive_list);
427 + htree_key_event_enqueue(child, newln, dep, event);
428 + ln_grant_inc(dep, newln->ln_mode);
429 + return 1; /* still hold lh_lock */
432 + if (!wait) { /* can't grant and don't want to wait */
433 + ln_retry_inc(dep, newln->ln_mode);
434 + newln->ln_mode = HTREE_LOCK_INVAL;
435 + return -1; /* don't wait and just return -1 */
438 + newlk->lk_task = current;
439 + /* conflict, attach it on blocked list of curlk */
440 + list_add_tail(&newln->ln_blocked_list, &curln->ln_blocked_list);
441 + list_add(&newln->ln_alive_list, &curln->ln_alive_list);
442 + ln_block_inc(dep, newln->ln_mode);
445 + set_current_state(TASK_UNINTERRUPTIBLE);
446 + htree_spin_unlock(newlk->lk_head, dep);
447 + /* wait to be given the lock */
448 + if (newlk->lk_task != NULL)
450 +	/* granted, no doubt; wake-up will set me RUNNING */
451 + htree_spin_lock(newlk->lk_head, dep);
452 +	/* need to check the lock was really granted; the thread may have been woken spuriously */
453 + if (list_empty(&newln->ln_granted_list) && htree_key_list_empty(newln))
455 + if (event && !htree_key_event_ignore(child, newln))
456 + htree_key_event_enqueue(child, newln, dep, event);
458 + return 1; /* still hold lh_lock */
462 + * get PR/PW access to a particular tree node according to @dep and @key;
463 + * it will return -1 if @wait is false and the lock can't be granted immediately.
464 + * All listeners (HTREE_LOCK_NL) on @dep and with the same @key will get
465 + * @event if it's not NULL.
466 + * NB: ALWAYS called holding lhead::lh_lock
469 +htree_node_lock_internal(struct htree_lock_head *lhead, struct htree_lock *lck,
470 + htree_lock_mode_t mode, u32 key, unsigned dep,
471 + int wait, void *event)
474 + struct htree_lock *tmp;
475 + struct htree_lock *tmp2;
482 + BUG_ON(mode != HTREE_LOCK_PW && mode != HTREE_LOCK_PR);
483 + BUG_ON(htree_node_is_granted(lck, dep));
485 + key = hash_long(key, lhead->lh_hbits);
487 + mi_bits = lhead->lh_hbits >> 1;
488 + ma_bits = lhead->lh_hbits - mi_bits;
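+	/* e.g. with the default lh_hbits == 14, mi_bits == 7 and ma_bits == 7:
+	 * the hashed key splits into a 7-bit major and a 7-bit minor key */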
490 + lck->lk_nodes[dep].ln_major_key = major = key & ((1U << ma_bits) - 1);
491 + lck->lk_nodes[dep].ln_minor_key = minor = key >> ma_bits;
492 + lck->lk_nodes[dep].ln_mode = mode;
495 +	 * The major key list is ordered, so searches start at
496 +	 * the end of the list that is numerically closer to major_key,
497 +	 * and at most half of the list will be walked (for well-distributed
498 +	 * keys). The list traversal aborts early if the expected key
499 +	 * location is passed.
501 + reverse = (major >= (1 << (ma_bits - 1)));
504 + list_for_each_entry_reverse(tmp,
505 + &lhead->lh_children[dep].lc_list,
506 + lk_nodes[dep].ln_major_list) {
507 + if (tmp->lk_nodes[dep].ln_major_key == major) {
510 + } else if (tmp->lk_nodes[dep].ln_major_key < major) {
511 + /* attach _after_ @tmp */
512 + list_add(&lck->lk_nodes[dep].ln_major_list,
513 + &tmp->lk_nodes[dep].ln_major_list);
514 + goto out_grant_major;
518 + list_add(&lck->lk_nodes[dep].ln_major_list,
519 + &lhead->lh_children[dep].lc_list);
520 + goto out_grant_major;
523 + list_for_each_entry(tmp, &lhead->lh_children[dep].lc_list,
524 + lk_nodes[dep].ln_major_list) {
525 + if (tmp->lk_nodes[dep].ln_major_key == major) {
528 + } else if (tmp->lk_nodes[dep].ln_major_key > major) {
529 + /* insert _before_ @tmp */
530 + list_add_tail(&lck->lk_nodes[dep].ln_major_list,
531 + &tmp->lk_nodes[dep].ln_major_list);
532 + goto out_grant_major;
536 + list_add_tail(&lck->lk_nodes[dep].ln_major_list,
537 + &lhead->lh_children[dep].lc_list);
538 + goto out_grant_major;
543 +	 * NB: minor_key list doesn't have a "head", @list is just a
544 +	 * temporary stub to help the list search, make sure it's removed
546 +	 * the minor_key list is an ordered list too.
548 + list_add_tail(&list, &tmp->lk_nodes[dep].ln_minor_list);
550 + reverse = (minor >= (1 << (mi_bits - 1)));
553 + list_for_each_entry_reverse(tmp2, &list,
554 + lk_nodes[dep].ln_minor_list) {
555 + if (tmp2->lk_nodes[dep].ln_minor_key == minor) {
558 + } else if (tmp2->lk_nodes[dep].ln_minor_key < minor) {
559 + /* attach _after_ @tmp2 */
560 + list_add(&lck->lk_nodes[dep].ln_minor_list,
561 + &tmp2->lk_nodes[dep].ln_minor_list);
562 + goto out_grant_minor;
566 + list_add(&lck->lk_nodes[dep].ln_minor_list, &list);
569 + list_for_each_entry(tmp2, &list,
570 + lk_nodes[dep].ln_minor_list) {
571 + if (tmp2->lk_nodes[dep].ln_minor_key == minor) {
574 + } else if (tmp2->lk_nodes[dep].ln_minor_key > minor) {
575 + /* insert _before_ @tmp2 */
576 + list_add_tail(&lck->lk_nodes[dep].ln_minor_list,
577 + &tmp2->lk_nodes[dep].ln_minor_list);
578 + goto out_grant_minor;
582 + list_add_tail(&lck->lk_nodes[dep].ln_minor_list, &list);
586 + if (list.next == &lck->lk_nodes[dep].ln_minor_list) {
587 + /* new lock @lck is the first one on minor_key list, which
588 + * means it has the smallest minor_key and it should
589 + * replace @tmp as minor_key owner */
590 + list_replace_init(&tmp->lk_nodes[dep].ln_major_list,
591 + &lck->lk_nodes[dep].ln_major_list);
593 + /* remove the temporary head */
597 + ln_grant_inc(dep, lck->lk_nodes[dep].ln_mode);
598 + return 1; /* granted with holding lh_lock */
601 +	list_del(&list); /* remove temporary head */
602 + return htree_node_lock_enqueue(lck, tmp2, dep, wait, event);
606 + * release the key of @lck at level @dep, and grant any blocked locks.
607 + * the caller will still listen on @key if @event is not NULL, which means
608 + * the caller can see an event (via event_cb) while granting any lock with
609 + * the same key at level @dep.
610 + * NB: ALWAYS called holding lhead::lh_lock
611 + * NB: a listener will not block anyone because the listening mode is HTREE_LOCK_NL
614 +htree_node_unlock_internal(struct htree_lock_head *lhead,
615 + struct htree_lock *curlk, unsigned dep, void *event)
617 + struct htree_lock_node *curln = &curlk->lk_nodes[dep];
618 + struct htree_lock *grtlk = NULL;
619 + struct htree_lock_node *grtln;
620 + struct htree_lock *poslk;
621 + struct htree_lock *tmplk;
623 + if (!htree_node_is_granted(curlk, dep))
626 + if (!list_empty(&curln->ln_granted_list)) {
627 + /* there is another granted lock */
628 + grtlk = list_entry(curln->ln_granted_list.next,
630 + lk_nodes[dep].ln_granted_list);
631 + list_del_init(&curln->ln_granted_list);
634 + if (grtlk == NULL && !list_empty(&curln->ln_blocked_list)) {
636 +	 * @curlk is the only granted lock, so we know:
637 +	 * a) curln is the key owner (attached on major/minor_list),
638 +	 *    so if there is any blocked lock, it should be attached
639 +	 *    on curln->ln_blocked_list
640 +	 * b) we can always grant the first blocked lock
642 + grtlk = list_entry(curln->ln_blocked_list.next,
644 + lk_nodes[dep].ln_blocked_list);
645 + BUG_ON(grtlk->lk_task == NULL);
646 + wake_up_process(grtlk->lk_task);
649 + if (event != NULL &&
650 + lhead->lh_children[dep].lc_events != HTREE_EVENT_DISABLE) {
651 + curln->ln_ev_target = event;
652 + curln->ln_mode = HTREE_LOCK_NL; /* listen! */
654 + curln->ln_mode = HTREE_LOCK_INVAL;
657 + if (grtlk == NULL) { /* I must be the only one locking this key */
658 + struct htree_lock_node *tmpln;
660 + BUG_ON(htree_key_list_empty(curln));
662 + if (curln->ln_mode == HTREE_LOCK_NL) /* listening */
665 + /* not listening */
666 + if (list_empty(&curln->ln_alive_list)) { /* no more listener */
667 + htree_key_list_del_init(curln);
671 + tmpln = list_entry(curln->ln_alive_list.next,
672 + struct htree_lock_node, ln_alive_list);
674 + BUG_ON(tmpln->ln_mode != HTREE_LOCK_NL);
676 + htree_key_list_replace_init(curln, tmpln);
677 + list_del_init(&curln->ln_alive_list);
682 + /* have a granted lock */
683 + grtln = &grtlk->lk_nodes[dep];
684 + if (!list_empty(&curln->ln_blocked_list)) {
685 + /* only key owner can be on both lists */
686 + BUG_ON(htree_key_list_empty(curln));
688 + if (list_empty(&grtln->ln_blocked_list)) {
689 + list_add(&grtln->ln_blocked_list,
690 + &curln->ln_blocked_list);
692 + list_del_init(&curln->ln_blocked_list);
695 +	 * NB: this is the tricky part:
696 +	 * We have only two modes for child-locks (PR and PW), and only
697 +	 * the owner of the key (attached on major/minor_list) can be on
698 +	 * both blocked_list and granted_list, so @grtlk must be one
699 +	 * of these two cases:
701 +	 * a) @grtlk is taken from granted_list, which means we've granted
702 +	 *    more than one lock, so @grtlk has to be PR; the first blocked
703 +	 *    lock must be PW and we can't grant it at all.
704 +	 *    So even if @grtlk is not the owner of the key (empty blocked_list),
705 +	 *    we don't care, because we can't grant any lock anyway.
706 +	 * b) we just granted a new lock taken from the head of the blocked
707 +	 *    list; it should be the first granted lock and the first
708 +	 *    one linked on blocked_list.
710 +	 * Either way, we get the correct result by iterating the blocked_list
711 +	 * of @grtlk, and we don't have to bother finding the
712 +	 * owner of the current key.
714 + list_for_each_entry_safe(poslk, tmplk, &grtln->ln_blocked_list,
715 + lk_nodes[dep].ln_blocked_list) {
716 + if (grtlk->lk_nodes[dep].ln_mode == HTREE_LOCK_PW ||
717 + poslk->lk_nodes[dep].ln_mode == HTREE_LOCK_PW)
719 + /* grant all readers */
720 + list_del_init(&poslk->lk_nodes[dep].ln_blocked_list);
721 + list_add(&poslk->lk_nodes[dep].ln_granted_list,
722 + &grtln->ln_granted_list);
724 + BUG_ON(poslk->lk_task == NULL);
725 + wake_up_process(poslk->lk_task);
728 + /* if @curln is the owner of this key, replace it with @grtln */
729 + if (!htree_key_list_empty(curln))
730 + htree_key_list_replace_init(curln, grtln);
732 + if (curln->ln_mode == HTREE_LOCK_INVAL)
733 + list_del_init(&curln->ln_alive_list);
737 + * just a wrapper of htree_node_lock_internal; it returns 1 when granted
738 + * and 0 only if @wait is false and the lock can't be granted immediately
741 +htree_node_lock_try(struct htree_lock *lck, htree_lock_mode_t mode,
742 + u32 key, unsigned dep, int wait, void *event)
744 + struct htree_lock_head *lhead = lck->lk_head;
747 + BUG_ON(dep >= lck->lk_depth);
748 + BUG_ON(lck->lk_mode == HTREE_LOCK_INVAL);
750 + htree_spin_lock(lhead, dep);
751 + rc = htree_node_lock_internal(lhead, lck, mode, key, dep, wait, event);
753 + htree_spin_unlock(lhead, dep);
756 +EXPORT_SYMBOL(htree_node_lock_try);
758 +/* a wrapper of htree_node_unlock_internal */
760 +htree_node_unlock(struct htree_lock *lck, unsigned dep, void *event)
762 + struct htree_lock_head *lhead = lck->lk_head;
764 + BUG_ON(dep >= lck->lk_depth);
765 + BUG_ON(lck->lk_mode == HTREE_LOCK_INVAL);
767 + htree_spin_lock(lhead, dep);
768 + htree_node_unlock_internal(lhead, lck, dep, event);
769 + htree_spin_unlock(lhead, dep);
771 +EXPORT_SYMBOL(htree_node_unlock);
773 +/* stop listening on child-lock level @dep */
775 +htree_node_stop_listen(struct htree_lock *lck, unsigned dep)
777 + struct htree_lock_node *ln = &lck->lk_nodes[dep];
778 + struct htree_lock_node *tmp;
780 + BUG_ON(htree_node_is_granted(lck, dep));
781 + BUG_ON(!list_empty(&ln->ln_blocked_list));
782 + BUG_ON(!list_empty(&ln->ln_granted_list));
784 + if (!htree_node_is_listening(lck, dep))
787 + htree_spin_lock(lck->lk_head, dep);
788 + ln->ln_mode = HTREE_LOCK_INVAL;
789 + ln->ln_ev_target = NULL;
791 + if (htree_key_list_empty(ln)) { /* not owner */
792 + list_del_init(&ln->ln_alive_list);
796 + /* I'm the owner... */
797 + if (list_empty(&ln->ln_alive_list)) { /* no more listener */
798 + htree_key_list_del_init(ln);
802 + tmp = list_entry(ln->ln_alive_list.next,
803 + struct htree_lock_node, ln_alive_list);
805 + BUG_ON(tmp->ln_mode != HTREE_LOCK_NL);
806 + htree_key_list_replace_init(ln, tmp);
807 + list_del_init(&ln->ln_alive_list);
809 + htree_spin_unlock(lck->lk_head, dep);
811 +EXPORT_SYMBOL(htree_node_stop_listen);
813 +/* release all child-locks if we have any */
815 +htree_node_release_all(struct htree_lock *lck)
819 + for (i = 0; i < lck->lk_depth; i++) {
820 + if (htree_node_is_granted(lck, i))
821 + htree_node_unlock(lck, i, NULL);
822 + else if (htree_node_is_listening(lck, i))
823 + htree_node_stop_listen(lck, i);
828 + * obtain the htree lock; it may block if there's a conflict
829 + * with any granted or blocked lock and @wait is true.
830 + * NB: ALWAYS called holding lhead::lh_lock
833 +htree_lock_internal(struct htree_lock *lck, int wait)
835 + struct htree_lock_head *lhead = lck->lk_head;
840 + for (i = 0; i < HTREE_LOCK_MAX; i++) {
841 + if (lhead->lh_ngranted[i] != 0)
843 + if (lhead->lh_nblocked[i] != 0)
846 + if ((htree_lock_compat[lck->lk_mode] & granted) != granted ||
847 + (htree_lock_compat[lck->lk_mode] & blocked) != blocked) {
848 +		/* block the current lock even if it only conflicts with another
849 +		 * blocked lock, so a lock mode like EX won't starve */
852 + lhead->lh_nblocked[lck->lk_mode]++;
853 + lk_block_inc(lck->lk_mode);
855 + lck->lk_task = current;
856 + list_add_tail(&lck->lk_blocked_list, &lhead->lh_blocked_list);
859 + set_current_state(TASK_UNINTERRUPTIBLE);
860 + htree_spin_unlock(lhead, HTREE_DEP_ROOT);
861 + /* wait to be given the lock */
862 + if (lck->lk_task != NULL)
864 +	/* granted, no doubt. wake-up will set me RUNNING.
865 +	 * Since the thread may have been woken up spuriously,
866 +	 * we need to check again whether the lock was really granted. */
867 + if (!list_empty(&lck->lk_blocked_list)) {
868 + htree_spin_lock(lhead, HTREE_DEP_ROOT);
869 + if (list_empty(&lck->lk_blocked_list)) {
870 + htree_spin_unlock(lhead, HTREE_DEP_ROOT);
875 + return 0; /* without lh_lock */
877 + lhead->lh_ngranted[lck->lk_mode]++;
878 + lk_grant_inc(lck->lk_mode);
882 +/* release htree lock. NB: ALWAYS called holding lhead::lh_lock */
884 +htree_unlock_internal(struct htree_lock *lck)
886 + struct htree_lock_head *lhead = lck->lk_head;
887 + struct htree_lock *tmp;
888 + struct htree_lock *tmp2;
892 + BUG_ON(lhead->lh_ngranted[lck->lk_mode] == 0);
894 + lhead->lh_ngranted[lck->lk_mode]--;
895 + lck->lk_mode = HTREE_LOCK_INVAL;
897 + for (i = 0; i < HTREE_LOCK_MAX; i++) {
898 + if (lhead->lh_ngranted[i] != 0)
901 + list_for_each_entry_safe(tmp, tmp2,
902 + &lhead->lh_blocked_list, lk_blocked_list) {
903 + /* conflict with any granted lock? */
904 + if ((htree_lock_compat[tmp->lk_mode] & granted) != granted)
907 + list_del_init(&tmp->lk_blocked_list);
909 + BUG_ON(lhead->lh_nblocked[tmp->lk_mode] == 0);
911 + lhead->lh_nblocked[tmp->lk_mode]--;
912 + lhead->lh_ngranted[tmp->lk_mode]++;
913 + granted |= 1 << tmp->lk_mode;
915 + BUG_ON(tmp->lk_task == NULL);
916 + wake_up_process(tmp->lk_task);
920 +/* wrapper of htree_lock_internal and the exported interface.
921 + * It always returns 1 with the lock granted if @wait is true; it can return 0
922 + * if @wait is false and the locking request can't be granted immediately */
924 +htree_lock_try(struct htree_lock *lck, struct htree_lock_head *lhead,
925 + htree_lock_mode_t mode, int wait)
929 + BUG_ON(lck->lk_depth > lhead->lh_depth);
930 + BUG_ON(lck->lk_head != NULL);
931 + BUG_ON(lck->lk_task != NULL);
933 + lck->lk_head = lhead;
934 + lck->lk_mode = mode;
936 + htree_spin_lock(lhead, HTREE_DEP_ROOT);
937 + rc = htree_lock_internal(lck, wait);
939 + htree_spin_unlock(lhead, HTREE_DEP_ROOT);
942 +EXPORT_SYMBOL(htree_lock_try);
944 +/* wrapper of htree_unlock_internal and the exported interface.
945 + * It releases all htree_node_locks and the htree_lock */
947 +htree_unlock(struct htree_lock *lck)
949 + BUG_ON(lck->lk_head == NULL);
950 + BUG_ON(lck->lk_mode == HTREE_LOCK_INVAL);
952 + htree_node_release_all(lck);
954 + htree_spin_lock(lck->lk_head, HTREE_DEP_ROOT);
955 + htree_unlock_internal(lck);
956 + htree_spin_unlock(lck->lk_head, HTREE_DEP_ROOT);
957 + lck->lk_head = NULL;
958 + lck->lk_task = NULL;
960 +EXPORT_SYMBOL(htree_unlock);
962 +/* change lock mode */
964 +htree_change_mode(struct htree_lock *lck, htree_lock_mode_t mode)
966 + BUG_ON(lck->lk_mode == HTREE_LOCK_INVAL);
967 + lck->lk_mode = mode;
969 +EXPORT_SYMBOL(htree_change_mode);
971 +/* release the htree lock, then lock it again with a new mode.
972 + * This function first releases all htree_node_locks and the htree_lock,
973 + * then tries to regain the htree_lock with the new @mode.
974 + * It always returns 1 with the lock granted if @wait is true; it can return 0
975 + * if @wait is false and the locking request can't be granted immediately */
977 +htree_change_lock_try(struct htree_lock *lck, htree_lock_mode_t mode, int wait)
979 + struct htree_lock_head *lhead = lck->lk_head;
982 + BUG_ON(lhead == NULL);
983 + BUG_ON(lck->lk_mode == mode);
984 + BUG_ON(lck->lk_mode == HTREE_LOCK_INVAL || mode == HTREE_LOCK_INVAL);
986 + htree_node_release_all(lck);
988 + htree_spin_lock(lhead, HTREE_DEP_ROOT);
989 + htree_unlock_internal(lck);
990 + lck->lk_mode = mode;
991 + rc = htree_lock_internal(lck, wait);
993 + htree_spin_unlock(lhead, HTREE_DEP_ROOT);
996 +EXPORT_SYMBOL(htree_change_lock_try);
998 +/* create a htree_lock head with @depth levels (number of child-locks);
999 + * it is a per-resource structure */
1000 +struct htree_lock_head *
1001 +htree_lock_head_alloc(unsigned depth, unsigned hbits, unsigned priv)
1003 + struct htree_lock_head *lhead;
1006 + if (depth > HTREE_LOCK_DEP_MAX) {
1007 + printk(KERN_ERR "%d is larger than max htree_lock depth %d\n",
1008 + depth, HTREE_LOCK_DEP_MAX);
1012 + lhead = kzalloc(offsetof(struct htree_lock_head,
1013 + lh_children[depth]) + priv, GFP_NOFS);
1014 + if (lhead == NULL)
1017 + if (hbits < HTREE_HBITS_MIN)
1018 + hbits = HTREE_HBITS_MIN;
1019 + else if (hbits > HTREE_HBITS_MAX)
1020 + hbits = HTREE_HBITS_MAX;
1022 + lhead->lh_hbits = hbits;
1023 + lhead->lh_lock = 0;
1024 + lhead->lh_depth = depth;
1025 + INIT_LIST_HEAD(&lhead->lh_blocked_list);
1027 + lhead->lh_private = (void *)lhead +
1028 + offsetof(struct htree_lock_head, lh_children[depth]);
1031 + for (i = 0; i < depth; i++) {
1032 + INIT_LIST_HEAD(&lhead->lh_children[i].lc_list);
1033 + lhead->lh_children[i].lc_events = HTREE_EVENT_DISABLE;
1037 +EXPORT_SYMBOL(htree_lock_head_alloc);
1039 +/* free the htree_lock head */
1041 +htree_lock_head_free(struct htree_lock_head *lhead)
1045 + BUG_ON(!list_empty(&lhead->lh_blocked_list));
1046 + for (i = 0; i < lhead->lh_depth; i++)
1047 + BUG_ON(!list_empty(&lhead->lh_children[i].lc_list));
1050 +EXPORT_SYMBOL(htree_lock_head_free);
1052 +/* register event callback for @events of child-lock at level @dep */
1054 +htree_lock_event_attach(struct htree_lock_head *lhead, unsigned dep,
1055 + unsigned events, htree_event_cb_t callback)
1057 + BUG_ON(lhead->lh_depth <= dep);
1058 + lhead->lh_children[dep].lc_events = events;
1059 + lhead->lh_children[dep].lc_callback = callback;
1061 +EXPORT_SYMBOL(htree_lock_event_attach);
1063 +/* allocate a htree_lock, which is a per-thread structure; @pbytes is the
1064 + * number of extra bytes of private data for the caller */
1065 +struct htree_lock *
1066 +htree_lock_alloc(unsigned depth, unsigned pbytes)
1068 + struct htree_lock *lck;
1069 + int i = offsetof(struct htree_lock, lk_nodes[depth]);
1071 + if (depth > HTREE_LOCK_DEP_MAX) {
1072 + printk(KERN_ERR "%d is larger than max htree_lock depth %d\n",
1073 + depth, HTREE_LOCK_DEP_MAX);
1076 + lck = kzalloc(i + pbytes, GFP_NOFS);
1081 + lck->lk_private = (void *)lck + i;
1082 + lck->lk_mode = HTREE_LOCK_INVAL;
1083 + lck->lk_depth = depth;
1084 + INIT_LIST_HEAD(&lck->lk_blocked_list);
1086 + for (i = 0; i < depth; i++) {
1087 + struct htree_lock_node *node = &lck->lk_nodes[i];
1089 + node->ln_mode = HTREE_LOCK_INVAL;
1090 + INIT_LIST_HEAD(&node->ln_major_list);
1091 + INIT_LIST_HEAD(&node->ln_minor_list);
1092 + INIT_LIST_HEAD(&node->ln_alive_list);
1093 + INIT_LIST_HEAD(&node->ln_blocked_list);
1094 + INIT_LIST_HEAD(&node->ln_granted_list);
1099 +EXPORT_SYMBOL(htree_lock_alloc);
1101 +/* free htree_lock node */
1103 +htree_lock_free(struct htree_lock *lck)
1105 + BUG_ON(lck->lk_mode != HTREE_LOCK_INVAL);
1108 +EXPORT_SYMBOL(htree_lock_free);
1109 Index: linux-3.10.0-229.1.2.fc21.x86_64/fs/ext4/Makefile
1110 ===================================================================
1111 --- linux-3.10.0-229.1.2.fc21.x86_64.orig/fs/ext4/Makefile
1112 +++ linux-3.10.0-229.1.2.fc21.x86_64/fs/ext4/Makefile
1113 @@ -6,6 +6,7 @@ obj-$(CONFIG_EXT4_FS) += ext4.o
1115 ext4-y := balloc.o bitmap.o dir.o file.o fsync.o ialloc.o inode.o page-io.o \
1116 ioctl.o namei.o super.o symlink.o hash.o resize.o extents.o \
1118 ext4_jbd2.o migrate.o mballoc.o block_validity.o move_extent.o \
1119 mmp.o indirect.o extents_status.o xattr.o xattr_user.o \
1120 xattr_trusted.o inline.o
1121 Index: linux-3.10.0-229.1.2.fc21.x86_64/fs/ext4/ext4.h
1122 ===================================================================
1123 --- linux-3.10.0-229.1.2.fc21.x86_64.orig/fs/ext4/ext4.h
1124 +++ linux-3.10.0-229.1.2.fc21.x86_64/fs/ext4/ext4.h
1126 #include <linux/mutex.h>
1127 #include <linux/timer.h>
1128 #include <linux/wait.h>
1129 +#include <linux/htree_lock.h>
1130 #include <linux/blockgroup_lock.h>
1131 #include <linux/percpu_counter.h>
1132 #include <linux/ratelimit.h>
1133 @@ -821,6 +822,9 @@ struct ext4_inode_info {
1135 ext4_fsblk_t i_file_acl;
1137 + /* following fields for parallel directory operations -bzzz */
1138 + struct semaphore i_append_sem;
1141 * i_block_group is the number of the block group which contains
1142 * this file's inode. Constant across the lifetime of the inode,
1143 @@ -1846,6 +1850,71 @@ struct dx_hash_info
1145 #define HASH_NB_ALWAYS 1
1147 +/* assume name-hash is protected by upper layer */
1148 +#define EXT4_HTREE_LOCK_HASH 0
1150 +enum ext4_pdo_lk_types {
1151 +#if EXT4_HTREE_LOCK_HASH
1154 + EXT4_LK_DX, /* index block */
1155 + EXT4_LK_DE, /* directory entry block */
1156 + EXT4_LK_SPIN, /* spinlock */
1160 +/* read-only bit */
1161 +#define EXT4_LB_RO(b) (1 << (b))
1162 +/* read + write, high bits for writer */
1163 +#define EXT4_LB_RW(b) ((1 << (b)) | (1 << (EXT4_LK_MAX + (b))))
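+/* e.g. with the hash level compiled out (EXT4_HTREE_LOCK_HASH == 0),
+ * EXT4_LK_DX == 0, so EXT4_LB_RO(EXT4_LK_DX) == 0x1; assuming
+ * EXT4_LK_MAX == 3, EXT4_LB_RW(EXT4_LK_DX) == 0x1 | (1 << 3) == 0x9 */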
1165 +enum ext4_pdo_lock_bits {
1166 + /* DX lock bits */
1167 + EXT4_LB_DX_RO = EXT4_LB_RO(EXT4_LK_DX),
1168 + EXT4_LB_DX = EXT4_LB_RW(EXT4_LK_DX),
1169 + /* DE lock bits */
1170 + EXT4_LB_DE_RO = EXT4_LB_RO(EXT4_LK_DE),
1171 + EXT4_LB_DE = EXT4_LB_RW(EXT4_LK_DE),
1172 + /* DX spinlock bits */
1173 + EXT4_LB_SPIN_RO = EXT4_LB_RO(EXT4_LK_SPIN),
1174 + EXT4_LB_SPIN = EXT4_LB_RW(EXT4_LK_SPIN),
1175 + /* accurate searching */
1176 + EXT4_LB_EXACT = EXT4_LB_RO(EXT4_LK_MAX << 1),
1179 +enum ext4_pdo_lock_opc {
1181 + EXT4_HLOCK_READDIR = (EXT4_LB_DE_RO | EXT4_LB_DX_RO),
1182 + EXT4_HLOCK_LOOKUP = (EXT4_LB_DE_RO | EXT4_LB_SPIN_RO |
1184 + EXT4_HLOCK_DEL = (EXT4_LB_DE | EXT4_LB_SPIN_RO |
1186 + EXT4_HLOCK_ADD = (EXT4_LB_DE | EXT4_LB_SPIN_RO),
1189 + EXT4_HLOCK_LOOKUP_SAFE = (EXT4_LB_DE_RO | EXT4_LB_DX_RO |
1191 + EXT4_HLOCK_DEL_SAFE = (EXT4_LB_DE | EXT4_LB_DX_RO | EXT4_LB_EXACT),
1192 + EXT4_HLOCK_SPLIT = (EXT4_LB_DE | EXT4_LB_DX | EXT4_LB_SPIN),
1195 +extern struct htree_lock_head *ext4_htree_lock_head_alloc(unsigned hbits);
1196 +#define ext4_htree_lock_head_free(lhead) htree_lock_head_free(lhead)
1198 +extern struct htree_lock *ext4_htree_lock_alloc(void);
1199 +#define ext4_htree_lock_free(lck) htree_lock_free(lck)
1201 +extern void ext4_htree_lock(struct htree_lock *lck,
1202 + struct htree_lock_head *lhead,
1203 + struct inode *dir, unsigned flags);
1204 +#define ext4_htree_unlock(lck) htree_unlock(lck)
1206 +extern struct buffer_head *__ext4_find_entry(struct inode *dir,
1207 + const struct qstr *d_name,
1208 + struct ext4_dir_entry_2 **res_dir,
1209 + int *inlined, struct htree_lock *lck);
1210 +extern int __ext4_add_entry(handle_t *handle, struct dentry *dentry,
1211 + struct inode *inode, struct htree_lock *lck);
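+
+/* Illustrative sketch of how an external caller (e.g. osd-ldiskfs) is
+ * expected to drive this API (error handling omitted):
+ *
+ *	lhead = ext4_htree_lock_head_alloc(HTREE_HBITS_DEF);
+ *	lck = ext4_htree_lock_alloc();
+ *	ext4_htree_lock(lck, lhead, dir, EXT4_HLOCK_LOOKUP);
+ *	bh = __ext4_find_entry(dir, name, &de, NULL, lck);
+ *	ext4_htree_unlock(lck);
+ *	ext4_htree_lock_free(lck);
+ */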
1214 * Describe an inode's exact location on disk and in memory
1215 @@ -2088,9 +2157,17 @@ void ext4_insert_dentry(struct inode *in
1216 const char *name, int namelen, void *data);
1217 static inline void ext4_update_dx_flag(struct inode *inode)
1219 + /* Disable it for ldiskfs, because going from a DX directory to
1220 + * a non-DX directory while it is in use will completely break
1221 + * the htree-locking.
1222 + * If we really want to support this operation in the future,
1223 +	 * we would need to exclusively lock the directory here, which
1224 +	 * would increase code complexity */
1226 if (!EXT4_HAS_COMPAT_FEATURE(inode->i_sb,
1227 EXT4_FEATURE_COMPAT_DIR_INDEX))
1228 ext4_clear_inode_flag(inode, EXT4_INODE_INDEX);
1231 static unsigned char ext4_filetype_table[] = {
1232 DT_UNKNOWN, DT_REG, DT_DIR, DT_CHR, DT_BLK, DT_FIFO, DT_SOCK, DT_LNK
1233 Index: linux-3.10.0-1062.el7.x86_64/fs/ext4/namei.c
1234 ===================================================================
1235 --- linux-3.10.0-1062.el7.x86_64.orig/fs/ext4/namei.c
1236 +++ linux-3.10.0-1062.el7.x86_64/fs/ext4/namei.c
1237 @@ -53,6 +53,7 @@ struct buffer_head *ext4_append(handle_t
1240 struct buffer_head *bh;
1241 + struct ext4_inode_info *ei = EXT4_I(inode);
1244 if (unlikely(EXT4_SB(inode->i_sb)->s_max_dir_size_kb &&
1245 @@ -60,15 +61,22 @@ struct buffer_head *ext4_append(handle_t
1246 EXT4_SB(inode->i_sb)->s_max_dir_size_kb)))
1247 return ERR_PTR(-ENOSPC);
1249 + /* with parallel dir operations all appends
1250 + * have to be serialized -bzzz */
1251 + down(&ei->i_append_sem);
1253 *block = inode->i_size >> inode->i_sb->s_blocksize_bits;
1255 bh = ext4_bread(handle, inode, *block, 1, &err);
1258 + up(&ei->i_append_sem);
1259 return ERR_PTR(err);
1261 inode->i_size += inode->i_sb->s_blocksize;
1262 EXT4_I(inode)->i_disksize = inode->i_size;
1263 BUFFER_TRACE(bh, "get_write_access");
1264 err = ext4_journal_get_write_access(handle, bh);
1265 + up(&ei->i_append_sem);
1268 ext4_std_error(inode->i_sb, err);
1269 @@ -247,7 +255,7 @@ static struct dx_frame *dx_probe(const s
1271 struct dx_hash_info *hinfo,
1272 struct dx_frame *frame,
1274 + struct htree_lock *lck, int *err);
1275 static void dx_release(struct dx_frame *frames);
1276 static int dx_make_map(struct ext4_dir_entry_2 *de, unsigned blocksize,
1277 struct dx_hash_info *hinfo, struct dx_map_entry map[]);
1278 @@ -260,13 +268,13 @@ static void dx_insert_block(struct dx_fr
1279 static int ext4_htree_next_block(struct inode *dir, __u32 hash,
1280 struct dx_frame *frame,
1281 struct dx_frame *frames,
1282 - __u32 *start_hash);
1283 + __u32 *start_hash, struct htree_lock *lck);
1284 static struct buffer_head * ext4_dx_find_entry(struct inode *dir,
1285 const struct qstr *d_name,
1286 struct ext4_dir_entry_2 **res_dir,
1288 + struct htree_lock *lck, int *err);
1289 static int ext4_dx_add_entry(handle_t *handle, struct dentry *dentry,
1290 - struct inode *inode);
1291 + struct inode *inode, struct htree_lock *lck);
1293 /* checksumming functions */
1294 void initialize_dirent_tail(struct ext4_dir_entry_tail *t,
1295 @@ -670,6 +678,227 @@ struct stats dx_show_entries(struct dx_h
1297 #endif /* DX_DEBUG */
1299 +/* private data for htree_lock */
1300 +struct ext4_dir_lock_data {
1301 + unsigned ld_flags; /* bits-map for lock types */
1302 + unsigned ld_count; /* # entries of the last DX block */
1303 + struct dx_entry ld_at_entry; /* copy of leaf dx_entry */
1304 + struct dx_entry *ld_at; /* position of leaf dx_entry */
1307 +#define ext4_htree_lock_data(l) ((struct ext4_dir_lock_data *)(l)->lk_private)
1308 +#define ext4_find_entry(dir, name, dirent, inline) \
1309 + __ext4_find_entry(dir, name, dirent, inline, NULL)
1310 +#define ext4_add_entry(handle, dentry, inode) \
1311 + __ext4_add_entry(handle, dentry, inode, NULL)
1313 +/* NB: ext4_lblk_t is 32 bits so we use high bits to identify invalid blk */
1314 +#define EXT4_HTREE_NODE_CHANGED (0xcafeULL << 32)
1316 +static void ext4_htree_event_cb(void *target, void *event)
1318 + u64 *block = (u64 *)target;
1320 + if (*block == dx_get_block((struct dx_entry *)event))
1321 + *block = EXT4_HTREE_NODE_CHANGED;
1324 +struct htree_lock_head *ext4_htree_lock_head_alloc(unsigned hbits)
1326 + struct htree_lock_head *lhead;
1328 + lhead = htree_lock_head_alloc(EXT4_LK_MAX, hbits, 0);
1329 + if (lhead != NULL) {
1330 + htree_lock_event_attach(lhead, EXT4_LK_SPIN, HTREE_EVENT_WR,
1331 + ext4_htree_event_cb);
1335 +EXPORT_SYMBOL(ext4_htree_lock_head_alloc);
1337 +struct htree_lock *ext4_htree_lock_alloc(void)
1339 + return htree_lock_alloc(EXT4_LK_MAX,
1340 + sizeof(struct ext4_dir_lock_data));
1342 +EXPORT_SYMBOL(ext4_htree_lock_alloc);
1344 +static htree_lock_mode_t ext4_htree_mode(unsigned flags)
1347 + default: /* 0 or unknown flags require EX lock */
1348 + return HTREE_LOCK_EX;
1349 + case EXT4_HLOCK_READDIR:
1350 + return HTREE_LOCK_PR;
1351 + case EXT4_HLOCK_LOOKUP:
1352 + return HTREE_LOCK_CR;
1353 + case EXT4_HLOCK_DEL:
1354 + case EXT4_HLOCK_ADD:
1355 + return HTREE_LOCK_CW;
1359 +/* return PR for read-only operations, otherwise return EX */
1360 +static inline htree_lock_mode_t ext4_htree_safe_mode(unsigned flags)
1362 + int writer = (flags & EXT4_LB_DE) == EXT4_LB_DE;
1364 + /* 0 requires EX lock */
1365 + return (flags == 0 || writer) ? HTREE_LOCK_EX : HTREE_LOCK_PR;
1368 +static int ext4_htree_safe_locked(struct htree_lock *lck)
1372 + if (lck == NULL || lck->lk_mode == HTREE_LOCK_EX)
1375 + writer = (ext4_htree_lock_data(lck)->ld_flags & EXT4_LB_DE) ==
1377 + if (writer) /* all readers & writers are excluded? */
1378 + return lck->lk_mode == HTREE_LOCK_EX;
1380 + /* all writers are excluded? */
1381 + return lck->lk_mode == HTREE_LOCK_PR ||
1382 + lck->lk_mode == HTREE_LOCK_PW ||
1383 + lck->lk_mode == HTREE_LOCK_EX;
1386 +/* relock htree_lock with EX mode if it's a change operation; otherwise
1387 + * relock it with PR mode. It's a no-op if PDO is disabled. */
1388 +static void ext4_htree_safe_relock(struct htree_lock *lck)
1390 + if (!ext4_htree_safe_locked(lck)) {
1391 + unsigned flags = ext4_htree_lock_data(lck)->ld_flags;
1393 + htree_change_lock(lck, ext4_htree_safe_mode(flags));
1397 +void ext4_htree_lock(struct htree_lock *lck, struct htree_lock_head *lhead,
1398 + struct inode *dir, unsigned flags)
1400 + htree_lock_mode_t mode = is_dx(dir) ? ext4_htree_mode(flags) :
1401 + ext4_htree_safe_mode(flags);
1403 + ext4_htree_lock_data(lck)->ld_flags = flags;
1404 + htree_lock(lck, lhead, mode);
1406 + ext4_htree_safe_relock(lck); /* make sure it's safe locked */
1408 +EXPORT_SYMBOL(ext4_htree_lock);
1410 +static int ext4_htree_node_lock(struct htree_lock *lck, struct dx_entry *at,
1411 + unsigned lmask, int wait, void *ev)
1413 + u32 key = (at == NULL) ? 0 : dx_get_block(at);
1416 + /* NOOP if htree is well protected or caller doesn't require the lock */
1417 + if (ext4_htree_safe_locked(lck) ||
1418 + !(ext4_htree_lock_data(lck)->ld_flags & lmask))
1421 + mode = (ext4_htree_lock_data(lck)->ld_flags & lmask) == lmask ?
1422 + HTREE_LOCK_PW : HTREE_LOCK_PR;
1424 + if (htree_node_lock_try(lck, mode, key, ffz(~lmask), wait, ev))
1426 + if (!(lmask & EXT4_LB_SPIN)) /* not a spinlock */
1428 + cpu_relax(); /* spin until granted */
1432 +static int ext4_htree_node_locked(struct htree_lock *lck, unsigned lmask)
1434 + return ext4_htree_safe_locked(lck) ||
1435 + htree_node_is_granted(lck, ffz(~lmask));
1438 +static void ext4_htree_node_unlock(struct htree_lock *lck,
1439 + unsigned lmask, void *buf)
1441 +	/* NB: it's safe to call this multiple times, even if it's not locked */
1442 + if (!ext4_htree_safe_locked(lck) &&
1443 + htree_node_is_granted(lck, ffz(~lmask)))
1444 + htree_node_unlock(lck, ffz(~lmask), buf);
1447 +#define ext4_htree_dx_lock(lck, key) \
1448 + ext4_htree_node_lock(lck, key, EXT4_LB_DX, 1, NULL)
1449 +#define ext4_htree_dx_lock_try(lck, key) \
1450 + ext4_htree_node_lock(lck, key, EXT4_LB_DX, 0, NULL)
1451 +#define ext4_htree_dx_unlock(lck) \
1452 + ext4_htree_node_unlock(lck, EXT4_LB_DX, NULL)
1453 +#define ext4_htree_dx_locked(lck) \
1454 + ext4_htree_node_locked(lck, EXT4_LB_DX)
1456 +static void ext4_htree_dx_need_lock(struct htree_lock *lck)
1458 + struct ext4_dir_lock_data *ld;
1460 + if (ext4_htree_safe_locked(lck))
1463 + ld = ext4_htree_lock_data(lck);
1464 + switch (ld->ld_flags) {
1467 + case EXT4_HLOCK_LOOKUP:
1468 + ld->ld_flags = EXT4_HLOCK_LOOKUP_SAFE;
1470 + case EXT4_HLOCK_DEL:
1471 + ld->ld_flags = EXT4_HLOCK_DEL_SAFE;
1473 + case EXT4_HLOCK_ADD:
1474 + ld->ld_flags = EXT4_HLOCK_SPLIT;
1479 +#define ext4_htree_de_lock(lck, key) \
1480 + ext4_htree_node_lock(lck, key, EXT4_LB_DE, 1, NULL)
1481 +#define ext4_htree_de_unlock(lck) \
1482 + ext4_htree_node_unlock(lck, EXT4_LB_DE, NULL)
1484 +#define ext4_htree_spin_lock(lck, key, event) \
1485 + ext4_htree_node_lock(lck, key, EXT4_LB_SPIN, 0, event)
1486 +#define ext4_htree_spin_unlock(lck) \
1487 + ext4_htree_node_unlock(lck, EXT4_LB_SPIN, NULL)
1488 +#define ext4_htree_spin_unlock_listen(lck, p) \
1489 + ext4_htree_node_unlock(lck, EXT4_LB_SPIN, p)
1491 +static void ext4_htree_spin_stop_listen(struct htree_lock *lck)
1493 + if (!ext4_htree_safe_locked(lck) &&
1494 + htree_node_is_listening(lck, ffz(~EXT4_LB_SPIN)))
1495 + htree_node_stop_listen(lck, ffz(~EXT4_LB_SPIN));
1499 + DX_HASH_COL_IGNORE, /* ignore collision while probing frames */
1500 + DX_HASH_COL_YES, /* there is collision and it does matter */
1501 + DX_HASH_COL_NO, /* there is no collision */
1504 +static int dx_probe_hash_collision(struct htree_lock *lck,
1505 + struct dx_entry *entries,
1506 + struct dx_entry *at, u32 hash)
1508 + if (!(ext4_htree_lock_data(lck)->ld_flags & EXT4_LB_EXACT)) {
1509 + return DX_HASH_COL_IGNORE; /* don't care about collision */
1511 + } else if (at == entries + dx_get_count(entries) - 1) {
1512 + return DX_HASH_COL_IGNORE; /* not in any leaf of this DX */
1514 + } else { /* hash collision? */
1515 + return ((dx_get_hash(at + 1) & ~1) == hash) ?
1516 + DX_HASH_COL_YES : DX_HASH_COL_NO;
1521 * Probe for a directory leaf block to search.
1523 @@ -681,10 +910,11 @@ struct stats dx_show_entries(struct dx_h
1525 static struct dx_frame *
1526 dx_probe(const struct qstr *d_name, struct inode *dir,
1527 - struct dx_hash_info *hinfo, struct dx_frame *frame_in, int *err)
1528 + struct dx_hash_info *hinfo, struct dx_frame *frame_in,
1529 + struct htree_lock *lck, int *err)
1531 unsigned count, indirect;
1532 - struct dx_entry *at, *entries, *p, *q, *m;
1533 + struct dx_entry *at, *entries, *p, *q, *m, *dx = NULL;
1534 struct dx_root_info *info;
1535 struct buffer_head *bh;
1536 struct dx_frame *frame = frame_in;
1537 @@ -758,8 +988,15 @@ dx_probe(const struct qstr *d_name, stru
1538 dxtrace(printk("Look up %x", hash));
1541 + if (indirect == 0) { /* the last index level */
1542 +		/* NB: ext4_htree_dx_lock() could be a no-op if the
1543 +		 * DX-lock flag is not set for the current operation */
1544 + ext4_htree_dx_lock(lck, dx);
1545 + ext4_htree_spin_lock(lck, dx, NULL);
1547 count = dx_get_count(entries);
1548 - if (!count || count > dx_get_limit(entries)) {
1549 + if (count == 0 || count > dx_get_limit(entries)) {
1550 + ext4_htree_spin_unlock(lck); /* release spin */
1551 ext4_warning(dir->i_sb,
1552 "dx entry: no count or count > limit");
1554 @@ -800,7 +1037,70 @@ dx_probe(const struct qstr *d_name, stru
1556 frame->entries = entries;
1558 - if (!indirect--) return frame;
1560 + if (indirect == 0) { /* the last index level */
1561 + struct ext4_dir_lock_data *ld;
1564 +		/* By default we only lock the DE-block; however, we will
1565 +		 * also lock the last-level DX-block if:
1566 +		 * a) there is a hash collision:
1567 +		 *    we will set the DX-lock flag (a few lines below)
1568 +		 *    and retry, this time locking the DX-block;
1569 +		 *    see details in dx_probe_hash_collision()
1570 +		 * b) it's a retry after splitting:
1571 +		 *    we need to lock the last-level DX-block so nobody
1572 +		 *    else can split any leaf blocks under the same
1573 +		 *    DX-block, see details in ext4_dx_add_entry()
1575 + if (ext4_htree_dx_locked(lck)) {
1576 + /* DX-block is locked, just lock DE-block
1578 + ext4_htree_spin_unlock(lck);
1579 + if (!ext4_htree_safe_locked(lck))
1580 + ext4_htree_de_lock(lck, frame->at);
1583 + /* it's pdirop and no DX lock */
1584 + if (dx_probe_hash_collision(lck, entries, at, hash) ==
1585 + DX_HASH_COL_YES) {
1586 +			/* found hash collision; set the DX-lock flag
1587 +			 * and retry to obtain the DX-lock */
1588 + ext4_htree_spin_unlock(lck);
1589 + ext4_htree_dx_need_lock(lck);
1592 + ld = ext4_htree_lock_data(lck);
1593 +		/* because I don't hold the DX lock, @at can't be trusted
1594 +		 * after I release the spinlock, so I have to save it */
1596 + ld->ld_at_entry = *at;
1597 + ld->ld_count = dx_get_count(entries);
1599 + frame->at = &ld->ld_at_entry;
1600 + myblock = dx_get_block(at);
1602 +		/* NB: mind the locking order here */
1603 + ext4_htree_spin_unlock_listen(lck, &myblock);
1604 +		/* another thread can split this DE-block because:
1605 +		 * a) I don't have the lock for the DE-block yet
1606 +		 * b) I released the spinlock on the DX-block
1607 +		 * if that happens, I can detect it by listening for a
1608 +		 * split event on this DE-block */
1609 + ext4_htree_de_lock(lck, frame->at);
1610 + ext4_htree_spin_stop_listen(lck);
1612 + if (myblock == EXT4_HTREE_NODE_CHANGED) {
1613 +			/* someone split this DE-block before
1614 +			 * I locked it; I need to retry and lock
1615 +			 * the valid DE-block */
1616 + ext4_htree_de_unlock(lck);
1623 bh = ext4_read_dirblock(dir, dx_get_block(at), INDEX);
1626 @@ -868,7 +1168,7 @@ static void dx_release (struct dx_frame
1627 static int ext4_htree_next_block(struct inode *dir, __u32 hash,
1628 struct dx_frame *frame,
1629 struct dx_frame *frames,
1630 - __u32 *start_hash)
1631 + __u32 *start_hash, struct htree_lock *lck)
1634 struct buffer_head *bh;
1635 @@ -883,12 +1183,22 @@ static int ext4_htree_next_block(struct
1636 * this loop, num_frames indicates the number of interior
1637 * nodes need to be read.
1639 + ext4_htree_de_unlock(lck);
1641 - if (++(p->at) < p->entries + dx_get_count(p->entries))
1643 + if (num_frames > 0 || ext4_htree_dx_locked(lck)) {
1644 + /* num_frames > 0 :
1646 + * ext4_htree_dx_locked:
1647 +		 * frame->at is a reliable pointer returned by dx_probe;
1648 +		 * otherwise dx_probe already knew there was no collision */
1649 + if (++(p->at) < p->entries + dx_get_count(p->entries))
1655 + if (num_frames == 1)
1656 + ext4_htree_dx_unlock(lck);
1660 @@ -911,6 +1221,13 @@ static int ext4_htree_next_block(struct
1661 * block so no check is necessary
1663 while (num_frames--) {
1664 + if (num_frames == 0) {
1665 +			/* it's not always necessary; we just don't want to
1666 +			 * detect hash collisions again */
1667 + ext4_htree_dx_need_lock(lck);
1668 + ext4_htree_dx_lock(lck, p->at);
1671 bh = ext4_read_dirblock(dir, dx_get_block(p->at), INDEX);
1674 @@ -919,6 +1236,7 @@ static int ext4_htree_next_block(struct
1676 p->at = p->entries = ((struct dx_node *) bh->b_data)->entries;
1678 + ext4_htree_de_lock(lck, p->at);
1682 @@ -1021,10 +1339,10 @@ int ext4_htree_fill_tree(struct file *di
1684 hinfo.hash = start_hash;
1685 hinfo.minor_hash = 0;
1686 - frame = dx_probe(NULL, dir, &hinfo, frames, &err);
1687 + /* assume it's PR locked */
1688 + frame = dx_probe(NULL, dir, &hinfo, frames, NULL, &err);
1692 /* Add '.' and '..' from the htree header */
1693 if (!start_hash && !start_minor_hash) {
1694 de = (struct ext4_dir_entry_2 *) frames[0].bh->b_data;
1695 @@ -1051,7 +1369,7 @@ int ext4_htree_fill_tree(struct file *di
1698 ret = ext4_htree_next_block(dir, HASH_NB_ALWAYS,
1699 - frame, frames, &hashval);
1700 + frame, frames, &hashval, NULL);
1701 *next_hash = hashval;
1704 @@ -1244,10 +1562,10 @@ static int is_dx_internal_node(struct in
1705 * The returned buffer_head has ->b_count elevated. The caller is expected
1706 * to brelse() it when appropriate.
1708 -static struct buffer_head * ext4_find_entry (struct inode *dir,
1709 +struct buffer_head *__ext4_find_entry(struct inode *dir,
1710 const struct qstr *d_name,
1711 struct ext4_dir_entry_2 **res_dir,
1713 + int *inlined, struct htree_lock *lck)
1715 struct super_block *sb;
1716 struct buffer_head *bh_use[NAMEI_RA_SIZE];
1717 @@ -1291,7 +1609,7 @@ static struct buffer_head * ext4_find_en
1721 - bh = ext4_dx_find_entry(dir, d_name, res_dir, &err);
1722 + bh = ext4_dx_find_entry(dir, d_name, res_dir, lck, &err);
1724 * On success, or if the error was file not found,
1725 * return. Otherwise, fall back to doing a search the
1726 @@ -1305,6 +1623,7 @@ static struct buffer_head * ext4_find_en
1728 dxtrace(printk(KERN_DEBUG "ext4_find_entry: dx failed, "
1730 + ext4_htree_safe_relock(lck);
1733 nblocks = dir->i_size >> EXT4_BLOCK_SIZE_BITS(sb);
1734 @@ -1402,9 +1721,12 @@ cleanup_and_exit:
1735 brelse(bh_use[ra_ptr]);
1738 +EXPORT_SYMBOL(__ext4_find_entry);
1740 -static struct buffer_head * ext4_dx_find_entry(struct inode *dir, const struct qstr *d_name,
1741 - struct ext4_dir_entry_2 **res_dir, int *err)
1742 +static struct buffer_head *ext4_dx_find_entry(struct inode *dir,
1743 + const struct qstr *d_name,
1744 + struct ext4_dir_entry_2 **res_dir,
1745 + struct htree_lock *lck, int *err)
1747 struct super_block * sb = dir->i_sb;
1748 struct dx_hash_info hinfo;
1749 @@ -1413,7 +1735,7 @@ static struct buffer_head * ext4_dx_find
1753 - if (!(frame = dx_probe(d_name, dir, &hinfo, frames, err)))
1754 + if (!(frame = dx_probe(d_name, dir, &hinfo, frames, lck, err)))
1757 block = dx_get_block(frame->at);
1758 @@ -1437,7 +1759,7 @@ static struct buffer_head * ext4_dx_find
1760 /* Check to see if we should continue to search */
1761 retval = ext4_htree_next_block(dir, hinfo.hash, frame,
1763 + frames, NULL, lck);
1766 "error reading index page in directory #%lu",
1767 @@ -1597,8 +1919,9 @@ static struct ext4_dir_entry_2* dx_pack_
1768 * Returns pointer to de in block into which the new entry will be inserted.
1770 static struct ext4_dir_entry_2 *do_split(handle_t *handle, struct inode *dir,
1771 - struct buffer_head **bh,struct dx_frame *frame,
1772 - struct dx_hash_info *hinfo, int *error)
1773 + struct buffer_head **bh, struct dx_frame *frames,
1774 + struct dx_frame *frame, struct dx_hash_info *hinfo,
1775 + struct htree_lock *lck, int *error)
1777 unsigned blocksize = dir->i_sb->s_blocksize;
1778 unsigned count, continued;
1779 @@ -1661,7 +1984,14 @@ static struct ext4_dir_entry_2 *do_split
1780 hash2, split, count-split));
1782 /* Fancy dance to stay within two buffers */
1783 - de2 = dx_move_dirents(data1, data2, map + split, count - split, blocksize);
1784 + if (hinfo->hash < hash2) {
1785 + de2 = dx_move_dirents(data1, data2, map + split,
1786 + count - split, blocksize);
1788 +		/* make sure we add the entry to the same block
1789 +		 * that we have already locked */
1790 + de2 = dx_move_dirents(data1, data2, map, split, blocksize);
1792 de = dx_pack_dirents(data1, blocksize);
1793 de->rec_len = ext4_rec_len_to_disk(data1 + (blocksize - csum_size) -
1795 @@ -1680,13 +2010,21 @@ static struct ext4_dir_entry_2 *do_split
1796 dxtrace(dx_show_leaf (hinfo, (struct ext4_dir_entry_2 *) data1, blocksize, 1));
1797 dxtrace(dx_show_leaf (hinfo, (struct ext4_dir_entry_2 *) data2, blocksize, 1));
1799 - /* Which block gets the new entry? */
1800 - if (hinfo->hash >= hash2)
1804 + ext4_htree_spin_lock(lck, frame > frames ? (frame - 1)->at : NULL,
1805 + frame->at); /* notify block is being split */
1806 + if (hinfo->hash < hash2) {
1807 + dx_insert_block(frame, hash2 + continued, newblock);
1810 + /* switch block number */
1811 + dx_insert_block(frame, hash2 + continued,
1812 + dx_get_block(frame->at));
1813 + dx_set_block(frame->at, newblock);
1816 - dx_insert_block(frame, hash2 + continued, newblock);
1817 + ext4_htree_spin_unlock(lck);
1818 + ext4_htree_dx_unlock(lck);
1820 err = ext4_handle_dirty_dirent_node(handle, dir, bh2);
1823 @@ -1965,7 +2303,7 @@ static int make_indexed_dir(handle_t *ha
1827 - de = do_split(handle,dir, &bh2, frame, &hinfo, &retval);
1828 + de = do_split(handle, dir, &bh2, frames, frame, &hinfo, NULL, &retval);
1832 @@ -2072,8 +2410,8 @@ out:
1833 * may not sleep between calling this and putting something into
1834 * the entry, as someone else might have used it while you slept.
1836 -static int ext4_add_entry(handle_t *handle, struct dentry *dentry,
1837 - struct inode *inode)
1838 +int __ext4_add_entry(handle_t *handle, struct dentry *dentry,
1839 + struct inode *inode, struct htree_lock *lck)
1841 struct inode *dir = dentry->d_parent->d_inode;
1842 struct buffer_head *bh = NULL;
1843 @@ -2108,9 +2446,10 @@ static int ext4_add_entry(handle_t *hand
1844 if (dentry->d_name.len == 2 &&
1845 memcmp(dentry->d_name.name, "..", 2) == 0)
1846 return ext4_update_dotdot(handle, dentry, inode);
1847 - retval = ext4_dx_add_entry(handle, dentry, inode);
1848 + retval = ext4_dx_add_entry(handle, dentry, inode, lck);
1849 if (!retval || (retval != ERR_BAD_DX_DIR))
1851 + ext4_htree_safe_relock(lck);
1852 ext4_clear_inode_flag(dir, EXT4_INODE_INDEX);
1854 ext4_mark_inode_dirty(handle, dir);
1855 @@ -2152,12 +2491,13 @@ out:
1856 ext4_set_inode_state(inode, EXT4_STATE_NEWENTRY);
1859 +EXPORT_SYMBOL(__ext4_add_entry);
1862 * Returns 0 for success, or a negative error value
1864 static int ext4_dx_add_entry(handle_t *handle, struct dentry *dentry,
1865 - struct inode *inode)
1866 + struct inode *inode, struct htree_lock *lck)
1868 struct dx_frame frames[EXT4_HTREE_LEVEL], *frame;
1869 struct dx_entry *entries, *at;
1870 @@ -2171,7 +2511,7 @@ static int ext4_dx_add_entry(handle_t *h
1874 - frame = dx_probe(&dentry->d_name, dir, &hinfo, frames, &err);
1875 + frame = dx_probe(&dentry->d_name, dir, &hinfo, frames, lck, &err);
1878 entries = frame->entries;
1879 @@ -2201,6 +2541,11 @@ again:
1880 struct dx_node *node2;
1881 struct buffer_head *bh2;
1883 + if (!ext4_htree_safe_locked(lck)) { /* retry with EX lock */
1884 + ext4_htree_safe_relock(lck);
1888 while (frame > frames) {
1889 if (dx_get_count((frame - 1)->entries) <
1890 dx_get_limit((frame - 1)->entries)) {
1891 @@ -2304,16 +2649,43 @@ again:
1895 + } else if (!ext4_htree_dx_locked(lck)) {
1896 + struct ext4_dir_lock_data *ld = ext4_htree_lock_data(lck);
1898 + /* not well protected, require DX lock */
1899 + ext4_htree_dx_need_lock(lck);
1900 + at = frame > frames ? (frame - 1)->at : NULL;
1902 + /* NB: no risk of deadlock because it's just a try.
1904 +		 * NB: we check ld_count twice, the first time before
1905 +		 * taking the DX lock, the second time while holding it.
1907 +		 * NB: we never free directory blocks so far, which
1908 +		 * means the value returned by dx_get_count() should equal
1909 +		 * ld->ld_count if nobody split any DE-block under @at,
1910 +		 * and ld->ld_at still points to a valid dx_entry. */
1911 + if ((ld->ld_count != dx_get_count(entries)) ||
1912 + !ext4_htree_dx_lock_try(lck, at) ||
1913 + (ld->ld_count != dx_get_count(entries))) {
1917 + /* OK, I've got DX lock and nothing changed */
1918 + frame->at = ld->ld_at;
1920 - de = do_split(handle, dir, &bh, frame, &hinfo, &err);
1921 + de = do_split(handle, dir, &bh, frames, frame, &hinfo, lck, &err);
1925 err = add_dirent_to_buf(handle, dentry, inode, de, bh);
1929 ext4_std_error(dir->i_sb, err); /* this is a no-op if err == 0 */
1931 + ext4_htree_dx_unlock(lck);
1932 + ext4_htree_de_unlock(lck);
1935 +	/* @restart being true means the htree-path has changed; we need to
1936 Index: linux-3.10.0-229.1.2.fc21.x86_64/fs/ext4/super.c
1937 ===================================================================
1938 --- linux-3.10.0-229.1.2.fc21.x86_64.orig/fs/ext4/super.c
1939 +++ linux-3.10.0-229.1.2.fc21.x86_64/fs/ext4/super.c
1940 @@ -875,6 +875,7 @@ static struct inode *ext4_alloc_inode(st
1942 ei->vfs_inode.i_version = 1;
1943 spin_lock_init(&ei->i_raw_lock);
1944 + sema_init(&ei->i_append_sem, 1);
1945 INIT_LIST_HEAD(&ei->i_prealloc_list);
1946 spin_lock_init(&ei->i_prealloc_lock);
1947 ext4_es_init_tree(&ei->i_es_tree);