1 diff --git a/fs/ext4/Makefile b/fs/ext4/Makefile
2 index f52cf54..3f16939 100644
5 @@ -6,6 +6,7 @@ obj-$(CONFIG_EXT4_FS) += ext4.o
7 ext4-y := balloc.o bitmap.o dir.o file.o fsync.o ialloc.o inode.o page-io.o \
8 ioctl.o namei.o super.o symlink.o hash.o resize.o extents.o \
10 ext4_jbd2.o migrate.o mballoc.o block_validity.o move_extent.o \
11 mmp.o indirect.o extents_status.o xattr.o xattr_user.o \
12 xattr_trusted.o inline.o readpage.o sysfs.o
13 diff --git a/fs/ext4/ext4.h b/fs/ext4/ext4.h
14 index 2d22f1a..005c9b3 100644
18 #include <linux/timer.h>
19 #include <linux/version.h>
20 #include <linux/wait.h>
21 +#include <linux/htree_lock.h>
22 #include <linux/blockgroup_lock.h>
23 #include <linux/percpu_counter.h>
24 #include <linux/ratelimit.h>
25 @@ -880,6 +881,9 @@ struct ext4_inode_info {
27 ext4_fsblk_t i_file_acl;
29 + /* the following fields support parallel directory operations -bzzz */
30 + struct semaphore i_append_sem;
33 * i_block_group is the number of the block group which contains
34 * this file's inode. Constant across the lifetime of the inode,
35 @@ -2086,6 +2090,71 @@ struct dx_hash_info
39 +/* assume the name hash is protected by an upper layer */
40 +#define EXT4_HTREE_LOCK_HASH 0
42 +enum ext4_pdo_lk_types {
43 +#if EXT4_HTREE_LOCK_HASH
46 + EXT4_LK_DX, /* index block */
47 + EXT4_LK_DE, /* directory entry block */
48 + EXT4_LK_SPIN, /* spinlock */
53 +#define EXT4_LB_RO(b) (1 << (b))
54 +/* read + write, high bits for writer */
55 +#define EXT4_LB_RW(b) ((1 << (b)) | (1 << (EXT4_LK_MAX + (b))))
57 +enum ext4_pdo_lock_bits {
59 + EXT4_LB_DX_RO = EXT4_LB_RO(EXT4_LK_DX),
60 + EXT4_LB_DX = EXT4_LB_RW(EXT4_LK_DX),
62 + EXT4_LB_DE_RO = EXT4_LB_RO(EXT4_LK_DE),
63 + EXT4_LB_DE = EXT4_LB_RW(EXT4_LK_DE),
64 + /* DX spinlock bits */
65 + EXT4_LB_SPIN_RO = EXT4_LB_RO(EXT4_LK_SPIN),
66 + EXT4_LB_SPIN = EXT4_LB_RW(EXT4_LK_SPIN),
67 + /* exact-match (accurate) searching */
68 + EXT4_LB_EXACT = EXT4_LB_RO(EXT4_LK_MAX << 1),
71 +enum ext4_pdo_lock_opc {
73 + EXT4_HLOCK_READDIR = (EXT4_LB_DE_RO | EXT4_LB_DX_RO),
74 + EXT4_HLOCK_LOOKUP = (EXT4_LB_DE_RO | EXT4_LB_SPIN_RO |
76 + EXT4_HLOCK_DEL = (EXT4_LB_DE | EXT4_LB_SPIN_RO |
78 + EXT4_HLOCK_ADD = (EXT4_LB_DE | EXT4_LB_SPIN_RO),
81 + EXT4_HLOCK_LOOKUP_SAFE = (EXT4_LB_DE_RO | EXT4_LB_DX_RO |
83 + EXT4_HLOCK_DEL_SAFE = (EXT4_LB_DE | EXT4_LB_DX_RO | EXT4_LB_EXACT),
84 + EXT4_HLOCK_SPLIT = (EXT4_LB_DE | EXT4_LB_DX | EXT4_LB_SPIN),
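/*
 * Worked example of the bit layout (illustrative; assumes the usual
 * numbering with EXT4_HTREE_LOCK_HASH == 0, i.e. EXT4_LK_DX/DE/SPIN
 * = 0/1/2 and EXT4_LK_MAX = 3). Low bits are reader bits and the next
 * EXT4_LK_MAX bits are the matching writer bits, so
 *
 *	EXT4_LB_DX_RO   = 0x01		EXT4_LB_DX   = 0x09
 *	EXT4_LB_DE_RO   = 0x02		EXT4_LB_DE   = 0x12
 *	EXT4_LB_SPIN_RO = 0x04		EXT4_LB_SPIN = 0x24
 *	EXT4_LB_EXACT   = 0x40
 *
 * and e.g. EXT4_HLOCK_ADD = EXT4_LB_DE | EXT4_LB_SPIN_RO = 0x16:
 * write-lock the target DE block and read-lock the spinlock level,
 * with no exact-match semantics, since an insert only cares about
 * its own leaf.
 */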
87 +extern struct htree_lock_head *ext4_htree_lock_head_alloc(unsigned hbits);
88 +#define ext4_htree_lock_head_free(lhead) htree_lock_head_free(lhead)
90 +extern struct htree_lock *ext4_htree_lock_alloc(void);
91 +#define ext4_htree_lock_free(lck) htree_lock_free(lck)
93 +extern void ext4_htree_lock(struct htree_lock *lck,
94 + struct htree_lock_head *lhead,
95 + struct inode *dir, unsigned flags);
96 +#define ext4_htree_unlock(lck) htree_unlock(lck)
98 +extern struct buffer_head *__ext4_find_entry(struct inode *dir,
99 + const struct qstr *d_name,
100 + struct ext4_dir_entry_2 **res_dir,
101 + int *inlined, struct htree_lock *lck);
102 +extern int __ext4_add_entry(handle_t *handle, struct dentry *dentry,
103 + struct inode *inode, struct htree_lock *lck);
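/*
 * A minimal caller-side sketch (illustrative, not part of the patch) of
 * how the API above is meant to be driven; a real user would cache the
 * htree_lock_head per directory instead of allocating it per call, and
 * would check the allocations for NULL:
 *
 *	struct htree_lock_head *lhead = ext4_htree_lock_head_alloc(HTREE_HBITS_DEF);
 *	struct htree_lock *lck = ext4_htree_lock_alloc();
 *	struct ext4_dir_entry_2 *de;
 *	struct buffer_head *bh;
 *
 *	ext4_htree_lock(lck, lhead, dir, EXT4_HLOCK_LOOKUP);
 *	bh = __ext4_find_entry(dir, name, &de, NULL, lck);
 *	ext4_htree_unlock(lck);
 *
 *	ext4_htree_lock_free(lck);
 *	ext4_htree_lock_head_free(lhead);
 */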
105 /* 32 and 64 bit signed EOF for dx directories */
106 #define EXT4_HTREE_EOF_32BIT ((1UL << (32 - 1)) - 1)
107 @@ -2475,8 +2544,16 @@ int ext4_insert_dentry(struct inode *dir,
108 struct ext4_filename *fname, void *data);
109 static inline void ext4_update_dx_flag(struct inode *inode)
111 + /* Disable this for ldiskfs, because going from a DX directory to
112 + * a non-DX directory while it is in use will completely break
113 + * the htree-locking.
114 + * If we really want to support this operation in the future,
115 + * we would need to lock the directory exclusively here, which
116 + * would add code complexity. */
118 if (!ext4_has_feature_dir_index(inode->i_sb))
119 ext4_clear_inode_flag(inode, EXT4_INODE_INDEX);
122 static unsigned char ext4_filetype_table[] = {
123 DT_UNKNOWN, DT_REG, DT_DIR, DT_CHR, DT_BLK, DT_FIFO, DT_SOCK, DT_LNK
124 diff --git a/fs/ext4/htree_lock.c b/fs/ext4/htree_lock.c
126 index 0000000..99e7375
128 +++ b/fs/ext4/htree_lock.c
131 + * fs/ext4/htree_lock.c
133 + * Copyright (c) 2011, 2012, Intel Corporation.
135 + * Author: Liang Zhen <liang@whamcloud.com>
137 +#include <linux/jbd2.h>
138 +#include <linux/hash.h>
139 +#include <linux/module.h>
140 +#include <linux/htree_lock.h>
143 + HTREE_LOCK_BIT_EX = (1 << HTREE_LOCK_EX),
144 + HTREE_LOCK_BIT_PW = (1 << HTREE_LOCK_PW),
145 + HTREE_LOCK_BIT_PR = (1 << HTREE_LOCK_PR),
146 + HTREE_LOCK_BIT_CW = (1 << HTREE_LOCK_CW),
147 + HTREE_LOCK_BIT_CR = (1 << HTREE_LOCK_CR),
151 + HTREE_LOCK_COMPAT_EX = 0,
152 + HTREE_LOCK_COMPAT_PW = HTREE_LOCK_COMPAT_EX | HTREE_LOCK_BIT_CR,
153 + HTREE_LOCK_COMPAT_PR = HTREE_LOCK_COMPAT_PW | HTREE_LOCK_BIT_PR,
154 + HTREE_LOCK_COMPAT_CW = HTREE_LOCK_COMPAT_PW | HTREE_LOCK_BIT_CW,
155 + HTREE_LOCK_COMPAT_CR = HTREE_LOCK_COMPAT_CW | HTREE_LOCK_BIT_PR |
159 +static int htree_lock_compat[] = {
160 + [HTREE_LOCK_EX] = HTREE_LOCK_COMPAT_EX,
161 + [HTREE_LOCK_PW] = HTREE_LOCK_COMPAT_PW,
162 + [HTREE_LOCK_PR] = HTREE_LOCK_COMPAT_PR,
163 + [HTREE_LOCK_CW] = HTREE_LOCK_COMPAT_CW,
164 + [HTREE_LOCK_CR] = HTREE_LOCK_COMPAT_CR,
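/*
 * Expanding the masks above yields the classic DLM compatibility
 * matrix ("+" means the two modes can be granted at the same time):
 *
 *	     EX   PW   PR   CW   CR
 *	EX   -    -    -    -    -
 *	PW   -    -    -    -    +
 *	PR   -    -    +    -    +
 *	CW   -    -    -    +    +
 *	CR   -    +    +    +    +
 */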
167 +/* max allowed htree-lock depth.
168 + * ext4 only needs depth = 3, though users may request a higher value. */
169 +#define HTREE_LOCK_DEP_MAX 16
171 +#ifdef HTREE_LOCK_DEBUG
173 +static char *hl_name[] = {
174 + [HTREE_LOCK_EX] = "EX",
175 + [HTREE_LOCK_PW] = "PW",
176 + [HTREE_LOCK_PR] = "PR",
177 + [HTREE_LOCK_CW] = "CW",
178 + [HTREE_LOCK_CR] = "CR",
182 +struct htree_lock_node_stats {
183 + unsigned long long blocked[HTREE_LOCK_MAX];
184 + unsigned long long granted[HTREE_LOCK_MAX];
185 + unsigned long long retried[HTREE_LOCK_MAX];
186 + unsigned long long events;
189 +struct htree_lock_stats {
190 + struct htree_lock_node_stats nodes[HTREE_LOCK_DEP_MAX];
191 + unsigned long long granted[HTREE_LOCK_MAX];
192 + unsigned long long blocked[HTREE_LOCK_MAX];
195 +static struct htree_lock_stats hl_stats;
197 +void htree_lock_stat_reset(void)
199 + memset(&hl_stats, 0, sizeof(hl_stats));
202 +void htree_lock_stat_print(int depth)
207 + printk(KERN_DEBUG "HTREE LOCK STATS:\n");
208 + for (i = 0; i < HTREE_LOCK_MAX; i++) {
209 + printk(KERN_DEBUG "[%s]: G [%10llu], B [%10llu]\n",
210 + hl_name[i], hl_stats.granted[i], hl_stats.blocked[i]);
212 + for (i = 0; i < depth; i++) {
213 + printk(KERN_DEBUG "HTREE CHILD [%d] STATS:\n", i);
214 + for (j = 0; j < HTREE_LOCK_MAX; j++) {
216 + "[%s]: G [%10llu], B [%10llu], R [%10llu]\n",
217 + hl_name[j], hl_stats.nodes[i].granted[j],
218 + hl_stats.nodes[i].blocked[j],
219 + hl_stats.nodes[i].retried[j]);
224 +#define lk_grant_inc(m) do { hl_stats.granted[m]++; } while (0)
225 +#define lk_block_inc(m) do { hl_stats.blocked[m]++; } while (0)
226 +#define ln_grant_inc(d, m) do { hl_stats.nodes[d].granted[m]++; } while (0)
227 +#define ln_block_inc(d, m) do { hl_stats.nodes[d].blocked[m]++; } while (0)
228 +#define ln_retry_inc(d, m) do { hl_stats.nodes[d].retried[m]++; } while (0)
229 +#define ln_event_inc(d) do { hl_stats.nodes[d].events++; } while (0)
233 +void htree_lock_stat_reset(void) {}
234 +void htree_lock_stat_print(int depth) {}
236 +#define lk_grant_inc(m) do {} while (0)
237 +#define lk_block_inc(m) do {} while (0)
238 +#define ln_grant_inc(d, m) do {} while (0)
239 +#define ln_block_inc(d, m) do {} while (0)
240 +#define ln_retry_inc(d, m) do {} while (0)
241 +#define ln_event_inc(d) do {} while (0)
245 +EXPORT_SYMBOL(htree_lock_stat_reset);
246 +EXPORT_SYMBOL(htree_lock_stat_print);
248 +#define HTREE_DEP_ROOT (-1)
250 +#define htree_spin_lock(lhead, dep) \
251 + bit_spin_lock((dep) + 1, &(lhead)->lh_lock)
252 +#define htree_spin_unlock(lhead, dep) \
253 + bit_spin_unlock((dep) + 1, &(lhead)->lh_lock)
255 +#define htree_key_event_ignore(child, ln) \
256 + (!((child)->lc_events & (1 << (ln)->ln_mode)))
259 +htree_key_list_empty(struct htree_lock_node *ln)
261 + return list_empty(&ln->ln_major_list) && list_empty(&ln->ln_minor_list);
265 +htree_key_list_del_init(struct htree_lock_node *ln)
267 + struct htree_lock_node *tmp = NULL;
269 + if (!list_empty(&ln->ln_minor_list)) {
270 + tmp = list_entry(ln->ln_minor_list.next,
271 + struct htree_lock_node, ln_minor_list);
272 + list_del_init(&ln->ln_minor_list);
275 + if (list_empty(&ln->ln_major_list))
278 + if (tmp == NULL) { /* not on minor key list */
279 + list_del_init(&ln->ln_major_list);
281 + BUG_ON(!list_empty(&tmp->ln_major_list));
282 + list_replace_init(&ln->ln_major_list, &tmp->ln_major_list);
287 +htree_key_list_replace_init(struct htree_lock_node *old,
288 + struct htree_lock_node *new)
290 + if (!list_empty(&old->ln_major_list))
291 + list_replace_init(&old->ln_major_list, &new->ln_major_list);
293 + if (!list_empty(&old->ln_minor_list))
294 + list_replace_init(&old->ln_minor_list, &new->ln_minor_list);
298 +htree_key_event_enqueue(struct htree_lock_child *child,
299 + struct htree_lock_node *ln, int dep, void *event)
301 + struct htree_lock_node *tmp;
303 + /* NB: ALWAYS called holding lhead::lh_lock(dep) */
304 + BUG_ON(ln->ln_mode == HTREE_LOCK_NL);
305 + if (event == NULL || htree_key_event_ignore(child, ln))
308 + /* shouldn't be a very long list */
309 + list_for_each_entry(tmp, &ln->ln_alive_list, ln_alive_list) {
310 + if (tmp->ln_mode == HTREE_LOCK_NL) {
312 + if (child->lc_callback != NULL)
313 + child->lc_callback(tmp->ln_ev_target, event);
319 +htree_node_lock_enqueue(struct htree_lock *newlk, struct htree_lock *curlk,
320 + unsigned dep, int wait, void *event)
322 + struct htree_lock_child *child = &newlk->lk_head->lh_children[dep];
323 + struct htree_lock_node *newln = &newlk->lk_nodes[dep];
324 + struct htree_lock_node *curln = &curlk->lk_nodes[dep];
326 + /* NB: ALWAYS called holding lhead::lh_lock(dep) */
327 + /* NB: we only expect PR/PW lock modes here; only these two modes are
328 + * allowed for htree_node_lock (asserted in htree_node_lock_internal).
329 + * NL is only used for listeners; users can't directly request NL mode. */
330 + if ((curln->ln_mode == HTREE_LOCK_NL) ||
331 + (curln->ln_mode != HTREE_LOCK_PW &&
332 + newln->ln_mode != HTREE_LOCK_PW)) {
333 + /* no conflict, attach it on granted list of @curlk */
334 + if (curln->ln_mode != HTREE_LOCK_NL) {
335 + list_add(&newln->ln_granted_list,
336 + &curln->ln_granted_list);
338 + /* replace key owner */
339 + htree_key_list_replace_init(curln, newln);
342 + list_add(&newln->ln_alive_list, &curln->ln_alive_list);
343 + htree_key_event_enqueue(child, newln, dep, event);
344 + ln_grant_inc(dep, newln->ln_mode);
345 + return 1; /* still hold lh_lock */
348 + if (!wait) { /* can't grant and don't want to wait */
349 + ln_retry_inc(dep, newln->ln_mode);
350 + newln->ln_mode = HTREE_LOCK_INVAL;
351 + return -1; /* don't wait and just return -1 */
354 + newlk->lk_task = current;
355 + set_current_state(TASK_UNINTERRUPTIBLE);
356 + /* conflict, attach it on blocked list of curlk */
357 + list_add_tail(&newln->ln_blocked_list, &curln->ln_blocked_list);
358 + list_add(&newln->ln_alive_list, &curln->ln_alive_list);
359 + ln_block_inc(dep, newln->ln_mode);
361 + htree_spin_unlock(newlk->lk_head, dep);
362 + /* wait to be given the lock */
363 + if (newlk->lk_task != NULL)
365 + /* granted, no doubt, wake up will set me RUNNING */
366 + if (event == NULL || htree_key_event_ignore(child, newln))
367 + return 0; /* granted without lh_lock */
369 + htree_spin_lock(newlk->lk_head, dep);
370 + htree_key_event_enqueue(child, newln, dep, event);
371 + return 1; /* still hold lh_lock */
375 + * get PR/PW access to a particular tree node according to @dep and @key;
376 + * it returns -1 if @wait is false and the lock can't be granted immediately.
377 + * All listeners (HTREE_LOCK_NL) on @dep with the same @key will receive
378 + * @event if it's not NULL.
379 + * NB: ALWAYS called holding lhead::lh_lock
382 +htree_node_lock_internal(struct htree_lock_head *lhead, struct htree_lock *lck,
383 + htree_lock_mode_t mode, u32 key, unsigned dep,
384 + int wait, void *event)
387 + struct htree_lock *tmp;
388 + struct htree_lock *tmp2;
395 + BUG_ON(mode != HTREE_LOCK_PW && mode != HTREE_LOCK_PR);
396 + BUG_ON(htree_node_is_granted(lck, dep));
398 + key = hash_long(key, lhead->lh_hbits);
400 + mi_bits = lhead->lh_hbits >> 1;
401 + ma_bits = lhead->lh_hbits - mi_bits;
403 + lck->lk_nodes[dep].ln_major_key = major = key & ((1U << ma_bits) - 1);
404 + lck->lk_nodes[dep].ln_minor_key = minor = key >> ma_bits;
405 + lck->lk_nodes[dep].ln_mode = mode;
408 + * The major key list is an ordered list, so searches are started
409 + * at the end of the list that is numerically closer to major_key,
410 + * so at most half of the list will be walked (for well-distributed
411 + * keys). The list traversal aborts early if the expected key
412 + * location is passed.
414 + reverse = (major >= (1 << (ma_bits - 1)));
417 + list_for_each_entry_reverse(tmp,
418 + &lhead->lh_children[dep].lc_list,
419 + lk_nodes[dep].ln_major_list) {
420 + if (tmp->lk_nodes[dep].ln_major_key == major) {
423 + } else if (tmp->lk_nodes[dep].ln_major_key < major) {
424 + /* attach _after_ @tmp */
425 + list_add(&lck->lk_nodes[dep].ln_major_list,
426 + &tmp->lk_nodes[dep].ln_major_list);
427 + goto out_grant_major;
431 + list_add(&lck->lk_nodes[dep].ln_major_list,
432 + &lhead->lh_children[dep].lc_list);
433 + goto out_grant_major;
436 + list_for_each_entry(tmp, &lhead->lh_children[dep].lc_list,
437 + lk_nodes[dep].ln_major_list) {
438 + if (tmp->lk_nodes[dep].ln_major_key == major) {
441 + } else if (tmp->lk_nodes[dep].ln_major_key > major) {
442 + /* insert _before_ @tmp */
443 + list_add_tail(&lck->lk_nodes[dep].ln_major_list,
444 + &tmp->lk_nodes[dep].ln_major_list);
445 + goto out_grant_major;
449 + list_add_tail(&lck->lk_nodes[dep].ln_major_list,
450 + &lhead->lh_children[dep].lc_list);
451 + goto out_grant_major;
456 + * NB: minor_key list doesn't have a "head", @list is just a
457 + * temporary stub to help the list search, so make sure it's removed
458 + * after searching.
459 + * The minor_key list is an ordered list too.
461 + list_add_tail(&list, &tmp->lk_nodes[dep].ln_minor_list);
463 + reverse = (minor >= (1 << (mi_bits - 1)));
466 + list_for_each_entry_reverse(tmp2, &list,
467 + lk_nodes[dep].ln_minor_list) {
468 + if (tmp2->lk_nodes[dep].ln_minor_key == minor) {
471 + } else if (tmp2->lk_nodes[dep].ln_minor_key < minor) {
472 + /* attach _after_ @tmp2 */
473 + list_add(&lck->lk_nodes[dep].ln_minor_list,
474 + &tmp2->lk_nodes[dep].ln_minor_list);
475 + goto out_grant_minor;
479 + list_add(&lck->lk_nodes[dep].ln_minor_list, &list);
482 + list_for_each_entry(tmp2, &list,
483 + lk_nodes[dep].ln_minor_list) {
484 + if (tmp2->lk_nodes[dep].ln_minor_key == minor) {
487 + } else if (tmp2->lk_nodes[dep].ln_minor_key > minor) {
488 + /* insert _before_ @tmp2 */
489 + list_add_tail(&lck->lk_nodes[dep].ln_minor_list,
490 + &tmp2->lk_nodes[dep].ln_minor_list);
491 + goto out_grant_minor;
495 + list_add_tail(&lck->lk_nodes[dep].ln_minor_list, &list);
499 + if (list.next == &lck->lk_nodes[dep].ln_minor_list) {
500 + /* new lock @lck is the first one on minor_key list, which
501 + * means it has the smallest minor_key and it should
502 + * replace @tmp as minor_key owner */
503 + list_replace_init(&tmp->lk_nodes[dep].ln_major_list,
504 + &lck->lk_nodes[dep].ln_major_list);
506 + /* remove the temporary head */
510 + ln_grant_inc(dep, lck->lk_nodes[dep].ln_mode);
511 + return 1; /* granted with holding lh_lock */
514 + list_del(&list); /* remove temporary head */
515 + return htree_node_lock_enqueue(lck, tmp2, dep, wait, event);
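/*
 * Worked example of the key split in htree_node_lock_internal() above:
 * with e.g. lh_hbits = 14 (HTREE_HBITS_DEF), mi_bits = 7 and
 * ma_bits = 7, so a hashed key of 0x1abc splits into
 *
 *	major = 0x1abc & 0x7f = 0x3c	(low 7 bits)
 *	minor = 0x1abc >> 7   = 0x35	(remaining high bits)
 *
 * Locks are ordered first by major key on the per-level lc_list; locks
 * sharing a major key hang off the key owner on the ordered minor_key
 * list searched above.
 */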
519 + * release the key of @lck at level @dep, and grant any blocked locks.
520 + * the caller will still listen on @key if @event is not NULL, which means
521 + * the caller can see an event (via event_cb) while any lock with
522 + * the same key at level @dep is being granted.
523 + * NB: ALWAYS called holding lhead::lh_lock
524 + * NB: listener will not block anyone because listening mode is HTREE_LOCK_NL
527 +htree_node_unlock_internal(struct htree_lock_head *lhead,
528 + struct htree_lock *curlk, unsigned dep, void *event)
530 + struct htree_lock_node *curln = &curlk->lk_nodes[dep];
531 + struct htree_lock *grtlk = NULL;
532 + struct htree_lock_node *grtln;
533 + struct htree_lock *poslk;
534 + struct htree_lock *tmplk;
536 + if (!htree_node_is_granted(curlk, dep))
539 + if (!list_empty(&curln->ln_granted_list)) {
540 + /* there is another granted lock */
541 + grtlk = list_entry(curln->ln_granted_list.next,
543 + lk_nodes[dep].ln_granted_list);
544 + list_del_init(&curln->ln_granted_list);
547 + if (grtlk == NULL && !list_empty(&curln->ln_blocked_list)) {
549 + * @curlk is the only granted lock, so we confirmed:
550 + * a) curln is key owner (attached on major/minor_list),
551 + * so if there is any blocked lock, it should be attached
552 + * on curln->ln_blocked_list
553 + * b) we can always grant the first blocked lock
555 + grtlk = list_entry(curln->ln_blocked_list.next,
557 + lk_nodes[dep].ln_blocked_list);
558 + BUG_ON(grtlk->lk_task == NULL);
559 + wake_up_process(grtlk->lk_task);
562 + if (event != NULL &&
563 + lhead->lh_children[dep].lc_events != HTREE_EVENT_DISABLE) {
564 + curln->ln_ev_target = event;
565 + curln->ln_mode = HTREE_LOCK_NL; /* listen! */
567 + curln->ln_mode = HTREE_LOCK_INVAL;
570 + if (grtlk == NULL) { /* I must be the only one locking this key */
571 + struct htree_lock_node *tmpln;
573 + BUG_ON(htree_key_list_empty(curln));
575 + if (curln->ln_mode == HTREE_LOCK_NL) /* listening */
578 + /* not listening */
579 + if (list_empty(&curln->ln_alive_list)) { /* no more listener */
580 + htree_key_list_del_init(curln);
584 + tmpln = list_entry(curln->ln_alive_list.next,
585 + struct htree_lock_node, ln_alive_list);
587 + BUG_ON(tmpln->ln_mode != HTREE_LOCK_NL);
589 + htree_key_list_replace_init(curln, tmpln);
590 + list_del_init(&curln->ln_alive_list);
595 + /* have a granted lock */
596 + grtln = &grtlk->lk_nodes[dep];
597 + if (!list_empty(&curln->ln_blocked_list)) {
598 + /* only key owner can be on both lists */
599 + BUG_ON(htree_key_list_empty(curln));
601 + if (list_empty(&grtln->ln_blocked_list)) {
602 + list_add(&grtln->ln_blocked_list,
603 + &curln->ln_blocked_list);
605 + list_del_init(&curln->ln_blocked_list);
608 + * NB: this is the tricky part:
609 + * there are only two modes for child-locks (PR and PW), and only
610 + * the owner of the key (attached on major/minor_list) can be on
611 + * both blocked_list and granted_list, so @grtlk must be one
612 + * of these two cases:
614 + * a) @grtlk was taken from granted_list, which means we've granted
615 + * more than one lock, so @grtlk has to be PR; the first blocked
616 + * lock must then be PW and we can't grant it at all.
617 + * So even if @grtlk is not the owner of the key (empty blocked_list),
618 + * we don't care, because we can't grant any lock anyway.
619 + * b) we just granted a new lock taken from the head of the blocked
620 + * list; it should be the first granted lock, and it should
621 + * be the first one linked on blocked_list.
623 + * Either way, we get the correct result by iterating the blocked_list
624 + * of @grtlk, without having to figure out which lock owns
625 + * the current key.
627 + list_for_each_entry_safe(poslk, tmplk, &grtln->ln_blocked_list,
628 + lk_nodes[dep].ln_blocked_list) {
629 + if (grtlk->lk_nodes[dep].ln_mode == HTREE_LOCK_PW ||
630 + poslk->lk_nodes[dep].ln_mode == HTREE_LOCK_PW)
632 + /* grant all readers */
633 + list_del_init(&poslk->lk_nodes[dep].ln_blocked_list);
634 + list_add(&poslk->lk_nodes[dep].ln_granted_list,
635 + &grtln->ln_granted_list);
637 + BUG_ON(poslk->lk_task == NULL);
638 + wake_up_process(poslk->lk_task);
641 + /* if @curln is the owner of this key, replace it with @grtln */
642 + if (!htree_key_list_empty(curln))
643 + htree_key_list_replace_init(curln, grtln);
645 + if (curln->ln_mode == HTREE_LOCK_INVAL)
646 + list_del_init(&curln->ln_alive_list);
650 + * a simple wrapper of htree_node_lock_internal; it returns 1 when granted
651 + * and 0 only if @wait is false and the lock can't be granted immediately
654 +htree_node_lock_try(struct htree_lock *lck, htree_lock_mode_t mode,
655 + u32 key, unsigned dep, int wait, void *event)
657 + struct htree_lock_head *lhead = lck->lk_head;
660 + BUG_ON(dep >= lck->lk_depth);
661 + BUG_ON(lck->lk_mode == HTREE_LOCK_INVAL);
663 + htree_spin_lock(lhead, dep);
664 + rc = htree_node_lock_internal(lhead, lck, mode, key, dep, wait, event);
666 + htree_spin_unlock(lhead, dep);
669 +EXPORT_SYMBOL(htree_node_lock_try);
671 +/* a simple wrapper of htree_node_unlock_internal */
673 +htree_node_unlock(struct htree_lock *lck, unsigned dep, void *event)
675 + struct htree_lock_head *lhead = lck->lk_head;
677 + BUG_ON(dep >= lck->lk_depth);
678 + BUG_ON(lck->lk_mode == HTREE_LOCK_INVAL);
680 + htree_spin_lock(lhead, dep);
681 + htree_node_unlock_internal(lhead, lck, dep, event);
682 + htree_spin_unlock(lhead, dep);
684 +EXPORT_SYMBOL(htree_node_unlock);
686 +/* stop listening on child-lock level @dep */
688 +htree_node_stop_listen(struct htree_lock *lck, unsigned dep)
690 + struct htree_lock_node *ln = &lck->lk_nodes[dep];
691 + struct htree_lock_node *tmp;
693 + BUG_ON(htree_node_is_granted(lck, dep));
694 + BUG_ON(!list_empty(&ln->ln_blocked_list));
695 + BUG_ON(!list_empty(&ln->ln_granted_list));
697 + if (!htree_node_is_listening(lck, dep))
700 + htree_spin_lock(lck->lk_head, dep);
701 + ln->ln_mode = HTREE_LOCK_INVAL;
702 + ln->ln_ev_target = NULL;
704 + if (htree_key_list_empty(ln)) { /* not owner */
705 + list_del_init(&ln->ln_alive_list);
709 + /* I'm the owner... */
710 + if (list_empty(&ln->ln_alive_list)) { /* no more listener */
711 + htree_key_list_del_init(ln);
715 + tmp = list_entry(ln->ln_alive_list.next,
716 + struct htree_lock_node, ln_alive_list);
718 + BUG_ON(tmp->ln_mode != HTREE_LOCK_NL);
719 + htree_key_list_replace_init(ln, tmp);
720 + list_del_init(&ln->ln_alive_list);
722 + htree_spin_unlock(lck->lk_head, dep);
724 +EXPORT_SYMBOL(htree_node_stop_listen);
726 +/* release all child-locks if we have any */
728 +htree_node_release_all(struct htree_lock *lck)
732 + for (i = 0; i < lck->lk_depth; i++) {
733 + if (htree_node_is_granted(lck, i))
734 + htree_node_unlock(lck, i, NULL);
735 + else if (htree_node_is_listening(lck, i))
736 + htree_node_stop_listen(lck, i);
741 + * obtain the htree lock; the caller may block inside if @wait is true
742 + * and the request conflicts with any granted or blocked lock.
743 + * NB: ALWAYS called holding lhead::lh_lock
746 +htree_lock_internal(struct htree_lock *lck, int wait)
748 + struct htree_lock_head *lhead = lck->lk_head;
753 + for (i = 0; i < HTREE_LOCK_MAX; i++) {
754 + if (lhead->lh_ngranted[i] != 0)
756 + if (lhead->lh_nblocked[i] != 0)
759 + if ((htree_lock_compat[lck->lk_mode] & granted) != granted ||
760 + (htree_lock_compat[lck->lk_mode] & blocked) != blocked) {
761 + /* block the current lock even if it only conflicts with another
762 + * blocked lock, so that locks like EX can't be starved */
765 + lhead->lh_nblocked[lck->lk_mode]++;
766 + lk_block_inc(lck->lk_mode);
768 + lck->lk_task = current;
769 + list_add_tail(&lck->lk_blocked_list, &lhead->lh_blocked_list);
771 + set_current_state(TASK_UNINTERRUPTIBLE);
772 + htree_spin_unlock(lhead, HTREE_DEP_ROOT);
773 + /* wait to be given the lock */
774 + if (lck->lk_task != NULL)
776 + /* granted, no doubt. wake up will set me RUNNING */
777 + return 0; /* without lh_lock */
779 + lhead->lh_ngranted[lck->lk_mode]++;
780 + lk_grant_inc(lck->lk_mode);
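/*
 * Worked example of the anti-starvation rule above: suppose CR is
 * granted and an EX request is already blocked behind it. A newly
 * arriving CR is compatible with the granted CR, but not with the
 * *blocked* EX, so it queues behind the EX instead of overtaking it.
 * Without folding blocked modes into the check, a steady stream of
 * CR users could starve EX forever.
 */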
784 +/* release htree lock. NB: ALWAYS called holding lhead::lh_lock */
786 +htree_unlock_internal(struct htree_lock *lck)
788 + struct htree_lock_head *lhead = lck->lk_head;
789 + struct htree_lock *tmp;
790 + struct htree_lock *tmp2;
794 + BUG_ON(lhead->lh_ngranted[lck->lk_mode] == 0);
796 + lhead->lh_ngranted[lck->lk_mode]--;
797 + lck->lk_mode = HTREE_LOCK_INVAL;
799 + for (i = 0; i < HTREE_LOCK_MAX; i++) {
800 + if (lhead->lh_ngranted[i] != 0)
803 + list_for_each_entry_safe(tmp, tmp2,
804 + &lhead->lh_blocked_list, lk_blocked_list) {
805 + /* conflict with any granted lock? */
806 + if ((htree_lock_compat[tmp->lk_mode] & granted) != granted)
809 + list_del_init(&tmp->lk_blocked_list);
811 + BUG_ON(lhead->lh_nblocked[tmp->lk_mode] == 0);
813 + lhead->lh_nblocked[tmp->lk_mode]--;
814 + lhead->lh_ngranted[tmp->lk_mode]++;
815 + granted |= 1 << tmp->lk_mode;
817 + BUG_ON(tmp->lk_task == NULL);
818 + wake_up_process(tmp->lk_task);
822 +/* wrapper of htree_lock_internal and the exported interface.
823 + * It always returns 1 with the lock granted if @wait is true; it can return 0
824 + * if @wait is false and the locking request can't be granted immediately */
826 +htree_lock_try(struct htree_lock *lck, struct htree_lock_head *lhead,
827 + htree_lock_mode_t mode, int wait)
831 + BUG_ON(lck->lk_depth > lhead->lh_depth);
832 + BUG_ON(lck->lk_head != NULL);
833 + BUG_ON(lck->lk_task != NULL);
835 + lck->lk_head = lhead;
836 + lck->lk_mode = mode;
838 + htree_spin_lock(lhead, HTREE_DEP_ROOT);
839 + rc = htree_lock_internal(lck, wait);
841 + htree_spin_unlock(lhead, HTREE_DEP_ROOT);
844 +EXPORT_SYMBOL(htree_lock_try);
846 +/* wrapper of htree_unlock_internal and the exported interface.
847 + * It releases all htree_node_locks as well as the htree_lock itself */
849 +htree_unlock(struct htree_lock *lck)
851 + BUG_ON(lck->lk_head == NULL);
852 + BUG_ON(lck->lk_mode == HTREE_LOCK_INVAL);
854 + htree_node_release_all(lck);
856 + htree_spin_lock(lck->lk_head, HTREE_DEP_ROOT);
857 + htree_unlock_internal(lck);
858 + htree_spin_unlock(lck->lk_head, HTREE_DEP_ROOT);
859 + lck->lk_head = NULL;
860 + lck->lk_task = NULL;
862 +EXPORT_SYMBOL(htree_unlock);
864 +/* change lock mode */
866 +htree_change_mode(struct htree_lock *lck, htree_lock_mode_t mode)
868 + BUG_ON(lck->lk_mode == HTREE_LOCK_INVAL);
869 + lck->lk_mode = mode;
871 +EXPORT_SYMBOL(htree_change_mode);
873 +/* release the htree lock, then lock it again with a new mode.
874 + * This function first releases all htree_node_locks and the htree_lock,
875 + * then tries to reacquire the htree_lock with the new @mode.
876 + * It always returns 1 with the lock granted if @wait is true; it can return 0
877 + * if @wait is false and the locking request can't be granted immediately */
879 +htree_change_lock_try(struct htree_lock *lck, htree_lock_mode_t mode, int wait)
881 + struct htree_lock_head *lhead = lck->lk_head;
884 + BUG_ON(lhead == NULL);
885 + BUG_ON(lck->lk_mode == mode);
886 + BUG_ON(lck->lk_mode == HTREE_LOCK_INVAL || mode == HTREE_LOCK_INVAL);
888 + htree_node_release_all(lck);
890 + htree_spin_lock(lhead, HTREE_DEP_ROOT);
891 + htree_unlock_internal(lck);
892 + lck->lk_mode = mode;
893 + rc = htree_lock_internal(lck, wait);
895 + htree_spin_unlock(lhead, HTREE_DEP_ROOT);
898 +EXPORT_SYMBOL(htree_change_lock_try);
900 +/* create a htree_lock head with @depth levels (number of child-locks);
901 + * it is a per-resource structure */
902 +struct htree_lock_head *
903 +htree_lock_head_alloc(unsigned depth, unsigned hbits, unsigned priv)
905 + struct htree_lock_head *lhead;
908 + if (depth > HTREE_LOCK_DEP_MAX) {
909 + printk(KERN_ERR "%u is larger than max htree_lock depth %d\n",
910 + depth, HTREE_LOCK_DEP_MAX);
914 + lhead = kzalloc(offsetof(struct htree_lock_head,
915 + lh_children[depth]) + priv, GFP_NOFS);
919 + if (hbits < HTREE_HBITS_MIN)
920 + lhead->lh_hbits = HTREE_HBITS_MIN;
921 + else if (hbits > HTREE_HBITS_MAX)
922 + lhead->lh_hbits = HTREE_HBITS_MAX;
924 + lhead->lh_lock = 0;
925 + lhead->lh_depth = depth;
926 + INIT_LIST_HEAD(&lhead->lh_blocked_list);
928 + lhead->lh_private = (void *)lhead +
929 + offsetof(struct htree_lock_head, lh_children[depth]);
932 + for (i = 0; i < depth; i++) {
933 + INIT_LIST_HEAD(&lhead->lh_children[i].lc_list);
934 + lhead->lh_children[i].lc_events = HTREE_EVENT_DISABLE;
938 +EXPORT_SYMBOL(htree_lock_head_alloc);
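/*
 * Illustrative memory layout of the allocation above, for depth = 2
 * and priv = P bytes of caller-private data:
 *
 *	[ htree_lock_head | lh_children[0] | lh_children[1] | P bytes ]
 *	                                                     ^ lh_private
 *
 * ext4 passes priv = 0 here (see ext4_htree_lock_head_alloc() in
 * namei.c) and keeps its private data on the per-thread htree_lock
 * instead.
 */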
940 +/* free the htree_lock head */
942 +htree_lock_head_free(struct htree_lock_head *lhead)
946 + BUG_ON(!list_empty(&lhead->lh_blocked_list));
947 + for (i = 0; i < lhead->lh_depth; i++)
948 + BUG_ON(!list_empty(&lhead->lh_children[i].lc_list));
951 +EXPORT_SYMBOL(htree_lock_head_free);
953 +/* register event callback for @events of child-lock at level @dep */
955 +htree_lock_event_attach(struct htree_lock_head *lhead, unsigned dep,
956 + unsigned events, htree_event_cb_t callback)
958 + BUG_ON(lhead->lh_depth <= dep);
959 + lhead->lh_children[dep].lc_events = events;
960 + lhead->lh_children[dep].lc_callback = callback;
962 +EXPORT_SYMBOL(htree_lock_event_attach);
964 +/* allocate a htree_lock, which is a per-thread structure; @pbytes is
965 + * extra space reserved as private data for the caller */
967 +htree_lock_alloc(unsigned depth, unsigned pbytes)
969 + struct htree_lock *lck;
970 + int i = offsetof(struct htree_lock, lk_nodes[depth]);
972 + if (depth > HTREE_LOCK_DEP_MAX) {
973 + printk(KERN_ERR "%u is larger than max htree_lock depth %d\n",
974 + depth, HTREE_LOCK_DEP_MAX);
977 + lck = kzalloc(i + pbytes, GFP_NOFS);
982 + lck->lk_private = (void *)lck + i;
983 + lck->lk_mode = HTREE_LOCK_INVAL;
984 + lck->lk_depth = depth;
985 + INIT_LIST_HEAD(&lck->lk_blocked_list);
987 + for (i = 0; i < depth; i++) {
988 + struct htree_lock_node *node = &lck->lk_nodes[i];
990 + node->ln_mode = HTREE_LOCK_INVAL;
991 + INIT_LIST_HEAD(&node->ln_major_list);
992 + INIT_LIST_HEAD(&node->ln_minor_list);
993 + INIT_LIST_HEAD(&node->ln_alive_list);
994 + INIT_LIST_HEAD(&node->ln_blocked_list);
995 + INIT_LIST_HEAD(&node->ln_granted_list);
1000 +EXPORT_SYMBOL(htree_lock_alloc);
1002 +/* free htree_lock node */
1004 +htree_lock_free(struct htree_lock *lck)
1006 + BUG_ON(lck->lk_mode != HTREE_LOCK_INVAL);
1009 +EXPORT_SYMBOL(htree_lock_free);
1010 diff --git a/fs/ext4/namei.c b/fs/ext4/namei.c
1011 index 3f70bca..99a8da2 100644
1012 --- a/fs/ext4/namei.c
1013 +++ b/fs/ext4/namei.c
1014 @@ -52,6 +52,7 @@ struct buffer_head *ext4_append(handle_t *handle,
1017 struct buffer_head *bh;
1018 + struct ext4_inode_info *ei = EXT4_I(inode);
1021 if (unlikely(EXT4_SB(inode->i_sb)->s_max_dir_size_kb &&
1022 @@ -59,15 +60,22 @@ struct buffer_head *ext4_append(handle_t *handle,
1023 EXT4_SB(inode->i_sb)->s_max_dir_size_kb)))
1024 return ERR_PTR(-ENOSPC);
1026 + /* with parallel dir operations all appends
1027 + * have to be serialized -bzzz */
1028 + down(&ei->i_append_sem);
1030 *block = inode->i_size >> inode->i_sb->s_blocksize_bits;
1032 bh = ext4_bread(handle, inode, *block, EXT4_GET_BLOCKS_CREATE);
1035 + up(&ei->i_append_sem);
1038 inode->i_size += inode->i_sb->s_blocksize;
1039 EXT4_I(inode)->i_disksize = inode->i_size;
1040 BUFFER_TRACE(bh, "get_write_access");
1041 err = ext4_journal_get_write_access(handle, bh);
1042 + up(&ei->i_append_sem);
1045 ext4_std_error(inode->i_sb, err);
1046 @@ -247,7 +255,8 @@ static unsigned dx_node_limit(struct inode *dir);
1047 static struct dx_frame *dx_probe(struct ext4_filename *fname,
1049 struct dx_hash_info *hinfo,
1050 - struct dx_frame *frame);
1051 + struct dx_frame *frame,
1052 + struct htree_lock *lck);
1053 static void dx_release(struct dx_frame *frames);
1054 static int dx_make_map(struct inode *dir, struct ext4_dir_entry_2 *de,
1055 unsigned blocksize, struct dx_hash_info *hinfo,
1056 @@ -261,12 +270,13 @@ static void dx_insert_block(struct dx_frame *frame,
1057 static int ext4_htree_next_block(struct inode *dir, __u32 hash,
1058 struct dx_frame *frame,
1059 struct dx_frame *frames,
1060 - __u32 *start_hash);
1061 + __u32 *start_hash, struct htree_lock *lck);
1062 static struct buffer_head * ext4_dx_find_entry(struct inode *dir,
1063 struct ext4_filename *fname,
1064 - struct ext4_dir_entry_2 **res_dir);
1065 + struct ext4_dir_entry_2 **res_dir, struct htree_lock *lck);
1066 static int ext4_dx_add_entry(handle_t *handle, struct ext4_filename *fname,
1067 - struct dentry *dentry, struct inode *inode);
1068 + struct dentry *dentry, struct inode *inode,
1069 + struct htree_lock *lck);
1071 /* checksumming functions */
1072 void initialize_dirent_tail(struct ext4_dir_entry_tail *t,
1073 @@ -733,6 +743,227 @@ struct stats dx_show_entries(struct dx_hash_info *hinfo, struct inode *dir,
1075 #endif /* DX_DEBUG */
1077 +/* private data for htree_lock */
1078 +struct ext4_dir_lock_data {
1079 + unsigned ld_flags; /* bits-map for lock types */
1080 + unsigned ld_count; /* # entries of the last DX block */
1081 + struct dx_entry ld_at_entry; /* copy of leaf dx_entry */
1082 + struct dx_entry *ld_at; /* position of leaf dx_entry */
1085 +#define ext4_htree_lock_data(l) ((struct ext4_dir_lock_data *)(l)->lk_private)
1086 +#define ext4_find_entry(dir, name, dirent, inline) \
1087 + __ext4_find_entry(dir, name, dirent, inline, NULL)
1088 +#define ext4_add_entry(handle, dentry, inode) \
1089 + __ext4_add_entry(handle, dentry, inode, NULL)
1091 +/* NB: ext4_lblk_t is 32 bits so we use high bits to identify invalid blk */
1092 +#define EXT4_HTREE_NODE_CHANGED (0xcafeULL << 32)
1094 +static void ext4_htree_event_cb(void *target, void *event)
1096 + u64 *block = (u64 *)target;
1098 + if (*block == dx_get_block((struct dx_entry *)event))
1099 + *block = EXT4_HTREE_NODE_CHANGED;
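/*
 * How this callback gets used (condensed from dx_probe() below): a
 * lookup that cannot lock the DE-block yet saves the block number it
 * is about to use and registers it as the listen target:
 *
 *	u64 myblock = dx_get_block(at);
 *	ext4_htree_spin_unlock_listen(lck, &myblock);
 *	ext4_htree_de_lock(lck, frame->at);
 *	ext4_htree_spin_stop_listen(lck);
 *	if (myblock == EXT4_HTREE_NODE_CHANGED)
 *		(the DE-block was split while we slept: retry dx_probe)
 *
 * do_split() fires the event with the dx_entry of the block being
 * split, so every listener that cached that block number learns it
 * is stale.
 */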
1102 +struct htree_lock_head *ext4_htree_lock_head_alloc(unsigned hbits)
1104 + struct htree_lock_head *lhead;
1106 + lhead = htree_lock_head_alloc(EXT4_LK_MAX, hbits, 0);
1107 + if (lhead != NULL) {
1108 + htree_lock_event_attach(lhead, EXT4_LK_SPIN, HTREE_EVENT_WR,
1109 + ext4_htree_event_cb);
1113 +EXPORT_SYMBOL(ext4_htree_lock_head_alloc);
1115 +struct htree_lock *ext4_htree_lock_alloc(void)
1117 + return htree_lock_alloc(EXT4_LK_MAX,
1118 + sizeof(struct ext4_dir_lock_data));
1120 +EXPORT_SYMBOL(ext4_htree_lock_alloc);
1122 +static htree_lock_mode_t ext4_htree_mode(unsigned flags)
1125 + default: /* 0 or unknown flags require EX lock */
1126 + return HTREE_LOCK_EX;
1127 + case EXT4_HLOCK_READDIR:
1128 + return HTREE_LOCK_PR;
1129 + case EXT4_HLOCK_LOOKUP:
1130 + return HTREE_LOCK_CR;
1131 + case EXT4_HLOCK_DEL:
1132 + case EXT4_HLOCK_ADD:
1133 + return HTREE_LOCK_CW;
1137 +/* return PR for read-only operations, otherwise return EX */
1138 +static inline htree_lock_mode_t ext4_htree_safe_mode(unsigned flags)
1140 + int writer = (flags & EXT4_LB_DE) == EXT4_LB_DE;
1142 + /* 0 requires EX lock */
1143 + return (flags == 0 || writer) ? HTREE_LOCK_EX : HTREE_LOCK_PR;
1146 +static int ext4_htree_safe_locked(struct htree_lock *lck)
1150 + if (lck == NULL || lck->lk_mode == HTREE_LOCK_EX)
1153 + writer = (ext4_htree_lock_data(lck)->ld_flags & EXT4_LB_DE) ==
1155 + if (writer) /* all readers & writers are excluded? */
1156 + return lck->lk_mode == HTREE_LOCK_EX;
1158 + /* all writers are excluded? */
1159 + return lck->lk_mode == HTREE_LOCK_PR ||
1160 + lck->lk_mode == HTREE_LOCK_PW ||
1161 + lck->lk_mode == HTREE_LOCK_EX;
1164 +/* relock htree_lock with EX mode if it's a change operation, otherwise
1165 + * relock it with PR mode. It's a no-op if PDO is disabled. */
1166 +static void ext4_htree_safe_relock(struct htree_lock *lck)
1168 + if (!ext4_htree_safe_locked(lck)) {
1169 + unsigned flags = ext4_htree_lock_data(lck)->ld_flags;
1171 + htree_change_lock(lck, ext4_htree_safe_mode(flags));
1175 +void ext4_htree_lock(struct htree_lock *lck, struct htree_lock_head *lhead,
1176 + struct inode *dir, unsigned flags)
1178 + htree_lock_mode_t mode = is_dx(dir) ? ext4_htree_mode(flags) :
1179 + ext4_htree_safe_mode(flags);
1181 + ext4_htree_lock_data(lck)->ld_flags = flags;
1182 + htree_lock(lck, lhead, mode);
1184 + ext4_htree_safe_relock(lck); /* make sure it's safe locked */
1186 +EXPORT_SYMBOL(ext4_htree_lock);
1188 +static int ext4_htree_node_lock(struct htree_lock *lck, struct dx_entry *at,
1189 + unsigned lmask, int wait, void *ev)
1191 + u32 key = (at == NULL) ? 0 : dx_get_block(at);
1194 + /* NOOP if htree is well protected or caller doesn't require the lock */
1195 + if (ext4_htree_safe_locked(lck) ||
1196 + !(ext4_htree_lock_data(lck)->ld_flags & lmask))
1199 + mode = (ext4_htree_lock_data(lck)->ld_flags & lmask) == lmask ?
1200 + HTREE_LOCK_PW : HTREE_LOCK_PR;
1202 + if (htree_node_lock_try(lck, mode, key, ffz(~lmask), wait, ev))
1204 + if (!(lmask & EXT4_LB_SPIN)) /* not a spinlock */
1206 + cpu_relax(); /* spin until granted */
1210 +static int ext4_htree_node_locked(struct htree_lock *lck, unsigned lmask)
1212 + return ext4_htree_safe_locked(lck) ||
1213 + htree_node_is_granted(lck, ffz(~lmask));
1216 +static void ext4_htree_node_unlock(struct htree_lock *lck,
1217 + unsigned lmask, void *buf)
1219 + /* NB: it's safe to call this multiple times, even if it's not locked */
1220 + if (!ext4_htree_safe_locked(lck) &&
1221 + htree_node_is_granted(lck, ffz(~lmask)))
1222 + htree_node_unlock(lck, ffz(~lmask), buf);
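/*
 * Note the ffz(~lmask) idiom above: it recovers the child-lock level
 * from a lock-bit mask, since inverting the mask turns its lowest set
 * bit into the lowest zero bit. Assuming the 0/1/2 numbering of
 * EXT4_LK_DX/DE/SPIN, EXT4_LB_DE = 0x12 has lowest set bit 1, so
 * ffz(~0x12) = 1 = EXT4_LK_DE.
 */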
1225 +#define ext4_htree_dx_lock(lck, key) \
1226 + ext4_htree_node_lock(lck, key, EXT4_LB_DX, 1, NULL)
1227 +#define ext4_htree_dx_lock_try(lck, key) \
1228 + ext4_htree_node_lock(lck, key, EXT4_LB_DX, 0, NULL)
1229 +#define ext4_htree_dx_unlock(lck) \
1230 + ext4_htree_node_unlock(lck, EXT4_LB_DX, NULL)
1231 +#define ext4_htree_dx_locked(lck) \
1232 + ext4_htree_node_locked(lck, EXT4_LB_DX)
1234 +static void ext4_htree_dx_need_lock(struct htree_lock *lck)
1236 + struct ext4_dir_lock_data *ld;
1238 + if (ext4_htree_safe_locked(lck))
1241 + ld = ext4_htree_lock_data(lck);
1242 + switch (ld->ld_flags) {
1245 + case EXT4_HLOCK_LOOKUP:
1246 + ld->ld_flags = EXT4_HLOCK_LOOKUP_SAFE;
1248 + case EXT4_HLOCK_DEL:
1249 + ld->ld_flags = EXT4_HLOCK_DEL_SAFE;
1251 + case EXT4_HLOCK_ADD:
1252 + ld->ld_flags = EXT4_HLOCK_SPLIT;
1257 +#define ext4_htree_de_lock(lck, key) \
1258 + ext4_htree_node_lock(lck, key, EXT4_LB_DE, 1, NULL)
1259 +#define ext4_htree_de_unlock(lck) \
1260 + ext4_htree_node_unlock(lck, EXT4_LB_DE, NULL)
1262 +#define ext4_htree_spin_lock(lck, key, event) \
1263 + ext4_htree_node_lock(lck, key, EXT4_LB_SPIN, 0, event)
1264 +#define ext4_htree_spin_unlock(lck) \
1265 + ext4_htree_node_unlock(lck, EXT4_LB_SPIN, NULL)
1266 +#define ext4_htree_spin_unlock_listen(lck, p) \
1267 + ext4_htree_node_unlock(lck, EXT4_LB_SPIN, p)
1269 +static void ext4_htree_spin_stop_listen(struct htree_lock *lck)
1271 + if (!ext4_htree_safe_locked(lck) &&
1272 + htree_node_is_listening(lck, ffz(~EXT4_LB_SPIN)))
1273 + htree_node_stop_listen(lck, ffz(~EXT4_LB_SPIN));
1277 + DX_HASH_COL_IGNORE, /* ignore collision while probing frames */
1278 + DX_HASH_COL_YES, /* there is collision and it does matter */
1279 + DX_HASH_COL_NO, /* there is no collision */
1282 +static int dx_probe_hash_collision(struct htree_lock *lck,
1283 + struct dx_entry *entries,
1284 + struct dx_entry *at, u32 hash)
1286 + if (!(lck && ext4_htree_lock_data(lck)->ld_flags & EXT4_LB_EXACT)) {
1287 + return DX_HASH_COL_IGNORE; /* don't care about collision */
1289 + } else if (at == entries + dx_get_count(entries) - 1) {
1290 + return DX_HASH_COL_IGNORE; /* not in any leaf of this DX */
1292 + } else { /* hash collision? */
1293 + return ((dx_get_hash(at + 1) & ~1) == hash) ?
1294 + DX_HASH_COL_YES : DX_HASH_COL_NO;
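/*
 * The "& ~1" above follows the htree convention, visible in do_split()
 * below, that the low bit of a dx_entry hash is a "continued" flag
 * marking a leaf holding entries that collide with the previous leaf
 * (dx_insert_block(frame, hash2 + continued, ...)). Lookup hashes have
 * the low bit clear, so if masking the low bit off the *next* entry's
 * hash yields our hash, matching entries may continue into the next
 * leaf and the exact-match (DX-locked) path is required.
 */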
1299 * Probe for a directory leaf block to search.
1301 @@ -744,10 +975,11 @@ struct stats dx_show_entries(struct dx_hash_info *hinfo, struct inode *dir,
1303 static struct dx_frame *
1304 dx_probe(struct ext4_filename *fname, struct inode *dir,
1305 - struct dx_hash_info *hinfo, struct dx_frame *frame_in)
1306 + struct dx_hash_info *hinfo, struct dx_frame *frame_in,
1307 + struct htree_lock *lck)
1309 unsigned count, indirect;
1310 - struct dx_entry *at, *entries, *p, *q, *m;
1311 + struct dx_entry *at, *entries, *p, *q, *m, *dx = NULL;
1312 struct dx_root_info *info;
1313 struct dx_frame *frame = frame_in;
1314 struct dx_frame *ret_err = ERR_PTR(ERR_BAD_DX_DIR);
1315 @@ -808,8 +1040,15 @@ dx_probe(struct ext4_filename *fname, struct inode *dir,
1317 dxtrace(printk("Look up %x", hash));
1319 + if (indirect == 0) { /* the last index level */
1320 + /* NB: ext4_htree_dx_lock() could be a no-op if the
1321 + * DX-lock flag is not set for the current operation */
1322 + ext4_htree_dx_lock(lck, dx);
1323 + ext4_htree_spin_lock(lck, dx, NULL);
1325 count = dx_get_count(entries);
1326 - if (!count || count > dx_get_limit(entries)) {
1327 + if (count == 0 || count > dx_get_limit(entries)) {
1328 + ext4_htree_spin_unlock(lck); /* release spin */
1329 ext4_warning_inode(dir,
1330 "dx entry: count %u beyond limit %u",
1331 count, dx_get_limit(entries));
1332 @@ -847,8 +1086,70 @@ dx_probe(struct ext4_filename *fname, struct inode *dir,
1334 frame->entries = entries;
1338 + if (indirect == 0) { /* the last index level */
1339 + struct ext4_dir_lock_data *ld;
1342 + /* By default we only lock DE-block, however, we will
1343 + * also lock the last level DX-block if:
1344 + * a) there is hash collision
1345 + * we will set DX-lock flag (a few lines below)
1346 + * and retry to lock the DX-block;
1347 + * see details in dx_probe_hash_collision()
1348 + * b) it's a retry from splitting
1349 + * we need to lock the last level DX-block so nobody
1350 + * else can split any leaf blocks under the same
1351 + * DX-block, see detail in ext4_dx_add_entry()
1353 + if (ext4_htree_dx_locked(lck)) {
1354 + /* DX-block is locked, just lock DE-block
1356 + ext4_htree_spin_unlock(lck);
1357 + if (!ext4_htree_safe_locked(lck))
1358 + ext4_htree_de_lock(lck, frame->at);
1361 + /* it's pdirop and no DX lock */
1362 + if (dx_probe_hash_collision(lck, entries, at, hash) ==
1363 + DX_HASH_COL_YES) {
1364 + /* found hash collision, set DX-lock flag
1365 + * and retry to obtain the DX-lock */
1366 + ext4_htree_spin_unlock(lck);
1367 + ext4_htree_dx_need_lock(lck);
1370 + ld = ext4_htree_lock_data(lck);
1371 + /* because we don't hold the DX lock, @at can't be trusted
1372 + * after the spinlock is released, so it has to be saved */
1374 + ld->ld_at_entry = *at;
1375 + ld->ld_count = dx_get_count(entries);
1377 + frame->at = &ld->ld_at_entry;
1378 + myblock = dx_get_block(at);
1380 + /* NB: lock ordering matters here */
1381 + ext4_htree_spin_unlock_listen(lck, &myblock);
1382 + /* another thread can split this DE-block because:
1383 + * a) we don't hold the DE-block lock yet
1384 + * b) we released the spinlock on the DX-block
1385 + * if that happens, we can detect it by listening for
1386 + * a split event on this DE-block */
1387 + ext4_htree_de_lock(lck, frame->at);
1388 + ext4_htree_spin_stop_listen(lck);
1390 + if (myblock == EXT4_HTREE_NODE_CHANGED) {
1391 + /* someone split this DE-block before
1392 + * we locked it; we need to retry and lock
1393 + * the valid DE-block */
1394 + ext4_htree_de_unlock(lck);
1402 frame->bh = ext4_read_dirblock(dir, dx_get_block(at), INDEX);
1403 if (IS_ERR(frame->bh)) {
1404 @@ -915,7 +1216,7 @@ static void dx_release(struct dx_frame *frames)
1405 static int ext4_htree_next_block(struct inode *dir, __u32 hash,
1406 struct dx_frame *frame,
1407 struct dx_frame *frames,
1408 - __u32 *start_hash)
1409 + __u32 *start_hash, struct htree_lock *lck)
1412 struct buffer_head *bh;
1413 @@ -930,12 +1231,22 @@ static int ext4_htree_next_block(struct inode *dir, __u32 hash,
1414 * this loop, num_frames indicates the number of interior
1415 * nodes need to be read.
1417 + ext4_htree_de_unlock(lck);
1419 - if (++(p->at) < p->entries + dx_get_count(p->entries))
1421 + if (num_frames > 0 || ext4_htree_dx_locked(lck)) {
1422 + /* num_frames > 0 :
1423 + * @p is an interior DX block, so @p->at is always valid;
1424 + * ext4_htree_dx_locked:
1425 + * frame->at is a reliable pointer returned by dx_probe,
1426 + * otherwise dx_probe already knew there is no collision */
1427 + if (++(p->at) < p->entries + dx_get_count(p->entries))
1433 + if (num_frames == 1)
1434 + ext4_htree_dx_unlock(lck);
1438 @@ -958,6 +1269,13 @@ static int ext4_htree_next_block(struct inode *dir, __u32 hash,
1439 * block so no check is necessary
1441 while (num_frames--) {
1442 + if (num_frames == 0) {
1443 + /* it's not always necessary, we just don't want to
1444 + * detect hash collision again */
1445 + ext4_htree_dx_need_lock(lck);
1446 + ext4_htree_dx_lock(lck, p->at);
1449 bh = ext4_read_dirblock(dir, dx_get_block(p->at), INDEX);
1452 @@ -966,6 +1284,7 @@ static int ext4_htree_next_block(struct inode *dir, __u32 hash,
1454 p->at = p->entries = ((struct dx_node *) bh->b_data)->entries;
1456 + ext4_htree_de_lock(lck, p->at);
1460 @@ -1110,10 +1429,10 @@ int ext4_htree_fill_tree(struct file *dir_file, __u32 start_hash,
1462 hinfo.hash = start_hash;
1463 hinfo.minor_hash = 0;
1464 - frame = dx_probe(NULL, dir, &hinfo, frames);
1465 + /* assume it's PR locked */
1466 + frame = dx_probe(NULL, dir, &hinfo, frames, NULL);
1468 return PTR_ERR(frame);
1470 /* Add '.' and '..' from the htree header */
1471 if (!start_hash && !start_minor_hash) {
1472 de = (struct ext4_dir_entry_2 *) frames[0].bh->b_data;
1473 @@ -1148,7 +1467,7 @@ int ext4_htree_fill_tree(struct file *dir_file, __u32 start_hash,
1476 ret = ext4_htree_next_block(dir, HASH_NB_ALWAYS,
1477 - frame, frames, &hashval);
1478 + frame, frames, &hashval, NULL);
1479 *next_hash = hashval;
1482 @@ -1372,10 +1691,10 @@ static int is_dx_internal_node(struct inode *dir, ext4_lblk_t block,
1483 * The returned buffer_head has ->b_count elevated. The caller is expected
1484 * to brelse() it when appropriate.
1486 -static struct buffer_head * ext4_find_entry (struct inode *dir,
1487 +struct buffer_head *__ext4_find_entry(struct inode *dir,
1488 const struct qstr *d_name,
1489 struct ext4_dir_entry_2 **res_dir,
1491 + int *inlined, struct htree_lock *lck)
1493 struct super_block *sb;
1494 struct buffer_head *bh_use[NAMEI_RA_SIZE];
1495 @@ -1423,7 +1742,7 @@ static struct buffer_head * ext4_find_entry (struct inode *dir,
1499 - ret = ext4_dx_find_entry(dir, &fname, res_dir);
1500 + ret = ext4_dx_find_entry(dir, &fname, res_dir, lck);
1502 * On success, or if the error was file not found,
1503 * return. Otherwise, fall back to doing a search the
1504 @@ -1433,6 +1752,7 @@ static struct buffer_head * ext4_find_entry (struct inode *dir,
1505 goto cleanup_and_exit;
1506 dxtrace(printk(KERN_DEBUG "ext4_find_entry: dx failed, "
1508 + ext4_htree_safe_relock(lck);
1510 nblocks = dir->i_size >> EXT4_BLOCK_SIZE_BITS(sb);
1511 start = EXT4_I(dir)->i_dir_start_lookup;
1512 @@ -1528,10 +1848,12 @@ cleanup_and_exit:
1513 ext4_fname_free_filename(&fname);
1516 +EXPORT_SYMBOL(__ext4_find_entry);
1518 static struct buffer_head * ext4_dx_find_entry(struct inode *dir,
1519 struct ext4_filename *fname,
1520 - struct ext4_dir_entry_2 **res_dir)
1521 + struct ext4_dir_entry_2 **res_dir,
1522 + struct htree_lock *lck)
1524 struct super_block * sb = dir->i_sb;
1525 struct dx_frame frames[EXT4_HTREE_LEVEL], *frame;
1526 @@ -1543,7 +1865,7 @@ static struct buffer_head * ext4_dx_find_entry(struct inode *dir,
1527 #ifdef CONFIG_EXT4_FS_ENCRYPTION
1530 - frame = dx_probe(fname, dir, NULL, frames);
1531 + frame = dx_probe(fname, dir, NULL, frames, lck);
1533 return (struct buffer_head *) frame;
1535 @@ -1565,7 +1887,7 @@ static struct buffer_head * ext4_dx_find_entry(struct inode *dir,
1537 /* Check to see if we should continue to search */
1538 retval = ext4_htree_next_block(dir, fname->hinfo.hash, frame,
1540 + frames, NULL, lck);
1542 ext4_warning_inode(dir,
1543 "error %d reading directory index block",
1544 @@ -1738,8 +2060,9 @@ static struct ext4_dir_entry_2* dx_pack_dirents(char *base, unsigned blocksize)
1545 * Returns pointer to de in block into which the new entry will be inserted.
1547 static struct ext4_dir_entry_2 *do_split(handle_t *handle, struct inode *dir,
1548 - struct buffer_head **bh,struct dx_frame *frame,
1549 - struct dx_hash_info *hinfo)
1550 + struct buffer_head **bh, struct dx_frame *frames,
1551 + struct dx_frame *frame, struct dx_hash_info *hinfo,
1552 + struct htree_lock *lck)
1554 unsigned blocksize = dir->i_sb->s_blocksize;
1555 unsigned count, continued;
1556 @@ -1801,8 +2124,14 @@ static struct ext4_dir_entry_2 *do_split(handle_t *handle, struct inode *dir,
1557 hash2, split, count-split));
1559 /* Fancy dance to stay within two buffers */
1560 - de2 = dx_move_dirents(data1, data2, map + split, count - split,
1562 + if (hinfo->hash < hash2) {
1563 + de2 = dx_move_dirents(data1, data2, map + split,
1564 + count - split, blocksize);
1566 + /* make sure we add the entry to the same block that
1567 + * we have already locked */
1568 + de2 = dx_move_dirents(data1, data2, map, split, blocksize);
1570 de = dx_pack_dirents(data1, blocksize);
1571 de->rec_len = ext4_rec_len_to_disk(data1 + (blocksize - csum_size) -
1573 @@ -1823,12 +2152,21 @@ static struct ext4_dir_entry_2 *do_split(handle_t *handle, struct inode *dir,
1574 dxtrace(dx_show_leaf(dir, hinfo, (struct ext4_dir_entry_2 *) data2,
1577 - /* Which block gets the new entry? */
1578 - if (hinfo->hash >= hash2) {
1581 + ext4_htree_spin_lock(lck, frame > frames ? (frame - 1)->at : NULL,
1582 + frame->at); /* notify block is being split */
1583 + if (hinfo->hash < hash2) {
1584 + dx_insert_block(frame, hash2 + continued, newblock);
1587 + /* switch block number */
1588 + dx_insert_block(frame, hash2 + continued,
1589 + dx_get_block(frame->at));
1590 + dx_set_block(frame->at, newblock);
1593 - dx_insert_block(frame, hash2 + continued, newblock);
1594 + ext4_htree_spin_unlock(lck);
1595 + ext4_htree_dx_unlock(lck);
1597 err = ext4_handle_dirty_dirent_node(handle, dir, bh2);
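/*
 * Illustration of the hash2 cases above: the caller already holds the
 * DE-lock on the block behind *bh, so the new entry must land there.
 *
 *	hinfo->hash <  hash2: the upper half moves to the new block; the
 *	    new entry belongs to the lower half, which stays in the
 *	    locked block, so nothing special is needed.
 *	hinfo->hash >= hash2: the lower half moves to the new block and
 *	    the two block numbers are swapped in the dx entries, so the
 *	    locked buffer keeps the half that receives the new entry.
 */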
1600 @@ -2122,7 +2460,7 @@ static int make_indexed_dir(handle_t *handle, struct ext4_filename *fname,
1604 - de = do_split(handle,dir, &bh, frame, &fname->hinfo);
1605 + de = do_split(handle,dir, &bh, frames, frame, &fname->hinfo, NULL);
1607 retval = PTR_ERR(de);
1609 @@ -2233,8 +2571,8 @@ out:
1610 * may not sleep between calling this and putting something into
1611 * the entry, as someone else might have used it while you slept.
1613 -static int ext4_add_entry(handle_t *handle, struct dentry *dentry,
1614 - struct inode *inode)
1615 +int __ext4_add_entry(handle_t *handle, struct dentry *dentry,
1616 + struct inode *inode, struct htree_lock *lck)
1618 struct inode *dir = d_inode(dentry->d_parent);
1619 struct buffer_head *bh = NULL;
1620 @@ -2275,9 +2613,10 @@ static int ext4_add_entry(handle_t *handle, struct dentry *dentry,
1621 if (dentry->d_name.len == 2 &&
1622 memcmp(dentry->d_name.name, "..", 2) == 0)
1623 return ext4_update_dotdot(handle, dentry, inode);
1624 - retval = ext4_dx_add_entry(handle, &fname, dentry, inode);
1625 + retval = ext4_dx_add_entry(handle, &fname, dentry, inode, lck);
1626 if (!retval || (retval != ERR_BAD_DX_DIR))
1628 + ext4_htree_safe_relock(lck);
1629 ext4_clear_inode_flag(dir, EXT4_INODE_INDEX);
1631 ext4_mark_inode_dirty(handle, dir);
1632 @@ -2327,12 +2666,14 @@ out:
1633 ext4_set_inode_state(inode, EXT4_STATE_NEWENTRY);
1636 +EXPORT_SYMBOL(__ext4_add_entry);
1639 * Returns 0 for success, or a negative error value
1641 static int ext4_dx_add_entry(handle_t *handle, struct ext4_filename *fname,
1642 - struct dentry *dentry, struct inode *inode)
1643 + struct dentry *dentry, struct inode *inode,
1644 + struct htree_lock *lck)
1646 struct dx_frame frames[EXT4_HTREE_LEVEL], *frame;
1647 struct dx_entry *entries, *at;
1648 @@ -2345,7 +2686,7 @@ static int ext4_dx_add_entry(handle_t *handle, struct ext4_filename *fname,
1652 - frame = dx_probe(fname, dir, NULL, frames);
1653 + frame = dx_probe(fname, dir, NULL, frames, lck);
1655 return PTR_ERR(frame);
1656 entries = frame->entries;
1657 @@ -2375,6 +2716,11 @@ again:
1658 struct dx_node *node2;
1659 struct buffer_head *bh2;
1661 + if (!ext4_htree_safe_locked(lck)) { /* retry with EX lock */
1662 + ext4_htree_safe_relock(lck);
1666 while (frame > frames) {
1667 if (dx_get_count((frame - 1)->entries) <
1668 dx_get_limit((frame - 1)->entries)) {
1669 @@ -2474,8 +2820,32 @@ again:
1673 + } else if (!ext4_htree_dx_locked(lck)) {
1674 + struct ext4_dir_lock_data *ld = ext4_htree_lock_data(lck);
1676 + /* not well protected, we need the DX lock */
1677 + ext4_htree_dx_need_lock(lck);
1678 + at = frame > frames ? (frame - 1)->at : NULL;
1680 + /* NB: no risk of deadlock because it's just a try.
1682 + * NB: we check ld_count twice, the first time before
1683 + * taking the DX lock, the second time after holding it.
1685 + * NB: we never free directory blocks so far, which
1686 + * means the value returned by dx_get_count() should equal
1687 + * ld->ld_count if nobody has split any DE-block under @at,
1688 + * and ld->ld_at still points to a valid dx_entry. */
1689 + if ((ld->ld_count != dx_get_count(entries)) ||
1690 + !ext4_htree_dx_lock_try(lck, at) ||
1691 + (ld->ld_count != dx_get_count(entries))) {
1695 + /* OK, I've got DX lock and nothing changed */
1696 + frame->at = ld->ld_at;
1698 - de = do_split(handle, dir, &bh, frame, &fname->hinfo);
1699 + de = do_split(handle, dir, &bh, frames, frame, &fname->hinfo, lck);
1703 @@ -2486,6 +2856,8 @@ again:
1705 ext4_std_error(dir->i_sb, err);
1707 + ext4_htree_dx_unlock(lck);
1708 + ext4_htree_de_unlock(lck);
1711 /* @restart being true means the htree path has changed; we need to
1712 diff --git a/fs/ext4/super.c b/fs/ext4/super.c
1713 index 02fe65b..be65ad4 100644
1714 --- a/fs/ext4/super.c
1715 +++ b/fs/ext4/super.c
1716 @@ -896,6 +896,7 @@ static struct inode *ext4_alloc_inode(struct super_block *sb)
1718 ei->vfs_inode.i_version = 1;
1719 spin_lock_init(&ei->i_raw_lock);
1720 + sema_init(&ei->i_append_sem, 1);
1721 INIT_LIST_HEAD(&ei->i_prealloc_list);
1722 spin_lock_init(&ei->i_prealloc_lock);
1723 ext4_es_init_tree(&ei->i_es_tree);
1724 diff --git a/include/linux/htree_lock.h b/include/linux/htree_lock.h
1725 new file mode 100644
1726 index 0000000..9dc7788
1728 +++ b/include/linux/htree_lock.h
1731 + * include/linux/htree_lock.h
1733 + * Copyright (c) 2011, 2012, Intel Corporation.
1735 + * Author: Liang Zhen <liang@whamcloud.com>
1741 + * htree_lock is an advanced lock that supports five lock modes (a concept
1742 + * taken from DLM) and it is a sleeping lock.
1744 + * the most common use case is:
1745 + * - create a htree_lock_head for the data
1746 + * - each thread (contender) creates its own htree_lock
1747 + * - a contender calls htree_lock(lock_node, mode) to protect the data and
1748 + * calls htree_unlock to release the lock
1750 + * There is also a more complex, advanced use case: a user can take a
1751 + * PW/PR lock on a particular key; this is mostly used while the user holds
1752 + * a shared lock on the htree (CW, CR):
1754 + * htree_lock(lock_node, HTREE_LOCK_CR); lock the htree with CR
1755 + * htree_node_lock(lock_node, HTREE_LOCK_PR, key...); lock @key with PR
1757 + * htree_node_unlock(lock_node); unlock the key
1759 + * Finally, we can have N levels of this kind of key; all we need to
1760 + * do is specify N levels when creating the htree_lock_head, and then we can
1761 + * lock/unlock a specific level with:
1762 + * htree_node_lock(lock_node, mode1, key1, level1...);
1764 + * htree_node_lock(lock_node, mode1, key2, level2...);
1766 + * htree_node_unlock(lock_node, level2);
1767 + * htree_node_unlock(lock_node, level1);
1769 + * NB: with multiple levels, be careful about locking order to avoid deadlocks
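/*
 * Putting it together, a minimal sketch (the names and the two-level
 * layout are illustrative only):
 *
 *	struct htree_lock_head *lh = htree_lock_head_alloc(2, HTREE_HBITS_DEF, 0);
 *	struct htree_lock *lck = htree_lock_alloc(2, 0);
 *
 *	htree_lock(lck, lh, HTREE_LOCK_CW);		(shared tree lock)
 *	htree_node_lock(lck, HTREE_LOCK_PR, key1, 0);	(PR on key1, level 0)
 *	htree_node_lock(lck, HTREE_LOCK_PW, key2, 1);	(PW on key2, level 1)
 *	... access the data ...
 *	htree_unlock(lck);	(also drops any node locks still held)
 *
 *	htree_lock_free(lck);
 *	htree_lock_head_free(lh);
 */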
1772 +#ifndef _LINUX_HTREE_LOCK_H
1773 +#define _LINUX_HTREE_LOCK_H
1775 +#include <linux/list.h>
1776 +#include <linux/spinlock.h>
1777 +#include <linux/sched.h>
1781 + * more details can be found here:
1782 + * http://en.wikipedia.org/wiki/Distributed_lock_manager
1785 + HTREE_LOCK_EX = 0, /* exclusive lock: incompatible with all others */
1786 + HTREE_LOCK_PW, /* protected write: allows only CR users */
1787 + HTREE_LOCK_PR, /* protected read: allow PR, CR users */
1788 + HTREE_LOCK_CW, /* concurrent write: allow CR, CW users */
1789 + HTREE_LOCK_CR, /* concurrent read: allow all but EX users */
1790 + HTREE_LOCK_MAX, /* number of lock modes */
1791 +} htree_lock_mode_t;
1793 +#define HTREE_LOCK_NL HTREE_LOCK_MAX
1794 +#define HTREE_LOCK_INVAL 0xdead10c
1797 + HTREE_HBITS_MIN = 2,
1798 + HTREE_HBITS_DEF = 14,
1799 + HTREE_HBITS_MAX = 32,
1803 + HTREE_EVENT_DISABLE = (0),
1804 + HTREE_EVENT_RD = (1 << HTREE_LOCK_PR),
1805 + HTREE_EVENT_WR = (1 << HTREE_LOCK_PW),
1806 + HTREE_EVENT_RDWR = (HTREE_EVENT_RD | HTREE_EVENT_WR),
1811 +typedef void (*htree_event_cb_t)(void *target, void *event);
1813 +struct htree_lock_child {
1814 + struct list_head lc_list; /* granted list */
1815 + htree_event_cb_t lc_callback; /* event callback */
1816 + unsigned lc_events; /* event types */
1819 +struct htree_lock_head {
1820 + unsigned long lh_lock; /* bits lock */
1821 + /* blocked lock list (htree_lock) */
1822 + struct list_head lh_blocked_list;
1823 + /* # key levels */
1825 + /* hash bits for key and limit number of locks */
1827 + /* counters for blocked locks */
1828 + u16 lh_nblocked[HTREE_LOCK_MAX];
1829 + /* counters for granted locks */
1830 + u16 lh_ngranted[HTREE_LOCK_MAX];
1831 + /* private data */
1833 + /* array of children locks */
1834 + struct htree_lock_child lh_children[0];
1837 +/* htree_lock_node is the child-lock for a specific key (its major/minor hash) */
1838 +struct htree_lock_node {
1839 + htree_lock_mode_t ln_mode;
1840 + /* major hash key */
1842 + /* minor hash key */
1844 + struct list_head ln_major_list;
1845 + struct list_head ln_minor_list;
1846 + /* alive list, all locks (granted, blocked, listening) are on it */
1847 + struct list_head ln_alive_list;
1848 + /* blocked list */
1849 + struct list_head ln_blocked_list;
1850 + /* granted list */
1851 + struct list_head ln_granted_list;
1852 + void *ln_ev_target;
1855 +struct htree_lock {
1856 + struct task_struct *lk_task;
1857 + struct htree_lock_head *lk_head;
1859 + unsigned lk_depth;
1860 + htree_lock_mode_t lk_mode;
1861 + struct list_head lk_blocked_list;
1862 + struct htree_lock_node lk_nodes[0];
1865 +/* create a lock head, which stands for a resource */
1866 +struct htree_lock_head *htree_lock_head_alloc(unsigned depth,
1867 + unsigned hbits, unsigned priv);
1868 +/* free a lock head */
1869 +void htree_lock_head_free(struct htree_lock_head *lhead);
1870 +/* register event callback for child lock at level @depth */
1871 +void htree_lock_event_attach(struct htree_lock_head *lhead, unsigned depth,
1872 + unsigned events, htree_event_cb_t callback);
1873 +/* create a lock handle, which stands for a thread */
1874 +struct htree_lock *htree_lock_alloc(unsigned depth, unsigned pbytes);
1875 +/* free a lock handle */
1876 +void htree_lock_free(struct htree_lock *lck);
1877 +/* lock the htree; when @wait is false, 0 is returned if the lock can't
1878 + * be granted immediately */
1879 +int htree_lock_try(struct htree_lock *lck, struct htree_lock_head *lhead,
1880 + htree_lock_mode_t mode, int wait);
1882 +void htree_unlock(struct htree_lock *lck);
1883 +/* unlock and relock htree with @new_mode */
1884 +int htree_change_lock_try(struct htree_lock *lck,
1885 + htree_lock_mode_t new_mode, int wait);
1886 +void htree_change_mode(struct htree_lock *lck, htree_lock_mode_t mode);
1887 +/* acquire the child lock (key) of the htree at level @dep; @event will be
1888 + * sent to all listeners on this @key when the lock is granted */
1889 +int htree_node_lock_try(struct htree_lock *lck, htree_lock_mode_t mode,
1890 + u32 key, unsigned dep, int wait, void *event);
1891 +/* release the child lock at level @dep; this lock keeps listening on its
1892 + * key if @event isn't NULL, and event_cb will be called against @lck when
1893 + * any other lock at level @dep with the same key is granted */
1894 +void htree_node_unlock(struct htree_lock *lck, unsigned dep, void *event);
1895 +/* stop listening on child lock at level @dep */
1896 +void htree_node_stop_listen(struct htree_lock *lck, unsigned dep);
1898 +void htree_lock_stat_print(int depth);
1899 +void htree_lock_stat_reset(void);
1901 +#define htree_lock(lck, lh, mode) htree_lock_try(lck, lh, mode, 1)
1902 +#define htree_change_lock(lck, mode) htree_change_lock_try(lck, mode, 1)
1904 +#define htree_lock_mode(lck) ((lck)->lk_mode)
1906 +#define htree_node_lock(lck, mode, key, dep) \
1907 + htree_node_lock_try(lck, mode, key, dep, 1, NULL)
1908 +/* this is only safe in thread context of lock owner */
1909 +#define htree_node_is_granted(lck, dep) \
1910 + ((lck)->lk_nodes[dep].ln_mode != HTREE_LOCK_INVAL && \
1911 + (lck)->lk_nodes[dep].ln_mode != HTREE_LOCK_NL)
1912 +/* this is only safe in thread context of lock owner */
1913 +#define htree_node_is_listening(lck, dep) \
1914 + ((lck)->lk_nodes[dep].ln_mode == HTREE_LOCK_NL)