[fs/lustre-release.git] ldiskfs/kernel_patches/patches/linux-5.4/ext4-pdirop.patch
1 From 1a0f7f0b9c13ef0aa86e125f350b6733bff8db3c Mon Sep 17 00:00:00 2001
2 From: Liang Zhen <liang.zhen@intel.com>
3 Date: Wed, 15 Jan 2020 07:35:13 -0600
4 Subject: [PATCH] LU-50 ldiskfs: parallel directory operations for ext4
5
6  In a typical use case an application creates a separate output file for each
7  node and task in a job. As nodes and tasks increase, hundreds of thousands of
8  files may be created in a single directory within a short window of time.
9  Today, both filename lookup and filesystem-modifying operations (such as
10  create and unlink) are protected by a single lock for an entire ldiskfs
11  directory. The PDO project removes this bottleneck by introducing a parallel
12  locking mechanism for ldiskfs directories, allowing multiple application
13  threads to look up, create and unlink entries in the same directory in
14  parallel.
15
16 This patch contains:
17  - pdirops support for ldiskfs
18  - integration with osd-ldiskfs
19 Signed-off-by: Liang Zhen <liang.zhen@intel.com>
20 Change-Id: I269c0e3112e68f3acd79e860dab052a68c7d7aaa
21 ---
22  fs/ext4/Makefile           |   1 +
23  fs/ext4/ext4.h             |  78 ++++
24  fs/ext4/htree_lock.c       | 891 +++++++++++++++++++++++++++++++++++++
25  fs/ext4/namei.c            | 454 +++++++++++++++++--
26  fs/ext4/super.c            |   1 +
27  include/linux/htree_lock.h | 187 ++++++++
28  6 files changed, 1572 insertions(+), 40 deletions(-)
29  create mode 100644 fs/ext4/htree_lock.c
30  create mode 100644 include/linux/htree_lock.h
31
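For context, the sketch below shows how a pdirops-aware caller (osd-ldiskfs in Lustre) might drive the API this patch exports; it is not part of the patch. The lock head would normally be cached per directory inode rather than allocated per call, the 16 hash bits are an arbitrary choice here, and error handling is reduced to the minimum.

/*
 * Caller-side sketch (not part of the patch): one htree_lock_head per
 * directory is shared by all threads; each thread allocates its own
 * htree_lock and takes it in the mode matching the operation.
 */
#include <linux/fs.h>
#include <linux/htree_lock.h>
#include "ext4.h"

static int pdo_lookup_then_add(handle_t *handle, struct inode *dir,
                               struct dentry *dentry, struct inode *inode)
{
        struct htree_lock_head *lhead;
        struct htree_lock *lck;
        struct ext4_dir_entry_2 *de;
        struct buffer_head *bh;
        int rc = 0;

        lhead = ext4_htree_lock_head_alloc(16);      /* hbits: arbitrary here */
        if (lhead == NULL)
                return -ENOMEM;
        lck = ext4_htree_lock_alloc();               /* one per thread/op */
        if (lck == NULL) {
                ext4_htree_lock_head_free(lhead);
                return -ENOMEM;
        }

        /* shared lookup: runs concurrently with other lookups/creates/unlinks */
        ext4_htree_lock(lck, lhead, dir, EXT4_HLOCK_LOOKUP);
        bh = ext4_find_entry_locked(dir, &dentry->d_name, &de, NULL, lck);
        ext4_htree_unlock(lck);
        if (IS_ERR(bh)) {
                rc = PTR_ERR(bh);
                goto out;
        }
        brelse(bh);     /* NULL-safe; a real caller would handle "exists" */

        /* insert under a CW (concurrent-write) directory lock */
        ext4_htree_lock(lck, lhead, dir, EXT4_HLOCK_ADD);
        rc = ext4_add_entry_locked(handle, dentry, inode, lck);
        ext4_htree_unlock(lck);
out:
        ext4_htree_lock_free(lck);
        ext4_htree_lock_head_free(lhead);
        return rc;
}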
32 Index: linux-stage/fs/ext4/ext4.h
33 ===================================================================
34 --- linux-stage.orig/fs/ext4/ext4.h
35 +++ linux-stage/fs/ext4/ext4.h
36 @@ -29,6 +29,7 @@
37  #include <linux/timer.h>
38  #include <linux/version.h>
39  #include <linux/wait.h>
40 +#include <linux/htree_lock.h>
41  #include <linux/sched/signal.h>
42  #include <linux/blockgroup_lock.h>
43  #include <linux/percpu_counter.h>
44 @@ -961,6 +962,9 @@ struct ext4_inode_info {
45         __u32   i_dtime;
46         ext4_fsblk_t    i_file_acl;
47  
48 +       /* following fields for parallel directory operations -bzzz */
49 +       struct semaphore i_append_sem;
50 +
51         /*
52          * i_block_group is the number of the block group which contains
53          * this file's inode.  Constant across the lifetime of the inode,
54 @@ -2207,6 +2211,72 @@ struct dx_hash_info
55   */
56  #define HASH_NB_ALWAYS         1
57  
58 +/* assume name-hash is protected by upper layer */
59 +#define EXT4_HTREE_LOCK_HASH   0
60 +
61 +enum ext4_pdo_lk_types {
62 +#if EXT4_HTREE_LOCK_HASH
63 +       EXT4_LK_HASH,
64 +#endif
65 +       EXT4_LK_DX,             /* index block */
66 +       EXT4_LK_DE,             /* directory entry block */
67 +       EXT4_LK_SPIN,           /* spinlock */
68 +       EXT4_LK_MAX,
69 +};
70 +
71 +/* read-only bit */
72 +#define EXT4_LB_RO(b)          (1 << (b))
73 +/* read + write, high bits for writer */
74 +#define EXT4_LB_RW(b)          ((1 << (b)) | (1 << (EXT4_LK_MAX + (b))))
75 +
76 +enum ext4_pdo_lock_bits {
77 +       /* DX lock bits */
78 +       EXT4_LB_DX_RO           = EXT4_LB_RO(EXT4_LK_DX),
79 +       EXT4_LB_DX              = EXT4_LB_RW(EXT4_LK_DX),
80 +       /* DE lock bits */
81 +       EXT4_LB_DE_RO           = EXT4_LB_RO(EXT4_LK_DE),
82 +       EXT4_LB_DE              = EXT4_LB_RW(EXT4_LK_DE),
83 +       /* DX spinlock bits */
84 +       EXT4_LB_SPIN_RO         = EXT4_LB_RO(EXT4_LK_SPIN),
85 +       EXT4_LB_SPIN            = EXT4_LB_RW(EXT4_LK_SPIN),
86 +       /* accurate searching */
87 +       EXT4_LB_EXACT           = EXT4_LB_RO(EXT4_LK_MAX << 1),
88 +};
89 +
90 +enum ext4_pdo_lock_opc {
91 +       /* external */
92 +       EXT4_HLOCK_READDIR      = (EXT4_LB_DE_RO | EXT4_LB_DX_RO),
93 +       EXT4_HLOCK_LOOKUP       = (EXT4_LB_DE_RO | EXT4_LB_SPIN_RO |
94 +                                  EXT4_LB_EXACT),
95 +       EXT4_HLOCK_DEL          = (EXT4_LB_DE | EXT4_LB_SPIN_RO |
96 +                                  EXT4_LB_EXACT),
97 +       EXT4_HLOCK_ADD          = (EXT4_LB_DE | EXT4_LB_SPIN_RO),
98 +
99 +       /* internal */
100 +       EXT4_HLOCK_LOOKUP_SAFE  = (EXT4_LB_DE_RO | EXT4_LB_DX_RO |
101 +                                  EXT4_LB_EXACT),
102 +       EXT4_HLOCK_DEL_SAFE     = (EXT4_LB_DE | EXT4_LB_DX_RO | EXT4_LB_EXACT),
103 +       EXT4_HLOCK_SPLIT        = (EXT4_LB_DE | EXT4_LB_DX | EXT4_LB_SPIN),
104 +};
105 +
106 +extern struct htree_lock_head *ext4_htree_lock_head_alloc(unsigned hbits);
107 +#define ext4_htree_lock_head_free(lhead)       htree_lock_head_free(lhead)
108 +
109 +extern struct htree_lock *ext4_htree_lock_alloc(void);
110 +#define ext4_htree_lock_free(lck)              htree_lock_free(lck)
111 +
112 +extern void ext4_htree_lock(struct htree_lock *lck,
113 +                           struct htree_lock_head *lhead,
114 +                           struct inode *dir, unsigned flags);
115 +#define ext4_htree_unlock(lck)                  htree_unlock(lck)
116 +
117 +extern struct buffer_head *ext4_find_entry_locked(struct inode *dir,
118 +                                       const struct qstr *d_name,
119 +                                       struct ext4_dir_entry_2 **res_dir,
120 +                                       int *inlined, struct htree_lock *lck);
121 +extern int ext4_add_entry_locked(handle_t *handle, struct dentry *dentry,
122 +                     struct inode *inode, struct htree_lock *lck);
123 +
124  struct ext4_filename {
125         const struct qstr *usr_fname;
126         struct fscrypt_str disk_name;
127 @@ -2574,12 +2644,21 @@ void ext4_insert_dentry(struct inode *in
128                         struct ext4_filename *fname, void *data);
129  static inline void ext4_update_dx_flag(struct inode *inode)
130  {
131 +       /* Disable it for ldiskfs, because going from a DX directory to
132 +        * a non-DX directory while it is in use will completely break
133 +        * the htree-locking.
134 +        * If we really want to support this operation in the future,
135 +        * we need to exclusively lock the directory here, which will
136 +        * increase the complexity of the code
137 +        */
138 +#if 0
139         if (!ext4_has_feature_dir_index(inode->i_sb) &&
140             ext4_test_inode_flag(inode, EXT4_INODE_INDEX)) {
141                 /* ext4_iget() should have caught this... */
142                 WARN_ON_ONCE(ext4_has_feature_metadata_csum(inode->i_sb));
143                 ext4_clear_inode_flag(inode, EXT4_INODE_INDEX);
144         }
145 +#endif
146  }
147  static const unsigned char ext4_filetype_table[] = {
148         DT_UNKNOWN, DT_REG, DT_DIR, DT_CHR, DT_BLK, DT_FIFO, DT_SOCK, DT_LNK
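For reference, the standalone userspace sketch below (not part of the patch) reproduces the lock-bit encoding declared in the ext4.h hunk above, assuming EXT4_HTREE_LOCK_HASH stays 0: each lock type owns one low "reader" bit, EXT4_LB_RW() additionally sets the mirrored bit shifted by EXT4_LK_MAX to request the writer side, and the operation opcodes are simple ORs of these masks.

/* Userspace illustration of the ext4 pdirop lock-bit layout (not patch code). */
#include <stdio.h>

enum { LK_DX, LK_DE, LK_SPIN, LK_MAX };          /* EXT4_HTREE_LOCK_HASH == 0 */

#define LB_RO(b)   (1u << (b))                            /* read-only bit */
#define LB_RW(b)   ((1u << (b)) | (1u << (LK_MAX + (b)))) /* read + write bits */
#define LB_EXACT   LB_RO(LK_MAX << 1)                     /* accurate searching */

int main(void)
{
        printf("DE_RO=0x%02x DE=0x%02x DX_RO=0x%02x DX=0x%02x SPIN=0x%02x\n",
               LB_RO(LK_DE), LB_RW(LK_DE), LB_RO(LK_DX), LB_RW(LK_DX),
               LB_RW(LK_SPIN));
        printf("READDIR=0x%02x LOOKUP=0x%02x DEL=0x%02x ADD=0x%02x SPLIT=0x%02x\n",
               LB_RO(LK_DE) | LB_RO(LK_DX),                   /* 0x03 */
               LB_RO(LK_DE) | LB_RO(LK_SPIN) | LB_EXACT,      /* 0x46 */
               LB_RW(LK_DE) | LB_RO(LK_SPIN) | LB_EXACT,      /* 0x56 */
               LB_RW(LK_DE) | LB_RO(LK_SPIN),                 /* 0x16 */
               LB_RW(LK_DE) | LB_RW(LK_DX) | LB_RW(LK_SPIN)); /* 0x3f */
        return 0;
}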
149 Index: linux-stage/fs/ext4/Makefile
150 ===================================================================
151 --- linux-stage.orig/fs/ext4/Makefile
152 +++ linux-stage/fs/ext4/Makefile
153 @@ -7,6 +7,7 @@ obj-$(CONFIG_EXT4_FS) += ext4.o
154  
155  ext4-y := balloc.o bitmap.o block_validity.o dir.o ext4_jbd2.o extents.o \
156                 extents_status.o file.o fsmap.o fsync.o hash.o ialloc.o \
157 +               htree_lock.o \
158                 indirect.o inline.o inode.o ioctl.o mballoc.o migrate.o \
159                 mmp.o move_extent.o namei.o page-io.o readpage.o resize.o \
160                 super.o symlink.o sysfs.o xattr.o xattr_trusted.o xattr_user.o
161 Index: linux-stage/fs/ext4/namei.c
162 ===================================================================
163 --- linux-stage.orig/fs/ext4/namei.c
164 +++ linux-stage/fs/ext4/namei.c
165 @@ -55,6 +55,7 @@ struct buffer_head *ext4_append(handle_t
166                                         ext4_lblk_t *block)
167  {
168         struct buffer_head *bh;
169 +       struct ext4_inode_info *ei = EXT4_I(inode);
170         int err;
171  
172         if (unlikely(EXT4_SB(inode->i_sb)->s_max_dir_size_kb &&
173 @@ -62,15 +63,22 @@ struct buffer_head *ext4_append(handle_t
174                       EXT4_SB(inode->i_sb)->s_max_dir_size_kb)))
175                 return ERR_PTR(-ENOSPC);
176  
177 +       /* with parallel dir operations all appends
178 +        * have to be serialized -bzzz */
179 +       down(&ei->i_append_sem);
180 +
181         *block = inode->i_size >> inode->i_sb->s_blocksize_bits;
182  
183         bh = ext4_bread(handle, inode, *block, EXT4_GET_BLOCKS_CREATE);
184 -       if (IS_ERR(bh))
185 +       if (IS_ERR(bh)) {
186 +               up(&ei->i_append_sem);
187                 return bh;
188 +       }
189         inode->i_size += inode->i_sb->s_blocksize;
190         EXT4_I(inode)->i_disksize = inode->i_size;
191         BUFFER_TRACE(bh, "get_write_access");
192         err = ext4_journal_get_write_access(handle, bh);
193 +       up(&ei->i_append_sem);
194         if (err) {
195                 brelse(bh);
196                 ext4_std_error(inode->i_sb, err);
197 @@ -264,7 +272,8 @@ static unsigned dx_node_limit(struct ino
198  static struct dx_frame *dx_probe(struct ext4_filename *fname,
199                                  struct inode *dir,
200                                  struct dx_hash_info *hinfo,
201 -                                struct dx_frame *frame);
202 +                                struct dx_frame *frame,
203 +                                struct htree_lock *lck);
204  static void dx_release(struct dx_frame *frames);
205  static int dx_make_map(struct inode *dir, struct ext4_dir_entry_2 *de,
206                        unsigned blocksize, struct dx_hash_info *hinfo,
207 @@ -278,12 +287,13 @@ static void dx_insert_block(struct dx_fr
208  static int ext4_htree_next_block(struct inode *dir, __u32 hash,
209                                  struct dx_frame *frame,
210                                  struct dx_frame *frames,
211 -                                __u32 *start_hash);
212 +                                __u32 *start_hash, struct htree_lock *lck);
213  static struct buffer_head * ext4_dx_find_entry(struct inode *dir,
214                 struct ext4_filename *fname,
215 -               struct ext4_dir_entry_2 **res_dir);
216 +               struct ext4_dir_entry_2 **res_dir, struct htree_lock *lck);
217  static int ext4_dx_add_entry(handle_t *handle, struct ext4_filename *fname,
218 -                            struct inode *dir, struct inode *inode);
219 +                            struct inode *dir, struct inode *inode,
220 +                            struct htree_lock *lck);
221  
222  /* checksumming functions */
223  void ext4_initialize_dirent_tail(struct buffer_head *bh,
224 @@ -748,6 +758,227 @@ struct stats dx_show_entries(struct dx_h
225  }
226  #endif /* DX_DEBUG */
227  
228 +/* private data for htree_lock */
229 +struct ext4_dir_lock_data {
230 +       unsigned                ld_flags;  /* bits-map for lock types */
231 +       unsigned                ld_count;  /* # entries of the last DX block */
232 +       struct dx_entry         ld_at_entry; /* copy of leaf dx_entry */
233 +       struct dx_entry         *ld_at;    /* position of leaf dx_entry */
234 +};
235 +
236 +#define ext4_htree_lock_data(l)        ((struct ext4_dir_lock_data *)(l)->lk_private)
237 +#define ext4_find_entry(dir, name, dirent, inline) \
238 +                       ext4_find_entry_locked(dir, name, dirent, inline, NULL)
239 +#define ext4_add_entry(handle, dentry, inode) \
240 +                       ext4_add_entry_locked(handle, dentry, inode, NULL)
241 +
242 +/* NB: ext4_lblk_t is 32 bits so we use high bits to identify invalid blk */
243 +#define EXT4_HTREE_NODE_CHANGED        (0xcafeULL << 32)
244 +
245 +static void ext4_htree_event_cb(void *target, void *event)
246 +{
247 +       u64 *block = (u64 *)target;
248 +
249 +       if (*block == dx_get_block((struct dx_entry *)event))
250 +               *block = EXT4_HTREE_NODE_CHANGED;
251 +}
252 +
253 +struct htree_lock_head *ext4_htree_lock_head_alloc(unsigned hbits)
254 +{
255 +       struct htree_lock_head *lhead;
256 +
257 +       lhead = htree_lock_head_alloc(EXT4_LK_MAX, hbits, 0);
258 +       if (lhead != NULL) {
259 +               htree_lock_event_attach(lhead, EXT4_LK_SPIN, HTREE_EVENT_WR,
260 +                                       ext4_htree_event_cb);
261 +       }
262 +       return lhead;
263 +}
264 +EXPORT_SYMBOL(ext4_htree_lock_head_alloc);
265 +
266 +struct htree_lock *ext4_htree_lock_alloc(void)
267 +{
268 +       return htree_lock_alloc(EXT4_LK_MAX,
269 +                               sizeof(struct ext4_dir_lock_data));
270 +}
271 +EXPORT_SYMBOL(ext4_htree_lock_alloc);
272 +
273 +static htree_lock_mode_t ext4_htree_mode(unsigned flags)
274 +{
275 +       switch (flags) {
276 +       default: /* 0 or unknown flags require EX lock */
277 +               return HTREE_LOCK_EX;
278 +       case EXT4_HLOCK_READDIR:
279 +               return HTREE_LOCK_PR;
280 +       case EXT4_HLOCK_LOOKUP:
281 +               return HTREE_LOCK_CR;
282 +       case EXT4_HLOCK_DEL:
283 +       case EXT4_HLOCK_ADD:
284 +               return HTREE_LOCK_CW;
285 +       }
286 +}
287 +
288 +/* return PR for read-only operations, otherwise return EX */
289 +static inline htree_lock_mode_t ext4_htree_safe_mode(unsigned flags)
290 +{
291 +       int writer = (flags & EXT4_LB_DE) == EXT4_LB_DE;
292 +
293 +       /* 0 requires EX lock */
294 +       return (flags == 0 || writer) ? HTREE_LOCK_EX : HTREE_LOCK_PR;
295 +}
296 +
297 +static int ext4_htree_safe_locked(struct htree_lock *lck)
298 +{
299 +       int writer;
300 +
301 +       if (lck == NULL || lck->lk_mode == HTREE_LOCK_EX)
302 +               return 1;
303 +
304 +       writer = (ext4_htree_lock_data(lck)->ld_flags & EXT4_LB_DE) ==
305 +                EXT4_LB_DE;
306 +       if (writer) /* all readers & writers are excluded? */
307 +               return lck->lk_mode == HTREE_LOCK_EX;
308 +
309 +       /* all writers are excluded? */
310 +       return lck->lk_mode == HTREE_LOCK_PR ||
311 +              lck->lk_mode == HTREE_LOCK_PW ||
312 +              lck->lk_mode == HTREE_LOCK_EX;
313 +}
314 +
315 +/* relock htree_lock with EX mode if it's a change operation, otherwise
316 + * relock it with PR mode. It's a noop if PDO is disabled. */
317 +static void ext4_htree_safe_relock(struct htree_lock *lck)
318 +{
319 +       if (!ext4_htree_safe_locked(lck)) {
320 +               unsigned flags = ext4_htree_lock_data(lck)->ld_flags;
321 +
322 +               htree_change_lock(lck, ext4_htree_safe_mode(flags));
323 +       }
324 +}
325 +
326 +void ext4_htree_lock(struct htree_lock *lck, struct htree_lock_head *lhead,
327 +                    struct inode *dir, unsigned flags)
328 +{
329 +       htree_lock_mode_t mode = is_dx(dir) ? ext4_htree_mode(flags) :
330 +                                             ext4_htree_safe_mode(flags);
331 +
332 +       ext4_htree_lock_data(lck)->ld_flags = flags;
333 +       htree_lock(lck, lhead, mode);
334 +       if (!is_dx(dir))
335 +               ext4_htree_safe_relock(lck); /* make sure it's safe locked */
336 +}
337 +EXPORT_SYMBOL(ext4_htree_lock);
338 +
339 +static int ext4_htree_node_lock(struct htree_lock *lck, struct dx_entry *at,
340 +                               unsigned lmask, int wait, void *ev)
341 +{
342 +       u32     key = (at == NULL) ? 0 : dx_get_block(at);
343 +       u32     mode;
344 +
345 +       /* NOOP if htree is well protected or caller doesn't require the lock */
346 +       if (ext4_htree_safe_locked(lck) ||
347 +          !(ext4_htree_lock_data(lck)->ld_flags & lmask))
348 +               return 1;
349 +
350 +       mode = (ext4_htree_lock_data(lck)->ld_flags & lmask) == lmask ?
351 +               HTREE_LOCK_PW : HTREE_LOCK_PR;
352 +       while (1) {
353 +               if (htree_node_lock_try(lck, mode, key, ffz(~lmask), wait, ev))
354 +                       return 1;
355 +               if (!(lmask & EXT4_LB_SPIN)) /* not a spinlock */
356 +                       return 0;
357 +               cpu_relax(); /* spin until granted */
358 +       }
359 +}
360 +
361 +static int ext4_htree_node_locked(struct htree_lock *lck, unsigned lmask)
362 +{
363 +       return ext4_htree_safe_locked(lck) ||
364 +              htree_node_is_granted(lck, ffz(~lmask));
365 +}
366 +
367 +static void ext4_htree_node_unlock(struct htree_lock *lck,
368 +                                  unsigned lmask, void *buf)
369 +{
370 +       /* NB: it's safe to call multiple times, even if it's not locked */
371 +       if (!ext4_htree_safe_locked(lck) &&
372 +            htree_node_is_granted(lck, ffz(~lmask)))
373 +               htree_node_unlock(lck, ffz(~lmask), buf);
374 +}
375 +
376 +#define ext4_htree_dx_lock(lck, key)           \
377 +       ext4_htree_node_lock(lck, key, EXT4_LB_DX, 1, NULL)
378 +#define ext4_htree_dx_lock_try(lck, key)       \
379 +       ext4_htree_node_lock(lck, key, EXT4_LB_DX, 0, NULL)
380 +#define ext4_htree_dx_unlock(lck)              \
381 +       ext4_htree_node_unlock(lck, EXT4_LB_DX, NULL)
382 +#define ext4_htree_dx_locked(lck)              \
383 +       ext4_htree_node_locked(lck, EXT4_LB_DX)
384 +
385 +static void ext4_htree_dx_need_lock(struct htree_lock *lck)
386 +{
387 +       struct ext4_dir_lock_data *ld;
388 +
389 +       if (ext4_htree_safe_locked(lck))
390 +               return;
391 +
392 +       ld = ext4_htree_lock_data(lck);
393 +       switch (ld->ld_flags) {
394 +       default:
395 +               return;
396 +       case EXT4_HLOCK_LOOKUP:
397 +               ld->ld_flags = EXT4_HLOCK_LOOKUP_SAFE;
398 +               return;
399 +       case EXT4_HLOCK_DEL:
400 +               ld->ld_flags = EXT4_HLOCK_DEL_SAFE;
401 +               return;
402 +       case EXT4_HLOCK_ADD:
403 +               ld->ld_flags = EXT4_HLOCK_SPLIT;
404 +               return;
405 +       }
406 +}
407 +
408 +#define ext4_htree_de_lock(lck, key)           \
409 +       ext4_htree_node_lock(lck, key, EXT4_LB_DE, 1, NULL)
410 +#define ext4_htree_de_unlock(lck)              \
411 +       ext4_htree_node_unlock(lck, EXT4_LB_DE, NULL)
412 +
413 +#define ext4_htree_spin_lock(lck, key, event)  \
414 +       ext4_htree_node_lock(lck, key, EXT4_LB_SPIN, 0, event)
415 +#define ext4_htree_spin_unlock(lck)            \
416 +       ext4_htree_node_unlock(lck, EXT4_LB_SPIN, NULL)
417 +#define ext4_htree_spin_unlock_listen(lck, p)  \
418 +       ext4_htree_node_unlock(lck, EXT4_LB_SPIN, p)
419 +
420 +static void ext4_htree_spin_stop_listen(struct htree_lock *lck)
421 +{
422 +       if (!ext4_htree_safe_locked(lck) &&
423 +           htree_node_is_listening(lck, ffz(~EXT4_LB_SPIN)))
424 +               htree_node_stop_listen(lck, ffz(~EXT4_LB_SPIN));
425 +}
426 +
427 +enum {
428 +       DX_HASH_COL_IGNORE,     /* ignore collision while probing frames */
429 +       DX_HASH_COL_YES,        /* there is collision and it does matter */
430 +       DX_HASH_COL_NO,         /* there is no collision */
431 +};
432 +
433 +static int dx_probe_hash_collision(struct htree_lock *lck,
434 +                                  struct dx_entry *entries,
435 +                                  struct dx_entry *at, u32 hash)
436 +{
437 +       if (!(lck && ext4_htree_lock_data(lck)->ld_flags & EXT4_LB_EXACT)) {
438 +               return DX_HASH_COL_IGNORE; /* don't care about collision */
439 +
440 +       } else if (at == entries + dx_get_count(entries) - 1) {
441 +               return DX_HASH_COL_IGNORE; /* not in any leaf of this DX */
442 +
443 +       } else { /* hash collision? */
444 +               return ((dx_get_hash(at + 1) & ~1) == hash) ?
445 +                       DX_HASH_COL_YES : DX_HASH_COL_NO;
446 +       }
447 +}
448 +
449  /*
450   * Probe for a directory leaf block to search.
451   *
452 @@ -759,10 +990,11 @@ struct stats dx_show_entries(struct dx_h
453   */
454  static struct dx_frame *
455  dx_probe(struct ext4_filename *fname, struct inode *dir,
456 -        struct dx_hash_info *hinfo, struct dx_frame *frame_in)
457 +        struct dx_hash_info *hinfo, struct dx_frame *frame_in,
458 +        struct htree_lock *lck)
459  {
460         unsigned count, indirect;
461 -       struct dx_entry *at, *entries, *p, *q, *m;
462 +       struct dx_entry *at, *entries, *p, *q, *m, *dx = NULL;
463         struct dx_root_info *info;
464         struct dx_frame *frame = frame_in;
465         struct dx_frame *ret_err = ERR_PTR(ERR_BAD_DX_DIR);
466 @@ -824,8 +1056,15 @@ dx_probe(struct ext4_filename *fname, st
467  
468         dxtrace(printk("Look up %x", hash));
469         while (1) {
470 +               if (indirect == 0) { /* the last index level */
471 +                       /* NB: ext4_htree_dx_lock() could be a noop if the
472 +                        * DX-lock flag is not set for the current operation */
473 +                       ext4_htree_dx_lock(lck, dx);
474 +                       ext4_htree_spin_lock(lck, dx, NULL);
475 +               }
476                 count = dx_get_count(entries);
477 -               if (!count || count > dx_get_limit(entries)) {
478 +               if (count == 0 || count > dx_get_limit(entries)) {
479 +                       ext4_htree_spin_unlock(lck); /* release spin */
480                         ext4_warning_inode(dir,
481                                            "dx entry: count %u beyond limit %u",
482                                            count, dx_get_limit(entries));
483 @@ -864,8 +1103,70 @@ dx_probe(struct ext4_filename *fname, st
484                                dx_get_block(at)));
485                 frame->entries = entries;
486                 frame->at = at;
487 -               if (!indirect--)
488 +
489 +               if (indirect == 0) { /* the last index level */
490 +                       struct ext4_dir_lock_data *ld;
491 +                       u64 myblock;
492 +
493 +                       /* By default we only lock DE-block, however, we will
494 +                        * also lock the last level DX-block if:
495 +                        * a) there is a hash collision
496 +                        *    we will set the DX-lock flag (a few lines below)
497 +                        *    and retry to lock the DX-block,
498 +                        *    see details in dx_probe_hash_collision()
499 +                        * b) it's a retry from splitting
500 +                        *    we need to lock the last level DX-block so nobody
501 +                        *    else can split any leaf blocks under the same
502 +                        *    DX-block, see detail in ext4_dx_add_entry()
503 +                        */
504 +                       if (ext4_htree_dx_locked(lck)) {
505 +                               /* DX-block is locked, just lock DE-block
506 +                                * and return */
507 +                               ext4_htree_spin_unlock(lck);
508 +                               if (!ext4_htree_safe_locked(lck))
509 +                                       ext4_htree_de_lock(lck, frame->at);
510 +                               return frame;
511 +                       }
512 +                       /* it's pdirop and no DX lock */
513 +                       if (dx_probe_hash_collision(lck, entries, at, hash) ==
514 +                           DX_HASH_COL_YES) {
515 +                               /* found a hash collision, set the DX-lock flag
516 +                                * and retry to obtain the DX-lock */
517 +                               ext4_htree_spin_unlock(lck);
518 +                               ext4_htree_dx_need_lock(lck);
519 +                               continue;
520 +                       }
521 +                       ld = ext4_htree_lock_data(lck);
522 +                       /* because I don't lock DX, @at can't be trusted
523 +                        * after I release the spinlock, so I have to save it */
524 +                       ld->ld_at = at;
525 +                       ld->ld_at_entry = *at;
526 +                       ld->ld_count = dx_get_count(entries);
527 +
528 +                       frame->at = &ld->ld_at_entry;
529 +                       myblock = dx_get_block(at);
530 +
531 +                       /* NB: ordering locking */
532 +                       ext4_htree_spin_unlock_listen(lck, &myblock);
533 +                       /* another thread can split this DE-block because:
534 +                        * a) I don't have a lock on the DE-block yet
535 +                        * b) I released the spinlock on the DX-block
536 +                        * if that happens I can detect it by listening for
537 +                        * the splitting event on this DE-block */
538 +                       ext4_htree_de_lock(lck, frame->at);
539 +                       ext4_htree_spin_stop_listen(lck);
540 +
541 +                       if (myblock == EXT4_HTREE_NODE_CHANGED) {
542 +                               /* someone split this DE-block before
543 +                                * I locked it, I need to retry and lock
544 +                                * valid DE-block */
545 +                               ext4_htree_de_unlock(lck);
546 +                               continue;
547 +                       }
548                         return frame;
549 +               }
550 +               dx = at;
551 +               indirect--;
552                 frame++;
553                 frame->bh = ext4_read_dirblock(dir, dx_get_block(at), INDEX);
554                 if (IS_ERR(frame->bh)) {
555 @@ -934,7 +1235,7 @@ static void dx_release(struct dx_frame *
556  static int ext4_htree_next_block(struct inode *dir, __u32 hash,
557                                  struct dx_frame *frame,
558                                  struct dx_frame *frames,
559 -                                __u32 *start_hash)
560 +                                __u32 *start_hash, struct htree_lock *lck)
561  {
562         struct dx_frame *p;
563         struct buffer_head *bh;
564 @@ -949,12 +1250,22 @@ static int ext4_htree_next_block(struct
565          * this loop, num_frames indicates the number of interior
566          * nodes need to be read.
567          */
568 +       ext4_htree_de_unlock(lck);
569         while (1) {
570 +               if (num_frames > 0 || ext4_htree_dx_locked(lck)) {
571 +                       /* num_frames > 0 :
572 +                        *   DX block
573 +                        * ext4_htree_dx_locked:
574 +                        *   frame->at is a reliable pointer returned by dx_probe,
575 +                        *   otherwise dx_probe already knew there was no collision */
576                 if (++(p->at) < p->entries + dx_get_count(p->entries))
577                         break;
578 +               }
579                 if (p == frames)
580                         return 0;
581                 num_frames++;
582 +               if (num_frames == 1)
583 +                       ext4_htree_dx_unlock(lck);
584                 p--;
585         }
586  
587 @@ -977,6 +1288,13 @@ static int ext4_htree_next_block(struct
588          * block so no check is necessary
589          */
590         while (num_frames--) {
591 +               if (num_frames == 0) {
592 +                       /* it's not always necessary, we just don't want to
593 +                        * detect hash collision again */
594 +                       ext4_htree_dx_need_lock(lck);
595 +                       ext4_htree_dx_lock(lck, p->at);
596 +               }
597 +
598                 bh = ext4_read_dirblock(dir, dx_get_block(p->at), INDEX);
599                 if (IS_ERR(bh))
600                         return PTR_ERR(bh);
601 @@ -985,6 +1303,7 @@ static int ext4_htree_next_block(struct
602                 p->bh = bh;
603                 p->at = p->entries = ((struct dx_node *) bh->b_data)->entries;
604         }
605 +       ext4_htree_de_lock(lck, p->at);
606         return 1;
607  }
608  
609 @@ -1132,10 +1451,10 @@ int ext4_htree_fill_tree(struct file *di
610         }
611         hinfo.hash = start_hash;
612         hinfo.minor_hash = 0;
613 -       frame = dx_probe(NULL, dir, &hinfo, frames);
614 +       /* assume it's PR locked */
615 +       frame = dx_probe(NULL, dir, &hinfo, frames, NULL);
616         if (IS_ERR(frame))
617                 return PTR_ERR(frame);
618 -
619         /* Add '.' and '..' from the htree header */
620         if (!start_hash && !start_minor_hash) {
621                 de = (struct ext4_dir_entry_2 *) frames[0].bh->b_data;
622 @@ -1175,7 +1494,7 @@ int ext4_htree_fill_tree(struct file *di
623                 count += ret;
624                 hashval = ~0;
625                 ret = ext4_htree_next_block(dir, HASH_NB_ALWAYS,
626 -                                           frame, frames, &hashval);
627 +                                           frame, frames, &hashval, NULL);
628                 *next_hash = hashval;
629                 if (ret < 0) {
630                         err = ret;
631 @@ -1451,7 +1770,7 @@ static int is_dx_internal_node(struct in
632  static struct buffer_head *__ext4_find_entry(struct inode *dir,
633                                              struct ext4_filename *fname,
634                                              struct ext4_dir_entry_2 **res_dir,
635 -                                            int *inlined)
636 +                                            int *inlined, struct htree_lock *lck)
637  {
638         struct super_block *sb;
639         struct buffer_head *bh_use[NAMEI_RA_SIZE];
640 @@ -1493,7 +1812,7 @@ static struct buffer_head *__ext4_find_e
641                 goto restart;
642         }
643         if (is_dx(dir)) {
644 -               ret = ext4_dx_find_entry(dir, fname, res_dir);
645 +               ret = ext4_dx_find_entry(dir, fname, res_dir, lck);
646                 /*
647                  * On success, or if the error was file not found,
648                  * return.  Otherwise, fall back to doing a search the
649 @@ -1503,6 +1822,7 @@ static struct buffer_head *__ext4_find_e
650                         goto cleanup_and_exit;
651                 dxtrace(printk(KERN_DEBUG "ext4_find_entry: dx failed, "
652                                "falling back\n"));
653 +               ext4_htree_safe_relock(lck);
654                 ret = NULL;
655         }
656         nblocks = dir->i_size >> EXT4_BLOCK_SIZE_BITS(sb);
657 @@ -1591,10 +1911,10 @@ cleanup_and_exit:
658         return ret;
659  }
660  
661 -static struct buffer_head *ext4_find_entry(struct inode *dir,
662 +struct buffer_head *ext4_find_entry_locked(struct inode *dir,
663                                            const struct qstr *d_name,
664                                            struct ext4_dir_entry_2 **res_dir,
665 -                                          int *inlined)
666 +                                          int *inlined, struct htree_lock *lck)
667  {
668         int err;
669         struct ext4_filename fname;
670 @@ -1606,12 +1926,14 @@ static struct buffer_head *ext4_find_ent
671         if (err)
672                 return ERR_PTR(err);
673  
674 -       bh = __ext4_find_entry(dir, &fname, res_dir, inlined);
675 +       bh = __ext4_find_entry(dir, &fname, res_dir, inlined, lck);
676  
677         ext4_fname_free_filename(&fname);
678         return bh;
679  }
680  
681 +EXPORT_SYMBOL(ext4_find_entry_locked);
682 +
683  static struct buffer_head *ext4_lookup_entry(struct inode *dir,
684                                              struct dentry *dentry,
685                                              struct ext4_dir_entry_2 **res_dir)
686 @@ -1626,7 +1948,7 @@ static struct buffer_head *ext4_lookup_e
687         if (err)
688                 return ERR_PTR(err);
689  
690 -       bh = __ext4_find_entry(dir, &fname, res_dir, NULL);
691 +       bh = __ext4_find_entry(dir, &fname, res_dir, NULL, NULL);
692  
693         ext4_fname_free_filename(&fname);
694         return bh;
695 @@ -1634,7 +1956,8 @@ static struct buffer_head *ext4_lookup_e
696  
697  static struct buffer_head * ext4_dx_find_entry(struct inode *dir,
698                         struct ext4_filename *fname,
699 -                       struct ext4_dir_entry_2 **res_dir)
700 +                       struct ext4_dir_entry_2 **res_dir,
701 +                       struct htree_lock *lck)
702  {
703         struct super_block * sb = dir->i_sb;
704         struct dx_frame frames[EXT4_HTREE_LEVEL], *frame;
705 @@ -1645,7 +1968,7 @@ static struct buffer_head * ext4_dx_find
706  #ifdef CONFIG_FS_ENCRYPTION
707         *res_dir = NULL;
708  #endif
709 -       frame = dx_probe(fname, dir, NULL, frames);
710 +       frame = dx_probe(fname, dir, NULL, frames, lck);
711         if (IS_ERR(frame))
712                 return (struct buffer_head *) frame;
713         do {
714 @@ -1667,7 +1990,7 @@ static struct buffer_head * ext4_dx_find
715  
716                 /* Check to see if we should continue to search */
717                 retval = ext4_htree_next_block(dir, fname->hinfo.hash, frame,
718 -                                              frames, NULL);
719 +                                              frames, NULL, lck);
720                 if (retval < 0) {
721                         ext4_warning_inode(dir,
722                                 "error %d reading directory index block",
723 @@ -1847,8 +2170,9 @@ static struct ext4_dir_entry_2* dx_pack_
724   * Returns pointer to de in block into which the new entry will be inserted.
725   */
726  static struct ext4_dir_entry_2 *do_split(handle_t *handle, struct inode *dir,
727 -                       struct buffer_head **bh,struct dx_frame *frame,
728 -                       struct dx_hash_info *hinfo)
729 +                       struct buffer_head **bh, struct dx_frame *frames,
730 +                       struct dx_frame *frame, struct dx_hash_info *hinfo,
731 +                       struct htree_lock *lck)
732  {
733         unsigned blocksize = dir->i_sb->s_blocksize;
734         unsigned count, continued;
735 @@ -1919,8 +2243,14 @@ static struct ext4_dir_entry_2 *do_split
736                                         hash2, split, count-split));
737  
738         /* Fancy dance to stay within two buffers */
739 -       de2 = dx_move_dirents(data1, data2, map + split, count - split,
740 -                             blocksize);
741 +       if (hinfo->hash < hash2) {
742 +               de2 = dx_move_dirents(data1, data2, map + split,
743 +                                     count - split, blocksize);
744 +       } else {
745 +               /* make sure we will add entry to the same block which
746 +                * we have already locked */
747 +               de2 = dx_move_dirents(data1, data2, map, split, blocksize);
748 +       }
749         de = dx_pack_dirents(data1, blocksize);
750         de->rec_len = ext4_rec_len_to_disk(data1 + (blocksize - csum_size) -
751                                            (char *) de,
752 @@ -1938,12 +2268,21 @@ static struct ext4_dir_entry_2 *do_split
753         dxtrace(dx_show_leaf(dir, hinfo, (struct ext4_dir_entry_2 *) data2,
754                         blocksize, 1));
755  
756 -       /* Which block gets the new entry? */
757 -       if (hinfo->hash >= hash2) {
758 -               swap(*bh, bh2);
759 -               de = de2;
760 -       }
761 +       ext4_htree_spin_lock(lck, frame > frames ? (frame - 1)->at : NULL,
762 +                            frame->at); /* notify block is being split */
763 +       if (hinfo->hash < hash2) {
764         dx_insert_block(frame, hash2 + continued, newblock);
765 +
766 +       } else {
767 +               /* switch block number */
768 +               dx_insert_block(frame, hash2 + continued,
769 +                               dx_get_block(frame->at));
770 +               dx_set_block(frame->at, newblock);
771 +               (frame->at)++;
772 +       }
773 +       ext4_htree_spin_unlock(lck);
774 +       ext4_htree_dx_unlock(lck);
775 +
776         err = ext4_handle_dirty_dirblock(handle, dir, bh2);
777         if (err)
778                 goto journal_error;
779 @@ -2213,7 +2552,7 @@ static int make_indexed_dir(handle_t *ha
780         if (retval)
781                 goto out_frames;        
782  
783 -       de = do_split(handle,dir, &bh2, frame, &fname->hinfo);
784 +       de = do_split(handle, dir, &bh2, frames, frame, &fname->hinfo, NULL);
785         if (IS_ERR(de)) {
786                 retval = PTR_ERR(de);
787                 goto out_frames;
788 @@ -2323,8 +2662,8 @@ out:
789   * may not sleep between calling this and putting something into
790   * the entry, as someone else might have used it while you slept.
791   */
792 -static int ext4_add_entry(handle_t *handle, struct dentry *dentry,
793 -                         struct inode *inode)
794 +int ext4_add_entry_locked(handle_t *handle, struct dentry *dentry,
795 +                         struct inode *inode, struct htree_lock *lck)
796  {
797         struct inode *dir = d_inode(dentry->d_parent);
798         struct buffer_head *bh = NULL;
799 @@ -2375,9 +2714,10 @@ static int ext4_add_entry(handle_t *hand
800                 if (dentry->d_name.len == 2 &&
801                     memcmp(dentry->d_name.name, "..", 2) == 0)
802                         return ext4_update_dotdot(handle, dentry, inode);
803 -               retval = ext4_dx_add_entry(handle, &fname, dir, inode);
804 +               retval = ext4_dx_add_entry(handle, &fname, dir, inode, lck);
805                 if (!retval || (retval != ERR_BAD_DX_DIR))
806                         goto out;
807 +               ext4_htree_safe_relock(lck);
808                 /* Can we just ignore htree data? */
809                 if (ext4_has_metadata_csum(sb)) {
810                         EXT4_ERROR_INODE(dir,
811 @@ -2438,12 +2778,14 @@ out:
812                 ext4_set_inode_state(inode, EXT4_STATE_NEWENTRY);
813         return retval;
814  }
815 +EXPORT_SYMBOL(ext4_add_entry_locked);
816  
817  /*
818   * Returns 0 for success, or a negative error value
819   */
820  static int ext4_dx_add_entry(handle_t *handle, struct ext4_filename *fname,
821 -                            struct inode *dir, struct inode *inode)
822 +                            struct inode *dir, struct inode *inode,
823 +                            struct htree_lock *lck)
824  {
825         struct dx_frame frames[EXT4_HTREE_LEVEL], *frame;
826         struct dx_entry *entries, *at;
827 @@ -2455,7 +2797,7 @@ static int ext4_dx_add_entry(handle_t *h
828  
829  again:
830         restart = 0;
831 -       frame = dx_probe(fname, dir, NULL, frames);
832 +       frame = dx_probe(fname, dir, NULL, frames, lck);
833         if (IS_ERR(frame))
834                 return PTR_ERR(frame);
835         entries = frame->entries;
836 @@ -2490,6 +2832,12 @@ again:
837                 struct dx_node *node2;
838                 struct buffer_head *bh2;
839  
840 +               if (!ext4_htree_safe_locked(lck)) { /* retry with EX lock */
841 +                       ext4_htree_safe_relock(lck);
842 +                       restart = 1;
843 +                       goto cleanup;
844 +               }
845 +
846                 while (frame > frames) {
847                         if (dx_get_count((frame - 1)->entries) <
848                             dx_get_limit((frame - 1)->entries)) {
849 @@ -2591,8 +2939,32 @@ again:
850                         restart = 1;
851                         goto journal_error;
852                 }
853 +       } else if (!ext4_htree_dx_locked(lck)) {
854 +               struct ext4_dir_lock_data *ld = ext4_htree_lock_data(lck);
855 +
856 +               /* not well protected, require DX lock */
857 +               ext4_htree_dx_need_lock(lck);
858 +               at = frame > frames ? (frame - 1)->at : NULL;
859 +
860 +               /* NB: no risk of deadlock because it's just a try.
861 +                *
862 +                * NB: we check ld_count twice, the first time before
863 +                * taking the DX lock, the second time after holding it.
864 +                *
865 +                * NB: we never free directory blocks so far, which
866 +                * means the value returned by dx_get_count() should equal
867 +                * ld->ld_count if nobody split any DE-block under @at,
868 +                * and ld->ld_at still points to valid dx_entry. */
869 +               if ((ld->ld_count != dx_get_count(entries)) ||
870 +                   !ext4_htree_dx_lock_try(lck, at) ||
871 +                   (ld->ld_count != dx_get_count(entries))) {
872 +                       restart = 1;
873 +                       goto cleanup;
874 +               }
875 +               /* OK, I've got DX lock and nothing changed */
876 +               frame->at = ld->ld_at;
877         }
878 -       de = do_split(handle, dir, &bh, frame, &fname->hinfo);
879 +       de = do_split(handle, dir, &bh, frames, frame, &fname->hinfo, lck);
880         if (IS_ERR(de)) {
881                 err = PTR_ERR(de);
882                 goto cleanup;
883 @@ -2603,6 +2975,8 @@ again:
884  journal_error:
885         ext4_std_error(dir->i_sb, err); /* this is a no-op if err == 0 */
886  cleanup:
887 +       ext4_htree_dx_unlock(lck);
888 +       ext4_htree_de_unlock(lck);
889         brelse(bh);
890         dx_release(frames);
891         /* @restart is true means htree-path has been changed, we need to
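The comments in dx_probe() above describe a listen-for-split handshake: before dropping the DX spinlock, the prober saves the chosen leaf block number and registers it as a listener target; do_split() then fires the HTREE_EVENT_WR callback attached in ext4_htree_lock_head_alloc(), and ext4_htree_event_cb() stamps the saved value with EXT4_HTREE_NODE_CHANGED so the prober knows its cached dx_entry is stale and retries. The toy userspace model below (not from the patch, with a simplified stand-in for struct dx_entry) shows just that callback logic.

/* Toy model of the split-detection callback used by the pdirop patch. */
#include <stdio.h>
#include <stdint.h>

#define HTREE_NODE_CHANGED (0xcafeULL << 32)   /* mirrors EXT4_HTREE_NODE_CHANGED */

struct toy_dx_entry { uint32_t block; };       /* simplified stand-in */

/* same shape as ext4_htree_event_cb(): target is the listener's saved block */
static void split_event_cb(void *target, void *event)
{
        uint64_t *block = target;

        if (*block == ((struct toy_dx_entry *)event)->block)
                *block = HTREE_NODE_CHANGED;
}

int main(void)
{
        struct toy_dx_entry at = { .block = 42 };  /* leaf the prober chose */
        uint64_t myblock = at.block;               /* saved before unlocking */

        /* a splitter touches the same leaf while the prober waits for the
         * DE lock; in the kernel this is delivered via htree_lock's
         * event-listen machinery rather than a direct call */
        split_event_cb(&myblock, &at);

        if (myblock == HTREE_NODE_CHANGED)
                printf("leaf split underneath us: retry dx_probe()\n");
        return 0;
}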
892 Index: linux-stage/fs/ext4/super.c
893 ===================================================================
894 --- linux-stage.orig/fs/ext4/super.c
895 +++ linux-stage/fs/ext4/super.c
896 @@ -1086,6 +1086,7 @@ static struct inode *ext4_alloc_inode(st
897  
898         inode_set_iversion(&ei->vfs_inode, 1);
899         spin_lock_init(&ei->i_raw_lock);
900 +       sema_init(&ei->i_append_sem, 1);
901         INIT_LIST_HEAD(&ei->i_prealloc_list);
902         spin_lock_init(&ei->i_prealloc_lock);
903         ext4_es_init_tree(&ei->i_es_tree);