Whamcloud - gitweb
LU-17711 osd-ldiskfs: do not delete dotdot during rename
[fs/lustre-release.git] / ldiskfs / kernel_patches / patches / linux-5.10 / ext4-pdirop.patch
1 LU-50 ldiskfs: pdirops patch for ldiskfs
2
3 Single directory performance is critical for HPC workloads. In a
4 typical use case an application creates a separate output file for
5 each node and task in a job. As nodes and tasks increase, hundreds
6 of thousands of files may be created in a single directory within
7 a short window of time.
8 Today, both filename lookup and file system modifying operations
9 (such as create and unlink) are protected with a single lock for
10 an entire ldiskfs directory. The PDO project will remove this
11 bottleneck by introducing a parallel locking mechanism for entire
12 ldiskfs directories. This work will enable multiple application
13 threads to simultaneously lookup, create and unlink in parallel.
14
15 This patch contains:
16   - pdirops support for ldiskfs
17   - N-level htree directory
18   - integrate with osd-ldiskfs
19
20 Signed-off-by: Liang Zhen <liang@whamcloud.com>
21 Change-Id: I269c0e3112e68f3acd79e860dab052a68c7d7aaa
22 Reviewed-on: http://review.whamcloud.com/375
23 Reviewed-by: Andreas Dilger <adilger@whamcloud.com>
24 ---
25  fs/ext4/Makefile |    1 
26  fs/ext4/ext4.h   |   78 +++++++++
27  fs/ext4/namei.c  |  454 ++++++++++++++++++++++++++++++++++++++++++++++++++-----
28  fs/ext4/super.c  |    1 
29  4 files changed, 494 insertions(+), 40 deletions(-)
30  create mode 100644 fs/ext4/htree_lock.c
31  create mode 100644 include/linux/htree_lock.h
32
33 --- a/fs/ext4/Makefile
34 +++ b/fs/ext4/Makefile
35 @@ -7,6 +7,7 @@ obj-$(CONFIG_EXT4_FS) += ext4.o
36  
37  ext4-y := balloc.o bitmap.o block_validity.o dir.o ext4_jbd2.o extents.o \
38                 extents_status.o file.o fsmap.o fsync.o hash.o ialloc.o \
39 +               htree_lock.o \
40                 indirect.o inline.o inode.o ioctl.o mballoc.o migrate.o \
41                 mmp.o move_extent.o namei.o page-io.o readpage.o resize.o \
42                 super.o symlink.o sysfs.o xattr.o xattr_hurd.o xattr_trusted.o \
43 --- a/fs/ext4/ext4.h
44 +++ b/fs/ext4/ext4.h
45 @@ -28,6 +28,7 @@
46  #include <linux/mutex.h>
47  #include <linux/timer.h>
48  #include <linux/wait.h>
49 +#include <linux/htree_lock.h>
50  #include <linux/sched/signal.h>
51  #include <linux/blockgroup_lock.h>
52  #include <linux/percpu_counter.h>
53 @@ -1003,6 +1004,9 @@ struct ext4_inode_info {
54         __u32   i_dtime;
55         ext4_fsblk_t    i_file_acl;
56  
57 +       /* following fields for parallel directory operations -bzzz */
58 +       struct semaphore i_append_sem;
59 +
60         /*
61          * i_block_group is the number of the block group which contains
62          * this file's inode.  Constant across the lifetime of the inode,
63 @@ -2398,6 +2402,72 @@ struct dx_hash_info
64   */
65  #define HASH_NB_ALWAYS         1
66  
67 +/* assume name-hash is protected by upper layer */
68 +#define EXT4_HTREE_LOCK_HASH   0
69 +
70 +enum ext4_pdo_lk_types {
71 +#if EXT4_HTREE_LOCK_HASH
72 +       EXT4_LK_HASH,
73 +#endif
74 +       EXT4_LK_DX,             /* index block */
75 +       EXT4_LK_DE,             /* directory entry block */
76 +       EXT4_LK_SPIN,           /* spinlock */
77 +       EXT4_LK_MAX,
78 +};
79 +
80 +/* read-only bit */
81 +#define EXT4_LB_RO(b)          (1 << (b))
82 +/* read + write, high bits for writer */
83 +#define EXT4_LB_RW(b)          ((1 << (b)) | (1 << (EXT4_LK_MAX + (b))))
84 +
85 +enum ext4_pdo_lock_bits {
86 +       /* DX lock bits */
87 +       EXT4_LB_DX_RO           = EXT4_LB_RO(EXT4_LK_DX),
88 +       EXT4_LB_DX              = EXT4_LB_RW(EXT4_LK_DX),
89 +       /* DE lock bits */
90 +       EXT4_LB_DE_RO           = EXT4_LB_RO(EXT4_LK_DE),
91 +       EXT4_LB_DE              = EXT4_LB_RW(EXT4_LK_DE),
92 +       /* DX spinlock bits */
93 +       EXT4_LB_SPIN_RO         = EXT4_LB_RO(EXT4_LK_SPIN),
94 +       EXT4_LB_SPIN            = EXT4_LB_RW(EXT4_LK_SPIN),
95 +       /* accurate searching */
96 +       EXT4_LB_EXACT           = EXT4_LB_RO(EXT4_LK_MAX << 1),
97 +};
98 +
99 +enum ext4_pdo_lock_opc {
100 +       /* external */
101 +       EXT4_HLOCK_READDIR      = (EXT4_LB_DE_RO | EXT4_LB_DX_RO),
102 +       EXT4_HLOCK_LOOKUP       = (EXT4_LB_DE_RO | EXT4_LB_SPIN_RO |
103 +                                  EXT4_LB_EXACT),
104 +       EXT4_HLOCK_DEL          = (EXT4_LB_DE | EXT4_LB_SPIN_RO |
105 +                                  EXT4_LB_EXACT),
106 +       EXT4_HLOCK_ADD          = (EXT4_LB_DE | EXT4_LB_SPIN_RO),
107 +
108 +       /* internal */
109 +       EXT4_HLOCK_LOOKUP_SAFE  = (EXT4_LB_DE_RO | EXT4_LB_DX_RO |
110 +                                  EXT4_LB_EXACT),
111 +       EXT4_HLOCK_DEL_SAFE     = (EXT4_LB_DE | EXT4_LB_DX_RO | EXT4_LB_EXACT),
112 +       EXT4_HLOCK_SPLIT        = (EXT4_LB_DE | EXT4_LB_DX | EXT4_LB_SPIN),
113 +};
114 +
115 +extern struct htree_lock_head *ext4_htree_lock_head_alloc(unsigned hbits);
116 +#define ext4_htree_lock_head_free(lhead)       htree_lock_head_free(lhead)
117 +
118 +extern struct htree_lock *ext4_htree_lock_alloc(void);
119 +#define ext4_htree_lock_free(lck)              htree_lock_free(lck)
120 +
121 +extern void ext4_htree_lock(struct htree_lock *lck,
122 +                           struct htree_lock_head *lhead,
123 +                           struct inode *dir, unsigned flags);
124 +#define ext4_htree_unlock(lck)                  htree_unlock(lck)
125 +
126 +extern struct buffer_head *ext4_find_entry_locked(struct inode *dir,
127 +                                       const struct qstr *d_name,
128 +                                       struct ext4_dir_entry_2 **res_dir,
129 +                                       int *inlined, struct htree_lock *lck);
130 +extern int ext4_add_entry_locked(handle_t *handle, struct dentry *dentry,
131 +                     struct inode *inode, struct htree_lock *lck);
132 +
133  struct ext4_filename {
134         const struct qstr *usr_fname;
135         struct fscrypt_str disk_name;
136 @@ -2772,12 +2842,20 @@ void ext4_insert_dentry(struct inode *in
137                         struct ext4_filename *fname, void *data);
138  static inline void ext4_update_dx_flag(struct inode *inode)
139  {
140 +       /* Disable it for ldiskfs, because going from a DX directory to
141 +        * a non-DX directory while it is in use will completely break
142 +        * the htree-locking.
143 +        * If we really want to support this operation in the future,
144 +        * we need to exclusively lock the directory at here which will
145 +        * increase complexity of code */
146 +#if 0
147         if (!ext4_has_feature_dir_index(inode->i_sb) &&
148             ext4_test_inode_flag(inode, EXT4_INODE_INDEX)) {
149                 /* ext4_iget() should have caught this... */
150                 WARN_ON_ONCE(ext4_has_feature_metadata_csum(inode->i_sb));
151                 ext4_clear_inode_flag(inode, EXT4_INODE_INDEX);
152         }
153 +#endif
154  }
155  static const unsigned char ext4_filetype_table[] = {
156         DT_UNKNOWN, DT_REG, DT_DIR, DT_CHR, DT_BLK, DT_FIFO, DT_SOCK, DT_LNK
157 --- a/fs/ext4/namei.c
158 +++ b/fs/ext4/namei.c
159 @@ -55,6 +55,7 @@ struct buffer_head *ext4_append(handle_t
160                                         ext4_lblk_t *block)
161  {
162         struct buffer_head *bh;
163 +       struct ext4_inode_info *ei = EXT4_I(inode);
164         int err;
165  
166         if (unlikely(EXT4_SB(inode->i_sb)->s_max_dir_size_kb &&
167 @@ -62,15 +63,22 @@ struct buffer_head *ext4_append(handle_t
168                       EXT4_SB(inode->i_sb)->s_max_dir_size_kb)))
169                 return ERR_PTR(-ENOSPC);
170  
171 +       /* with parallel dir operations all appends
172 +       * have to be serialized -bzzz */
173 +       down(&ei->i_append_sem);
174 +
175         *block = inode->i_size >> inode->i_sb->s_blocksize_bits;
176  
177         bh = ext4_bread(handle, inode, *block, EXT4_GET_BLOCKS_CREATE);
178 -       if (IS_ERR(bh))
179 +       if (IS_ERR(bh)) {
180 +               up(&ei->i_append_sem);
181                 return bh;
182 +       }
183         inode->i_size += inode->i_sb->s_blocksize;
184         EXT4_I(inode)->i_disksize = inode->i_size;
185         BUFFER_TRACE(bh, "get_write_access");
186         err = ext4_journal_get_write_access(handle, bh);
187 +       up(&ei->i_append_sem);
188         if (err) {
189                 brelse(bh);
190                 ext4_std_error(inode->i_sb, err);
191 @@ -271,7 +279,8 @@ static unsigned dx_node_limit(struct ino
192  static struct dx_frame *dx_probe(struct ext4_filename *fname,
193                                  struct inode *dir,
194                                  struct dx_hash_info *hinfo,
195 -                                struct dx_frame *frame);
196 +                                struct dx_frame *frame,
197 +                                struct htree_lock *lck);
198  static void dx_release(struct dx_frame *frames);
199  static int dx_make_map(struct inode *dir, struct ext4_dir_entry_2 *de,
200                        unsigned blocksize, struct dx_hash_info *hinfo,
201 @@ -285,12 +294,13 @@ static void dx_insert_block(struct dx_fr
202  static int ext4_htree_next_block(struct inode *dir, __u32 hash,
203                                  struct dx_frame *frame,
204                                  struct dx_frame *frames,
205 -                                __u32 *start_hash);
206 +                                __u32 *start_hash, struct htree_lock *lck);
207  static struct buffer_head * ext4_dx_find_entry(struct inode *dir,
208                 struct ext4_filename *fname,
209 -               struct ext4_dir_entry_2 **res_dir);
210 +               struct ext4_dir_entry_2 **res_dir, struct htree_lock *lck);
211  static int ext4_dx_add_entry(handle_t *handle, struct ext4_filename *fname,
212 -                            struct inode *dir, struct inode *inode);
213 +                            struct inode *dir, struct inode *inode,
214 +                            struct htree_lock *lck);
215  
216  /* checksumming functions */
217  void ext4_initialize_dirent_tail(struct buffer_head *bh,
218 @@ -754,6 +764,227 @@ struct stats dx_show_entries(struct dx_h
219  }
220  #endif /* DX_DEBUG */
221  
222 +/* private data for htree_lock */
223 +struct ext4_dir_lock_data {
224 +       unsigned                ld_flags;  /* bits-map for lock types */
225 +       unsigned                ld_count;  /* # entries of the last DX block */
226 +       struct dx_entry         ld_at_entry; /* copy of leaf dx_entry */
227 +       struct dx_entry         *ld_at;    /* position of leaf dx_entry */
228 +};
229 +
230 +#define ext4_htree_lock_data(l)        ((struct ext4_dir_lock_data *)(l)->lk_private)
231 +#define ext4_find_entry(dir, name, dirent, inline) \
232 +                       ext4_find_entry_locked(dir, name, dirent, inline, NULL)
233 +#define ext4_add_entry(handle, dentry, inode) \
234 +                       ext4_add_entry_locked(handle, dentry, inode, NULL)
235 +
236 +/* NB: ext4_lblk_t is 32 bits so we use high bits to identify invalid blk */
237 +#define EXT4_HTREE_NODE_CHANGED        (0xcafeULL << 32)
238 +
239 +static void ext4_htree_event_cb(void *target, void *event)
240 +{
241 +       u64 *block = (u64 *)target;
242 +
243 +       if (*block == dx_get_block((struct dx_entry *)event))
244 +               *block = EXT4_HTREE_NODE_CHANGED;
245 +}
246 +
247 +struct htree_lock_head *ext4_htree_lock_head_alloc(unsigned hbits)
248 +{
249 +       struct htree_lock_head *lhead;
250 +
251 +       lhead = htree_lock_head_alloc(EXT4_LK_MAX, hbits, 0);
252 +       if (lhead != NULL) {
253 +               htree_lock_event_attach(lhead, EXT4_LK_SPIN, HTREE_EVENT_WR,
254 +                                       ext4_htree_event_cb);
255 +       }
256 +       return lhead;
257 +}
258 +EXPORT_SYMBOL(ext4_htree_lock_head_alloc);
259 +
260 +struct htree_lock *ext4_htree_lock_alloc(void)
261 +{
262 +       return htree_lock_alloc(EXT4_LK_MAX,
263 +                               sizeof(struct ext4_dir_lock_data));
264 +}
265 +EXPORT_SYMBOL(ext4_htree_lock_alloc);
266 +
267 +static htree_lock_mode_t ext4_htree_mode(unsigned flags)
268 +{
269 +       switch (flags) {
270 +       default: /* 0 or unknown flags require EX lock */
271 +               return HTREE_LOCK_EX;
272 +       case EXT4_HLOCK_READDIR:
273 +               return HTREE_LOCK_PR;
274 +       case EXT4_HLOCK_LOOKUP:
275 +               return HTREE_LOCK_CR;
276 +       case EXT4_HLOCK_DEL:
277 +       case EXT4_HLOCK_ADD:
278 +               return HTREE_LOCK_CW;
279 +       }
280 +}
281 +
282 +/* return PR for read-only operations, otherwise return EX */
283 +static inline htree_lock_mode_t ext4_htree_safe_mode(unsigned flags)
284 +{
285 +       int writer = (flags & EXT4_LB_DE) == EXT4_LB_DE;
286 +
287 +       /* 0 requires EX lock */
288 +       return (flags == 0 || writer) ? HTREE_LOCK_EX : HTREE_LOCK_PR;
289 +}
290 +
291 +static int ext4_htree_safe_locked(struct htree_lock *lck)
292 +{
293 +       int writer;
294 +
295 +       if (lck == NULL || lck->lk_mode == HTREE_LOCK_EX)
296 +               return 1;
297 +
298 +       writer = (ext4_htree_lock_data(lck)->ld_flags & EXT4_LB_DE) ==
299 +                EXT4_LB_DE;
300 +       if (writer) /* all readers & writers are excluded? */
301 +               return lck->lk_mode == HTREE_LOCK_EX;
302 +
303 +       /* all writers are excluded? */
304 +       return lck->lk_mode == HTREE_LOCK_PR ||
305 +              lck->lk_mode == HTREE_LOCK_PW ||
306 +              lck->lk_mode == HTREE_LOCK_EX;
307 +}
308 +
309 +/* relock htree_lock with EX mode if it's change operation, otherwise
310 + * relock it with PR mode. It's noop if PDO is disabled. */
311 +static void ext4_htree_safe_relock(struct htree_lock *lck)
312 +{
313 +       if (!ext4_htree_safe_locked(lck)) {
314 +               unsigned flags = ext4_htree_lock_data(lck)->ld_flags;
315 +
316 +               htree_change_lock(lck, ext4_htree_safe_mode(flags));
317 +       }
318 +}
319 +
320 +void ext4_htree_lock(struct htree_lock *lck, struct htree_lock_head *lhead,
321 +                    struct inode *dir, unsigned flags)
322 +{
323 +       htree_lock_mode_t mode = is_dx(dir) ? ext4_htree_mode(flags) :
324 +                                             ext4_htree_safe_mode(flags);
325 +
326 +       ext4_htree_lock_data(lck)->ld_flags = flags;
327 +       htree_lock(lck, lhead, mode);
328 +       if (!is_dx(dir))
329 +               ext4_htree_safe_relock(lck); /* make sure it's safe locked */
330 +}
331 +EXPORT_SYMBOL(ext4_htree_lock);
332 +
333 +static int ext4_htree_node_lock(struct htree_lock *lck, struct dx_entry *at,
334 +                               unsigned lmask, int wait, void *ev)
335 +{
336 +       u32     key = (at == NULL) ? 0 : dx_get_block(at);
337 +       u32     mode;
338 +
339 +       /* NOOP if htree is well protected or caller doesn't require the lock */
340 +       if (ext4_htree_safe_locked(lck) ||
341 +          !(ext4_htree_lock_data(lck)->ld_flags & lmask))
342 +               return 1;
343 +
344 +       mode = (ext4_htree_lock_data(lck)->ld_flags & lmask) == lmask ?
345 +               HTREE_LOCK_PW : HTREE_LOCK_PR;
346 +       while (1) {
347 +               if (htree_node_lock_try(lck, mode, key, ffz(~lmask), wait, ev))
348 +                       return 1;
349 +               if (!(lmask & EXT4_LB_SPIN)) /* not a spinlock */
350 +                       return 0;
351 +               cpu_relax(); /* spin until granted */
352 +       }
353 +}
354 +
355 +static int ext4_htree_node_locked(struct htree_lock *lck, unsigned lmask)
356 +{
357 +       return ext4_htree_safe_locked(lck) ||
358 +              htree_node_is_granted(lck, ffz(~lmask));
359 +}
360 +
361 +static void ext4_htree_node_unlock(struct htree_lock *lck,
362 +                                  unsigned lmask, void *buf)
363 +{
364 +       /* NB: it's safe to call multiple times or even if it's not locked */
365 +       if (!ext4_htree_safe_locked(lck) &&
366 +            htree_node_is_granted(lck, ffz(~lmask)))
367 +               htree_node_unlock(lck, ffz(~lmask), buf);
368 +}
369 +
370 +#define ext4_htree_dx_lock(lck, key)           \
371 +       ext4_htree_node_lock(lck, key, EXT4_LB_DX, 1, NULL)
372 +#define ext4_htree_dx_lock_try(lck, key)       \
373 +       ext4_htree_node_lock(lck, key, EXT4_LB_DX, 0, NULL)
374 +#define ext4_htree_dx_unlock(lck)              \
375 +       ext4_htree_node_unlock(lck, EXT4_LB_DX, NULL)
376 +#define ext4_htree_dx_locked(lck)              \
377 +       ext4_htree_node_locked(lck, EXT4_LB_DX)
378 +
379 +static void ext4_htree_dx_need_lock(struct htree_lock *lck)
380 +{
381 +       struct ext4_dir_lock_data *ld;
382 +
383 +       if (ext4_htree_safe_locked(lck))
384 +               return;
385 +
386 +       ld = ext4_htree_lock_data(lck);
387 +       switch (ld->ld_flags) {
388 +       default:
389 +               return;
390 +       case EXT4_HLOCK_LOOKUP:
391 +               ld->ld_flags = EXT4_HLOCK_LOOKUP_SAFE;
392 +               return;
393 +       case EXT4_HLOCK_DEL:
394 +               ld->ld_flags = EXT4_HLOCK_DEL_SAFE;
395 +               return;
396 +       case EXT4_HLOCK_ADD:
397 +               ld->ld_flags = EXT4_HLOCK_SPLIT;
398 +               return;
399 +       }
400 +}
401 +
402 +#define ext4_htree_de_lock(lck, key)           \
403 +       ext4_htree_node_lock(lck, key, EXT4_LB_DE, 1, NULL)
404 +#define ext4_htree_de_unlock(lck)              \
405 +       ext4_htree_node_unlock(lck, EXT4_LB_DE, NULL)
406 +
407 +#define ext4_htree_spin_lock(lck, key, event)  \
408 +       ext4_htree_node_lock(lck, key, EXT4_LB_SPIN, 0, event)
409 +#define ext4_htree_spin_unlock(lck)            \
410 +       ext4_htree_node_unlock(lck, EXT4_LB_SPIN, NULL)
411 +#define ext4_htree_spin_unlock_listen(lck, p)  \
412 +       ext4_htree_node_unlock(lck, EXT4_LB_SPIN, p)
413 +
414 +static void ext4_htree_spin_stop_listen(struct htree_lock *lck)
415 +{
416 +       if (!ext4_htree_safe_locked(lck) &&
417 +           htree_node_is_listening(lck, ffz(~EXT4_LB_SPIN)))
418 +               htree_node_stop_listen(lck, ffz(~EXT4_LB_SPIN));
419 +}
420 +
421 +enum {
422 +       DX_HASH_COL_IGNORE,     /* ignore collision while probing frames */
423 +       DX_HASH_COL_YES,        /* there is collision and it does matter */
424 +       DX_HASH_COL_NO,         /* there is no collision */
425 +};
426 +
427 +static int dx_probe_hash_collision(struct htree_lock *lck,
428 +                                  struct dx_entry *entries,
429 +                                  struct dx_entry *at, u32 hash)
430 +{
431 +       if (!(lck && ext4_htree_lock_data(lck)->ld_flags & EXT4_LB_EXACT)) {
432 +               return DX_HASH_COL_IGNORE; /* don't care about collision */
433 +
434 +       } else if (at == entries + dx_get_count(entries) - 1) {
435 +               return DX_HASH_COL_IGNORE; /* not in any leaf of this DX */
436 +
437 +       } else { /* hash collision? */
438 +               return ((dx_get_hash(at + 1) & ~1) == hash) ?
439 +                       DX_HASH_COL_YES : DX_HASH_COL_NO;
440 +       }
441 +}
442 +
443  /*
444   * Probe for a directory leaf block to search.
445   *
446 @@ -765,10 +996,11 @@ struct stats dx_show_entries(struct dx_h
447   */
448  static struct dx_frame *
449  dx_probe(struct ext4_filename *fname, struct inode *dir,
450 -        struct dx_hash_info *hinfo, struct dx_frame *frame_in)
451 +        struct dx_hash_info *hinfo, struct dx_frame *frame_in,
452 +        struct htree_lock *lck)
453  {
454         unsigned count, indirect;
455 -       struct dx_entry *at, *entries, *p, *q, *m;
456 +       struct dx_entry *at, *entries, *p, *q, *m, *dx = NULL;
457         struct dx_root_info *info;
458         struct dx_frame *frame = frame_in;
459         struct dx_frame *ret_err = ERR_PTR(ERR_BAD_DX_DIR);
460 @@ -830,8 +1062,15 @@ dx_probe(struct ext4_filename *fname, st
461  
462         dxtrace(printk("Look up %x", hash));
463         while (1) {
464 +               if (indirect == 0) { /* the last index level */
465 +                       /* NB: ext4_htree_dx_lock() could be noop if
466 +                        * DX-lock flag is not set for current operation */
467 +                       ext4_htree_dx_lock(lck, dx);
468 +                       ext4_htree_spin_lock(lck, dx, NULL);
469 +               }
470                 count = dx_get_count(entries);
471 -               if (!count || count > dx_get_limit(entries)) {
472 +               if (count == 0 || count > dx_get_limit(entries)) {
473 +                       ext4_htree_spin_unlock(lck); /* release spin */
474                         ext4_warning_inode(dir,
475                                            "dx entry: count %u beyond limit %u",
476                                            count, dx_get_limit(entries));
477 @@ -870,8 +1109,70 @@ dx_probe(struct ext4_filename *fname, st
478                                dx_get_block(at)));
479                 frame->entries = entries;
480                 frame->at = at;
481 -               if (!indirect--)
482 +
483 +               if (indirect == 0) { /* the last index level */
484 +                       struct ext4_dir_lock_data *ld;
485 +                       u64 myblock;
486 +
487 +                       /* By default we only lock DE-block, however, we will
488 +                        * also lock the last level DX-block if:
489 +                        * a) there is hash collision
490 +                        *    we will set DX-lock flag (a few lines below)
491 +                        *    and redo to lock DX-block
492 +                        *    see detail in dx_probe_hash_collision()
493 +                        * b) it's a retry from splitting
494 +                        *    we need to lock the last level DX-block so nobody
495 +                        *    else can split any leaf blocks under the same
496 +                        *    DX-block, see detail in ext4_dx_add_entry()
497 +                        */
498 +                       if (ext4_htree_dx_locked(lck)) {
499 +                               /* DX-block is locked, just lock DE-block
500 +                                * and return */
501 +                               ext4_htree_spin_unlock(lck);
502 +                               if (!ext4_htree_safe_locked(lck))
503 +                                       ext4_htree_de_lock(lck, frame->at);
504 +                               return frame;
505 +                       }
506 +                       /* it's pdirop and no DX lock */
507 +                       if (dx_probe_hash_collision(lck, entries, at, hash) ==
508 +                           DX_HASH_COL_YES) {
509 +                               /* found hash collision, set DX-lock flag
510 +                                * and retry to obtain DX-lock */
511 +                               ext4_htree_spin_unlock(lck);
512 +                               ext4_htree_dx_need_lock(lck);
513 +                               continue;
514 +                       }
515 +                       ld = ext4_htree_lock_data(lck);
516 +                       /* because I don't lock DX, so @at can't be trusted
517 +                        * after I release spinlock so I have to save it */
518 +                       ld->ld_at = at;
519 +                       ld->ld_at_entry = *at;
520 +                       ld->ld_count = dx_get_count(entries);
521 +
522 +                       frame->at = &ld->ld_at_entry;
523 +                       myblock = dx_get_block(at);
524 +
525 +                       /* NB: ordering locking */
526 +                       ext4_htree_spin_unlock_listen(lck, &myblock);
527 +                       /* other thread can split this DE-block because:
528 +                        * a) I don't have lock for the DE-block yet
529 +                        * b) I released spinlock on DX-block
530 +                        * if it happened I can detect it by listening
531 +                        * splitting event on this DE-block */
532 +                       ext4_htree_de_lock(lck, frame->at);
533 +                       ext4_htree_spin_stop_listen(lck);
534 +
535 +                       if (myblock == EXT4_HTREE_NODE_CHANGED) {
536 +                               /* someone split this DE-block before
537 +                                * I locked it, I need to retry and lock
538 +                                * valid DE-block */
539 +                               ext4_htree_de_unlock(lck);
540 +                               continue;
541 +                       }
542                         return frame;
543 +               }
544 +               dx = at;
545 +               indirect--;
546                 frame++;
547                 frame->bh = ext4_read_dirblock(dir, dx_get_block(at), INDEX);
548                 if (IS_ERR(frame->bh)) {
549 @@ -940,7 +1241,7 @@ static void dx_release(struct dx_frame *
550  static int ext4_htree_next_block(struct inode *dir, __u32 hash,
551                                  struct dx_frame *frame,
552                                  struct dx_frame *frames,
553 -                                __u32 *start_hash)
554 +                                __u32 *start_hash, struct htree_lock *lck)
555  {
556         struct dx_frame *p;
557         struct buffer_head *bh;
558 @@ -955,12 +1256,22 @@ static int ext4_htree_next_block(struct
559          * this loop, num_frames indicates the number of interior
560          * nodes need to be read.
561          */
562 +       ext4_htree_de_unlock(lck);
563         while (1) {
564 -               if (++(p->at) < p->entries + dx_get_count(p->entries))
565 -                       break;
566 +               if (num_frames > 0 || ext4_htree_dx_locked(lck)) {
567 +                       /* num_frames > 0 :
568 +                        *   DX block
569 +                        * ext4_htree_dx_locked:
570 +                        *   frame->at is reliable pointer returned by dx_probe,
571 +                        *   otherwise dx_probe already knew no collision */
572 +                       if (++(p->at) < p->entries + dx_get_count(p->entries))
573 +                               break;
574 +               }
575                 if (p == frames)
576                         return 0;
577                 num_frames++;
578 +               if (num_frames == 1)
579 +                       ext4_htree_dx_unlock(lck);
580                 p--;
581         }
582  
583 @@ -983,6 +1294,13 @@ static int ext4_htree_next_block(struct
584          * block so no check is necessary
585          */
586         while (num_frames--) {
587 +               if (num_frames == 0) {
588 +                       /* it's not always necessary, we just don't want to
589 +                        * detect hash collision again */
590 +                       ext4_htree_dx_need_lock(lck);
591 +                       ext4_htree_dx_lock(lck, p->at);
592 +               }
593 +
594                 bh = ext4_read_dirblock(dir, dx_get_block(p->at), INDEX);
595                 if (IS_ERR(bh))
596                         return PTR_ERR(bh);
597 @@ -991,6 +1309,7 @@ static int ext4_htree_next_block(struct
598                 p->bh = bh;
599                 p->at = p->entries = ((struct dx_node *) bh->b_data)->entries;
600         }
601 +       ext4_htree_de_lock(lck, p->at);
602         return 1;
603  }
604  
605 @@ -1135,10 +1454,10 @@ int ext4_htree_fill_tree(struct file *di
606         }
607         hinfo.hash = start_hash;
608         hinfo.minor_hash = 0;
609 -       frame = dx_probe(NULL, dir, &hinfo, frames);
610 +       /* assume it's PR locked */
611 +       frame = dx_probe(NULL, dir, &hinfo, frames, NULL);
612         if (IS_ERR(frame))
613                 return PTR_ERR(frame);
614 -
615         /* Add '.' and '..' from the htree header */
616         if (!start_hash && !start_minor_hash) {
617                 de = (struct ext4_dir_entry_2 *) frames[0].bh->b_data;
618 @@ -1178,7 +1497,7 @@ int ext4_htree_fill_tree(struct file *di
619                 count += ret;
620                 hashval = ~0;
621                 ret = ext4_htree_next_block(dir, HASH_NB_ALWAYS,
622 -                                           frame, frames, &hashval);
623 +                                           frame, frames, &hashval, NULL);
624                 *next_hash = hashval;
625                 if (ret < 0) {
626                         err = ret;
627 @@ -1454,7 +1773,7 @@ static int is_dx_internal_node(struct in
628  static struct buffer_head *__ext4_find_entry(struct inode *dir,
629                                              struct ext4_filename *fname,
630                                              struct ext4_dir_entry_2 **res_dir,
631 -                                            int *inlined)
632 +                                            int *inlined, struct htree_lock *lck)
633  {
634         struct super_block *sb;
635         struct buffer_head *bh_use[NAMEI_RA_SIZE];
636 @@ -1496,7 +1815,7 @@ static struct buffer_head *__ext4_find_e
637                 goto restart;
638         }
639         if (is_dx(dir)) {
640 -               ret = ext4_dx_find_entry(dir, fname, res_dir);
641 +               ret = ext4_dx_find_entry(dir, fname, res_dir, lck);
642                 /*
643                  * On success, or if the error was file not found,
644                  * return.  Otherwise, fall back to doing a search the
645 @@ -1506,6 +1825,7 @@ static struct buffer_head *__ext4_find_e
646                         goto cleanup_and_exit;
647                 dxtrace(printk(KERN_DEBUG "ext4_find_entry: dx failed, "
648                                "falling back\n"));
649 +               ext4_htree_safe_relock(lck);
650                 ret = NULL;
651         }
652         nblocks = dir->i_size >> EXT4_BLOCK_SIZE_BITS(sb);
653 @@ -1596,10 +1916,10 @@ cleanup_and_exit:
654         return ret;
655  }
656  
657 -static struct buffer_head *ext4_find_entry(struct inode *dir,
658 +struct buffer_head *ext4_find_entry_locked(struct inode *dir,
659                                            const struct qstr *d_name,
660                                            struct ext4_dir_entry_2 **res_dir,
661 -                                          int *inlined)
662 +                                          int *inlined, struct htree_lock *lck)
663  {
664         int err;
665         struct ext4_filename fname;
666 @@ -1611,12 +1931,14 @@ static struct buffer_head *ext4_find_ent
667         if (err)
668                 return ERR_PTR(err);
669  
670 -       bh = __ext4_find_entry(dir, &fname, res_dir, inlined);
671 +       bh = __ext4_find_entry(dir, &fname, res_dir, inlined, lck);
672  
673         ext4_fname_free_filename(&fname);
674         return bh;
675  }
676  
677 +EXPORT_SYMBOL(ext4_find_entry_locked);
678 +
679  static struct buffer_head *ext4_lookup_entry(struct inode *dir,
680                                              struct dentry *dentry,
681                                              struct ext4_dir_entry_2 **res_dir)
682 @@ -1631,7 +1953,7 @@ static struct buffer_head *ext4_lookup_e
683         if (err)
684                 return ERR_PTR(err);
685  
686 -       bh = __ext4_find_entry(dir, &fname, res_dir, NULL);
687 +       bh = __ext4_find_entry(dir, &fname, res_dir, NULL, NULL);
688  
689         ext4_fname_free_filename(&fname);
690         return bh;
691 @@ -1639,7 +1961,8 @@ static struct buffer_head *ext4_lookup_e
692  
693  static struct buffer_head * ext4_dx_find_entry(struct inode *dir,
694                         struct ext4_filename *fname,
695 -                       struct ext4_dir_entry_2 **res_dir)
696 +                       struct ext4_dir_entry_2 **res_dir,
697 +                       struct htree_lock *lck)
698  {
699         struct super_block * sb = dir->i_sb;
700         struct dx_frame frames[EXT4_HTREE_LEVEL], *frame;
701 @@ -1650,7 +1973,7 @@ static struct buffer_head * ext4_dx_find
702  #ifdef CONFIG_FS_ENCRYPTION
703         *res_dir = NULL;
704  #endif
705 -       frame = dx_probe(fname, dir, NULL, frames);
706 +       frame = dx_probe(fname, dir, NULL, frames, lck);
707         if (IS_ERR(frame))
708                 return (struct buffer_head *) frame;
709         do {
710 @@ -1672,7 +1995,7 @@ static struct buffer_head * ext4_dx_find
711  
712                 /* Check to see if we should continue to search */
713                 retval = ext4_htree_next_block(dir, fname->hinfo.hash, frame,
714 -                                              frames, NULL);
715 +                                              frames, NULL, lck);
716                 if (retval < 0) {
717                         ext4_warning_inode(dir,
718                                 "error %d reading directory index block",
719 @@ -1852,8 +2175,9 @@ static struct ext4_dir_entry_2* dx_pack_
720   * Returns pointer to de in block into which the new entry will be inserted.
721   */
722  static struct ext4_dir_entry_2 *do_split(handle_t *handle, struct inode *dir,
723 -                       struct buffer_head **bh,struct dx_frame *frame,
724 -                       struct dx_hash_info *hinfo)
725 +                       struct buffer_head **bh, struct dx_frame *frames,
726 +                       struct dx_frame *frame, struct dx_hash_info *hinfo,
727 +                       struct htree_lock *lck)
728  {
729         unsigned blocksize = dir->i_sb->s_blocksize;
730         unsigned count, continued;
731 @@ -1924,8 +2248,14 @@ static struct ext4_dir_entry_2 *do_split
732                                         hash2, split, count-split));
733  
734         /* Fancy dance to stay within two buffers */
735 -       de2 = dx_move_dirents(data1, data2, map + split, count - split,
736 -                             blocksize);
737 +       if (hinfo->hash < hash2) {
738 +               de2 = dx_move_dirents(data1, data2, map + split,
739 +                                     count - split, blocksize);
740 +       } else {
741 +               /* make sure we will add the entry to the same block
742 +                * that we have already locked */
743 +               de2 = dx_move_dirents(data1, data2, map, split, blocksize);
744 +       }
745         de = dx_pack_dirents(data1, blocksize);
746         de->rec_len = ext4_rec_len_to_disk(data1 + (blocksize - csum_size) -
747                                            (char *) de,
748 @@ -1943,12 +2273,21 @@ static struct ext4_dir_entry_2 *do_split
749         dxtrace(dx_show_leaf(dir, hinfo, (struct ext4_dir_entry_2 *) data2,
750                         blocksize, 1));
751  
752 -       /* Which block gets the new entry? */
753 -       if (hinfo->hash >= hash2) {
754 -               swap(*bh, bh2);
755 -               de = de2;
756 +       ext4_htree_spin_lock(lck, frame > frames ? (frame - 1)->at : NULL,
757 +                            frame->at); /* notify block is being split */
758 +       if (hinfo->hash < hash2) {
759 +               dx_insert_block(frame, hash2 + continued, newblock);
760 +
761 +       } else {
762 +               /* switch block number */
763 +               dx_insert_block(frame, hash2 + continued,
764 +                               dx_get_block(frame->at));
765 +               dx_set_block(frame->at, newblock);
766 +               (frame->at)++;
767         }
768 -       dx_insert_block(frame, hash2 + continued, newblock);
769 +       ext4_htree_spin_unlock(lck);
770 +       ext4_htree_dx_unlock(lck);
771 +
772         err = ext4_handle_dirty_dirblock(handle, dir, bh2);
773         if (err)
774                 goto journal_error;
775 @@ -2218,7 +2557,7 @@ static int make_indexed_dir(handle_t *ha
776         if (retval)
777                 goto out_frames;        
778  
779 -       de = do_split(handle,dir, &bh2, frame, &fname->hinfo);
780 +       de = do_split(handle, dir, &bh2, frames, frame, &fname->hinfo, NULL);
781         if (IS_ERR(de)) {
782                 retval = PTR_ERR(de);
783                 goto out_frames;
784 @@ -2328,8 +2667,8 @@ out:
785   * may not sleep between calling this and putting something into
786   * the entry, as someone else might have used it while you slept.
787   */
788 -static int ext4_add_entry(handle_t *handle, struct dentry *dentry,
789 -                         struct inode *inode)
790 +int ext4_add_entry_locked(handle_t *handle, struct dentry *dentry,
791 +                         struct inode *inode, struct htree_lock *lck)
792  {
793         struct inode *dir = d_inode(dentry->d_parent);
794         struct buffer_head *bh = NULL;
795 @@ -2375,9 +2714,10 @@ static int ext4_add_entry(handle_t *hand
796                 return ext4_update_dotdot(handle, dentry, inode);
797  
798         if (is_dx(dir)) {
799 -               retval = ext4_dx_add_entry(handle, &fname, dir, inode);
800 +               retval = ext4_dx_add_entry(handle, &fname, dir, inode, lck);
801                 if (!retval || (retval != ERR_BAD_DX_DIR))
802                         goto out;
803 +               ext4_htree_safe_relock(lck);
804                 /* Can we just ignore htree data? */
805                 if (ext4_has_metadata_csum(sb)) {
806                         EXT4_ERROR_INODE(dir,
807 @@ -2440,12 +2780,14 @@ out:
808                 ext4_set_inode_state(inode, EXT4_STATE_NEWENTRY);
809         return retval;
810  }
811 +EXPORT_SYMBOL(ext4_add_entry_locked);
812  
813  /*
814   * Returns 0 for success, or a negative error value
815   */
816  static int ext4_dx_add_entry(handle_t *handle, struct ext4_filename *fname,
817 -                            struct inode *dir, struct inode *inode)
818 +                            struct inode *dir, struct inode *inode,
819 +                            struct htree_lock *lck)
820  {
821         struct dx_frame frames[EXT4_HTREE_LEVEL], *frame;
822         struct dx_entry *entries, *at;
823 @@ -2457,7 +2799,7 @@ static int ext4_dx_add_entry(handle_t *h
824  
825  again:
826         restart = 0;
827 -       frame = dx_probe(fname, dir, NULL, frames);
828 +       frame = dx_probe(fname, dir, NULL, frames, lck);
829         if (IS_ERR(frame))
830                 return PTR_ERR(frame);
831         entries = frame->entries;
832 @@ -2492,6 +2834,12 @@ again:
833                 struct dx_node *node2;
834                 struct buffer_head *bh2;
835  
836 +               if (!ext4_htree_safe_locked(lck)) { /* retry with EX lock */
837 +                       ext4_htree_safe_relock(lck);
838 +                       restart = 1;
839 +                       goto cleanup;
840 +               }
841 +
842                 while (frame > frames) {
843                         if (dx_get_count((frame - 1)->entries) <
844                             dx_get_limit((frame - 1)->entries)) {
845 @@ -2594,8 +2942,32 @@ again:
846                         restart = 1;
847                         goto journal_error;
848                 }
849 +       } else if (!ext4_htree_dx_locked(lck)) {
850 +               struct ext4_dir_lock_data *ld = ext4_htree_lock_data(lck);
851 +
852 +               /* not well protected, require DX lock */
853 +               ext4_htree_dx_need_lock(lck);
854 +               at = frame > frames ? (frame - 1)->at : NULL;
855 +
856 +               /* NB: no risk of deadlock because it's just a try.
857 +                *
858 +                * NB: we check ld_count twice, the first time before
859 +                * having DX lock, the second time after holding DX lock.
860 +                *
861 +                * NB: We never free directory blocks so far, which
862 +                * means the value returned by dx_get_count() should equal
863 +                * ld->ld_count if nobody split any DE-block under @at,
864 +                * and ld->ld_at still points to valid dx_entry. */
865 +               if ((ld->ld_count != dx_get_count(entries)) ||
866 +                   !ext4_htree_dx_lock_try(lck, at) ||
867 +                   (ld->ld_count != dx_get_count(entries))) {
868 +                       restart = 1;
869 +                       goto cleanup;
870 +               }
871 +               /* OK, I've got DX lock and nothing changed */
872 +               frame->at = ld->ld_at;
873         }
874 -       de = do_split(handle, dir, &bh, frame, &fname->hinfo);
875 +       de = do_split(handle, dir, &bh, frames, frame, &fname->hinfo, lck);
876         if (IS_ERR(de)) {
877                 err = PTR_ERR(de);
878                 goto cleanup;
879 @@ -2606,6 +2978,8 @@ again:
880  journal_error:
881         ext4_std_error(dir->i_sb, err); /* this is a no-op if err == 0 */
882  cleanup:
883 +       ext4_htree_dx_unlock(lck);
884 +       ext4_htree_de_unlock(lck);
885         brelse(bh);
886         dx_release(frames);
887         /* @restart is true means htree-path has been changed, we need to
888 --- a/fs/ext4/super.c
889 +++ b/fs/ext4/super.c
890 @@ -1297,6 +1297,7 @@ static struct inode *ext4_alloc_inode(st
891  
892         inode_set_iversion(&ei->vfs_inode, 1);
893         spin_lock_init(&ei->i_raw_lock);
894 +       sema_init(&ei->i_append_sem, 1);
895         INIT_LIST_HEAD(&ei->i_prealloc_list);
896         atomic_set(&ei->i_prealloc_active, 0);
897         spin_lock_init(&ei->i_prealloc_lock);