LU-12477 ldiskfs: last cleanups 39/38139/6
author    James Simmons <jsimmons@infradead.org>
Tue, 14 Apr 2020 15:13:57 +0000 (11:13 -0400)
committer Oleg Drokin <green@whamcloud.com>
Fri, 1 May 2020 04:27:18 +0000 (04:27 +0000)
The patch to clean up ldiskfs collided with the landing of the
ext4-mballoc-prefetch patch, so remove the last unsupported RHEL7
bits. With Ubuntu 20 now out we can drop Ubuntu 16 support, and
also drop the SUSE 3.12 (SLES12) kernel versions.

Test-Parameters: trivial
Fixes: fc87b01f96e8 ("LU-12477 ldiskfs: remove obsolete ext4 patches")
Change-Id: I15f9f59ffb1275e2eaabf7ca543fd4c4829aaf9e
Signed-off-by: James Simmons <jsimmons@infradead.org>
Reviewed-on: https://review.whamcloud.com/38139
Reviewed-by: Shaun Tancheff <shaun.tancheff@hpe.com>
Reviewed-by: Andreas Dilger <adilger@whamcloud.com>
Tested-by: jenkins <devops@whamcloud.com>
Tested-by: Maloo <maloo@whamcloud.com>
Reviewed-by: Oleg Drokin <green@whamcloud.com>
35 files changed:
ldiskfs/kernel_patches/patches/rhel7/ext4-mballoc-prefetch.patch [deleted file]
ldiskfs/kernel_patches/patches/sles12/ext4-corrupted-inode-block-bitmaps-handling-patches.patch [deleted file]
ldiskfs/kernel_patches/patches/sles12/ext4-data-in-dirent.patch [deleted file]
ldiskfs/kernel_patches/patches/sles12/ext4-inode-version.patch [deleted file]
ldiskfs/kernel_patches/patches/sles12/ext4-large-eas.patch [deleted file]
ldiskfs/kernel_patches/patches/sles12/ext4-misc.patch [deleted file]
ldiskfs/kernel_patches/patches/sles12/ext4-osd-iop-common.patch [deleted file]
ldiskfs/kernel_patches/patches/sles12/ext4-pdirop.patch [deleted file]
ldiskfs/kernel_patches/patches/sles12/ext4-prealloc.patch [deleted file]
ldiskfs/kernel_patches/patches/sles12sp1/ext4-attach-jinode-in-writepages.patch [deleted file]
ldiskfs/kernel_patches/patches/sles12sp1/ext4-large-dir.patch [deleted file]
ldiskfs/kernel_patches/patches/sles12sp1/ext4-lookup-dotdot.patch [deleted file]
ldiskfs/kernel_patches/patches/ubuntu14+16/ext4-data-in-dirent-001.patch [deleted file]
ldiskfs/kernel_patches/patches/ubuntu14+16/ext4-data-in-dirent.patch [deleted file]
ldiskfs/kernel_patches/patches/ubuntu14+16/ext4-disable-mb-cache-001.patch [deleted file]
ldiskfs/kernel_patches/patches/ubuntu14+16/ext4-disable-mb-cache.patch [deleted file]
ldiskfs/kernel_patches/patches/ubuntu14+16/ext4-large-dir-001.patch [deleted file]
ldiskfs/kernel_patches/patches/ubuntu14+16/ext4-large-dir.patch [deleted file]
ldiskfs/kernel_patches/patches/ubuntu14+16/ext4-large-eas.patch [deleted file]
ldiskfs/kernel_patches/patches/ubuntu14+16/ext4-misc.patch [deleted file]
ldiskfs/kernel_patches/patches/ubuntu14+16/ext4-pdirop-001.patch [deleted file]
ldiskfs/kernel_patches/patches/ubuntu14+16/ext4-pdirop.patch [deleted file]
ldiskfs/kernel_patches/series/ldiskfs-3.10-rhel7.2.series [deleted file]
ldiskfs/kernel_patches/series/ldiskfs-3.10-rhel7.3.series [deleted file]
ldiskfs/kernel_patches/series/ldiskfs-3.10-rhel7.4.series [deleted file]
ldiskfs/kernel_patches/series/ldiskfs-3.10-rhel7.5.series [deleted file]
ldiskfs/kernel_patches/series/ldiskfs-3.10-rhel7.6.series
ldiskfs/kernel_patches/series/ldiskfs-3.10-rhel7.series [deleted file]
ldiskfs/kernel_patches/series/ldiskfs-3.12-sles12.series [deleted file]
ldiskfs/kernel_patches/series/ldiskfs-3.12-sles12sp1.series [deleted file]
ldiskfs/kernel_patches/series/ldiskfs-4.12-sles15.series
ldiskfs/kernel_patches/series/ldiskfs-4.4.0-45-ubuntu14+16.series [deleted file]
ldiskfs/kernel_patches/series/ldiskfs-4.4.0-49-ubuntu14+16.series [deleted file]
ldiskfs/kernel_patches/series/ldiskfs-4.4.0-62-ubuntu14+16.series [deleted file]
ldiskfs/kernel_patches/series/ldiskfs-4.4.0-73-ubuntu14+16.series [deleted file]

diff --git a/ldiskfs/kernel_patches/patches/rhel7/ext4-mballoc-prefetch.patch b/ldiskfs/kernel_patches/patches/rhel7/ext4-mballoc-prefetch.patch
deleted file mode 100644 (file)
index a5c35d0..0000000
+++ /dev/null
@@ -1,237 +0,0 @@
---- linux-4.18/fs/ext4/balloc.c        2019-11-28 14:55:26.506546036 +0300
-+++ linux-4.18/fs/ext4/balloc.c        2019-12-02 11:21:50.565975537 +0300
-@@ -404,7 +404,8 @@ verified:
-  * Return buffer_head on success or NULL in case of failure.
-  */
- struct buffer_head *
--ext4_read_block_bitmap_nowait(struct super_block *sb, ext4_group_t block_group)
-+ext4_read_block_bitmap_nowait(struct super_block *sb, ext4_group_t block_group,
-+                               int ignore_locked)
- {
-       struct ext4_group_desc *desc;
-       struct ext4_sb_info *sbi = EXT4_SB(sb);
-@@ -435,6 +436,13 @@ ext4_read_block_bitmap_nowait(struct
-       if (bitmap_uptodate(bh))
-               goto verify;
-+      if (ignore_locked && buffer_locked(bh)) {
-+              /* buffer under IO already, do not wait
-+               * if called for prefetching */
-+              put_bh(bh);
-+              return NULL;
-+      }
-+
-       lock_buffer(bh);
-       if (bitmap_uptodate(bh)) {
-               unlock_buffer(bh);
-@@ -524,7 +532,7 @@ ext4_read_block_bitmap(struct super_b
-       struct buffer_head *bh;
-       int err;
--      bh = ext4_read_block_bitmap_nowait(sb, block_group);
-+      bh = ext4_read_block_bitmap_nowait(sb, block_group, 0);
-       if (!bh)
-               return NULL;
-       err = ext4_wait_block_bitmap(sb, block_group, bh);
---- linux-4.18/fs/ext4/ext4.h  2019-11-28 14:55:26.470545343 +0300
-+++ linux-4.18/fs/ext4/ext4.h  2019-12-02 11:21:40.795779972 +0300
-@@ -1446,6 +1446,8 @@ struct ext4_sb_info {
-       /* where last allocation was done - for stream allocation */
-       unsigned long s_mb_last_group;
-       unsigned long s_mb_last_start;
-+      unsigned int s_mb_prefetch;
-+      unsigned int s_mb_prefetch_limit;
-       /* stats for buddy allocator */
-       atomic_t s_bal_reqs;    /* number of reqs with len > 1 */
-@@ -2401,7 +2403,8 @@ extern struct ext4_group_desc * ldisk
- extern int ext4_should_retry_alloc(struct super_block *sb, int *retries);
- extern struct buffer_head *ext4_read_block_bitmap_nowait(struct super_block *sb,
--                                              ext4_group_t block_group);
-+                                              ext4_group_t block_group,
-+                                              int ignore_locked);
- extern int ext4_wait_block_bitmap(struct super_block *sb,
-                                 ext4_group_t block_group,
-                                 struct buffer_head *bh);
---- linux-4.18/fs/ext4/mballoc.c       2019-11-28 14:55:26.500545920 +0300
-+++ linux-4.18/fs/ext4/mballoc.c       2019-12-02 11:21:46.656897291 +0300
-@@ -868,7 +868,7 @@ static int ext4_mb_init_cache(struct
-                       bh[i] = NULL;
-                       continue;
-               }
--              if (!(bh[i] = ext4_read_block_bitmap_nowait(sb, group))) {
-+              if (!(bh[i] = ext4_read_block_bitmap_nowait(sb, group, 0))) {
-                       err = -ENOMEM;
-                       goto out;
-               }
-@@ -2104,6 +2112,87 @@ static int ext4_mb_good_group(struct
-       return 0;
- }
-+/*
-+ * each allocation context (i.e. a thread doing allocation) has own
-+ * sliding prefetch window of @s_mb_prefetch size which starts at the
-+ * very first goal and moves ahead of scaning.
-+ * a side effect is that subsequent allocations will likely find
-+ * the bitmaps in cache or at least in-flight.
-+ */
-+static void
-+ext4_mb_prefetch(struct ext4_allocation_context *ac,
-+                  ext4_group_t start)
-+{
-+      struct super_block *sb = ac->ac_sb;
-+      ext4_group_t ngroups = ext4_get_groups_count(sb);
-+      struct ext4_sb_info *sbi = EXT4_SB(sb);
-+      struct ext4_group_info *grp;
-+      ext4_group_t group = start;
-+      struct buffer_head *bh;
-+      int nr;
-+
-+      /* limit prefetching at cr=0, otherwise mballoc can
-+       * spend a lot of time loading imperfect groups */
-+      if (ac->ac_criteria < 2 && ac->ac_prefetch_ios >= sbi->s_mb_prefetch_limit)
-+              return;
-+
-+      /* batch prefetching to get few READs in flight */
-+      nr = ac->ac_prefetch - group;
-+      if (ac->ac_prefetch < group)
-+              /* wrapped to the first groups */
-+              nr += ngroups;
-+      if (nr > 0)
-+              return;
-+      BUG_ON(nr < 0);
-+
-+      nr = sbi->s_mb_prefetch;
-+      if (EXT4_HAS_INCOMPAT_FEATURE(sb, EXT4_FEATURE_INCOMPAT_FLEX_BG)) {
-+              /* align to flex_bg to get more bitmas with a single IO */
-+              nr = (group / sbi->s_mb_prefetch) * sbi->s_mb_prefetch;
-+              nr = nr + sbi->s_mb_prefetch - group;
-+      }
-+      while (nr-- > 0) {
-+              grp = ext4_get_group_info(sb, group);
-+              /* ignore empty groups - those will be skipped
-+               * during the scanning as well */
-+              if (grp->bb_free > 0 && EXT4_MB_GRP_NEED_INIT(grp)) {
-+                      bh = ext4_read_block_bitmap_nowait(sb, group, 1);
-+                      if (bh && !IS_ERR(bh)) {
-+                              if (!buffer_uptodate(bh))
-+                                      ac->ac_prefetch_ios++;
-+                              brelse(bh);
-+                      }
-+              }
-+              if (++group >= ngroups)
-+                      group = 0;
-+      }
-+      ac->ac_prefetch = group;
-+}
-+
-+static void
-+ext4_mb_prefetch_fini(struct ext4_allocation_context *ac)
-+{
-+      struct ext4_group_info *grp;
-+      ext4_group_t group;
-+      int nr, rc;
-+
-+      /* initialize last window of prefetched groups */
-+      nr = ac->ac_prefetch_ios;
-+      if (nr > EXT4_SB(ac->ac_sb)->s_mb_prefetch)
-+              nr = EXT4_SB(ac->ac_sb)->s_mb_prefetch;
-+      group = ac->ac_prefetch;
-+      while (nr-- > 0) {
-+              grp = ext4_get_group_info(ac->ac_sb, group);
-+              if (grp->bb_free > 0 && EXT4_MB_GRP_NEED_INIT(grp)) {
-+                      rc = ext4_mb_init_group(ac->ac_sb, group);
-+                      if (rc)
-+                              break;
-+              }
-+              if (group-- == 0)
-+                      group = ext4_get_groups_count(ac->ac_sb) - 1;
-+      }
-+}
-+
- static noinline_for_stack int
- ext4_mb_regular_allocator(struct ext4_allocation_context *ac)
- {
-@@ -2176,6 +2264,7 @@ repeat:
-                * from the goal value specified
-                */
-               group = ac->ac_g_ex.fe_group;
-+              ac->ac_prefetch = group;
-               for (i = 0; i < ngroups; group++, i++) {
-                       int ret = 0;
-@@ -2188,6 +2277,8 @@ repeat:
-                       if (group >= ngroups)
-                               group = 0;
-+                      ext4_mb_prefetch(ac, group);
-+
-                       /* This now checks without needing the buddy page */
-                       ret = ext4_mb_good_group(ac, group, cr);
-                       if (ret <= 0) {
-@@ -2260,6 +2351,8 @@ repeat:
-               }
-       }
- out:
-+      /* use prefetched bitmaps to init buddy so that read info is not lost */
-+      ext4_mb_prefetch_fini(ac);
-       return err;
- }
-@@ -2832,6 +2925,24 @@ int ext4_mb_init(struct super_block *
-               sbi->s_mb_large_req = sbi->s_stripe * 8;
-               sbi->s_mb_group_prealloc = sbi->s_stripe * 4;
-       }
-+      if (EXT4_HAS_INCOMPAT_FEATURE(sb, EXT4_FEATURE_INCOMPAT_FLEX_BG)) {
-+              /* a single flex group is supposed to be read by a single IO */
-+              sbi->s_mb_prefetch = 1 << sbi->s_es->s_log_groups_per_flex;
-+              sbi->s_mb_prefetch *= 8; /* 8 prefetch IOs in flight at most */
-+      } else {
-+              sbi->s_mb_prefetch = 32;
-+      }
-+      if (sbi->s_mb_prefetch >= ext4_get_groups_count(sb))
-+              sbi->s_mb_prefetch = ext4_get_groups_count(sb);
-+      /* now many real IOs to prefetch within a single allocation at cr=0
-+       * given cr=0 is an CPU-related optimization we shouldn't try to
-+       * load too many groups, at some point we should start to use what
-+       * we've got in memory.
-+       * with an average random access time 5ms, it'd take a second to get
-+       * 200 groups (* N with flex_bg), so let's make this limit 4 */
-+      sbi->s_mb_prefetch_limit = sbi->s_mb_prefetch * 4;
-+      if (sbi->s_mb_prefetch_limit > ext4_get_groups_count(sb))
-+              sbi->s_mb_prefetch_limit = ext4_get_groups_count(sb);
-       sbi->s_locality_groups = alloc_percpu(struct ext4_locality_group);
-       if (sbi->s_locality_groups == NULL) {
---- linux-4.18/fs/ext4/mballoc.h       2019-11-28 14:55:26.471545362 +0300
-+++ linux-4.18/fs/ext4/mballoc.h       2019-12-02 11:21:57.028104886 +0300
-@@ -177,6 +177,8 @@ struct ext4_allocation_context {
-       struct page *ac_buddy_page;
-       struct ext4_prealloc_space *ac_pa;
-       struct ext4_locality_group *ac_lg;
-+      ext4_group_t ac_prefetch;
-+      int ac_prefetch_ios; /* number of initialied prefetch IO */
- };
- #define AC_STATUS_CONTINUE    1
---- linux-4.18/fs/ext4/super.c 2019-11-28 14:55:26.502545959 +0300
-+++ linux-4.18/fs/ext4/super.c 2019-11-28 20:07:48.104558177 +0300
-@@ -190,6 +190,8 @@ EXT4_RW_ATTR_SBI_UI(msg_ratelimit_bur
- EXT4_RW_ATTR_SBI_UI(mb_small_req, s_mb_small_req);
- EXT4_RW_ATTR_SBI_UI(mb_large_req, s_mb_large_req);
- EXT4_RW_ATTR_SBI_UI(mb_group_prealloc, s_mb_group_prealloc);
-+EXT4_RW_ATTR_SBI_UI(mb_prefetch, s_mb_prefetch);
-+EXT4_RW_ATTR_SBI_UI(mb_prefetch_limit, s_mb_prefetch_limit);
- EXT4_DEPRECATED_ATTR(max_writeback_mb_bump, 128);
- EXT4_RW_ATTR_SBI_UI(extent_max_zeroout_kb, s_extent_max_zeroout_kb);
- EXT4_ATTR(trigger_fs_error, 0200, NULL, trigger_test_error);
-@@ -223,6 +224,8 @@ static struct attribute *ext4_attrs[]
-       ATTR_LIST(errors_count),
-       ATTR_LIST(first_error_time),
-       ATTR_LIST(last_error_time),
-+      ATTR_LIST(mb_prefetch),
-+      ATTR_LIST(mb_prefetch_limit),
-       NULL,
- };
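
The removed rhel7/ext4-mballoc-prefetch.patch gives each allocation
context a sliding window of block-group bitmaps that is kept in flight
ahead of the mballoc scan, so subsequent groups are already cached (or
at least under I/O) when the scan reaches them. Below is a minimal
userspace sketch of that sliding-window idea; the names (alloc_ctx,
load_bitmap_nowait, PREFETCH_BATCH) are hypothetical stand-ins, not the
kernel API added by the patch.

#include <stdbool.h>
#include <stdio.h>

#define NGROUPS        64  /* block groups in this toy model            */
#define PREFETCH_BATCH  8  /* bitmaps kept in flight ahead of the scan  */

struct alloc_ctx {
	unsigned int prefetch_next; /* first group not yet prefetched (cf. ac_prefetch)  */
	unsigned int prefetch_ios;  /* bitmap reads submitted (cf. ac_prefetch_ios)      */
};

static bool bitmap_cached[NGROUPS];

/* Stand-in for ext4_read_block_bitmap_nowait(sb, group, ignore_locked):
 * submit the read but do not wait for it. */
static void load_bitmap_nowait(unsigned int group, struct alloc_ctx *ac)
{
	if (!bitmap_cached[group]) {
		bitmap_cached[group] = true;  /* pretend the read is now in flight */
		ac->prefetch_ios++;
	}
}

/* Keep a batch of bitmaps in flight ahead of the current scan position. */
static void prefetch(struct alloc_ctx *ac, unsigned int scan_group)
{
	int ahead = (int)ac->prefetch_next - (int)scan_group;

	if (ahead < 0)
		ahead += NGROUPS;     /* window wrapped past the last group */
	if (ahead > 0)
		return;               /* window is still ahead of the scan  */

	for (int nr = PREFETCH_BATCH; nr > 0; nr--) {
		load_bitmap_nowait(ac->prefetch_next, ac);
		ac->prefetch_next = (ac->prefetch_next + 1) % NGROUPS;
	}
}

int main(void)
{
	struct alloc_ctx ac = { .prefetch_next = 0, .prefetch_ios = 0 };

	/* Walk the groups the way ext4_mb_regular_allocator() scans them. */
	for (unsigned int group = 0; group < NGROUPS; group++) {
		prefetch(&ac, group);
		/* ... group goodness checks and the actual allocation ... */
	}
	printf("bitmap reads submitted: %u\n", ac.prefetch_ios);
	return 0;
}

In the removed patch the window size comes from sbi->s_mb_prefetch
(aligned to flex_bg) and the total number of prefetch reads is capped by
sbi->s_mb_prefetch_limit; both are exported via the mb_prefetch and
mb_prefetch_limit sysfs attributes.
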
diff --git a/ldiskfs/kernel_patches/patches/sles12/ext4-corrupted-inode-block-bitmaps-handling-patches.patch b/ldiskfs/kernel_patches/patches/sles12/ext4-corrupted-inode-block-bitmaps-handling-patches.patch
deleted file mode 100644 (file)
index 857b85d..0000000
+++ /dev/null
@@ -1,91 +0,0 @@
-Since we could skip corrupt block groups, this patch
-use ext4_warning() intead of ext4_error() to make FS not
-emount RO in default, also fix a leftover from upstream
-commit 163a203ddb36c36d4a1c942
----
- fs/ext4/balloc.c  |  6 +++---
- fs/ext4/ialloc.c  |  8 ++++----
- fs/ext4/mballoc.c | 13 ++++++++++---
- 3 files changed, 17 insertions(+), 10 deletions(-)
-
-Index: linux-3.12.44-52.18/fs/ext4/balloc.c
-===================================================================
---- linux-3.12.44-52.18.orig/fs/ext4/balloc.c
-+++ linux-3.12.44-52.18/fs/ext4/balloc.c
-@@ -364,7 +364,7 @@ void ext4_validate_block_bitmap(struct s
-       blk = ext4_valid_block_bitmap(sb, desc, block_group, bh);
-       if (unlikely(blk != 0)) {
-               ext4_unlock_group(sb, block_group);
--              ext4_error(sb, "bg %u: block %llu: invalid block bitmap",
-+              ext4_warning(sb, "bg %u: block %llu: invalid block bitmap",
-                          block_group, blk);
-               set_bit(EXT4_GROUP_INFO_BBITMAP_CORRUPT_BIT, &grp->bb_state);
-               return;
-@@ -372,7 +372,7 @@ void ext4_validate_block_bitmap(struct s
-       if (unlikely(!ext4_block_bitmap_csum_verify(sb, block_group,
-                       desc, bh))) {
-               ext4_unlock_group(sb, block_group);
--              ext4_error(sb, "bg %u: bad block bitmap checksum", block_group);
-+              ext4_warning(sb, "bg %u: bad block bitmap checksum", block_group);
-               set_bit(EXT4_GROUP_INFO_BBITMAP_CORRUPT_BIT, &grp->bb_state);
-               return;
-       }
-Index: linux-3.12.44-52.18/fs/ext4/ialloc.c
-===================================================================
---- linux-3.12.44-52.18.orig/fs/ext4/ialloc.c
-+++ linux-3.12.44-52.18/fs/ext4/ialloc.c
-@@ -76,7 +76,7 @@ static unsigned ext4_init_inode_bitmap(s
-       /* If checksum is bad mark all blocks and inodes use to prevent
-        * allocation, essentially implementing a per-group read-only flag. */
-       if (!ext4_group_desc_csum_verify(sb, block_group, gdp)) {
--              ext4_error(sb, "Checksum bad for group %u", block_group);
-+              ext4_warning(sb, "Checksum bad for group %u", block_group);
-               grp = ext4_get_group_info(sb, block_group);
-               set_bit(EXT4_GROUP_INFO_BBITMAP_CORRUPT_BIT, &grp->bb_state);
-               set_bit(EXT4_GROUP_INFO_IBITMAP_CORRUPT_BIT, &grp->bb_state);
-@@ -182,8 +182,8 @@ verify:
-                                          EXT4_INODES_PER_GROUP(sb) / 8)) {
-               ext4_unlock_group(sb, block_group);
-               put_bh(bh);
--              ext4_error(sb, "Corrupt inode bitmap - block_group = %u, "
--                         "inode_bitmap = %llu", block_group, bitmap_blk);
-+              ext4_warning(sb, "Corrupt inode bitmap - block_group = %u, "
-+                               "inode_bitmap = %llu", block_group, bitmap_blk);
-               grp = ext4_get_group_info(sb, block_group);
-               set_bit(EXT4_GROUP_INFO_IBITMAP_CORRUPT_BIT, &grp->bb_state);
-               return NULL;
-@@ -320,7 +320,7 @@ out:
-               if (!fatal)
-                       fatal = err;
-       } else {
--              ext4_error(sb, "bit already cleared for inode %lu", ino);
-+              ext4_warning(sb, "bit already cleared for inode %lu", ino);
-               set_bit(EXT4_GROUP_INFO_IBITMAP_CORRUPT_BIT, &grp->bb_state);
-       }
-Index: linux-3.12.44-52.18/fs/ext4/mballoc.c
-===================================================================
---- linux-3.12.44-52.18.orig/fs/ext4/mballoc.c
-+++ linux-3.12.44-52.18/fs/ext4/mballoc.c
-@@ -752,11 +752,18 @@ int ext4_mb_generate_buddy(struct super_
-       if (free != grp->bb_free) {
-               struct ext4_group_desc *gdp;
-               gdp = ext4_get_group_desc(sb, group, NULL);
--              ext4_error(sb, "group %lu: %u blocks in bitmap, %u in bb, "
--                      "%u in gd, %lu pa's\n", (long unsigned int)group,
-+              ext4_warning(sb, "group %lu: %u blocks in bitmap, %u in bb, "
-+                      "%u in gd, %lu pa's block bitmap corrupt",
-+                      (long unsigned int)group,
-                       free, grp->bb_free, ext4_free_group_clusters(sb, gdp),
-                       grp->bb_prealloc_nr);
--              return -EIO;
-+              /*
-+               * If we intend to continue, we consider group descriptor
-+               * corrupt and update bb_free using bitmap value
-+               */
-+              grp->bb_free = free;
-+              set_bit(EXT4_GROUP_INFO_BBITMAP_CORRUPT_BIT, &grp->bb_state);
-+              
-       }
-       mb_set_largest_free_order(sb, grp);
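
The removed sles12 corrupted-bitmaps patch downgrades ext4_error() calls
to ext4_warning() for bad block/inode bitmaps, marks the affected group
with a *_CORRUPT bit so the allocators skip it, and keeps going instead
of returning -EIO (which, with errors=remount-ro, would force the whole
filesystem read-only). A hedged sketch of that pattern, with hypothetical
names rather than the real ext4 structures, follows.

#include <stdio.h>

#define BBITMAP_CORRUPT (1u << 0)  /* cf. EXT4_GROUP_INFO_BBITMAP_CORRUPT_BIT */

struct group_info {
	unsigned int state;    /* per-group flag bits (cf. grp->bb_state) */
	unsigned int bb_free;  /* free blocks as cached by mballoc        */
};

/* Stand-in for ext4_warning(); the real ext4_error() may remount the
 * filesystem read-only depending on the errors= mount option. */
static void fs_warning(const char *msg, unsigned int group)
{
	fprintf(stderr, "warning: group %u: %s\n", group, msg);
}

/* Old behaviour: hard error and -EIO.  New behaviour (sketched here):
 * warn, mark the group corrupt so it is skipped from now on, trust the
 * bitmap's free count, and continue. */
static int check_group(struct group_info *grp, unsigned int group,
		       unsigned int free_in_bitmap)
{
	if (free_in_bitmap != grp->bb_free) {
		fs_warning("blocks in bitmap disagree with group descriptor", group);
		grp->bb_free = free_in_bitmap;  /* use the bitmap value   */
		grp->state |= BBITMAP_CORRUPT;  /* allocators skip group  */
	}
	return 0;                               /* never -EIO here        */
}

int main(void)
{
	struct group_info grp = { .state = 0, .bb_free = 100 };

	check_group(&grp, 7, 96);  /* simulated mismatch */
	printf("group corrupt: %s, bb_free now %u\n",
	       (grp.state & BBITMAP_CORRUPT) ? "yes" : "no", grp.bb_free);
	return 0;
}

The inode-bitmap and group-descriptor checksum paths in the patch follow
the same pattern using the IBITMAP corrupt bit.
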
diff --git a/ldiskfs/kernel_patches/patches/sles12/ext4-data-in-dirent.patch b/ldiskfs/kernel_patches/patches/sles12/ext4-data-in-dirent.patch
deleted file mode 100644 (file)
index b9d50c2..0000000
+++ /dev/null
@@ -1,783 +0,0 @@
-this patch implements feature which allows ext4 fs users (e.g. Lustre)
-to store data in ext4 dirent.
-data is stored in ext4 dirent after file-name, this space is accounted
-in de->rec_len. flag EXT4_DIRENT_LUFID added to d_type if extra data
-is present.
-
-make use of dentry->d_fsdata to pass fid to ext4. so no
-changes in ext4_add_entry() interface required.
-
-Index: linux-stage/fs/ext4/dir.c
-===================================================================
---- linux-stage.orig/fs/ext4/dir.c
-+++ linux-stage/fs/ext4/dir.c
-@@ -70,11 +70,11 @@ int __ext4_check_dir_entry(const char *f
-       const int rlen = ext4_rec_len_from_disk(de->rec_len,
-                                               dir->i_sb->s_blocksize);
--      if (unlikely(rlen < EXT4_DIR_REC_LEN(1)))
-+      if (unlikely(rlen < __EXT4_DIR_REC_LEN(1)))
-               error_msg = "rec_len is smaller than minimal";
-       else if (unlikely(rlen % 4 != 0))
-               error_msg = "rec_len % 4 != 0";
--      else if (unlikely(rlen < EXT4_DIR_REC_LEN(de->name_len)))
-+      else if (unlikely(rlen < EXT4_DIR_REC_LEN(de)))
-               error_msg = "rec_len is too small for name_len";
-       else if (unlikely(((char *) de - buf) + rlen > size))
-               error_msg = "directory entry across range";
-@@ -202,7 +202,7 @@ static int ext4_readdir(struct file *fil
-                                * failure will be detected in the
-                                * dirent test below. */
-                               if (ext4_rec_len_from_disk(de->rec_len,
--                                      sb->s_blocksize) < EXT4_DIR_REC_LEN(1))
-+                                  sb->s_blocksize) < __EXT4_DIR_REC_LEN(1))
-                                       break;
-                               i += ext4_rec_len_from_disk(de->rec_len,
-                                                           sb->s_blocksize);
-@@ -421,12 +421,17 @@ int ext4_htree_store_dirent(struct file
-       struct fname *fname, *new_fn;
-       struct dir_private_info *info;
-       int len;
-+      int extra_data = 0;
-       info = dir_file->private_data;
-       p = &info->root.rb_node;
-       /* Create and allocate the fname structure */
--      len = sizeof(struct fname) + dirent->name_len + 1;
-+      if (dirent->file_type & EXT4_DIRENT_LUFID)
-+              extra_data = ext4_get_dirent_data_len(dirent);
-+
-+      len = sizeof(struct fname) + dirent->name_len + extra_data + 1;
-+
-       new_fn = kzalloc(len, GFP_KERNEL);
-       if (!new_fn)
-               return -ENOMEM;
-@@ -435,7 +440,7 @@ int ext4_htree_store_dirent(struct file
-       new_fn->inode = le32_to_cpu(dirent->inode);
-       new_fn->name_len = dirent->name_len;
-       new_fn->file_type = dirent->file_type;
--      memcpy(new_fn->name, dirent->name, dirent->name_len);
-+      memcpy(new_fn->name, dirent->name, dirent->name_len + extra_data);
-       new_fn->name[dirent->name_len] = 0;
-       while (*p) {
-Index: linux-stage/fs/ext4/ext4.h
-===================================================================
---- linux-stage.orig/fs/ext4/ext4.h
-+++ linux-stage/fs/ext4/ext4.h
-@@ -960,6 +960,7 @@ struct ext4_inode_info {
- #define EXT4_MOUNT_ERRORS_MASK                0x00070
- #define EXT4_MOUNT_MINIX_DF           0x00080 /* Mimics the Minix statfs */
- #define EXT4_MOUNT_NOLOAD             0x00100 /* Don't use existing journal*/
-+#define EXT4_MOUNT_DIRDATA            0x00200 /* Data in directory entries*/
- #define EXT4_MOUNT_DATA_FLAGS         0x00C00 /* Mode for data writes: */
- #define EXT4_MOUNT_JOURNAL_DATA               0x00400 /* Write data to journal */
- #define EXT4_MOUNT_ORDERED_DATA               0x00800 /* Flush data before commit */
-@@ -1538,6 +1539,7 @@ static inline void ext4_clear_state_flag
-                                        EXT4_FEATURE_INCOMPAT_64BIT| \
-                                        EXT4_FEATURE_INCOMPAT_FLEX_BG| \
-                                        EXT4_FEATURE_INCOMPAT_MMP |    \
-+                                       EXT4_FEATURE_INCOMPAT_DIRDATA| \
-                                        EXT4_FEATURE_INCOMPAT_INLINE_DATA)
- #define EXT4_FEATURE_RO_COMPAT_SUPP   (EXT4_FEATURE_RO_COMPAT_SPARSE_SUPER| \
-                                        EXT4_FEATURE_RO_COMPAT_LARGE_FILE| \
-@@ -1644,6 +1646,43 @@ struct ext4_dir_entry_tail {
- #define EXT4_FT_SYMLINK               7
- #define EXT4_FT_MAX           8
-+#define EXT4_FT_MASK          0xf
-+
-+#if EXT4_FT_MAX > EXT4_FT_MASK
-+#error "conflicting EXT4_FT_MAX and EXT4_FT_MASK"
-+#endif
-+
-+/*
-+ * d_type has 4 unused bits, so it can hold four types data. these different
-+ * type of data (e.g. lustre data, high 32 bits of 64-bit inode number) can be
-+ * stored, in flag order, after file-name in ext4 dirent.
-+*/
-+/*
-+ * this flag is added to d_type if ext4 dirent has extra data after
-+ * filename. this data length is variable and length is stored in first byte
-+ * of data. data start after filename NUL byte.
-+ * This is used by Lustre FS.
-+  */
-+#define EXT4_DIRENT_LUFID             0x10
-+
-+#define EXT4_LUFID_MAGIC    0xAD200907UL
-+struct ext4_dentry_param {
-+      __u32  edp_magic;       /* EXT4_LUFID_MAGIC */
-+      char   edp_len;         /* size of edp_data in bytes */
-+      char   edp_data[0];     /* packed array of data */
-+} __packed;
-+
-+static inline unsigned char *ext4_dentry_get_data(struct super_block *sb,
-+                                                struct ext4_dentry_param *p)
-+
-+{
-+      if (!EXT4_HAS_INCOMPAT_FEATURE(sb, EXT4_FEATURE_INCOMPAT_DIRDATA))
-+              return NULL;
-+      if (p && p->edp_magic == EXT4_LUFID_MAGIC)
-+              return &p->edp_len;
-+      else
-+              return NULL;
-+}
- #define EXT4_FT_DIR_CSUM      0xDE
-@@ -1654,8 +1693,11 @@ struct ext4_dir_entry_tail {
-  */
- #define EXT4_DIR_PAD                  4
- #define EXT4_DIR_ROUND                        (EXT4_DIR_PAD - 1)
--#define EXT4_DIR_REC_LEN(name_len)    (((name_len) + 8 + EXT4_DIR_ROUND) & \
-+#define __EXT4_DIR_REC_LEN(name_len)  (((name_len) + 8 + EXT4_DIR_ROUND) & \
-                                        ~EXT4_DIR_ROUND)
-+#define EXT4_DIR_REC_LEN(de)          (__EXT4_DIR_REC_LEN((de)->name_len +\
-+                                      ext4_get_dirent_data_len(de)))
-+
- #define EXT4_MAX_REC_LEN              ((1<<16)-1)
- /*
-@@ -1991,11 +2033,11 @@ extern int ext4_find_dest_de(struct inod
-                            struct buffer_head *bh,
-                            void *buf, int buf_size,
-                            const char *name, int namelen,
--                           struct ext4_dir_entry_2 **dest_de);
-+                           struct ext4_dir_entry_2 **dest_de, int *dlen);
- void ext4_insert_dentry(struct inode *inode,
-                       struct ext4_dir_entry_2 *de,
-                       int buf_size,
--                      const char *name, int namelen);
-+                      const char *name, int namelen, void *data);
- static inline void ext4_update_dx_flag(struct inode *inode)
- {
-       if (!EXT4_HAS_COMPAT_FEATURE(inode->i_sb,
-@@ -2008,11 +2050,18 @@ static unsigned char ext4_filetype_table
- static inline  unsigned char get_dtype(struct super_block *sb, int filetype)
- {
-+      int fl_index = filetype & EXT4_FT_MASK;
-+
-       if (!EXT4_HAS_INCOMPAT_FEATURE(sb, EXT4_FEATURE_INCOMPAT_FILETYPE) ||
--          (filetype >= EXT4_FT_MAX))
-+          (fl_index >= EXT4_FT_MAX))
-               return DT_UNKNOWN;
--      return ext4_filetype_table[filetype];
-+      if (!test_opt(sb, DIRDATA))
-+              return ext4_filetype_table[fl_index];
-+
-+      return (ext4_filetype_table[fl_index]) |
-+              (filetype & EXT4_DIRENT_LUFID);
-+
- }
- /* fsync.c */
-@@ -2158,6 +2207,8 @@ extern struct inode *ext4_create_inode(h
- extern int ext4_delete_entry(handle_t *handle, struct inode * dir,
-                            struct ext4_dir_entry_2 *de_del,
-                            struct buffer_head *bh);
-+extern int ext4_add_dot_dotdot(handle_t *handle, struct inode *dir,
-+                             struct inode *inode, const void *, const void *);
- extern int search_dir(struct buffer_head *bh,
-                     char *search_buf,
-                     int buf_size,
-@@ -2834,6 +2885,36 @@ extern struct mutex ext4__aio_mutex[EXT4
- extern int ext4_resize_begin(struct super_block *sb);
- extern void ext4_resize_end(struct super_block *sb);
-+/*
-+ * Compute the total directory entry data length.
-+ * This includes the filename and an implicit NUL terminator (always present),
-+ * and optional extensions.  Each extension has a bit set in the high 4 bits of
-+ * de->file_type, and the extension length is the first byte in each entry.
-+ */
-+static inline int ext4_get_dirent_data_len(struct ext4_dir_entry_2 *de)
-+{
-+      char *len = de->name + de->name_len + 1 /* NUL terminator */;
-+      int dlen = 0;
-+      __u8 extra_data_flags = (de->file_type & ~EXT4_FT_MASK) >> 4;
-+      struct ext4_dir_entry_tail *t = (struct ext4_dir_entry_tail *)de;
-+
-+      if (!t->det_reserved_zero1 &&
-+          le16_to_cpu(t->det_rec_len) ==
-+              sizeof(struct ext4_dir_entry_tail) &&
-+          !t->det_reserved_zero2 &&
-+          t->det_reserved_ft == EXT4_FT_DIR_CSUM)
-+              return 0;
-+
-+      while (extra_data_flags) {
-+              if (extra_data_flags & 1) {
-+                      dlen += *len + (dlen == 0);
-+                      len += *len;
-+              }
-+              extra_data_flags >>= 1;
-+      }
-+      return dlen;
-+}
-+
- #endif        /* __KERNEL__ */
- #endif        /* _EXT4_H */
-Index: linux-stage/fs/ext4/namei.c
-===================================================================
---- linux-stage.orig/fs/ext4/namei.c
-+++ linux-stage/fs/ext4/namei.c
-@@ -239,7 +239,8 @@ static unsigned dx_get_count(struct dx_e
- static unsigned dx_get_limit(struct dx_entry *entries);
- static void dx_set_count(struct dx_entry *entries, unsigned value);
- static void dx_set_limit(struct dx_entry *entries, unsigned value);
--static unsigned dx_root_limit(struct inode *dir, unsigned infosize);
-+static inline unsigned dx_root_limit(struct inode *dir,
-+              struct ext4_dir_entry_2 *dot_de, unsigned infosize);
- static unsigned dx_node_limit(struct inode *dir);
- static struct dx_frame *dx_probe(const struct qstr *d_name,
-                                struct inode *dir,
-@@ -379,22 +380,23 @@ static struct dx_countlimit *get_dx_coun
- {
-       struct ext4_dir_entry *dp;
-       struct dx_root_info *root;
--      int count_offset;
-+      int count_offset, dot_rec_len, dotdot_rec_len;
-       if (le16_to_cpu(dirent->rec_len) == EXT4_BLOCK_SIZE(inode->i_sb))
-               count_offset = 8;
--      else if (le16_to_cpu(dirent->rec_len) == 12) {
--              dp = (struct ext4_dir_entry *)(((void *)dirent) + 12);
-+      else {
-+              dot_rec_len = le16_to_cpu(dirent->rec_len);
-+              dp = (struct ext4_dir_entry *)(((void *)dirent) + dot_rec_len);
-               if (le16_to_cpu(dp->rec_len) !=
--                  EXT4_BLOCK_SIZE(inode->i_sb) - 12)
-+                  EXT4_BLOCK_SIZE(inode->i_sb) - dot_rec_len)
-                       return NULL;
--              root = (struct dx_root_info *)(((void *)dp + 12));
-+              dotdot_rec_len = EXT4_DIR_REC_LEN((struct ext4_dir_entry_2 *)dp);
-+              root = (struct dx_root_info *)(((void *)dp + dotdot_rec_len));
-               if (root->reserved_zero ||
-                   root->info_length != sizeof(struct dx_root_info))
-                       return NULL;
--              count_offset = 32;
--      } else
--              return NULL;
-+              count_offset = 8 + dot_rec_len + dotdot_rec_len;
-+      }
-       if (offset)
-               *offset = count_offset;
-@@ -500,11 +501,12 @@ ext4_next_entry(struct ext4_dir_entry_2
-  */
- struct dx_root_info *dx_get_dx_info(struct ext4_dir_entry_2 *de)
- {
-+      BUG_ON(de->name_len != 1);
-       /* get dotdot first */
--      de = (struct ext4_dir_entry_2 *)((char *)de + EXT4_DIR_REC_LEN(1));
-+      de = (struct ext4_dir_entry_2 *)((char *)de + EXT4_DIR_REC_LEN(de));
-       /* dx root info is after dotdot entry */
--      de = (struct ext4_dir_entry_2 *)((char *)de + EXT4_DIR_REC_LEN(2));
-+      de = (struct ext4_dir_entry_2 *)((char *)de + EXT4_DIR_REC_LEN(de));
-       return (struct dx_root_info *)de;
- }
-@@ -549,10 +551,16 @@ static inline void dx_set_limit(struct d
-       ((struct dx_countlimit *) entries)->limit = cpu_to_le16(value);
- }
--static inline unsigned dx_root_limit(struct inode *dir, unsigned infosize)
-+static inline unsigned dx_root_limit(struct inode *dir,
-+              struct ext4_dir_entry_2 *dot_de, unsigned infosize)
- {
--      unsigned entry_space = dir->i_sb->s_blocksize - EXT4_DIR_REC_LEN(1) -
--              EXT4_DIR_REC_LEN(2) - infosize;
-+      struct ext4_dir_entry_2 *dotdot_de;
-+      unsigned entry_space;
-+
-+      BUG_ON(dot_de->name_len != 1);
-+      dotdot_de = ext4_next_entry(dot_de, dir->i_sb->s_blocksize);
-+      entry_space = dir->i_sb->s_blocksize - EXT4_DIR_REC_LEN(dot_de) -
-+                       EXT4_DIR_REC_LEN(dotdot_de) - infosize;
-       if (ext4_has_metadata_csum(dir->i_sb))
-               entry_space -= sizeof(struct dx_tail);
-@@ -561,7 +569,7 @@ static inline unsigned dx_root_limit(str
- static inline unsigned dx_node_limit(struct inode *dir)
- {
--      unsigned entry_space = dir->i_sb->s_blocksize - EXT4_DIR_REC_LEN(0);
-+      unsigned entry_space = dir->i_sb->s_blocksize - __EXT4_DIR_REC_LEN(0);
-       if (ext4_has_metadata_csum(dir->i_sb))
-               entry_space -= sizeof(struct dx_tail);
-@@ -611,7 +619,7 @@ static struct stats dx_show_leaf(struct
-                               printk(":%x.%u ", h.hash,
-                                      (unsigned) ((char *) de - base));
-                       }
--                      space += EXT4_DIR_REC_LEN(de->name_len);
-+                      space += EXT4_DIR_REC_LEN(de);
-                       names++;
-               }
-               de = ext4_next_entry(de, size);
-@@ -719,12 +727,15 @@ dx_probe(const struct qstr *d_name, stru
-       entries = (struct dx_entry *)(((char *)info) + info->info_length);
--      if (dx_get_limit(entries) != dx_root_limit(dir,
--                                                 info->info_length)) {
-+      if (dx_get_limit(entries) !=
-+          dx_root_limit(dir, (struct ext4_dir_entry_2 *)bh->b_data,
-+                        info->info_length)) {
-               ext4_warning(dir->i_sb, "dx entry: limit != root limit "
-                            "inode #%lu: dx entry: limit %u != root limit %u",
-                            dir->i_ino, dx_get_limit(entries),
--                           dx_root_limit(dir, info->info_length));
-+                           dx_root_limit(dir,
-+                                        (struct ext4_dir_entry_2 *)bh->b_data,
-+                                        info->info_length));
-               brelse(bh);
-               *err = ERR_BAD_DX_DIR;
-               goto fail;
-@@ -916,7 +927,7 @@ static int htree_dirblock_to_tree(struct
-       de = (struct ext4_dir_entry_2 *) bh->b_data;
-       top = (struct ext4_dir_entry_2 *) ((char *) de +
-                                          dir->i_sb->s_blocksize -
--                                         EXT4_DIR_REC_LEN(0));
-+                                         __EXT4_DIR_REC_LEN(0));
-       for (; de < top; de = ext4_next_entry(de, dir->i_sb->s_blocksize)) {
-               if (ext4_check_dir_entry(dir, NULL, de, bh,
-                               bh->b_data, bh->b_size,
-@@ -1508,7 +1519,7 @@ dx_move_dirents(char *from, char *to, st
-       while (count--) {
-               struct ext4_dir_entry_2 *de = (struct ext4_dir_entry_2 *)
-                                               (from + (map->offs<<2));
--              rec_len = EXT4_DIR_REC_LEN(de->name_len);
-+              rec_len = EXT4_DIR_REC_LEN(de);
-               memcpy (to, de, rec_len);
-               ((struct ext4_dir_entry_2 *) to)->rec_len =
-                               ext4_rec_len_to_disk(rec_len, blocksize);
-@@ -1532,7 +1543,7 @@ static struct ext4_dir_entry_2* dx_pack_
-       while ((char*)de < base + blocksize) {
-               next = ext4_next_entry(de, blocksize);
-               if (de->inode && de->name_len) {
--                      rec_len = EXT4_DIR_REC_LEN(de->name_len);
-+                      rec_len = EXT4_DIR_REC_LEN(de);
-                       if (de > to)
-                               memmove(to, de, rec_len);
-                       to->rec_len = ext4_rec_len_to_disk(rec_len, blocksize);
-@@ -1663,14 +1674,16 @@ int ext4_find_dest_de(struct inode *dir,
-                     struct buffer_head *bh,
-                     void *buf, int buf_size,
-                     const char *name, int namelen,
--                    struct ext4_dir_entry_2 **dest_de)
-+                    struct ext4_dir_entry_2 **dest_de, int *dlen)
- {
-       struct ext4_dir_entry_2 *de;
--      unsigned short reclen = EXT4_DIR_REC_LEN(namelen);
-+      unsigned short reclen = __EXT4_DIR_REC_LEN(namelen) +
-+                                                      (dlen ? *dlen : 0);
-       int nlen, rlen;
-       unsigned int offset = 0;
-       char *top;
-+      dlen ? *dlen = 0 : 0; /* default set to 0 */
-       de = (struct ext4_dir_entry_2 *)buf;
-       top = buf + buf_size - reclen;
-       while ((char *) de <= top) {
-@@ -1679,10 +1692,26 @@ int ext4_find_dest_de(struct inode *dir,
-                       return -EIO;
-               if (ext4_match(namelen, name, de))
-                       return -EEXIST;
--              nlen = EXT4_DIR_REC_LEN(de->name_len);
-+              nlen = EXT4_DIR_REC_LEN(de);
-               rlen = ext4_rec_len_from_disk(de->rec_len, buf_size);
-               if ((de->inode ? rlen - nlen : rlen) >= reclen)
-                       break;
-+              /* Then for dotdot entries, check for the smaller space
-+               * required for just the entry, no FID */
-+              if (namelen == 2 && memcmp(name, "..", 2) == 0) {
-+                      if ((de->inode ? rlen - nlen : rlen) >=
-+                          __EXT4_DIR_REC_LEN(namelen)) {
-+                              /* set dlen=1 to indicate not
-+                               * enough space store fid */
-+                              dlen ? *dlen = 1 : 0;
-+                              break;
-+                      }
-+                      /* The new ".." entry must be written over the
-+                       * previous ".." entry, which is the first
-+                       * entry traversed by this scan. If it doesn't
-+                       * fit, something is badly wrong, so -EIO. */
-+                      return -EIO;
-+              }
-               de = (struct ext4_dir_entry_2 *)((char *)de + rlen);
-               offset += rlen;
-       }
-@@ -1696,12 +1725,12 @@ int ext4_find_dest_de(struct inode *dir,
- void ext4_insert_dentry(struct inode *inode,
-                       struct ext4_dir_entry_2 *de,
-                       int buf_size,
--                      const char *name, int namelen)
-+                      const char *name, int namelen, void *data)
- {
-       int nlen, rlen;
--      nlen = EXT4_DIR_REC_LEN(de->name_len);
-+      nlen = EXT4_DIR_REC_LEN(de);
-       rlen = ext4_rec_len_from_disk(de->rec_len, buf_size);
-       if (de->inode) {
-               struct ext4_dir_entry_2 *de1 =
-@@ -1715,6 +1744,11 @@ void ext4_insert_dentry(struct inode *in
-       ext4_set_de_type(inode->i_sb, de, inode->i_mode);
-       de->name_len = namelen;
-       memcpy(de->name, name, namelen);
-+      if (data) {
-+              de->name[namelen] = 0;
-+              memcpy(&de->name[namelen + 1], data, *(char *)data);
-+              de->file_type |= EXT4_DIRENT_LUFID;
-+      }
- }
- /*
-  * Add a new entry into a directory (leaf) block.  If de is non-NULL,
-@@ -1733,15 +1767,20 @@ static int add_dirent_to_buf(handle_t *h
-       int             namelen = dentry->d_name.len;
-       unsigned int    blocksize = dir->i_sb->s_blocksize;
-       int             csum_size = 0;
--      int             err;
-+      int             err, dlen = 0;
-+      unsigned char   *data;
-+      data = ext4_dentry_get_data(inode->i_sb, (struct ext4_dentry_param *)
-+                                              dentry->d_fsdata);
-       if (ext4_has_metadata_csum(inode->i_sb))
-               csum_size = sizeof(struct ext4_dir_entry_tail);
-       if (!de) {
-+              if (data)
-+                      dlen = (*data) + 1;
-               err = ext4_find_dest_de(dir, inode,
-                                       bh, bh->b_data, blocksize - csum_size,
--                                      name, namelen, &de);
-+                                      name, namelen, &de, &dlen);
-               if (err)
-                       return err;
-       }
-@@ -1753,7 +1792,10 @@ static int add_dirent_to_buf(handle_t *h
-       }
-       /* By now the buffer is marked for journaling */
--      ext4_insert_dentry(inode, de, blocksize, name, namelen);
-+      /* If writing the short form of "dotdot", don't add the data section */
-+      if (dlen == 1)
-+              data = NULL;
-+      ext4_insert_dentry(inode, de, blocksize, name, namelen, data);
-       /*
-        * XXX shouldn't update any times until successful
-@@ -1864,7 +1906,8 @@ static int make_indexed_dir(handle_t *ha
-       dx_set_block(entries, 1);
-       dx_set_count(entries, 1);
--      dx_set_limit(entries, dx_root_limit(dir, sizeof(*dx_info)));
-+      dx_set_limit(entries, dx_root_limit(dir,
-+                                       dot_de, sizeof(*dx_info)));
-       /* Initialize as for dx_probe */
-       hinfo.hash_version = dx_info->hash_version;
-@@ -1907,6 +1950,8 @@ static int ext4_update_dotdot(handle_t *
-       struct buffer_head *dir_block;
-       struct ext4_dir_entry_2 *de;
-       int len, journal = 0, err = 0;
-+      int dlen = 0;
-+      char *data;
-       if (IS_ERR(handle))
-               return PTR_ERR(handle);
-@@ -1922,19 +1967,24 @@ static int ext4_update_dotdot(handle_t *
-       /* the first item must be "." */
-       assert(de->name_len == 1 && de->name[0] == '.');
-       len = le16_to_cpu(de->rec_len);
--      assert(len >= EXT4_DIR_REC_LEN(1));
--      if (len > EXT4_DIR_REC_LEN(1)) {
-+      assert(len >= __EXT4_DIR_REC_LEN(1));
-+      if (len > __EXT4_DIR_REC_LEN(1)) {
-               BUFFER_TRACE(dir_block, "get_write_access");
-               err = ext4_journal_get_write_access(handle, dir_block);
-               if (err)
-                       goto out_journal;
-               journal = 1;
--              de->rec_len = cpu_to_le16(EXT4_DIR_REC_LEN(1));
-+              de->rec_len = cpu_to_le16(EXT4_DIR_REC_LEN(de));
-       }
--      len -= EXT4_DIR_REC_LEN(1);
--      assert(len == 0 || len >= EXT4_DIR_REC_LEN(2));
-+      len -= EXT4_DIR_REC_LEN(de);
-+      data = ext4_dentry_get_data(dir->i_sb,
-+                      (struct ext4_dentry_param *)dentry->d_fsdata);
-+      if (data)
-+              dlen = *data + 1;
-+      assert(len == 0 || len >= __EXT4_DIR_REC_LEN(2 + dlen));
-+
-       de = (struct ext4_dir_entry_2 *)
-                       ((char *) de + le16_to_cpu(de->rec_len));
-       if (!journal) {
-@@ -1948,10 +1998,15 @@ static int ext4_update_dotdot(handle_t *
-       if (len > 0)
-               de->rec_len = cpu_to_le16(len);
-       else
--              assert(le16_to_cpu(de->rec_len) >= EXT4_DIR_REC_LEN(2));
-+              assert(le16_to_cpu(de->rec_len) >= __EXT4_DIR_REC_LEN(2));
-       de->name_len = 2;
-       strcpy(de->name, "..");
--      ext4_set_de_type(dir->i_sb, de, S_IFDIR);
-+      if (data != NULL && ext4_get_dirent_data_len(de) >= dlen) {
-+              de->name[2] = 0;
-+              memcpy(&de->name[2 + 1], data, *data);
-+              ext4_set_de_type(dir->i_sb, de, S_IFDIR);
-+              de->file_type |= EXT4_DIRENT_LUFID;
-+      }
- out_journal:
-       if (journal) {
-@@ -2463,37 +2518,70 @@ err_unlock_inode:
-       return err;
- }
-+struct tp_block {
-+      struct inode *inode;
-+      void *data1;
-+      void *data2;
-+};
-+
- struct ext4_dir_entry_2 *ext4_init_dot_dotdot(struct inode *inode,
-                         struct ext4_dir_entry_2 *de,
-                         int blocksize, int csum_size,
-                         unsigned int parent_ino, int dotdot_real_len)
- {
-+      void *data1 = NULL, *data2 = NULL;
-+      int dot_reclen = 0;
-+
-+      if (dotdot_real_len == 10) {
-+              struct tp_block *tpb = (struct tp_block *)inode;
-+              data1 = tpb->data1;
-+              data2 = tpb->data2;
-+              inode = tpb->inode;
-+              dotdot_real_len = 0;
-+      }
-       de->inode = cpu_to_le32(inode->i_ino);
-       de->name_len = 1;
--      de->rec_len = ext4_rec_len_to_disk(EXT4_DIR_REC_LEN(de->name_len),
--                                         blocksize);
-       strcpy(de->name, ".");
-       ext4_set_de_type(inode->i_sb, de, S_IFDIR);
-+      /* get packed fid data*/
-+      data1 = ext4_dentry_get_data(inode->i_sb,
-+                              (struct ext4_dentry_param *) data1);
-+      if (data1) {
-+              de->name[1] = 0;
-+              memcpy(&de->name[2], data1, *(char *) data1);
-+              de->file_type |= EXT4_DIRENT_LUFID;
-+      }
-+      de->rec_len = cpu_to_le16(EXT4_DIR_REC_LEN(de));
-+      dot_reclen = cpu_to_le16(de->rec_len);
-       de = ext4_next_entry(de, blocksize);
-       de->inode = cpu_to_le32(parent_ino);
-       de->name_len = 2;
-+      strcpy(de->name, "..");
-+      ext4_set_de_type(inode->i_sb, de, S_IFDIR);
-+      data2 = ext4_dentry_get_data(inode->i_sb,
-+                      (struct ext4_dentry_param *) data2);
-+      if (data2) {
-+              de->name[2] = 0;
-+              memcpy(&de->name[3], data2, *(char *) data2);
-+              de->file_type |= EXT4_DIRENT_LUFID;
-+      }
-       if (!dotdot_real_len)
-               de->rec_len = ext4_rec_len_to_disk(blocksize -
--                                      (csum_size + EXT4_DIR_REC_LEN(1)),
-+                                      (csum_size + dot_reclen),
-                                       blocksize);
-       else
-               de->rec_len = ext4_rec_len_to_disk(
--                              EXT4_DIR_REC_LEN(de->name_len), blocksize);
--      strcpy(de->name, "..");
--      ext4_set_de_type(inode->i_sb, de, S_IFDIR);
-+                              EXT4_DIR_REC_LEN(de), blocksize);
-       return ext4_next_entry(de, blocksize);
- }
- static int ext4_init_new_dir(handle_t *handle, struct inode *dir,
--                           struct inode *inode)
-+                           struct inode *inode,
-+                           const void *data1, const void *data2)
- {
-+      struct tp_block param;
-       struct buffer_head *dir_block = NULL;
-       struct ext4_dir_entry_2 *de;
-       struct ext4_dir_entry_tail *t;
-@@ -2522,7 +2610,11 @@ static int ext4_init_new_dir(handle_t *h
-       if (err)
-               goto out;
-       de = (struct ext4_dir_entry_2 *)dir_block->b_data;
--      ext4_init_dot_dotdot(inode, de, blocksize, csum_size, dir->i_ino, 0);
-+      param.inode = inode;
-+      param.data1 = (void *)data1;
-+      param.data2 = (void *)data2;
-+      ext4_init_dot_dotdot((struct inode *)(&param), de, blocksize,
-+                           csum_size, dir->i_ino, 10);
-       set_nlink(inode, 2);
-       if (csum_size) {
-               t = EXT4_DIRENT_TAIL(dir_block->b_data, blocksize);
-@@ -2539,6 +2631,29 @@ out:
-       return err;
- }
-+/* Initialize @inode as a subdirectory of @dir, and add the
-+ * "." and ".." entries into the first directory block. */
-+int ext4_add_dot_dotdot(handle_t *handle, struct inode *dir,
-+                      struct inode *inode,
-+                      const void *data1, const void *data2)
-+{
-+      int rc;
-+
-+      if (IS_ERR(handle))
-+              return PTR_ERR(handle);
-+
-+      if (IS_DIRSYNC(dir))
-+              ext4_handle_sync(handle);
-+
-+      inode->i_op = &ext4_dir_inode_operations;
-+      inode->i_fop = &ext4_dir_operations;
-+      rc = ext4_init_new_dir(handle, dir, inode, data1, data2);
-+      if (!rc)
-+              rc = ext4_mark_inode_dirty(handle, inode);
-+      return rc;
-+}
-+EXPORT_SYMBOL(ext4_add_dot_dotdot);
-+
- static int ext4_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode)
- {
-       handle_t *handle;
-@@ -2563,7 +2678,7 @@ retry:
-       inode->i_op = &ext4_dir_inode_operations;
-       inode->i_fop = &ext4_dir_operations;
--      err = ext4_init_new_dir(handle, dir, inode);
-+      err = ext4_init_new_dir(handle, dir, inode, NULL, NULL);
-       if (err)
-               goto out_clear_inode;
-       err = ext4_mark_inode_dirty(handle, inode);
-@@ -2615,7 +2730,7 @@ static int empty_dir(struct inode *inode
-       }
-       sb = inode->i_sb;
--      if (inode->i_size < EXT4_DIR_REC_LEN(1) + EXT4_DIR_REC_LEN(2)) {
-+      if (inode->i_size < __EXT4_DIR_REC_LEN(1) + __EXT4_DIR_REC_LEN(2)) {
-               EXT4_ERROR_INODE(inode, "invalid size");
-               return 1;
-       }
-Index: linux-stage/fs/ext4/inline.c
-===================================================================
---- linux-stage.orig/fs/ext4/inline.c
-+++ linux-stage/fs/ext4/inline.c
-@@ -998,17 +998,17 @@ static int ext4_add_dirent_to_inline(han
-       int             err;
-       struct ext4_dir_entry_2 *de;
--      reclen = EXT4_DIR_REC_LEN(namelen);
-+      reclen = __EXT4_DIR_REC_LEN(namelen);
-       err = ext4_find_dest_de(dir, inode, iloc->bh,
-                               inline_start, inline_size,
--                              name, namelen, &de);
-+                              name, namelen, &de, NULL);
-       if (err)
-               return err;
-       err = ext4_journal_get_write_access(handle, iloc->bh);
-       if (err)
-               return err;
--      ext4_insert_dentry(inode, de, inline_size, name, namelen);
-+      ext4_insert_dentry(inode, de, inline_size, name, namelen, NULL);
-       ext4_show_inline_dir(dir, iloc->bh, inline_start, inline_size);
-@@ -1078,7 +1078,7 @@ static int ext4_update_inline_dir(handle
-       int old_size = EXT4_I(dir)->i_inline_size - EXT4_MIN_INLINE_DATA_SIZE;
-       int new_size = get_max_inline_xattr_value_size(dir, iloc);
--      if (new_size - old_size <= EXT4_DIR_REC_LEN(1))
-+      if (new_size - old_size <= __EXT4_DIR_REC_LEN(1))
-               return -ENOSPC;
-       ret = ext4_update_inline_data(handle, dir,
-@@ -1347,7 +1347,7 @@ int htree_inlinedir_to_tree(struct file
-                       fake.name_len = 1;
-                       strcpy(fake.name, ".");
-                       fake.rec_len = ext4_rec_len_to_disk(
--                                              EXT4_DIR_REC_LEN(fake.name_len),
-+                                              EXT4_DIR_REC_LEN(&fake),
-                                               inline_size);
-                       ext4_set_de_type(inode->i_sb, &fake, S_IFDIR);
-                       de = &fake;
-@@ -1357,7 +1357,7 @@ int htree_inlinedir_to_tree(struct file
-                       fake.name_len = 2;
-                       strcpy(fake.name, "..");
-                       fake.rec_len = ext4_rec_len_to_disk(
--                                              EXT4_DIR_REC_LEN(fake.name_len),
-+                                              EXT4_DIR_REC_LEN(&fake),
-                                               inline_size);
-                       ext4_set_de_type(inode->i_sb, &fake, S_IFDIR);
-                       de = &fake;
-@@ -1452,8 +1452,8 @@ int ext4_read_inline_dir(struct file *fi
-        * So we will use extra_offset and extra_size to indicate them
-        * during the inline dir iteration.
-        */
--      dotdot_offset = EXT4_DIR_REC_LEN(1);
--      dotdot_size = dotdot_offset + EXT4_DIR_REC_LEN(2);
-+      dotdot_offset = __EXT4_DIR_REC_LEN(1);
-+      dotdot_size = dotdot_offset + __EXT4_DIR_REC_LEN(2);
-       extra_offset = dotdot_size - EXT4_INLINE_DOTDOT_SIZE;
-       extra_size = extra_offset + inline_size;
-@@ -1488,7 +1488,7 @@ int ext4_read_inline_dir(struct file *fi
-                        * failure will be detected in the
-                        * dirent test below. */
-                       if (ext4_rec_len_from_disk(de->rec_len, extra_size)
--                              < EXT4_DIR_REC_LEN(1))
-+                              < __EXT4_DIR_REC_LEN(1))
-                               break;
-                       i += ext4_rec_len_from_disk(de->rec_len,
-                                                   extra_size);
-Index: linux-stage/fs/ext4/super.c
-===================================================================
---- linux-stage.orig/fs/ext4/super.c
-+++ linux-stage/fs/ext4/super.c
-@@ -1133,7 +1133,7 @@ enum {
-       Opt_data_err_abort, Opt_data_err_ignore,
-       Opt_usrjquota, Opt_grpjquota, Opt_offusrjquota, Opt_offgrpjquota,
-       Opt_jqfmt_vfsold, Opt_jqfmt_vfsv0, Opt_jqfmt_vfsv1, Opt_quota,
--      Opt_noquota, Opt_barrier, Opt_nobarrier, Opt_err,
-+      Opt_noquota, Opt_barrier, Opt_nobarrier, Opt_err, Opt_dirdata,
-       Opt_usrquota, Opt_grpquota, Opt_i_version,
-       Opt_stripe, Opt_delalloc, Opt_nodelalloc, Opt_mblk_io_submit,
-       Opt_nomblk_io_submit, Opt_block_validity, Opt_noblock_validity,
-@@ -1199,6 +1199,7 @@ static const match_table_t tokens = {
-       {Opt_stripe, "stripe=%u"},
-       {Opt_delalloc, "delalloc"},
-       {Opt_nodelalloc, "nodelalloc"},
-+      {Opt_dirdata, "dirdata"},
-       {Opt_removed, "mblk_io_submit"},
-       {Opt_removed, "nomblk_io_submit"},
-       {Opt_block_validity, "block_validity"},
-@@ -1409,6 +1410,7 @@ static const struct mount_opts {
-       {Opt_usrjquota, 0, MOPT_Q},
-       {Opt_grpjquota, 0, MOPT_Q},
-       {Opt_offusrjquota, 0, MOPT_Q},
-+      {Opt_dirdata, EXT4_MOUNT_DIRDATA, MOPT_SET},
-       {Opt_offgrpjquota, 0, MOPT_Q},
-       {Opt_jqfmt_vfsold, QFMT_VFS_OLD, MOPT_QFMT},
-       {Opt_jqfmt_vfsv0, QFMT_VFS_V0, MOPT_QFMT},
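
The removed sles12 ext4-data-in-dirent.patch lets Lustre pack extra data
(the FID) into an ext4 dirent directly after the filename's NUL
terminator: a high bit of the file_type byte (EXT4_DIRENT_LUFID) flags
its presence, the first byte of each extension holds its length, and the
space is accounted inside de->rec_len. The toy program below mirrors
that layout and the length walk done by ext4_get_dirent_data_len(); the
struct and names are simplified stand-ins, not the on-disk
ext4_dir_entry_2.

#include <stdio.h>
#include <string.h>

#define FT_MASK      0x0f  /* low bits: regular file type           */
#define DIRENT_LUFID 0x10  /* high bit: Lustre FID follows the name */

struct toy_dirent {
	unsigned char name_len;
	unsigned char file_type;
	char          name[64];  /* name, NUL, then packed extensions */
};

/* Mirrors the idea of ext4_get_dirent_data_len(): walk the extension
 * flag bits and sum the per-extension lengths stored after the name. */
static int dirent_data_len(const struct toy_dirent *de)
{
	const char *p = de->name + de->name_len + 1;  /* skip name + NUL */
	unsigned char flags = (de->file_type & ~FT_MASK) >> 4;
	int dlen = 0;

	while (flags) {
		if (flags & 1) {
			dlen += *p + (dlen == 0);  /* +1 once for the NUL */
			p += *p;                   /* length byte counts itself */
		}
		flags >>= 1;
	}
	return dlen;
}

int main(void)
{
	struct toy_dirent de = { .file_type = DIRENT_LUFID };
	/* 5 = total extension size (length byte + 4 payload bytes) */
	const char fid[] = { 5, 'F', 'I', 'D', '!' };

	de.name_len = 3;
	memcpy(de.name, "foo", 3);
	de.name[3] = '\0';
	memcpy(de.name + 4, fid, sizeof(fid));

	printf("dirent data length (NUL + extension): %d\n",
	       dirent_data_len(&de));
	return 0;
}

In the removed patch this same accounting feeds EXT4_DIR_REC_LEN(de), so
rec_len always covers the name, the NUL terminator, and any packed
extensions.
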
diff --git a/ldiskfs/kernel_patches/patches/sles12/ext4-inode-version.patch b/ldiskfs/kernel_patches/patches/sles12/ext4-inode-version.patch
deleted file mode 100644 (file)
index 6aff973..0000000
+++ /dev/null
@@ -1,59 +0,0 @@
-Index: linux-3.12.39-47.1/fs/ext4/inode.c
-===================================================================
---- linux-3.12.39-47.1.orig/fs/ext4/inode.c
-+++ linux-3.12.39-47.1/fs/ext4/inode.c
-@@ -4166,11 +4166,11 @@ struct inode *ext4_iget(struct super_blo
-       EXT4_INODE_GET_XTIME(i_atime, inode, raw_inode);
-       EXT4_EINODE_GET_XTIME(i_crtime, ei, raw_inode);
--      inode->i_version = le32_to_cpu(raw_inode->i_disk_version);
-+      ei->i_fs_version = le32_to_cpu(raw_inode->i_disk_version);
-       if (EXT4_INODE_SIZE(inode->i_sb) > EXT4_GOOD_OLD_INODE_SIZE) {
-               if (EXT4_FITS_IN_INODE(raw_inode, ei, i_version_hi))
--                      inode->i_version |=
--                      (__u64)(le32_to_cpu(raw_inode->i_version_hi)) << 32;
-+                      ei->i_fs_version |= (__u64)(le32_to_cpu(raw_inode->i_version_hi))
-+                                                                       << 32;
-       }
-       ret = 0;
-@@ -4393,11 +4393,11 @@ static int ext4_do_update_inode(handle_t
-                       raw_inode->i_block[block] = ei->i_data[block];
-       }
--      raw_inode->i_disk_version = cpu_to_le32(inode->i_version);
-+      raw_inode->i_disk_version = cpu_to_le32(ei->i_fs_version);
-       if (ei->i_extra_isize) {
-               if (EXT4_FITS_IN_INODE(raw_inode, ei, i_version_hi))
--                      raw_inode->i_version_hi =
--                      cpu_to_le32(inode->i_version >> 32);
-+                      raw_inode->i_version_hi = cpu_to_le32(ei->i_fs_version
-+                                                            >> 32);
-               raw_inode->i_extra_isize = cpu_to_le16(ei->i_extra_isize);
-       }
-Index: linux-3.12.39-47.1/fs/ext4/ialloc.c
-===================================================================
---- linux-3.12.39-47.1.orig/fs/ext4/ialloc.c
-+++ linux-3.12.39-47.1/fs/ext4/ialloc.c
-@@ -973,6 +973,7 @@ got:
-       ei->i_dtime = 0;
-       ei->i_block_group = group;
-       ei->i_last_alloc_group = ~0;
-+      ei->i_fs_version = 0;
-       ext4_set_inode_flags(inode);
-       if (IS_DIRSYNC(inode))
-Index: linux-3.12.39-47.1/fs/ext4/ext4.h
-===================================================================
---- linux-3.12.39-47.1.orig/fs/ext4/ext4.h
-+++ linux-3.12.39-47.1/fs/ext4/ext4.h
-@@ -929,6 +929,8 @@ struct ext4_inode_info {
-       tid_t i_sync_tid;
-       tid_t i_datasync_tid;
-+      __u64 i_fs_version;
-+
-       /* Precomputed uuid+inum+igen checksum for seeding inode checksums */
-       __u32 i_csum_seed;
- };
diff --git a/ldiskfs/kernel_patches/patches/sles12/ext4-large-eas.patch b/ldiskfs/kernel_patches/patches/sles12/ext4-large-eas.patch
deleted file mode 100644 (file)
index 6dfcb5c..0000000
+++ /dev/null
@@ -1,1092 +0,0 @@
-This patch implements the large EA support in ext4. If the size of
-an EA value is larger than the blocksize, then the EA value would
-not be saved in the external EA block, instead it would be saved
-in an external EA inode. So, the patch also helps support a larger
-number of EAs.
-
-Index: linux-stage/fs/ext4/ext4.h
-===================================================================
---- linux-stage.orig/fs/ext4/ext4.h
-+++ linux-stage/fs/ext4/ext4.h
-@@ -1545,6 +1545,7 @@ static inline void ext4_clear_state_flag
-                                        EXT4_FEATURE_INCOMPAT_EXTENTS| \
-                                        EXT4_FEATURE_INCOMPAT_64BIT| \
-                                        EXT4_FEATURE_INCOMPAT_FLEX_BG| \
-+                                       EXT4_FEATURE_INCOMPAT_EA_INODE| \
-                                        EXT4_FEATURE_INCOMPAT_MMP |    \
-                                        EXT4_FEATURE_INCOMPAT_DIRDATA| \
-                                        EXT4_FEATURE_INCOMPAT_INLINE_DATA)
-@@ -1956,6 +1963,10 @@ struct mmpd_data {
- # define ATTRIB_NORET __attribute__((noreturn))
- # define NORET_AND    noreturn,
-+struct ext4_xattr_ino_array {
-+      unsigned int xia_count;         /* # of used item in the array */
-+      unsigned int xia_inodes[0];
-+};
- /* bitmap.c */
- extern unsigned int ext4_count_free(char *bitmap, unsigned numchars);
- void ext4_inode_bitmap_csum_set(struct super_block *sb, ext4_group_t group,
-@@ -2169,6 +2180,7 @@ extern void ext4_set_inode_flags(struct
- extern void ext4_get_inode_flags(struct ext4_inode_info *);
- extern int ext4_alloc_da_blocks(struct inode *inode);
- extern void ext4_set_aops(struct inode *inode);
-+extern int ext4_meta_trans_blocks(struct inode *, int nrblocks, int chunk);
- extern int ext4_writepage_trans_blocks(struct inode *);
- extern int ext4_chunk_trans_blocks(struct inode *, int nrblocks);
- extern int ext4_block_truncate_page(handle_t *handle,
-Index: linux-stage/fs/ext4/inode.c
-===================================================================
---- linux-stage.orig/fs/ext4/inode.c
-+++ linux-stage/fs/ext4/inode.c
-@@ -135,8 +135,6 @@ static void ext4_invalidatepage(struct p
-                               unsigned int length);
- static int __ext4_journalled_writepage(struct page *page, unsigned int len);
- static int ext4_bh_delay_or_unwritten(handle_t *handle, struct buffer_head *bh);
--static int ext4_meta_trans_blocks(struct inode *inode, int lblocks,
--                                int pextents);
- /*
-  * Test whether an inode is a fast symlink.
-@@ -182,6 +180,8 @@ void ext4_evict_inode(struct inode *inod
- {
-       handle_t *handle;
-       int err;
-+      int extra_credits = 3;
-+      struct ext4_xattr_ino_array *lea_ino_array = NULL;
-       trace_ext4_evict_inode(inode);
-@@ -235,8 +235,8 @@ void ext4_evict_inode(struct inode *inod
-        * protection against it
-        */
-       sb_start_intwrite(inode->i_sb);
--      handle = ext4_journal_start(inode, EXT4_HT_TRUNCATE,
--                                  ext4_blocks_for_truncate(inode)+3);
-+
-+      handle = ext4_journal_start(inode, EXT4_HT_TRUNCATE, extra_credits);
-       if (IS_ERR(handle)) {
-               ext4_std_error(inode->i_sb, PTR_ERR(handle));
-               /*
-@@ -251,6 +251,32 @@ void ext4_evict_inode(struct inode *inod
-       if (IS_SYNC(inode))
-               ext4_handle_sync(handle);
-+
-+      /* Delete xattr inode before deleting the main inode. */
-+      err = ext4_xattr_delete_inode(handle, inode, &lea_ino_array);
-+      if (err) {
-+              ext4_warning(inode->i_sb,
-+                           "couldn't delete inode's xattr (err %d)", err);
-+              goto stop_handle;
-+      }
-+
-+      if (!IS_NOQUOTA(inode))
-+              extra_credits += 2 * EXT4_QUOTA_DEL_BLOCKS(inode->i_sb);
-+
-+      if (!ext4_handle_has_enough_credits(handle,
-+                      ext4_blocks_for_truncate(inode) + extra_credits)) {
-+              err = ext4_journal_extend(handle,
-+                      ext4_blocks_for_truncate(inode) + extra_credits);
-+              if (err > 0)
-+                      err = ext4_journal_restart(handle,
-+                      ext4_blocks_for_truncate(inode) + extra_credits);
-+              if (err != 0) {
-+                      ext4_warning(inode->i_sb,
-+                                   "couldn't extend journal (err %d)", err);
-+                      goto stop_handle;
-+              }
-+      }
-+
-       inode->i_size = 0;
-       err = ext4_mark_inode_dirty(handle, inode);
-       if (err) {
-@@ -269,10 +296,10 @@ void ext4_evict_inode(struct inode *inod
-        * enough credits left in the handle to remove the inode from
-        * the orphan list and set the dtime field.
-        */
--      if (!ext4_handle_has_enough_credits(handle, 3)) {
--              err = ext4_journal_extend(handle, 3);
-+      if (!ext4_handle_has_enough_credits(handle, extra_credits)) {
-+              err = ext4_journal_extend(handle, extra_credits);
-               if (err > 0)
--                      err = ext4_journal_restart(handle, 3);
-+                      err = ext4_journal_restart(handle, extra_credits);
-               if (err != 0) {
-                       ext4_warning(inode->i_sb,
-                                    "couldn't extend journal (err %d)", err);
-@@ -307,6 +334,9 @@ void ext4_evict_inode(struct inode *inod
-               ext4_free_inode(handle, inode);
-       ext4_journal_stop(handle);
-       sb_end_intwrite(inode->i_sb);
-+
-+      if (lea_ino_array != NULL)
-+              ext4_xattr_inode_array_free(inode, lea_ino_array);
-       return;
- no_delete:
-       ext4_clear_inode(inode);        /* We must guarantee clearing of inode... */
-@@ -4777,7 +4807,7 @@ static int ext4_index_trans_blocks(struc
-  *
-  * Also account for superblock, inode, quota and xattr blocks
-  */
--static int ext4_meta_trans_blocks(struct inode *inode, int lblocks,
-+int ext4_meta_trans_blocks(struct inode *inode, int lblocks,
-                                 int pextents)
- {
-       ext4_group_t groups, ngroups = ext4_get_groups_count(inode->i_sb);
-Index: linux-stage/fs/ext4/xattr.c
-===================================================================
---- linux-stage.orig/fs/ext4/xattr.c
-+++ linux-stage/fs/ext4/xattr.c
-@@ -201,6 +201,7 @@ ext4_xattr_check_names(struct ext4_xattr
-       while (!IS_LAST_ENTRY(entry)) {
-               if (entry->e_value_size != 0 &&
-+                  entry->e_value_inum == 0 &&
-                   (value_start + le16_to_cpu(entry->e_value_offs) <
-                    (void *)e + sizeof(__u32) ||
-                    value_start + le16_to_cpu(entry->e_value_offs) +
-@@ -233,19 +233,26 @@ ext4_xattr_check_block(struct inode *ino
- }
- static inline int
--ext4_xattr_check_entry(struct ext4_xattr_entry *entry, size_t size)
-+ext4_xattr_check_entry(struct ext4_xattr_entry *entry, size_t size,
-+                     struct inode *inode)
- {
-       size_t value_size = le32_to_cpu(entry->e_value_size);
--      if (entry->e_value_block != 0 || value_size > size ||
-+      if (!entry->e_value_inum &&
-           le16_to_cpu(entry->e_value_offs) + value_size > size)
-+              return -EIO;
-+      if (entry->e_value_inum &&
-+          (le32_to_cpu(entry->e_value_inum) < EXT4_FIRST_INO(inode->i_sb) ||
-+           le32_to_cpu(entry->e_value_inum) >
-+           le32_to_cpu(EXT4_SB(inode->i_sb)->s_es->s_inodes_count)))
-               return -EIO;
-       return 0;
- }
- static int
- ext4_xattr_find_entry(struct ext4_xattr_entry **pentry, int name_index,
--                    const char *name, size_t size, int sorted)
-+                    const char *name, size_t size, int sorted,
-+                    struct inode *inode)
- {
-       struct ext4_xattr_entry *entry;
-       size_t name_len;
-@@ -265,11 +272,109 @@ ext4_xattr_find_entry(struct ext4_xattr_
-                       break;
-       }
-       *pentry = entry;
--      if (!cmp && ext4_xattr_check_entry(entry, size))
-+      if (!cmp && ext4_xattr_check_entry(entry, size, inode))
-                       return -EIO;
-       return cmp ? -ENODATA : 0;
- }
-+/*
-+ * Read the EA value from an inode.
-+ */
-+static int ext4_xattr_inode_read(struct inode *ea_inode, void *buf, size_t *size)
-+{
-+      unsigned long block = 0;
-+      struct buffer_head *bh = NULL;
-+      int err, blocksize;
-+      size_t csize, ret_size = 0;
-+
-+      if (*size == 0)
-+              return 0;
-+
-+      blocksize = ea_inode->i_sb->s_blocksize;
-+
-+      while (ret_size < *size) {
-+              csize = (*size - ret_size) > blocksize ? blocksize :
-+                                                      *size - ret_size;
-+              bh = ext4_bread(NULL, ea_inode, block, 0, &err);
-+              if (!bh) {
-+                      *size = ret_size;
-+                      return err;
-+              }
-+              memcpy(buf, bh->b_data, csize);
-+              brelse(bh);
-+
-+              buf += csize;
-+              block += 1;
-+              ret_size += csize;
-+      }
-+
-+      *size = ret_size;
-+
-+      return err;
-+}
-+
-+/*
-+ * Fetch the xattr inode from disk.
-+ *
-+ * The xattr inode stores the parent inode number and generation so that
-+ * the kernel and e2fsck can verify the xattr inode is valid upon access.
-+ */
-+struct inode *ext4_xattr_inode_iget(struct inode *parent,
-+                                  unsigned long ea_ino, int *err)
-+{
-+      struct inode *ea_inode = NULL;
-+
-+      ea_inode = ext4_iget(parent->i_sb, ea_ino);
-+      if (IS_ERR(ea_inode) || is_bad_inode(ea_inode)) {
-+              int rc = IS_ERR(ea_inode) ? PTR_ERR(ea_inode) : 0;
-+              ext4_error(parent->i_sb, "error while reading EA inode %lu "
-+                         "/ %d %d", ea_ino, rc, is_bad_inode(ea_inode));
-+              *err = rc != 0 ? rc : -EIO;
-+              return NULL;
-+      }
-+
-+      if (EXT4_XATTR_INODE_GET_PARENT(ea_inode) != parent->i_ino ||
-+          ea_inode->i_generation != parent->i_generation) {
-+              ext4_error(parent->i_sb, "Backpointer from EA inode %lu "
-+                         "to parent invalid.", ea_ino);
-+              *err = -EINVAL;
-+              goto error;
-+      }
-+
-+      if (!(EXT4_I(ea_inode)->i_flags & EXT4_EA_INODE_FL)) {
-+              ext4_error(parent->i_sb, "EA inode %lu does not have "
-+                         "EXT4_EA_INODE_FL flag set.\n", ea_ino);
-+              *err = -EINVAL;
-+              goto error;
-+      }
-+
-+      *err = 0;
-+      return ea_inode;
-+
-+error:
-+      iput(ea_inode);
-+      return NULL;
-+}
-+
-+/*
-+ * Read the value from the EA inode.
-+ */
-+static int ext4_xattr_inode_get(struct inode *inode, unsigned long ea_ino,
-+                              void *buffer, size_t *size)
-+{
-+      struct inode *ea_inode = NULL;
-+      int err;
-+
-+      ea_inode = ext4_xattr_inode_iget(inode, ea_ino, &err);
-+      if (err)
-+              return err;
-+
-+      err = ext4_xattr_inode_read(ea_inode, buffer, size);
-+      iput(ea_inode);
-+
-+      return err;
-+}
-+
- static int
- ext4_xattr_block_get(struct inode *inode, int name_index, const char *name,
-                    void *buffer, size_t buffer_size)
-@@ -301,7 +400,8 @@ bad_block:
-       }
-       ext4_xattr_cache_insert(bh);
-       entry = BFIRST(bh);
--      error = ext4_xattr_find_entry(&entry, name_index, name, bh->b_size, 1);
-+      error = ext4_xattr_find_entry(&entry, name_index, name, bh->b_size, 1,
-+                                    inode);
-       if (error == -EIO)
-               goto bad_block;
-       if (error)
-@@ -311,8 +411,16 @@ bad_block:
-               error = -ERANGE;
-               if (size > buffer_size)
-                       goto cleanup;
--              memcpy(buffer, bh->b_data + le16_to_cpu(entry->e_value_offs),
--                     size);
-+              if (entry->e_value_inum) {
-+                      error = ext4_xattr_inode_get(inode,
-+                                           le32_to_cpu(entry->e_value_inum),
-+                                           buffer, &size);
-+                      if (error)
-+                              goto cleanup;
-+              } else {
-+                      memcpy(buffer, bh->b_data +
-+                             le16_to_cpu(entry->e_value_offs), size);
-+              }
-       }
-       error = size;
-@@ -346,7 +454,7 @@ ext4_xattr_ibody_get(struct inode *inode
-       if (error)
-               goto cleanup;
-       error = ext4_xattr_find_entry(&entry, name_index, name,
--                                    end - (void *)entry, 0);
-+                                    end - (void *)entry, 0, inode);
-       if (error)
-               goto cleanup;
-       size = le32_to_cpu(entry->e_value_size);
-@@ -354,8 +462,16 @@ ext4_xattr_ibody_get(struct inode *inode
-               error = -ERANGE;
-               if (size > buffer_size)
-                       goto cleanup;
--              memcpy(buffer, (void *)IFIRST(header) +
--                     le16_to_cpu(entry->e_value_offs), size);
-+              if (entry->e_value_inum) {
-+                      error = ext4_xattr_inode_get(inode,
-+                                           le32_to_cpu(entry->e_value_inum),
-+                                           buffer, &size);
-+                      if (error)
-+                              goto cleanup;
-+              } else {
-+                      memcpy(buffer, (void *)IFIRST(header) +
-+                             le16_to_cpu(entry->e_value_offs), size);
-+              }
-       }
-       error = size;
-@@ -597,7 +713,7 @@ static size_t ext4_xattr_free_space(stru
- {
-       for (; !IS_LAST_ENTRY(last); last = EXT4_XATTR_NEXT(last)) {
-               *total += EXT4_XATTR_LEN(last->e_name_len);
--              if (!last->e_value_block && last->e_value_size) {
-+              if (!last->e_value_inum && last->e_value_size) {
-                       size_t offs = le16_to_cpu(last->e_value_offs);
-                       if (offs < *min_offs)
-                               *min_offs = offs;
-@@ -606,16 +722,196 @@ static size_t ext4_xattr_free_space(stru
-       return (*min_offs - ((void *)last - base) - sizeof(__u32));
- }
-+/*
-+ * Write the value of the EA in an inode.
-+ */
- static int
--ext4_xattr_set_entry(struct ext4_xattr_info *i, struct ext4_xattr_search *s)
-+ext4_xattr_inode_write(handle_t *handle, struct inode *ea_inode,
-+                     const void *buf, int bufsize)
-+{
-+      struct buffer_head *bh = NULL;
-+      unsigned long block = 0;
-+      unsigned blocksize = ea_inode->i_sb->s_blocksize;
-+      unsigned max_blocks = (bufsize + blocksize - 1) >> ea_inode->i_blkbits;
-+      int csize, wsize = 0;
-+      int ret = 0;
-+      int retries = 0;
-+
-+retry:
-+      while (ret >= 0 && ret < max_blocks) {
-+              struct ext4_map_blocks map;
-+              map.m_lblk = block += ret;
-+              map.m_len = max_blocks -= ret;
-+
-+              ret = ext4_map_blocks(handle, ea_inode, &map,
-+                                    EXT4_GET_BLOCKS_CREATE);
-+              if (ret <= 0) {
-+                      ext4_mark_inode_dirty(handle, ea_inode);
-+                      if (ret == -ENOSPC &&
-+                          ext4_should_retry_alloc(ea_inode->i_sb, &retries)) {
-+                              ret = 0;
-+                              goto retry;
-+                      }
-+                      break;
-+              }
-+      }
-+
-+      if (ret < 0)
-+              return ret;
-+
-+      block = 0;
-+      while (wsize < bufsize) {
-+              if (bh != NULL)
-+                      brelse(bh);
-+              csize = (bufsize - wsize) > blocksize ? blocksize :
-+                                                              bufsize - wsize;
-+              bh = ext4_getblk(handle, ea_inode, block, 0, &ret);
-+              if (!bh)
-+                      goto out;
-+              ret = ext4_journal_get_write_access(handle, bh);
-+              if (ret)
-+                      goto out;
-+
-+              memcpy(bh->b_data, buf, csize);
-+              set_buffer_uptodate(bh);
-+              ext4_handle_dirty_metadata(handle, ea_inode, bh);
-+
-+              buf += csize;
-+              wsize += csize;
-+              block += 1;
-+      }
-+
-+      i_size_write(ea_inode, wsize);
-+      ext4_update_i_disksize(ea_inode, wsize);
-+
-+      ext4_mark_inode_dirty(handle, ea_inode);
-+
-+out:
-+      brelse(bh);
-+
-+      return ret;
-+}
-+
-+static void ext4_xattr_inode_set_ref(struct inode *ea_inode, __u64 ref_count)
-+{
-+       ea_inode->i_ctime.tv_sec = (__u32)(ref_count >> 32);
-+       ea_inode->i_version = (__u32)ref_count;
-+}
-+
-+static void ext4_xattr_inode_set_hash(struct inode *ea_inode, __u32 hash)
-+{
-+       ea_inode->i_atime.tv_sec = hash;
-+}
-+
-+/*
-+ * Create an inode to store the value of a large EA.
-+ */
-+static struct inode *
-+ext4_xattr_inode_create(handle_t *handle, struct inode *inode, __u32 hash)
-+{
-+      struct inode *ea_inode = NULL;
-+
-+      /*
-+       * Let the next inode be the goal, so we try and allocate the EA inode
-+       * in the same group, or nearby one.
-+       */
-+      ea_inode = ext4_new_inode(handle, inode->i_sb->s_root->d_inode,
-+                                S_IFREG|0600, NULL, inode->i_ino + 1, NULL);
-+
-+      if (!IS_ERR(ea_inode)) {
-+              ea_inode->i_op = &ext4_file_inode_operations;
-+              ea_inode->i_fop = &ext4_file_operations;
-+              ext4_set_aops(ea_inode);
-+              ea_inode->i_generation = inode->i_generation;
-+              EXT4_I(ea_inode)->i_flags |= EXT4_EA_INODE_FL;
-+
-+              /*
-+               * A back-pointer from EA inode to parent inode will be useful
-+               * for e2fsck.
-+               */
-+              EXT4_XATTR_INODE_SET_PARENT(ea_inode, inode->i_ino);
-+              unlock_new_inode(ea_inode);
-+
-+               ext4_xattr_inode_set_ref(ea_inode, 1);
-+               ext4_xattr_inode_set_hash(ea_inode, hash);
-+      }
-+
-+      return ea_inode;
-+}
-+
-+/*
-+ * Unlink the inode storing the value of the EA.
-+ */
-+int
-+ext4_xattr_inode_unlink(struct inode *inode, unsigned long ea_ino)
-+{
-+      struct inode *ea_inode = NULL;
-+      int err;
-+
-+      ea_inode = ext4_xattr_inode_iget(inode, ea_ino, &err);
-+      if (err)
-+              return err;
-+
-+      clear_nlink(ea_inode);
-+      iput(ea_inode);
-+
-+      return 0;
-+}
-+
-+static __u32
-+ext4_xattr_inode_hash(struct ext4_sb_info *sbi, const void *buffer, size_t size)
-+{
-+      if (ext4_has_metadata_csum(sbi->s_sb))
-+              return ext4_chksum(sbi, sbi->s_csum_seed, buffer, size);
-+      return 0;
-+}
-+
-+/*
-+ * Add value of the EA in an inode.
-+ */
-+static int
-+ext4_xattr_inode_set(handle_t *handle, struct inode *inode, unsigned long *ea_ino,
-+                   const void *value, size_t value_len)
-+{
-+      struct inode *ea_inode = NULL;
-+      __u32 hash;
-+      int err;
-+
-+      /* Create an inode for the EA value */
-+      hash = ext4_xattr_inode_hash(EXT4_SB(inode->i_sb), value, value_len);
-+      ea_inode = ext4_xattr_inode_create(handle, inode, hash);
-+      if (IS_ERR(ea_inode))
-+              return -1;
-+
-+      err = ext4_xattr_inode_write(handle, ea_inode, value, value_len);
-+      if (err)
-+              clear_nlink(ea_inode);
-+      else
-+              *ea_ino = ea_inode->i_ino;
-+
-+      iput(ea_inode);
-+
-+      return err;
-+}
-+
-+static int
-+ext4_xattr_set_entry(struct ext4_xattr_info *i, struct ext4_xattr_search *s,
-+                   handle_t *handle, struct inode *inode)
- {
-       struct ext4_xattr_entry *last;
-       size_t free, min_offs = s->end - s->base, name_len = strlen(i->name);
-+      int in_inode = i->in_inode;
-+
-+      if (EXT4_HAS_INCOMPAT_FEATURE(inode->i_sb,
-+               EXT4_FEATURE_INCOMPAT_EA_INODE) &&
-+          (EXT4_XATTR_SIZE(i->value_len) >
-+           EXT4_XATTR_MIN_LARGE_EA_SIZE(inode->i_sb->s_blocksize)))
-+              in_inode = 1;
-       /* Compute min_offs and last. */
-       last = s->first;
-       for (; !IS_LAST_ENTRY(last); last = EXT4_XATTR_NEXT(last)) {
--              if (!last->e_value_block && last->e_value_size) {
-+              if (!last->e_value_inum && last->e_value_size) {
-                       size_t offs = le16_to_cpu(last->e_value_offs);
-                       if (offs < min_offs)
-                               min_offs = offs;
-@@ -623,16 +919,21 @@ ext4_xattr_set_entry(struct ext4_xattr_i
-       }
-       free = min_offs - ((void *)last - s->base) - sizeof(__u32);
-       if (!s->not_found) {
--              if (!s->here->e_value_block && s->here->e_value_size) {
-+              if (!in_inode &&
-+                  !s->here->e_value_inum && s->here->e_value_size) {
-                       size_t size = le32_to_cpu(s->here->e_value_size);
-                       free += EXT4_XATTR_SIZE(size);
-               }
-               free += EXT4_XATTR_LEN(name_len);
-       }
-       if (i->value) {
--              if (free < EXT4_XATTR_SIZE(i->value_len) ||
--                  free < EXT4_XATTR_LEN(name_len) +
--                         EXT4_XATTR_SIZE(i->value_len))
-+              size_t value_len = EXT4_XATTR_SIZE(i->value_len);
-+
-+              if (in_inode)
-+                      value_len = 0;
-+
-+              if (free < value_len ||
-+                  free < EXT4_XATTR_LEN(name_len) + value_len)
-                       return -ENOSPC;
-       }
-@@ -646,7 +947,8 @@ ext4_xattr_set_entry(struct ext4_xattr_i
-               s->here->e_name_len = name_len;
-               memcpy(s->here->e_name, i->name, name_len);
-       } else {
--              if (!s->here->e_value_block && s->here->e_value_size) {
-+              if (!s->here->e_value_inum && s->here->e_value_size &&
-+                  s->here->e_value_offs > 0) {
-                       void *first_val = s->base + min_offs;
-                       size_t offs = le16_to_cpu(s->here->e_value_offs);
-                       void *val = s->base + offs;
-@@ -680,13 +982,18 @@ ext4_xattr_set_entry(struct ext4_xattr_i
-                       last = s->first;
-                       while (!IS_LAST_ENTRY(last)) {
-                               size_t o = le16_to_cpu(last->e_value_offs);
--                              if (!last->e_value_block &&
-+                              if (!last->e_value_inum &&
-                                   last->e_value_size && o < offs)
-                                       last->e_value_offs =
-                                               cpu_to_le16(o + size);
-                               last = EXT4_XATTR_NEXT(last);
-                       }
-               }
-+              if (s->here->e_value_inum) {
-+                      ext4_xattr_inode_unlink(inode,
-+                                      le32_to_cpu(s->here->e_value_inum));
-+                      s->here->e_value_inum = 0;
-+              }
-               if (!i->value) {
-                       /* Remove the old name. */
-                       size_t size = EXT4_XATTR_LEN(name_len);
-@@ -700,10 +1006,17 @@ ext4_xattr_set_entry(struct ext4_xattr_i
-       if (i->value) {
-               /* Insert the new value. */
-               s->here->e_value_size = cpu_to_le32(i->value_len);
--              if (i->value_len) {
-+              if (in_inode) {
-+                      unsigned long ea_ino = le32_to_cpu(s->here->e_value_inum);
-+                      ext4_xattr_inode_set(handle, inode, &ea_ino, i->value,
-+                                           i->value_len);
-+                      s->here->e_value_inum = cpu_to_le32(ea_ino);
-+                      s->here->e_value_offs = 0;
-+              } else if (i->value_len) {
-                       size_t size = EXT4_XATTR_SIZE(i->value_len);
-                       void *val = s->base + min_offs - size;
-                       s->here->e_value_offs = cpu_to_le16(min_offs - size);
-+                      s->here->e_value_inum = 0;
-                       if (i->value == EXT4_ZERO_XATTR_VALUE) {
-                               memset(val, 0, size);
-                       } else {
-@@ -753,7 +1066,7 @@ ext4_xattr_block_find(struct inode *inod
-               bs->s.end = bs->bh->b_data + bs->bh->b_size;
-               bs->s.here = bs->s.first;
-               error = ext4_xattr_find_entry(&bs->s.here, i->name_index,
--                                            i->name, bs->bh->b_size, 1);
-+                                            i->name, bs->bh->b_size, 1, inode);
-               if (error && error != -ENODATA)
-                       goto cleanup;
-               bs->s.not_found = error;
-@@ -777,8 +1090,6 @@ ext4_xattr_block_set(handle_t *handle, s
- #define header(x) ((struct ext4_xattr_header *)(x))
--      if (i->value && i->value_len > sb->s_blocksize)
--              return -ENOSPC;
-       if (s->base) {
-               ce = mb_cache_entry_get(ext4_xattr_cache, bs->bh->b_bdev,
-                                       bs->bh->b_blocknr);
-@@ -794,7 +1105,7 @@ ext4_xattr_block_set(handle_t *handle, s
-                               ce = NULL;
-                       }
-                       ea_bdebug(bs->bh, "modifying in-place");
--                      error = ext4_xattr_set_entry(i, s);
-+                      error = ext4_xattr_set_entry(i, s, handle, inode);
-                       if (!error) {
-                               if (!IS_LAST_ENTRY(s->first))
-                                       ext4_xattr_rehash(header(s->base),
-@@ -845,7 +1156,7 @@ ext4_xattr_block_set(handle_t *handle, s
-               s->end = s->base + sb->s_blocksize;
-       }
--      error = ext4_xattr_set_entry(i, s);
-+      error = ext4_xattr_set_entry(i, s, handle, inode);
-       if (error == -EIO)
-               goto bad_block;
-       if (error)
-@@ -994,7 +1305,7 @@ int ext4_xattr_ibody_find(struct inode *
-               /* Find the named attribute. */
-               error = ext4_xattr_find_entry(&is->s.here, i->name_index,
-                                             i->name, is->s.end -
--                                            (void *)is->s.base, 0);
-+                                            (void *)is->s.base, 0, inode);
-               if (error && error != -ENODATA)
-                       return error;
-               is->s.not_found = error;
-@@ -1012,7 +1323,7 @@ int ext4_xattr_ibody_inline_set(handle_t
-       if (EXT4_I(inode)->i_extra_isize == 0)
-               return -ENOSPC;
--      error = ext4_xattr_set_entry(i, s);
-+      error = ext4_xattr_set_entry(i, s, handle, inode);
-       if (error) {
-               if (error == -ENOSPC &&
-                   ext4_has_inline_data(inode)) {
-@@ -1024,7 +1335,7 @@ int ext4_xattr_ibody_inline_set(handle_t
-                       error = ext4_xattr_ibody_find(inode, i, is);
-                       if (error)
-                               return error;
--                      error = ext4_xattr_set_entry(i, s);
-+                      error = ext4_xattr_set_entry(i, s, handle, inode);
-               }
-               if (error)
-                       return error;
-@@ -1050,7 +1361,7 @@ static int ext4_xattr_ibody_set(handle_t
-       if (EXT4_I(inode)->i_extra_isize == 0)
-               return -ENOSPC;
--      error = ext4_xattr_set_entry(i, s);
-+      error = ext4_xattr_set_entry(i, s, handle, inode);
-       if (error)
-               return error;
-       header = IHDR(inode, ext4_raw_inode(&is->iloc));
-@@ -1086,7 +1397,7 @@ ext4_xattr_set_handle(handle_t *handle,
-               .name = name,
-               .value = value,
-               .value_len = value_len,
--
-+              .in_inode = 0,
-       };
-       struct ext4_xattr_ibody_find is = {
-               .s = { .not_found = -ENODATA, },
-@@ -1151,6 +1462,15 @@ ext4_xattr_set_handle(handle_t *handle,
-                                       goto cleanup;
-                       }
-                       error = ext4_xattr_block_set(handle, inode, &i, &bs);
-+                      if (EXT4_HAS_INCOMPAT_FEATURE(inode->i_sb,
-+                                      EXT4_FEATURE_INCOMPAT_EA_INODE) &&
-+                          error == -ENOSPC) {
-+                              /* xattr not fit to block, store at external
-+                               * inode */
-+                              i.in_inode = 1;
-+                              error = ext4_xattr_ibody_set(handle, inode,
-+                                                           &i, &is);
-+                      }
-                       if (error)
-                               goto cleanup;
-                       if (!is.s.not_found) {
-@@ -1197,9 +1517,22 @@ ext4_xattr_set(struct inode *inode, int
-              const void *value, size_t value_len, int flags)
- {
-       handle_t *handle;
-+      struct super_block *sb = inode->i_sb;
-       int error, retries = 0;
-       int credits = ext4_jbd2_credits_xattr(inode);
-+      if ((value_len >= EXT4_XATTR_MIN_LARGE_EA_SIZE(sb->s_blocksize)) &&
-+          EXT4_HAS_INCOMPAT_FEATURE(sb, EXT4_FEATURE_INCOMPAT_EA_INODE)) {
-+              int nrblocks = (value_len + sb->s_blocksize - 1) >>
-+                                      sb->s_blocksize_bits;
-+
-+              /* For new inode */
-+              credits += EXT4_SINGLEDATA_TRANS_BLOCKS(sb) + 3;
-+
-+              /* For data blocks of EA inode */
-+              credits += ext4_meta_trans_blocks(inode, nrblocks, 0);
-+      }
-+
- retry:
-       handle = ext4_journal_start(inode, EXT4_HT_XATTR, credits);
-       if (IS_ERR(handle)) {
-@@ -1211,7 +1544,7 @@ retry:
-                                             value, value_len, flags);
-               error2 = ext4_journal_stop(handle);
-               if (error == -ENOSPC &&
--                  ext4_should_retry_alloc(inode->i_sb, &retries))
-+                  ext4_should_retry_alloc(sb, &retries))
-                       goto retry;
-               if (error == 0)
-                       error = error2;
-@@ -1233,7 +1566,7 @@ static void ext4_xattr_shift_entries(str
-       /* Adjust the value offsets of the entries */
-       for (; !IS_LAST_ENTRY(last); last = EXT4_XATTR_NEXT(last)) {
--              if (!last->e_value_block && last->e_value_size) {
-+              if (!last->e_value_inum && last->e_value_size) {
-                       new_offs = le16_to_cpu(last->e_value_offs) +
-                                                       value_offs_shift;
-                       BUG_ON(new_offs + le32_to_cpu(last->e_value_size)
-@@ -1472,21 +1805,135 @@ cleanup:
- }
-+#define EIA_INCR 16 /* must be 2^n */
-+#define EIA_MASK (EIA_INCR - 1)
-+/* Add the large xattr @ino into @lea_ino_array for later deletion.
-+ * If @lea_ino_array is new or full it will be grown and the old
-+ * contents copied over.
-+ */
-+static int
-+ext4_expand_ino_array(struct ext4_xattr_ino_array **lea_ino_array, __u32 ino)
-+{
-+      if (*lea_ino_array == NULL) {
-+              /*
-+               * Start with 15 inodes, so it fits into a power-of-two size.
-+               * If *lea_ino_array is NULL, this is essentially offsetof()
-+               */
-+              (*lea_ino_array) =
-+                      kmalloc(offsetof(struct ext4_xattr_ino_array,
-+                                       xia_inodes[EIA_MASK]),
-+                              GFP_NOFS);
-+              if (*lea_ino_array == NULL)
-+                      return -ENOMEM;
-+              (*lea_ino_array)->xia_count = 0;
-+      } else if (((*lea_ino_array)->xia_count & EIA_MASK) == EIA_MASK) {
-+              /* expand the array once all 15 + n * 16 slots are full */
-+              struct ext4_xattr_ino_array *new_array = NULL;
-+              int count = (*lea_ino_array)->xia_count;
-+
-+              /* if new_array is NULL, this is essentially offsetof() */
-+              new_array = kmalloc(
-+                              offsetof(struct ext4_xattr_ino_array,
-+                                       xia_inodes[count + EIA_INCR]),
-+                              GFP_NOFS);
-+              if (new_array == NULL)
-+                      return -ENOMEM;
-+              memcpy(new_array, *lea_ino_array,
-+                     offsetof(struct ext4_xattr_ino_array,
-+                              xia_inodes[count]));
-+              kfree(*lea_ino_array);
-+              *lea_ino_array = new_array;
-+      }
-+      (*lea_ino_array)->xia_inodes[(*lea_ino_array)->xia_count++] = ino;
-+      return 0;
-+}
-+
-+/**
-+ * Add xattr inode to orphan list
-+ */
-+static int
-+ext4_xattr_inode_orphan_add(handle_t *handle, struct inode *inode,
-+                      int credits, struct ext4_xattr_ino_array *lea_ino_array)
-+{
-+      struct inode *ea_inode = NULL;
-+      int idx = 0, error = 0;
-+
-+      if (lea_ino_array == NULL)
-+              return 0;
-+
-+      for (; idx < lea_ino_array->xia_count; ++idx) {
-+              if (!ext4_handle_has_enough_credits(handle, credits)) {
-+                      error = ext4_journal_extend(handle, credits);
-+                      if (error > 0)
-+                              error = ext4_journal_restart(handle, credits);
-+
-+                      if (error != 0) {
-+                              ext4_warning(inode->i_sb,
-+                                      "couldn't extend journal "
-+                                      "(err %d)", error);
-+                              return error;
-+                      }
-+              }
-+              ea_inode = ext4_xattr_inode_iget(inode,
-+                              lea_ino_array->xia_inodes[idx], &error);
-+              if (error)
-+                      continue;
-+              ext4_orphan_add(handle, ea_inode);
-+              /* the inode's i_count will be released by caller */
-+      }
-+
-+      return 0;
-+}
- /*
-  * ext4_xattr_delete_inode()
-  *
-- * Free extended attribute resources associated with this inode. This
-+ * Free extended attribute resources associated with this inode. Traverse
-+ * all entries and unlink any xattr inodes associated with this inode. This
-  * is called immediately before an inode is freed. We have exclusive
-- * access to the inode.
-+ * access to the inode. If an orphan inode is deleted it will also delete any
-+ * xattr block and all xattr inodes. They are checked by ext4_xattr_inode_iget()
-+ * to ensure they belong to the parent inode and were not deleted already.
-  */
--void
--ext4_xattr_delete_inode(handle_t *handle, struct inode *inode)
-+int
-+ext4_xattr_delete_inode(handle_t *handle, struct inode *inode,
-+                      struct ext4_xattr_ino_array **lea_ino_array)
- {
-       struct buffer_head *bh = NULL;
-+      struct ext4_xattr_ibody_header *header;
-+      struct ext4_inode *raw_inode;
-+      struct ext4_iloc iloc;
-+      struct ext4_xattr_entry *entry;
-+      int credits = 3, error = 0;
--      if (!EXT4_I(inode)->i_file_acl)
-+      if (!ext4_test_inode_state(inode, EXT4_STATE_XATTR))
-+              goto delete_external_ea;
-+
-+      error = ext4_get_inode_loc(inode, &iloc);
-+      if (error)
-               goto cleanup;
-+      raw_inode = ext4_raw_inode(&iloc);
-+      header = IHDR(inode, raw_inode);
-+      for (entry = IFIRST(header); !IS_LAST_ENTRY(entry);
-+           entry = EXT4_XATTR_NEXT(entry)) {
-+              if (!entry->e_value_inum)
-+                      continue;
-+              if (ext4_expand_ino_array(lea_ino_array,
-+                                        entry->e_value_inum) != 0) {
-+                      brelse(iloc.bh);
-+                      goto cleanup;
-+              }
-+              entry->e_value_inum = 0;
-+      }
-+      brelse(iloc.bh);
-+
-+delete_external_ea:
-+      if (!EXT4_I(inode)->i_file_acl) {
-+              /* add xattr inode to orphan list */
-+              ext4_xattr_inode_orphan_add(handle, inode, credits,
-+                                              *lea_ino_array);
-+              goto cleanup;
-+      }
-       bh = sb_bread(inode->i_sb, EXT4_I(inode)->i_file_acl);
-       if (!bh) {
-               EXT4_ERROR_INODE(inode, "block %llu read error",
-@@ -1499,11 +1946,69 @@ ext4_xattr_delete_inode(handle_t *handle
-                                EXT4_I(inode)->i_file_acl);
-               goto cleanup;
-       }
-+
-+      for (entry = BFIRST(bh); !IS_LAST_ENTRY(entry);
-+           entry = EXT4_XATTR_NEXT(entry)) {
-+              if (!entry->e_value_inum)
-+                      continue;
-+              if (ext4_expand_ino_array(lea_ino_array,
-+                                        entry->e_value_inum) != 0)
-+                      goto cleanup;
-+              entry->e_value_inum = 0;
-+      }
-+
-+      /* add xattr inode to orphan list */
-+      error = ext4_xattr_inode_orphan_add(handle, inode, credits,
-+                                      *lea_ino_array);
-+      if (error != 0)
-+              goto cleanup;
-+
-+      if (!IS_NOQUOTA(inode))
-+              credits += 2 * EXT4_QUOTA_DEL_BLOCKS(inode->i_sb);
-+
-+      if (!ext4_handle_has_enough_credits(handle, credits)) {
-+              error = ext4_journal_extend(handle, credits);
-+              if (error > 0)
-+                      error = ext4_journal_restart(handle, credits);
-+              if (error != 0) {
-+                      ext4_warning(inode->i_sb,
-+                              "couldn't extend journal (err %d)", error);
-+                      goto cleanup;
-+              }
-+      }
-+
-       ext4_xattr_release_block(handle, inode, bh);
-       EXT4_I(inode)->i_file_acl = 0;
- cleanup:
-       brelse(bh);
-+
-+      return error;
-+}
-+
-+void
-+ext4_xattr_inode_array_free(struct inode *inode,
-+                          struct ext4_xattr_ino_array *lea_ino_array)
-+{
-+      struct inode    *ea_inode = NULL;
-+      int             idx = 0;
-+      int             err;
-+
-+      if (lea_ino_array == NULL)
-+              return;
-+
-+      for (; idx < lea_ino_array->xia_count; ++idx) {
-+              ea_inode = ext4_xattr_inode_iget(inode,
-+                              lea_ino_array->xia_inodes[idx], &err);
-+              if (err)
-+                      continue;
-+              /* for inode's i_count get from ext4_xattr_delete_inode */
-+              if (!list_empty(&EXT4_I(ea_inode)->i_orphan))
-+                      iput(ea_inode);
-+              clear_nlink(ea_inode);
-+              iput(ea_inode);
-+      }
-+      kfree(lea_ino_array);
- }
- /*
-@@ -1573,10 +2078,9 @@ ext4_xattr_cmp(struct ext4_xattr_header
-                   entry1->e_name_index != entry2->e_name_index ||
-                   entry1->e_name_len != entry2->e_name_len ||
-                   entry1->e_value_size != entry2->e_value_size ||
-+                  entry1->e_value_inum != entry2->e_value_inum ||
-                   memcmp(entry1->e_name, entry2->e_name, entry1->e_name_len))
-                       return 1;
--              if (entry1->e_value_block != 0 || entry2->e_value_block != 0)
--                      return -EIO;
-               if (memcmp((char *)header1 + le16_to_cpu(entry1->e_value_offs),
-                          (char *)header2 + le16_to_cpu(entry2->e_value_offs),
-                          le32_to_cpu(entry1->e_value_size)))
-@@ -1660,7 +2164,7 @@ static inline void ext4_xattr_hash_entry
-                      *name++;
-       }
--      if (entry->e_value_block == 0 && entry->e_value_size != 0) {
-+      if (!entry->e_value_inum && entry->e_value_size) {
-               __le32 *value = (__le32 *)((char *)header +
-                       le16_to_cpu(entry->e_value_offs));
-               for (n = (le32_to_cpu(entry->e_value_size) +
-Index: linux-stage/fs/ext4/xattr.h
-===================================================================
---- linux-stage.orig/fs/ext4/xattr.h
-+++ linux-stage/fs/ext4/xattr.h
-@@ -42,7 +42,7 @@ struct ext4_xattr_entry {
-       __u8    e_name_len;     /* length of name */
-       __u8    e_name_index;   /* attribute name index */
-       __le16  e_value_offs;   /* offset in disk block of value */
--      __le32  e_value_block;  /* disk block attribute is stored on (n/i) */
-+      __le32  e_value_inum;   /* inode in which the value is stored */
-       __le32  e_value_size;   /* size of attribute value */
-       __le32  e_hash;         /* hash value of name and value */
-       char    e_name[0];      /* attribute name */
-@@ -67,6 +67,26 @@ struct ext4_xattr_entry {
-               EXT4_I(inode)->i_extra_isize))
- #define IFIRST(hdr) ((struct ext4_xattr_entry *)((hdr)+1))
-+/*
-+ * Link EA inode back to parent one using i_mtime field.
-+ * Extra integer type conversion added to ignore higher
-+ * bits in i_mtime.tv_sec which might be set by ext4_get()
-+ */
-+#define EXT4_XATTR_INODE_SET_PARENT(inode, inum)      \
-+do {                                                  \
-+      (inode)->i_mtime.tv_sec = inum;                 \
-+} while(0)
-+
-+#define EXT4_XATTR_INODE_GET_PARENT(inode)            \
-+      ((__u32)(inode)->i_mtime.tv_sec)
-+
-+/*
-+ * The minimum size of EA value when you start storing it in an external inode
-+ * size of block - size of header - size of 1 entry - 4 null bytes
-+*/
-+#define EXT4_XATTR_MIN_LARGE_EA_SIZE(b)                                       \
-+      ((b) - EXT4_XATTR_LEN(3) - sizeof(struct ext4_xattr_header) - 4)
-+
- #define BHDR(bh) ((struct ext4_xattr_header *)((bh)->b_data))
- #define ENTRY(ptr) ((struct ext4_xattr_entry *)(ptr))
- #define BFIRST(bh) ENTRY(BHDR(bh)+1)
-@@ -75,10 +84,11 @@ struct ext4_xattr_entry {
- #define EXT4_ZERO_XATTR_VALUE ((void *)-1)
- struct ext4_xattr_info {
--      int name_index;
-       const char *name;
-       const void *value;
-       size_t value_len;
-+      int name_index;
-+      int in_inode;
- };
- struct ext4_xattr_search {
-@@ -106,7 +116,13 @@ extern int ext4_xattr_get(struct inode *
- extern int ext4_xattr_set(struct inode *, int, const char *, const void *, size_t, int);
- extern int ext4_xattr_set_handle(handle_t *, struct inode *, int, const char *, const void *, size_t, int);
--extern void ext4_xattr_delete_inode(handle_t *, struct inode *);
-+extern struct inode *ext4_xattr_inode_iget(struct inode *parent, unsigned long ea_ino,
-+                                         int *err);
-+extern int ext4_xattr_inode_unlink(struct inode *inode, unsigned long ea_ino);
-+extern int ext4_xattr_delete_inode(handle_t *handle, struct inode *inode,
-+                                 struct ext4_xattr_ino_array **array);
-+extern void ext4_xattr_inode_array_free(struct inode *inode,
-+                                      struct ext4_xattr_ino_array *array);
- extern void ext4_xattr_put_super(struct super_block *);
- extern int ext4_expand_extra_isize_ea(struct inode *inode, int new_extra_isize,
-Index: linux-stage/fs/ext4/ialloc.c
-===================================================================
---- linux-stage.orig/fs/ext4/ialloc.c
-+++ linux-stage/fs/ext4/ialloc.c
-@@ -252,7 +252,6 @@ void ext4_free_inode(handle_t *handle, s
-        * as writing the quota to disk may need the lock as well.
-        */
-       dquot_initialize(inode);
--      ext4_xattr_delete_inode(handle, inode);
-       dquot_free_inode(inode);
-       dquot_drop(inode);
-Index: linux-stage/fs/ext4/inline.c
-===================================================================
---- linux-stage.orig/fs/ext4/inline.c
-+++ linux-stage/fs/ext4/inline.c
-@@ -59,7 +59,7 @@ static int get_max_inline_xattr_value_si
-       /* Compute min_offs. */
-       for (; !IS_LAST_ENTRY(entry); entry = EXT4_XATTR_NEXT(entry)) {
--              if (!entry->e_value_block && entry->e_value_size) {
-+              if (!entry->e_value_inum && entry->e_value_size) {
-                       size_t offs = le16_to_cpu(entry->e_value_offs);
-                       if (offs < min_offs)
-                               min_offs = offs;
diff --git a/ldiskfs/kernel_patches/patches/sles12/ext4-misc.patch b/ldiskfs/kernel_patches/patches/sles12/ext4-misc.patch
deleted file mode 100644 (file)
index 0b99845..0000000
+++ /dev/null
@@ -1,160 +0,0 @@
-Index: linux-3.12.39-47.1/fs/ext4/ext4.h
-===================================================================
---- linux-3.12.39-47.1.orig/fs/ext4/ext4.h
-+++ linux-3.12.39-47.1/fs/ext4/ext4.h
-@@ -1431,6 +1431,8 @@ static inline void ext4_clear_state_flag
- #define NEXT_ORPHAN(inode) EXT4_I(inode)->i_dtime
-+#define JOURNAL_START_HAS_3ARGS       1
-+
- /*
-  * Codes for operating systems
-  */
-@@ -2694,6 +2696,11 @@ struct ext4_extent;
- extern int ext4_ext_tree_init(handle_t *handle, struct inode *);
- extern int ext4_ext_writepage_trans_blocks(struct inode *, int);
-+extern struct buffer_head *ext4_read_inode_bitmap(struct super_block *sb,
-+                                                ext4_group_t block_group);
-+extern struct buffer_head *ext4_append(handle_t *handle,
-+                                     struct inode *inode,
-+                                     ext4_lblk_t *block);
- extern int ext4_ext_index_trans_blocks(struct inode *inode, int extents);
- extern int ext4_ext_map_blocks(handle_t *handle, struct inode *inode,
-                              struct ext4_map_blocks *map, int flags);
-Index: linux-3.12.39-47.1/fs/ext4/namei.c
-===================================================================
---- linux-3.12.39-47.1.orig/fs/ext4/namei.c
-+++ linux-3.12.39-47.1/fs/ext4/namei.c
-@@ -48,7 +48,7 @@
- #define NAMEI_RA_BLOCKS  4
- #define NAMEI_RA_SIZE      (NAMEI_RA_CHUNKS * NAMEI_RA_BLOCKS)
--static struct buffer_head *ext4_append(handle_t *handle,
-+struct buffer_head *ext4_append(handle_t *handle,
-                                       struct inode *inode,
-                                       ext4_lblk_t *block)
- {
-@@ -71,6 +2200,7 @@ out:
-               return ERR_PTR(err);
-       inode->i_size += inode->i_sb->s_blocksize;
-       EXT4_I(inode)->i_disksize = inode->i_size;
-+      BUFFER_TRACE(bh, "get_write_access");
-       err = ext4_journal_get_write_access(handle, bh);
-       if (err) {
-               brelse(bh);
-@@ -154,6 +154,7 @@ static struct buffer_head *__ext4_read_d
-       }
-       return bh;
- }
-+EXPORT_SYMBOL(ext4_append);
- #ifndef assert
- #define assert(test) J_ASSERT(test)
-@@ -2199,7 +2200,7 @@ out:
-  * DIR_NLINK feature is set if 1) nlinks > EXT4_LINK_MAX or 2) nlinks == 2,
-  * since this indicates that nlinks count was previously 1.
-  */
--static void ext4_inc_count(handle_t *handle, struct inode *inode)
-+void ext4_inc_count(handle_t *handle, struct inode *inode)
- {
-       inc_nlink(inode);
-       if (is_dx(inode) && inode->i_nlink > 1) {
-@@ -2211,16 +2212,18 @@ static void ext4_inc_count(handle_t *han
-               }
-       }
- }
-+EXPORT_SYMBOL(ext4_inc_count);
- /*
-  * If a directory had nlink == 1, then we should let it be 1. This indicates
-  * directory has >EXT4_LINK_MAX subdirs.
-  */
--static void ext4_dec_count(handle_t *handle, struct inode *inode)
-+void ext4_dec_count(handle_t *handle, struct inode *inode)
- {
-       if (!S_ISDIR(inode->i_mode) || inode->i_nlink > 2)
-               drop_nlink(inode);
- }
-+EXPORT_SYMBOL(ext4_dec_count);
- static int ext4_add_nondir(handle_t *handle,
-Index: linux-3.12.39-47.1/fs/ext4/ialloc.c
-===================================================================
---- linux-3.12.39-47.1.orig/fs/ext4/ialloc.c
-+++ linux-3.12.39-47.1/fs/ext4/ialloc.c
-@@ -109,7 +109,7 @@ void ext4_end_bitmap_read(struct buffer_
-  *
-  * Return buffer_head of bitmap on success or NULL.
-  */
--static struct buffer_head *
-+struct buffer_head *
- ext4_read_inode_bitmap(struct super_block *sb, ext4_group_t block_group)
- {
-       struct ext4_group_desc *desc;
-@@ -192,6 +192,7 @@ verify:
-       set_buffer_verified(bh);
-       return bh;
- }
-+EXPORT_SYMBOL(ext4_read_inode_bitmap);
- /*
-  * NOTE! When we get the inode, we're the only people
-Index: linux-3.12.39-47.1/fs/ext4/inode.c
-===================================================================
---- linux-3.12.39-47.1.orig/fs/ext4/inode.c
-+++ linux-3.12.39-47.1/fs/ext4/inode.c
-@@ -5186,3 +5186,17 @@ out:
-       sb_end_pagefault(inode->i_sb);
-       return ret;
- }
-+EXPORT_SYMBOL(ext4_map_blocks);
-+EXPORT_SYMBOL(ext4_truncate);
-+EXPORT_SYMBOL(ext4_iget);
-+EXPORT_SYMBOL(ext4_bread);
-+EXPORT_SYMBOL(ext4_itable_unused_count);
-+EXPORT_SYMBOL(ext4_force_commit);
-+EXPORT_SYMBOL(ext4_mark_inode_dirty);
-+EXPORT_SYMBOL(ext4_get_group_desc);
-+EXPORT_SYMBOL(__ext4_journal_get_write_access);
-+EXPORT_SYMBOL(__ext4_journal_start_sb);
-+EXPORT_SYMBOL(__ext4_journal_stop);
-+EXPORT_SYMBOL(__ext4_handle_dirty_metadata);
-+EXPORT_SYMBOL(__ext4_std_error);
-+EXPORT_SYMBOL(ext4fs_dirhash);
-Index: linux-3.12.39-47.1/fs/ext4/mballoc.c
-===================================================================
---- linux-3.12.39-47.1.orig/fs/ext4/mballoc.c
-+++ linux-3.12.39-47.1/fs/ext4/mballoc.c
-@@ -759,6 +759,9 @@ void ext4_mb_generate_buddy(struct super
-                * corrupt and update bb_free using bitmap value
-                */
-               grp->bb_free = free;
-+              if (!EXT4_MB_GRP_BBITMAP_CORRUPT(grp))
-+                      percpu_counter_sub(&sbi->s_freeclusters_counter,
-+                                         grp->bb_free);
-               set_bit(EXT4_GROUP_INFO_BBITMAP_CORRUPT_BIT, &grp->bb_state);
-       }
-       mb_set_largest_free_order(sb, grp);
-Index: linux-3.12.39-47.1/fs/ext4/xattr.c
-===================================================================
---- linux-3.12.39-47.1.orig/fs/ext4/xattr.c
-+++ linux-3.12.39-47.1/fs/ext4/xattr.c
-@@ -541,6 +541,7 @@ ext4_xattr_release_block(handle_t *handl
-       int error = 0;
-       ce = mb_cache_entry_get(ext4_xattr_cache, bh->b_bdev, bh->b_blocknr);
-+      BUFFER_TRACE(bh, "get_write_access");
-       error = ext4_journal_get_write_access(handle, bh);
-       if (error)
-               goto out;
-@@ -781,6 +782,7 @@ ext4_xattr_block_set(handle_t *handle, s
-       if (s->base) {
-               ce = mb_cache_entry_get(ext4_xattr_cache, bs->bh->b_bdev,
-                                       bs->bh->b_blocknr);
-+              BUFFER_TRACE(bs->bh, "get_write_access");
-               error = ext4_journal_get_write_access(handle, bs->bh);
-               if (error)
-                       goto cleanup;
diff --git a/ldiskfs/kernel_patches/patches/sles12/ext4-osd-iop-common.patch b/ldiskfs/kernel_patches/patches/sles12/ext4-osd-iop-common.patch
deleted file mode 100644 (file)
index b6a2908..0000000
+++ /dev/null
@@ -1,69 +0,0 @@
-diff --git a/fs/ext4/ext4.h b/fs/ext4/ext4.h
-index fa0f45ae..8f89024e 100644
---- a/fs/ext4/ext4.h
-+++ b/fs/ext4/ext4.h
-@@ -2239,6 +2239,12 @@ extern int ext4_orphan_add(handle_t *, struct inode *);
- extern int ext4_orphan_del(handle_t *, struct inode *);
- extern int ext4_htree_fill_tree(struct file *dir_file, __u32 start_hash,
-                               __u32 start_minor_hash, __u32 *next_hash);
-+extern struct inode *ext4_create_inode(handle_t *handle,
-+                                     struct inode *dir, int mode,
-+                                     uid_t *owner);
-+extern int ext4_delete_entry(handle_t *handle, struct inode * dir,
-+                           struct ext4_dir_entry_2 *de_del,
-+                           struct buffer_head *bh);
- extern int search_dir(struct buffer_head *bh,
-                     char *search_buf,
-                     int buf_size,
-diff --git a/fs/ext4/namei.c b/fs/ext4/namei.c
-index 8e265e61..77d0782c 100644
---- a/fs/ext4/namei.c
-+++ b/fs/ext4/namei.c
-@@ -2176,7 +2176,7 @@ int ext4_generic_delete_entry(handle_t *handle,
-       return -ENOENT;
- }
--static int ext4_delete_entry(handle_t *handle,
-+int ext4_delete_entry(handle_t *handle,
-                            struct inode *dir,
-                            struct ext4_dir_entry_2 *de_del,
-                            struct buffer_head *bh)
-@@ -2216,7 +2216,7 @@ out:
-               ext4_std_error(dir->i_sb, err);
-       return err;
- }
--
-+EXPORT_SYMBOL(ext4_delete_entry);
- /*
-  * DIR_NLINK feature is set if 1) nlinks > EXT4_LINK_MAX or 2) nlinks == 2,
-  * since this indicates that nlinks count was previously 1.
-@@ -2263,6 +2263,29 @@ static int ext4_add_nondir(handle_t *handle,
-       return err;
- }
-+ /* Return locked inode, then the caller can modify the inode's states/flags
-+  * before others finding it. The caller should unlock the inode by itself. */
-+struct inode *ext4_create_inode(handle_t *handle, struct inode *dir, int mode,
-+                              uid_t *owner)
-+{
-+      struct inode *inode;
-+
-+      inode = ext4_new_inode(handle, dir, mode, NULL, 0, owner);
-+      if (!IS_ERR(inode)) {
-+              if (S_ISCHR(mode) || S_ISBLK(mode) || S_ISFIFO(mode)) {
-+#ifdef CONFIG_LDISKFS_FS_XATTR
-+                      inode->i_op = &ext4_special_inode_operations;
-+#endif
-+              } else {
-+                      inode->i_op = &ext4_file_inode_operations;
-+                      inode->i_fop = &ext4_file_operations;
-+                      ext4_set_aops(inode);
-+              }
-+      }
-+      return inode;
-+}
-+EXPORT_SYMBOL(ext4_create_inode);
-+
- /*
-  * By the time this is called, we already have created
-  * the directory cache entry for the new file, but it
diff --git a/ldiskfs/kernel_patches/patches/sles12/ext4-pdirop.patch b/ldiskfs/kernel_patches/patches/sles12/ext4-pdirop.patch
deleted file mode 100644 (file)
index 556453d..0000000
+++ /dev/null
@@ -1,1942 +0,0 @@
-Single-directory performance is critical for HPC workloads. In a
-typical use case an application creates a separate output file for
-each node and task in a job. As nodes and tasks increase, hundreds
-of thousands of files may be created in a single directory within
-a short window of time.
-Today, both filename lookups and filesystem-modifying operations
-(such as create and unlink) are protected by a single lock for an
-entire ldiskfs directory. The PDO project removes this bottleneck
-by introducing a parallel locking mechanism for entire ldiskfs
-directories. This work enables multiple application threads to
-look up, create, and unlink entries in a directory in parallel.
-
-This patch contains:
- - pdirops support for ldiskfs
- - integration with osd-ldiskfs
-
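To make the new locking API concrete, a minimal usage sketch follows, assembled only from the declarations in the htree_lock.h hunk below: one lock head per shared resource, one handle per thread, and a shared concurrent-read lock over the whole tree. The function pdirop_example() and its error handling are illustrative assumptions, not code from the patch.

/*
 * Usage sketch only: built from the htree_lock.h declarations in the
 * hunk below.  pdirop_example() is a hypothetical caller, not patch code.
 */
#include <linux/errno.h>
#include <linux/htree_lock.h>

static int pdirop_example(void)
{
	struct htree_lock_head *lhead;
	struct htree_lock *lck;

	/* One lock head per shared resource (e.g. per directory); a single
	 * key level and the default hash bits, no private data. */
	lhead = htree_lock_head_alloc(1, HTREE_HBITS_DEF, 0);
	if (lhead == NULL)
		return -ENOMEM;

	/* One lock handle per contending thread, no private bytes. */
	lck = htree_lock_alloc(1, 0);
	if (lck == NULL) {
		htree_lock_head_free(lhead);
		return -ENOMEM;
	}

	/* Request the whole tree in concurrent-read mode; wait=1 asks for
	 * blocking behaviour (see the return-value note in the header). */
	htree_lock_try(lck, lhead, HTREE_LOCK_CR, 1);

	/* ... directory lookup work protected by the CR lock ... */

	htree_unlock(lck);

	htree_lock_free(lck);
	htree_lock_head_free(lhead);
	return 0;
}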
-Index: linux-3.10.0-229.1.2.fc21.x86_64/include/linux/htree_lock.h
-===================================================================
---- /dev/null
-+++ linux-3.10.0-229.1.2.fc21.x86_64/include/linux/htree_lock.h
-@@ -0,0 +1,187 @@
-+/*
-+ * include/linux/htree_lock.h
-+ *
-+ * Copyright (c) 2011, 2012, Intel Corporation.
-+ *
-+ * Author: Liang Zhen <liang@whamcloud.com>
-+ */
-+
-+/*
-+ * htree lock
-+ *
-+ * htree_lock is an advanced lock, it can support five lock modes (concept is
-+ * taken from DLM) and it's a sleeping lock.
-+ *
-+ * most common use case is:
-+ * - create a htree_lock_head for data
-+ * - each thread (contender) creates it's own htree_lock
-+ * - contender needs to call htree_lock(lock_node, mode) to protect data and
-+ *   call htree_unlock to release lock
-+ *
-+ * Also, there is advanced use-case which is more complex, user can have
-+ * PW/PR lock on particular key, it's mostly used while user holding shared
-+ * lock on the htree (CW, CR)
-+ *
-+ * htree_lock(lock_node, HTREE_LOCK_CR); lock the htree with CR
-+ * htree_node_lock(lock_node, HTREE_LOCK_PR, key...); lock @key with PR
-+ * ...
-+ * htree_node_unlock(lock_node);; unlock the key
-+ *
-+ * Another tip is, we can have N-levels of this kind of keys, all we need to
-+ * do is specifying N-levels while creating htree_lock_head, then we can
-+ * lock/unlock a specific level by:
-+ * htree_node_lock(lock_node, mode1, key1, level1...);
-+ * do something;
-+ * htree_node_lock(lock_node, mode1, key2, level2...);
-+ * do something;
-+ * htree_node_unlock(lock_node, level2);
-+ * htree_node_unlock(lock_node, level1);
-+ *
-+ * NB: for multi-level, should be careful about locking order to avoid deadlock
-+ */
-+
-+#ifndef _LINUX_HTREE_LOCK_H
-+#define _LINUX_HTREE_LOCK_H
-+
-+#include <linux/list.h>
-+#include <linux/spinlock.h>
-+#include <linux/sched.h>
-+
-+/*
-+ * Lock Modes
-+ * more details can be found here:
-+ * http://en.wikipedia.org/wiki/Distributed_lock_manager
-+ */
-+typedef enum {
-+      HTREE_LOCK_EX   = 0, /* exclusive lock: incompatible with all others */
-+      HTREE_LOCK_PW,       /* protected write: allows only CR users */
-+      HTREE_LOCK_PR,       /* protected read: allow PR, CR users */
-+      HTREE_LOCK_CW,       /* concurrent write: allow CR, CW users */
-+      HTREE_LOCK_CR,       /* concurrent read: allow all but EX users */
-+      HTREE_LOCK_MAX,      /* number of lock modes */
-+} htree_lock_mode_t;
-+
-+#define HTREE_LOCK_NL         HTREE_LOCK_MAX
-+#define HTREE_LOCK_INVAL      0xdead10c
-+
-+enum {
-+      HTREE_HBITS_MIN         = 2,
-+      HTREE_HBITS_DEF         = 14,
-+      HTREE_HBITS_MAX         = 32,
-+};
-+
-+enum {
-+      HTREE_EVENT_DISABLE     = (0),
-+      HTREE_EVENT_RD          = (1 << HTREE_LOCK_PR),
-+      HTREE_EVENT_WR          = (1 << HTREE_LOCK_PW),
-+      HTREE_EVENT_RDWR        = (HTREE_EVENT_RD | HTREE_EVENT_WR),
-+};
-+
-+struct htree_lock;
-+
-+typedef void (*htree_event_cb_t)(void *target, void *event);
-+
-+struct htree_lock_child {
-+      struct list_head        lc_list;        /* granted list */
-+      htree_event_cb_t        lc_callback;    /* event callback */
-+      unsigned                lc_events;      /* event types */
-+};
-+
-+struct htree_lock_head {
-+      unsigned long           lh_lock;        /* bits lock */
-+      /* blocked lock list (htree_lock) */
-+      struct list_head        lh_blocked_list;
-+      /* # key levels */
-+      u16                     lh_depth;
-+      /* hash bits for key and limit number of locks */
-+      u16                     lh_hbits;
-+      /* counters for blocked locks */
-+      u16                     lh_nblocked[HTREE_LOCK_MAX];
-+      /* counters for granted locks */
-+      u16                     lh_ngranted[HTREE_LOCK_MAX];
-+      /* private data */
-+      void                    *lh_private;
-+      /* array of children locks */
-+      struct htree_lock_child lh_children[0];
-+};
-+
-+/* htree_lock_node_t is child-lock for a specific key (ln_value) */
-+struct htree_lock_node {
-+      htree_lock_mode_t       ln_mode;
-+      /* major hash key */
-+      u16                     ln_major_key;
-+      /* minor hash key */
-+      u16                     ln_minor_key;
-+      struct list_head        ln_major_list;
-+      struct list_head        ln_minor_list;
-+      /* alive list, all locks (granted, blocked, listening) are on it */
-+      struct list_head        ln_alive_list;
-+      /* blocked list */
-+      struct list_head        ln_blocked_list;
-+      /* granted list */
-+      struct list_head        ln_granted_list;
-+      void                    *ln_ev_target;
-+};
-+
-+struct htree_lock {
-+      struct task_struct      *lk_task;
-+      struct htree_lock_head  *lk_head;
-+      void                    *lk_private;
-+      unsigned                lk_depth;
-+      htree_lock_mode_t       lk_mode;
-+      struct list_head        lk_blocked_list;
-+      struct htree_lock_node  lk_nodes[0];
-+};
-+
-+/* create a lock head, which stands for a resource */
-+struct htree_lock_head *htree_lock_head_alloc(unsigned depth,
-+                                            unsigned hbits, unsigned priv);
-+/* free a lock head */
-+void htree_lock_head_free(struct htree_lock_head *lhead);
-+/* register event callback for child lock at level @depth */
-+void htree_lock_event_attach(struct htree_lock_head *lhead, unsigned depth,
-+                           unsigned events, htree_event_cb_t callback);
-+/* create a lock handle, which stands for a thread */
-+struct htree_lock *htree_lock_alloc(unsigned depth, unsigned pbytes);
-+/* free a lock handle */
-+void htree_lock_free(struct htree_lock *lck);
-+/* lock htree; when @wait is false, 0 is returned if the lock can't
-+ * be granted immediately */
-+int htree_lock_try(struct htree_lock *lck, struct htree_lock_head *lhead,
-+                 htree_lock_mode_t mode, int wait);
-+/* unlock htree */
-+void htree_unlock(struct htree_lock *lck);
-+/* unlock and relock htree with @new_mode */
-+int htree_change_lock_try(struct htree_lock *lck,
-+                        htree_lock_mode_t new_mode, int wait);
-+void htree_change_mode(struct htree_lock *lck, htree_lock_mode_t mode);
-+/* acquire child lock (key) of htree at level @dep; @event will be sent to all
-+ * listeners on this @key while the lock is being granted */
-+int htree_node_lock_try(struct htree_lock *lck, htree_lock_mode_t mode,
-+                      u32 key, unsigned dep, int wait, void *event);
-+/* release child lock at level @dep; this lock will listen on its key
-+ * if @event isn't NULL, and event_cb will be called against @lck while
-+ * granting any other lock at level @dep with the same key */
-+void htree_node_unlock(struct htree_lock *lck, unsigned dep, void *event);
-+/* stop listening on child lock at level @dep */
-+void htree_node_stop_listen(struct htree_lock *lck, unsigned dep);
-+/* for debug */
-+void htree_lock_stat_print(int depth);
-+void htree_lock_stat_reset(void);
-+
-+#define htree_lock(lck, lh, mode)     htree_lock_try(lck, lh, mode, 1)
-+#define htree_change_lock(lck, mode)  htree_change_lock_try(lck, mode, 1)
-+
-+#define htree_lock_mode(lck)          ((lck)->lk_mode)
-+
-+#define htree_node_lock(lck, mode, key, dep)  \
-+      htree_node_lock_try(lck, mode, key, dep, 1, NULL)
-+/* this is only safe in thread context of lock owner */
-+#define htree_node_is_granted(lck, dep)               \
-+      ((lck)->lk_nodes[dep].ln_mode != HTREE_LOCK_INVAL && \
-+       (lck)->lk_nodes[dep].ln_mode != HTREE_LOCK_NL)
-+/* this is only safe in thread context of lock owner */
-+#define htree_node_is_listening(lck, dep)     \
-+      ((lck)->lk_nodes[dep].ln_mode == HTREE_LOCK_NL)
-+
-+#endif
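A hedged illustration of the "most common use case" described in the comment at the top of this header; the depth/hbits arguments and the key value are placeholders, and NULL checks are omitted.

	static void htree_lock_example(void)
	{
		struct htree_lock_head *lhead;
		struct htree_lock *lck;

		lhead = htree_lock_head_alloc(1, HTREE_HBITS_DEF, 0); /* one child level */
		lck = htree_lock_alloc(1, 0);	/* per-thread handle, no private bytes */

		htree_lock(lck, lhead, HTREE_LOCK_CW);		/* shared lock on the tree */
		htree_node_lock(lck, HTREE_LOCK_PW, 123, 0);	/* exclusive lock on key 123 */
		/* ... modify the block identified by the key ... */
		htree_node_unlock(lck, 0, NULL);
		htree_unlock(lck);		/* also drops any remaining child locks */

		htree_lock_free(lck);
		htree_lock_head_free(lhead);
	}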
-Index: linux-3.10.0-229.1.2.fc21.x86_64/fs/ext4/htree_lock.c
-===================================================================
---- /dev/null
-+++ linux-3.10.0-229.1.2.fc21.x86_64/fs/ext4/htree_lock.c
-@@ -0,0 +1,891 @@
-+/*
-+ * fs/ext4/htree_lock.c
-+ *
-+ * Copyright (c) 2011, 2012, Intel Corporation.
-+ *
-+ * Author: Liang Zhen <liang@whamcloud.com>
-+ */
-+#include <linux/jbd2.h>
-+#include <linux/hash.h>
-+#include <linux/module.h>
-+#include <linux/htree_lock.h>
-+
-+enum {
-+      HTREE_LOCK_BIT_EX       = (1 << HTREE_LOCK_EX),
-+      HTREE_LOCK_BIT_PW       = (1 << HTREE_LOCK_PW),
-+      HTREE_LOCK_BIT_PR       = (1 << HTREE_LOCK_PR),
-+      HTREE_LOCK_BIT_CW       = (1 << HTREE_LOCK_CW),
-+      HTREE_LOCK_BIT_CR       = (1 << HTREE_LOCK_CR),
-+};
-+
-+enum {
-+      HTREE_LOCK_COMPAT_EX    = 0,
-+      HTREE_LOCK_COMPAT_PW    = HTREE_LOCK_COMPAT_EX | HTREE_LOCK_BIT_CR,
-+      HTREE_LOCK_COMPAT_PR    = HTREE_LOCK_COMPAT_PW | HTREE_LOCK_BIT_PR,
-+      HTREE_LOCK_COMPAT_CW    = HTREE_LOCK_COMPAT_PW | HTREE_LOCK_BIT_CW,
-+      HTREE_LOCK_COMPAT_CR    = HTREE_LOCK_COMPAT_CW | HTREE_LOCK_BIT_PR |
-+                                HTREE_LOCK_BIT_PW,
-+};
-+
-+static int htree_lock_compat[] = {
-+      [HTREE_LOCK_EX]         HTREE_LOCK_COMPAT_EX,
-+      [HTREE_LOCK_PW]         HTREE_LOCK_COMPAT_PW,
-+      [HTREE_LOCK_PR]         HTREE_LOCK_COMPAT_PR,
-+      [HTREE_LOCK_CW]         HTREE_LOCK_COMPAT_CW,
-+      [HTREE_LOCK_CR]         HTREE_LOCK_COMPAT_CR,
-+};
-+
-+/* max allowed htree-lock depth.
-+ * We only need depth=3 for ext4, although a user can specify a higher value. */
-+#define HTREE_LOCK_DEP_MAX    16
-+
-+#ifdef HTREE_LOCK_DEBUG
-+
-+static char *hl_name[] = {
-+      [HTREE_LOCK_EX]         "EX",
-+      [HTREE_LOCK_PW]         "PW",
-+      [HTREE_LOCK_PR]         "PR",
-+      [HTREE_LOCK_CW]         "CW",
-+      [HTREE_LOCK_CR]         "CR",
-+};
-+
-+/* lock stats */
-+struct htree_lock_node_stats {
-+      unsigned long long      blocked[HTREE_LOCK_MAX];
-+      unsigned long long      granted[HTREE_LOCK_MAX];
-+      unsigned long long      retried[HTREE_LOCK_MAX];
-+      unsigned long long      events;
-+};
-+
-+struct htree_lock_stats {
-+      struct htree_lock_node_stats    nodes[HTREE_LOCK_DEP_MAX];
-+      unsigned long long      granted[HTREE_LOCK_MAX];
-+      unsigned long long      blocked[HTREE_LOCK_MAX];
-+};
-+
-+static struct htree_lock_stats hl_stats;
-+
-+void htree_lock_stat_reset(void)
-+{
-+      memset(&hl_stats, 0, sizeof(hl_stats));
-+}
-+
-+void htree_lock_stat_print(int depth)
-+{
-+      int     i;
-+      int     j;
-+
-+      printk(KERN_DEBUG "HTREE LOCK STATS:\n");
-+      for (i = 0; i < HTREE_LOCK_MAX; i++) {
-+              printk(KERN_DEBUG "[%s]: G [%10llu], B [%10llu]\n",
-+                     hl_name[i], hl_stats.granted[i], hl_stats.blocked[i]);
-+      }
-+      for (i = 0; i < depth; i++) {
-+              printk(KERN_DEBUG "HTREE CHILD [%d] STATS:\n", i);
-+              for (j = 0; j < HTREE_LOCK_MAX; j++) {
-+                      printk(KERN_DEBUG
-+                              "[%s]: G [%10llu], B [%10llu], R [%10llu]\n",
-+                              hl_name[j], hl_stats.nodes[i].granted[j],
-+                              hl_stats.nodes[i].blocked[j],
-+                              hl_stats.nodes[i].retried[j]);
-+              }
-+      }
-+}
-+
-+#define lk_grant_inc(m)       do { hl_stats.granted[m]++; } while (0)
-+#define lk_block_inc(m)       do { hl_stats.blocked[m]++; } while (0)
-+#define ln_grant_inc(d, m)    do { hl_stats.nodes[d].granted[m]++; } while (0)
-+#define ln_block_inc(d, m)    do { hl_stats.nodes[d].blocked[m]++; } while (0)
-+#define ln_retry_inc(d, m)    do { hl_stats.nodes[d].retried[m]++; } while (0)
-+#define ln_event_inc(d)       do { hl_stats.nodes[d].events++; } while (0)
-+
-+#else /* !DEBUG */
-+
-+void htree_lock_stat_reset(void) {}
-+void htree_lock_stat_print(int depth) {}
-+
-+#define lk_grant_inc(m)             do {} while (0)
-+#define lk_block_inc(m)             do {} while (0)
-+#define ln_grant_inc(d, m)    do {} while (0)
-+#define ln_block_inc(d, m)    do {} while (0)
-+#define ln_retry_inc(d, m)    do {} while (0)
-+#define ln_event_inc(d)             do {} while (0)
-+
-+#endif /* DEBUG */
-+
-+EXPORT_SYMBOL(htree_lock_stat_reset);
-+EXPORT_SYMBOL(htree_lock_stat_print);
-+
-+#define HTREE_DEP_ROOT                  (-1)
-+
-+#define htree_spin_lock(lhead, dep)                           \
-+      bit_spin_lock((dep) + 1, &(lhead)->lh_lock)
-+#define htree_spin_unlock(lhead, dep)                         \
-+      bit_spin_unlock((dep) + 1, &(lhead)->lh_lock)
-+
-+#define htree_key_event_ignore(child, ln)                     \
-+      (!((child)->lc_events & (1 << (ln)->ln_mode)))
-+
-+static int
-+htree_key_list_empty(struct htree_lock_node *ln)
-+{
-+      return list_empty(&ln->ln_major_list) && list_empty(&ln->ln_minor_list);
-+}
-+
-+static void
-+htree_key_list_del_init(struct htree_lock_node *ln)
-+{
-+      struct htree_lock_node *tmp = NULL;
-+
-+      if (!list_empty(&ln->ln_minor_list)) {
-+              tmp = list_entry(ln->ln_minor_list.next,
-+                               struct htree_lock_node, ln_minor_list);
-+              list_del_init(&ln->ln_minor_list);
-+      }
-+
-+      if (list_empty(&ln->ln_major_list))
-+              return;
-+
-+      if (tmp == NULL) { /* not on minor key list */
-+              list_del_init(&ln->ln_major_list);
-+      } else {
-+              BUG_ON(!list_empty(&tmp->ln_major_list));
-+              list_replace_init(&ln->ln_major_list, &tmp->ln_major_list);
-+      }
-+}
-+
-+static void
-+htree_key_list_replace_init(struct htree_lock_node *old,
-+                          struct htree_lock_node *new)
-+{
-+      if (!list_empty(&old->ln_major_list))
-+              list_replace_init(&old->ln_major_list, &new->ln_major_list);
-+
-+      if (!list_empty(&old->ln_minor_list))
-+              list_replace_init(&old->ln_minor_list, &new->ln_minor_list);
-+}
-+
-+static void
-+htree_key_event_enqueue(struct htree_lock_child *child,
-+                      struct htree_lock_node *ln, int dep, void *event)
-+{
-+      struct htree_lock_node *tmp;
-+
-+      /* NB: ALWAYS called holding lhead::lh_lock(dep) */
-+      BUG_ON(ln->ln_mode == HTREE_LOCK_NL);
-+      if (event == NULL || htree_key_event_ignore(child, ln))
-+              return;
-+
-+      /* shouldn't be a very long list */
-+      list_for_each_entry(tmp, &ln->ln_alive_list, ln_alive_list) {
-+              if (tmp->ln_mode == HTREE_LOCK_NL) {
-+                      ln_event_inc(dep);
-+                      if (child->lc_callback != NULL)
-+                              child->lc_callback(tmp->ln_ev_target, event);
-+              }
-+      }
-+}
-+
-+static int
-+htree_node_lock_enqueue(struct htree_lock *newlk, struct htree_lock *curlk,
-+                      unsigned dep, int wait, void *event)
-+{
-+      struct htree_lock_child *child = &newlk->lk_head->lh_children[dep];
-+      struct htree_lock_node *newln = &newlk->lk_nodes[dep];
-+      struct htree_lock_node *curln = &curlk->lk_nodes[dep];
-+
-+      /* NB: ALWAYS called holding lhead::lh_lock(dep) */
-+      /* NB: we only expect PR/PW lock modes here; only these two modes are
-+       * allowed for htree_node_lock (asserted in htree_node_lock_internal).
-+       * NL is only used for listeners; a user can't directly request NL mode */
-+      if ((curln->ln_mode == HTREE_LOCK_NL) ||
-+          (curln->ln_mode != HTREE_LOCK_PW &&
-+           newln->ln_mode != HTREE_LOCK_PW)) {
-+              /* no conflict, attach it on granted list of @curlk */
-+              if (curln->ln_mode != HTREE_LOCK_NL) {
-+                      list_add(&newln->ln_granted_list,
-+                               &curln->ln_granted_list);
-+              } else {
-+                      /* replace key owner */
-+                      htree_key_list_replace_init(curln, newln);
-+              }
-+
-+              list_add(&newln->ln_alive_list, &curln->ln_alive_list);
-+              htree_key_event_enqueue(child, newln, dep, event);
-+              ln_grant_inc(dep, newln->ln_mode);
-+              return 1; /* still hold lh_lock */
-+      }
-+
-+      if (!wait) { /* can't grant and don't want to wait */
-+              ln_retry_inc(dep, newln->ln_mode);
-+              newln->ln_mode = HTREE_LOCK_INVAL;
-+              return -1; /* don't wait and just return -1 */
-+      }
-+
-+      newlk->lk_task = current;
-+      set_current_state(TASK_UNINTERRUPTIBLE);
-+      /* conflict, attach it on blocked list of curlk */
-+      list_add_tail(&newln->ln_blocked_list, &curln->ln_blocked_list);
-+      list_add(&newln->ln_alive_list, &curln->ln_alive_list);
-+      ln_block_inc(dep, newln->ln_mode);
-+
-+      htree_spin_unlock(newlk->lk_head, dep);
-+      /* wait to be given the lock */
-+      if (newlk->lk_task != NULL)
-+              schedule();
-+      /* granted, no doubt, wake up will set me RUNNING */
-+      if (event == NULL || htree_key_event_ignore(child, newln))
-+              return 0; /* granted without lh_lock */
-+
-+      htree_spin_lock(newlk->lk_head, dep);
-+      htree_key_event_enqueue(child, newln, dep, event);
-+      return 1; /* still hold lh_lock */
-+}
-+
-+/*
-+ * get PR/PW access to a particular tree-node according to @dep and @key;
-+ * it will return -1 if @wait is false and the lock can't be granted immediately.
-+ * All listeners (HTREE_LOCK_NL) on @dep with the same @key will get
-+ * @event if it's not NULL.
-+ * NB: ALWAYS called holding lhead::lh_lock
-+ */
-+static int
-+htree_node_lock_internal(struct htree_lock_head *lhead, struct htree_lock *lck,
-+                       htree_lock_mode_t mode, u32 key, unsigned dep,
-+                       int wait, void *event)
-+{
-+      LIST_HEAD(list);
-+      struct htree_lock       *tmp;
-+      struct htree_lock       *tmp2;
-+      u16                     major;
-+      u16                     minor;
-+      u8                      reverse;
-+      u8                      ma_bits;
-+      u8                      mi_bits;
-+
-+      BUG_ON(mode != HTREE_LOCK_PW && mode != HTREE_LOCK_PR);
-+      BUG_ON(htree_node_is_granted(lck, dep));
-+
-+      key = hash_long(key, lhead->lh_hbits);
-+
-+      mi_bits = lhead->lh_hbits >> 1;
-+      ma_bits = lhead->lh_hbits - mi_bits;
-+
-+      lck->lk_nodes[dep].ln_major_key = major = key & ((1U << ma_bits) - 1);
-+      lck->lk_nodes[dep].ln_minor_key = minor = key >> ma_bits;
-+      lck->lk_nodes[dep].ln_mode = mode;
-+
-+      /*
-+       * The major key list is an ordered list, so searches are started
-+       * at the end of the list that is numerically closer to major_key,
-+       * so at most half of the list will be walked (for well-distributed
-+       * keys). The list traversal aborts early if the expected key
-+       * location is passed.
-+       */
-+      reverse = (major >= (1 << (ma_bits - 1)));
-+
-+      if (reverse) {
-+              list_for_each_entry_reverse(tmp,
-+                                      &lhead->lh_children[dep].lc_list,
-+                                      lk_nodes[dep].ln_major_list) {
-+                      if (tmp->lk_nodes[dep].ln_major_key == major) {
-+                              goto search_minor;
-+
-+                      } else if (tmp->lk_nodes[dep].ln_major_key < major) {
-+                              /* attach _after_ @tmp */
-+                              list_add(&lck->lk_nodes[dep].ln_major_list,
-+                                       &tmp->lk_nodes[dep].ln_major_list);
-+                              goto out_grant_major;
-+                      }
-+              }
-+
-+              list_add(&lck->lk_nodes[dep].ln_major_list,
-+                       &lhead->lh_children[dep].lc_list);
-+              goto out_grant_major;
-+
-+      } else {
-+              list_for_each_entry(tmp, &lhead->lh_children[dep].lc_list,
-+                                  lk_nodes[dep].ln_major_list) {
-+                      if (tmp->lk_nodes[dep].ln_major_key == major) {
-+                              goto search_minor;
-+
-+                      } else if (tmp->lk_nodes[dep].ln_major_key > major) {
-+                              /* insert _before_ @tmp */
-+                              list_add_tail(&lck->lk_nodes[dep].ln_major_list,
-+                                      &tmp->lk_nodes[dep].ln_major_list);
-+                              goto out_grant_major;
-+                      }
-+              }
-+
-+              list_add_tail(&lck->lk_nodes[dep].ln_major_list,
-+                            &lhead->lh_children[dep].lc_list);
-+              goto out_grant_major;
-+      }
-+
-+ search_minor:
-+      /*
-+       * NB: the minor_key list doesn't have a "head"; @list is just a
-+       * temporary stub to help the list search, make sure it's removed
-+       * after searching.
-+       * minor_key list is an ordered list too.
-+       */
-+      list_add_tail(&list, &tmp->lk_nodes[dep].ln_minor_list);
-+
-+      reverse = (minor >= (1 << (mi_bits - 1)));
-+
-+      if (reverse) {
-+              list_for_each_entry_reverse(tmp2, &list,
-+                                          lk_nodes[dep].ln_minor_list) {
-+                      if (tmp2->lk_nodes[dep].ln_minor_key == minor) {
-+                              goto out_enqueue;
-+
-+                      } else if (tmp2->lk_nodes[dep].ln_minor_key < minor) {
-+                              /* attach _after_ @tmp2 */
-+                              list_add(&lck->lk_nodes[dep].ln_minor_list,
-+                                       &tmp2->lk_nodes[dep].ln_minor_list);
-+                              goto out_grant_minor;
-+                      }
-+              }
-+
-+              list_add(&lck->lk_nodes[dep].ln_minor_list, &list);
-+
-+      } else {
-+              list_for_each_entry(tmp2, &list,
-+                                  lk_nodes[dep].ln_minor_list) {
-+                      if (tmp2->lk_nodes[dep].ln_minor_key == minor) {
-+                              goto out_enqueue;
-+
-+                      } else if (tmp2->lk_nodes[dep].ln_minor_key > minor) {
-+                              /* insert _before_ @tmp2 */
-+                              list_add_tail(&lck->lk_nodes[dep].ln_minor_list,
-+                                      &tmp2->lk_nodes[dep].ln_minor_list);
-+                              goto out_grant_minor;
-+                      }
-+              }
-+
-+              list_add_tail(&lck->lk_nodes[dep].ln_minor_list, &list);
-+      }
-+
-+ out_grant_minor:
-+      if (list.next == &lck->lk_nodes[dep].ln_minor_list) {
-+              /* new lock @lck is the first one on minor_key list, which
-+               * means it has the smallest minor_key and it should
-+               * replace @tmp as minor_key owner */
-+              list_replace_init(&tmp->lk_nodes[dep].ln_major_list,
-+                                &lck->lk_nodes[dep].ln_major_list);
-+      }
-+      /* remove the temporary head */
-+      list_del(&list);
-+
-+ out_grant_major:
-+      ln_grant_inc(dep, lck->lk_nodes[dep].ln_mode);
-+      return 1; /* granted with holding lh_lock */
-+
-+ out_enqueue:
-+      list_del(&list); /* remove temporary head */
-+      return htree_node_lock_enqueue(lck, tmp2, dep, wait, event);
-+}
-+
-+/*
-+ * release the key of @lck at level @dep, and grant any blocked locks.
-+ * the caller will still listen on @key if @event is not NULL, which means
-+ * the caller can see an event (via event_cb) while any lock with
-+ * the same key at level @dep is being granted.
-+ * NB: ALWAYS called holding lhead::lh_lock
-+ * NB: listener will not block anyone because listening mode is HTREE_LOCK_NL
-+ */
-+static void
-+htree_node_unlock_internal(struct htree_lock_head *lhead,
-+                         struct htree_lock *curlk, unsigned dep, void *event)
-+{
-+      struct htree_lock_node  *curln = &curlk->lk_nodes[dep];
-+      struct htree_lock       *grtlk = NULL;
-+      struct htree_lock_node  *grtln;
-+      struct htree_lock       *poslk;
-+      struct htree_lock       *tmplk;
-+
-+      if (!htree_node_is_granted(curlk, dep))
-+              return;
-+
-+      if (!list_empty(&curln->ln_granted_list)) {
-+              /* there is another granted lock */
-+              grtlk = list_entry(curln->ln_granted_list.next,
-+                                 struct htree_lock,
-+                                 lk_nodes[dep].ln_granted_list);
-+              list_del_init(&curln->ln_granted_list);
-+      }
-+
-+      if (grtlk == NULL && !list_empty(&curln->ln_blocked_list)) {
-+              /*
-+               * @curlk is the only granted lock, so we confirmed:
-+               * a) curln is key owner (attached on major/minor_list),
-+               *    so if there is any blocked lock, it should be attached
-+               *    on curln->ln_blocked_list
-+               * b) we always can grant the first blocked lock
-+               */
-+              grtlk = list_entry(curln->ln_blocked_list.next,
-+                                 struct htree_lock,
-+                                 lk_nodes[dep].ln_blocked_list);
-+              BUG_ON(grtlk->lk_task == NULL);
-+              wake_up_process(grtlk->lk_task);
-+      }
-+
-+      if (event != NULL &&
-+          lhead->lh_children[dep].lc_events != HTREE_EVENT_DISABLE) {
-+              curln->ln_ev_target = event;
-+              curln->ln_mode = HTREE_LOCK_NL; /* listen! */
-+      } else {
-+              curln->ln_mode = HTREE_LOCK_INVAL;
-+      }
-+
-+      if (grtlk == NULL) { /* I must be the only one locking this key */
-+              struct htree_lock_node *tmpln;
-+
-+              BUG_ON(htree_key_list_empty(curln));
-+
-+              if (curln->ln_mode == HTREE_LOCK_NL) /* listening */
-+                      return;
-+
-+              /* not listening */
-+              if (list_empty(&curln->ln_alive_list)) { /* no more listener */
-+                      htree_key_list_del_init(curln);
-+                      return;
-+              }
-+
-+              tmpln = list_entry(curln->ln_alive_list.next,
-+                                 struct htree_lock_node, ln_alive_list);
-+
-+              BUG_ON(tmpln->ln_mode != HTREE_LOCK_NL);
-+
-+              htree_key_list_replace_init(curln, tmpln);
-+              list_del_init(&curln->ln_alive_list);
-+
-+              return;
-+      }
-+
-+      /* have a granted lock */
-+      grtln = &grtlk->lk_nodes[dep];
-+      if (!list_empty(&curln->ln_blocked_list)) {
-+              /* only key owner can be on both lists */
-+              BUG_ON(htree_key_list_empty(curln));
-+
-+              if (list_empty(&grtln->ln_blocked_list)) {
-+                      list_add(&grtln->ln_blocked_list,
-+                               &curln->ln_blocked_list);
-+              }
-+              list_del_init(&curln->ln_blocked_list);
-+      }
-+      /*
-+       * NB: this is the tricky part:
-+       * We have only two modes for child-lock (PR and PW), also,
-+       * only owner of the key (attached on major/minor_list) can be on
-+       * both blocked_list and granted_list, so @grtlk must be one
-+       * of these two cases:
-+       *
-+       * a) @grtlk is taken from granted_list, which means we've granted
-+       *    more than one lock so @grtlk has to be PR, the first blocked
-+       *    lock must be PW and we can't grant it at all.
-+       *    So even if @grtlk is not the owner of the key (empty blocked_list),
-+       *    we don't care, because we can't grant any lock anyway.
-+       * b) we just grant a new lock which is taken from head of blocked
-+       *    list, and it should be the first granted lock, and it should
-+       *    be the first one linked on blocked_list.
-+       *
-+       * Either way, we can get the correct result by iterating the
-+       * blocked_list of @grtlk, and don't have to bother finding out
-+       * the owner of the current key.
-+       */
-+      list_for_each_entry_safe(poslk, tmplk, &grtln->ln_blocked_list,
-+                               lk_nodes[dep].ln_blocked_list) {
-+              if (grtlk->lk_nodes[dep].ln_mode == HTREE_LOCK_PW ||
-+                  poslk->lk_nodes[dep].ln_mode == HTREE_LOCK_PW)
-+                      break;
-+              /* grant all readers */
-+              list_del_init(&poslk->lk_nodes[dep].ln_blocked_list);
-+              list_add(&poslk->lk_nodes[dep].ln_granted_list,
-+                       &grtln->ln_granted_list);
-+
-+              BUG_ON(poslk->lk_task == NULL);
-+              wake_up_process(poslk->lk_task);
-+      }
-+
-+      /* if @curln is the owner of this key, replace it with @grtln */
-+      if (!htree_key_list_empty(curln))
-+              htree_key_list_replace_init(curln, grtln);
-+
-+      if (curln->ln_mode == HTREE_LOCK_INVAL)
-+              list_del_init(&curln->ln_alive_list);
-+}
-+
-+/*
-+ * just a wrapper of htree_node_lock_internal; it returns 1 when granted
-+ * and 0 only if @wait is false and the lock can't be granted immediately
-+ */
-+int
-+htree_node_lock_try(struct htree_lock *lck, htree_lock_mode_t mode,
-+                  u32 key, unsigned dep, int wait, void *event)
-+{
-+      struct htree_lock_head *lhead = lck->lk_head;
-+      int rc;
-+
-+      BUG_ON(dep >= lck->lk_depth);
-+      BUG_ON(lck->lk_mode == HTREE_LOCK_INVAL);
-+
-+      htree_spin_lock(lhead, dep);
-+      rc = htree_node_lock_internal(lhead, lck, mode, key, dep, wait, event);
-+      if (rc != 0)
-+              htree_spin_unlock(lhead, dep);
-+      return rc >= 0;
-+}
-+EXPORT_SYMBOL(htree_node_lock_try);
-+
-+/* a wrapper of htree_node_unlock_internal */
-+void
-+htree_node_unlock(struct htree_lock *lck, unsigned dep, void *event)
-+{
-+      struct htree_lock_head *lhead = lck->lk_head;
-+
-+      BUG_ON(dep >= lck->lk_depth);
-+      BUG_ON(lck->lk_mode == HTREE_LOCK_INVAL);
-+
-+      htree_spin_lock(lhead, dep);
-+      htree_node_unlock_internal(lhead, lck, dep, event);
-+      htree_spin_unlock(lhead, dep);
-+}
-+EXPORT_SYMBOL(htree_node_unlock);
-+
-+/* stop listening on child-lock level @dep */
-+void
-+htree_node_stop_listen(struct htree_lock *lck, unsigned dep)
-+{
-+      struct htree_lock_node *ln = &lck->lk_nodes[dep];
-+      struct htree_lock_node *tmp;
-+
-+      BUG_ON(htree_node_is_granted(lck, dep));
-+      BUG_ON(!list_empty(&ln->ln_blocked_list));
-+      BUG_ON(!list_empty(&ln->ln_granted_list));
-+
-+      if (!htree_node_is_listening(lck, dep))
-+              return;
-+
-+      htree_spin_lock(lck->lk_head, dep);
-+      ln->ln_mode = HTREE_LOCK_INVAL;
-+      ln->ln_ev_target = NULL;
-+
-+      if (htree_key_list_empty(ln)) { /* not owner */
-+              list_del_init(&ln->ln_alive_list);
-+              goto out;
-+      }
-+
-+      /* I'm the owner... */
-+      if (list_empty(&ln->ln_alive_list)) { /* no more listener */
-+              htree_key_list_del_init(ln);
-+              goto out;
-+      }
-+
-+      tmp = list_entry(ln->ln_alive_list.next,
-+                       struct htree_lock_node, ln_alive_list);
-+
-+      BUG_ON(tmp->ln_mode != HTREE_LOCK_NL);
-+      htree_key_list_replace_init(ln, tmp);
-+      list_del_init(&ln->ln_alive_list);
-+ out:
-+      htree_spin_unlock(lck->lk_head, dep);
-+}
-+EXPORT_SYMBOL(htree_node_stop_listen);
-+
-+/* release all child-locks if we have any */
-+static void
-+htree_node_release_all(struct htree_lock *lck)
-+{
-+      int     i;
-+
-+      for (i = 0; i < lck->lk_depth; i++) {
-+              if (htree_node_is_granted(lck, i))
-+                      htree_node_unlock(lck, i, NULL);
-+              else if (htree_node_is_listening(lck, i))
-+                      htree_node_stop_listen(lck, i);
-+      }
-+}
-+
-+/*
-+ * obtain the htree lock; it may block internally if there's a conflict
-+ * with any granted or blocked lock and @wait is true.
-+ * NB: ALWAYS called holding lhead::lh_lock
-+ */
-+static int
-+htree_lock_internal(struct htree_lock *lck, int wait)
-+{
-+      struct htree_lock_head *lhead = lck->lk_head;
-+      int     granted = 0;
-+      int     blocked = 0;
-+      int     i;
-+
-+      for (i = 0; i < HTREE_LOCK_MAX; i++) {
-+              if (lhead->lh_ngranted[i] != 0)
-+                      granted |= 1 << i;
-+              if (lhead->lh_nblocked[i] != 0)
-+                      blocked |= 1 << i;
-+      }
-+      if ((htree_lock_compat[lck->lk_mode] & granted) != granted ||
-+          (htree_lock_compat[lck->lk_mode] & blocked) != blocked) {
-+              /* block the current lock even if it only conflicts with another
-+               * blocked lock, so a lock like EX wouldn't starve */
-+              if (!wait)
-+                      return -1;
-+              lhead->lh_nblocked[lck->lk_mode]++;
-+              lk_block_inc(lck->lk_mode);
-+
-+              lck->lk_task = current;
-+              list_add_tail(&lck->lk_blocked_list, &lhead->lh_blocked_list);
-+
-+retry:
-+              set_current_state(TASK_UNINTERRUPTIBLE);
-+              htree_spin_unlock(lhead, HTREE_DEP_ROOT);
-+              /* wait to be given the lock */
-+              if (lck->lk_task != NULL)
-+                      schedule();
-+              /* granted, no doubt; wake up will set me RUNNING.
-+               * Since the thread could be woken up spuriously,
-+               * we need to check again whether the lock was granted. */
-+              if (!list_empty(&lck->lk_blocked_list)) {
-+                      htree_spin_lock(lhead, HTREE_DEP_ROOT);
-+                      if (list_empty(&lck->lk_blocked_list)) {
-+                              htree_spin_unlock(lhead, HTREE_DEP_ROOT);
-+                              return 0;
-+                      }
-+                      goto retry;
-+              }
-+              return 0; /* without lh_lock */
-+      }
-+      lhead->lh_ngranted[lck->lk_mode]++;
-+      lk_grant_inc(lck->lk_mode);
-+      return 1;
-+}
-+
-+/* release htree lock. NB: ALWAYS called holding lhead::lh_lock */
-+static void
-+htree_unlock_internal(struct htree_lock *lck)
-+{
-+      struct htree_lock_head *lhead = lck->lk_head;
-+      struct htree_lock *tmp;
-+      struct htree_lock *tmp2;
-+      int granted = 0;
-+      int i;
-+
-+      BUG_ON(lhead->lh_ngranted[lck->lk_mode] == 0);
-+
-+      lhead->lh_ngranted[lck->lk_mode]--;
-+      lck->lk_mode = HTREE_LOCK_INVAL;
-+
-+      for (i = 0; i < HTREE_LOCK_MAX; i++) {
-+              if (lhead->lh_ngranted[i] != 0)
-+                      granted |= 1 << i;
-+      }
-+      list_for_each_entry_safe(tmp, tmp2,
-+                               &lhead->lh_blocked_list, lk_blocked_list) {
-+              /* conflict with any granted lock? */
-+              if ((htree_lock_compat[tmp->lk_mode] & granted) != granted)
-+                      break;
-+
-+              list_del_init(&tmp->lk_blocked_list);
-+
-+              BUG_ON(lhead->lh_nblocked[tmp->lk_mode] == 0);
-+
-+              lhead->lh_nblocked[tmp->lk_mode]--;
-+              lhead->lh_ngranted[tmp->lk_mode]++;
-+              granted |= 1 << tmp->lk_mode;
-+
-+              BUG_ON(tmp->lk_task == NULL);
-+              wake_up_process(tmp->lk_task);
-+      }
-+}
-+
-+/* wrapper of htree_lock_internal and the exported interface.
-+ * It always returns 1 with the lock granted if @wait is true; it can return 0
-+ * if @wait is false and the locking request can't be granted immediately */
-+int
-+htree_lock_try(struct htree_lock *lck, struct htree_lock_head *lhead,
-+             htree_lock_mode_t mode, int wait)
-+{
-+      int     rc;
-+
-+      BUG_ON(lck->lk_depth > lhead->lh_depth);
-+      BUG_ON(lck->lk_head != NULL);
-+      BUG_ON(lck->lk_task != NULL);
-+
-+      lck->lk_head = lhead;
-+      lck->lk_mode = mode;
-+
-+      htree_spin_lock(lhead, HTREE_DEP_ROOT);
-+      rc = htree_lock_internal(lck, wait);
-+      if (rc != 0)
-+              htree_spin_unlock(lhead, HTREE_DEP_ROOT);
-+      return rc >= 0;
-+}
-+EXPORT_SYMBOL(htree_lock_try);
-+
-+/* wrapper of htree_unlock_internal and the exported interface.
-+ * It will release all htree_node_locks and the htree_lock */
-+void
-+htree_unlock(struct htree_lock *lck)
-+{
-+      BUG_ON(lck->lk_head == NULL);
-+      BUG_ON(lck->lk_mode == HTREE_LOCK_INVAL);
-+
-+      htree_node_release_all(lck);
-+
-+      htree_spin_lock(lck->lk_head, HTREE_DEP_ROOT);
-+      htree_unlock_internal(lck);
-+      htree_spin_unlock(lck->lk_head, HTREE_DEP_ROOT);
-+      lck->lk_head = NULL;
-+      lck->lk_task = NULL;
-+}
-+EXPORT_SYMBOL(htree_unlock);
-+
-+/* change lock mode */
-+void
-+htree_change_mode(struct htree_lock *lck, htree_lock_mode_t mode)
-+{
-+      BUG_ON(lck->lk_mode == HTREE_LOCK_INVAL);
-+      lck->lk_mode = mode;
-+}
-+EXPORT_SYMBOL(htree_change_mode);
-+
-+/* release the htree lock, then lock it again with a new mode.
-+ * This function will first release all htree_node_locks and the htree_lock,
-+ * then try to regain the htree_lock with the new @mode.
-+ * It always returns 1 with the lock granted if @wait is true; it can return 0
-+ * if @wait is false and the locking request can't be granted immediately */
-+int
-+htree_change_lock_try(struct htree_lock *lck, htree_lock_mode_t mode, int wait)
-+{
-+      struct htree_lock_head *lhead = lck->lk_head;
-+      int rc;
-+
-+      BUG_ON(lhead == NULL);
-+      BUG_ON(lck->lk_mode == mode);
-+      BUG_ON(lck->lk_mode == HTREE_LOCK_INVAL || mode == HTREE_LOCK_INVAL);
-+
-+      htree_node_release_all(lck);
-+
-+      htree_spin_lock(lhead, HTREE_DEP_ROOT);
-+      htree_unlock_internal(lck);
-+      lck->lk_mode = mode;
-+      rc = htree_lock_internal(lck, wait);
-+      if (rc != 0)
-+              htree_spin_unlock(lhead, HTREE_DEP_ROOT);
-+      return rc >= 0;
-+}
-+EXPORT_SYMBOL(htree_change_lock_try);
-+
-+/* create a htree_lock head with @depth levels (number of child-locks);
-+ * it is a per-resource structure */
-+struct htree_lock_head *
-+htree_lock_head_alloc(unsigned depth, unsigned hbits, unsigned priv)
-+{
-+      struct htree_lock_head *lhead;
-+      int  i;
-+
-+      if (depth > HTREE_LOCK_DEP_MAX) {
-+              printk(KERN_ERR "%d is larger than max htree_lock depth %d\n",
-+                      depth, HTREE_LOCK_DEP_MAX);
-+              return NULL;
-+      }
-+
-+      lhead = kzalloc(offsetof(struct htree_lock_head,
-+                               lh_children[depth]) + priv, GFP_NOFS);
-+      if (lhead == NULL)
-+              return NULL;
-+
-+      if (hbits < HTREE_HBITS_MIN)
-+              lhead->lh_hbits = HTREE_HBITS_MIN;
-+      else if (hbits > HTREE_HBITS_MAX)
-+              lhead->lh_hbits = HTREE_HBITS_MAX;
-+
-+      lhead->lh_lock = 0;
-+      lhead->lh_depth = depth;
-+      INIT_LIST_HEAD(&lhead->lh_blocked_list);
-+      if (priv > 0) {
-+              lhead->lh_private = (void *)lhead +
-+                      offsetof(struct htree_lock_head, lh_children[depth]);
-+      }
-+
-+      for (i = 0; i < depth; i++) {
-+              INIT_LIST_HEAD(&lhead->lh_children[i].lc_list);
-+              lhead->lh_children[i].lc_events = HTREE_EVENT_DISABLE;
-+      }
-+      return lhead;
-+}
-+EXPORT_SYMBOL(htree_lock_head_alloc);
-+
-+/* free the htree_lock head */
-+void
-+htree_lock_head_free(struct htree_lock_head *lhead)
-+{
-+      int     i;
-+
-+      BUG_ON(!list_empty(&lhead->lh_blocked_list));
-+      for (i = 0; i < lhead->lh_depth; i++)
-+              BUG_ON(!list_empty(&lhead->lh_children[i].lc_list));
-+      kfree(lhead);
-+}
-+EXPORT_SYMBOL(htree_lock_head_free);
-+
-+/* register event callback for @events of child-lock at level @dep */
-+void
-+htree_lock_event_attach(struct htree_lock_head *lhead, unsigned dep,
-+                      unsigned events, htree_event_cb_t callback)
-+{
-+      BUG_ON(lhead->lh_depth <= dep);
-+      lhead->lh_children[dep].lc_events = events;
-+      lhead->lh_children[dep].lc_callback = callback;
-+}
-+EXPORT_SYMBOL(htree_lock_event_attach);
-+
-+/* allocate a htree_lock, which is a per-thread structure; @pbytes is the
-+ * number of extra bytes reserved as private data for the caller */
-+struct htree_lock *
-+htree_lock_alloc(unsigned depth, unsigned pbytes)
-+{
-+      struct htree_lock *lck;
-+      int i = offsetof(struct htree_lock, lk_nodes[depth]);
-+
-+      if (depth > HTREE_LOCK_DEP_MAX) {
-+              printk(KERN_ERR "%d is larger than max htree_lock depth %d\n",
-+                      depth, HTREE_LOCK_DEP_MAX);
-+              return NULL;
-+      }
-+      lck = kzalloc(i + pbytes, GFP_NOFS);
-+      if (lck == NULL)
-+              return NULL;
-+
-+      if (pbytes != 0)
-+              lck->lk_private = (void *)lck + i;
-+      lck->lk_mode = HTREE_LOCK_INVAL;
-+      lck->lk_depth = depth;
-+      INIT_LIST_HEAD(&lck->lk_blocked_list);
-+
-+      for (i = 0; i < depth; i++) {
-+              struct htree_lock_node *node = &lck->lk_nodes[i];
-+
-+              node->ln_mode = HTREE_LOCK_INVAL;
-+              INIT_LIST_HEAD(&node->ln_major_list);
-+              INIT_LIST_HEAD(&node->ln_minor_list);
-+              INIT_LIST_HEAD(&node->ln_alive_list);
-+              INIT_LIST_HEAD(&node->ln_blocked_list);
-+              INIT_LIST_HEAD(&node->ln_granted_list);
-+      }
-+
-+      return lck;
-+}
-+EXPORT_SYMBOL(htree_lock_alloc);
-+
-+/* free htree_lock node */
-+void
-+htree_lock_free(struct htree_lock *lck)
-+{
-+      BUG_ON(lck->lk_mode != HTREE_LOCK_INVAL);
-+      kfree(lck);
-+}
-+EXPORT_SYMBOL(htree_lock_free);
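To make the compatibility check in htree_lock_internal() concrete, here is a worked example using the bit values defined at the top of this file (illustration only, not patch code):

	/*
	 * granted = HTREE_LOCK_BIT_CR = 0x10            (one CR lock is held)
	 *
	 * new PR request: htree_lock_compat[PR] = BIT_CR | BIT_PR = 0x14
	 *                 0x14 & 0x10 == 0x10 == granted  -> compatible, grant it
	 * new EX request: htree_lock_compat[EX] = 0
	 *                 0x00 & 0x10 != granted          -> conflict, block or fail
	 */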
-Index: linux-3.10.0-229.1.2.fc21.x86_64/fs/ext4/Makefile
-===================================================================
---- linux-3.10.0-229.1.2.fc21.x86_64.orig/fs/ext4/Makefile
-+++ linux-3.10.0-229.1.2.fc21.x86_64/fs/ext4/Makefile
-@@ -6,6 +6,7 @@ obj-$(CONFIG_EXT4_FS) += ext4.o
- ext4-y        := balloc.o bitmap.o dir.o file.o fsync.o ialloc.o inode.o page-io.o \
-               ioctl.o namei.o super.o symlink.o hash.o resize.o extents.o \
-+              htree_lock.o \
-               ext4_jbd2.o migrate.o mballoc.o block_validity.o move_extent.o \
-               mmp.o indirect.o extents_status.o xattr.o xattr_user.o \
-               xattr_trusted.o inline.o
-Index: linux-3.10.0-229.1.2.fc21.x86_64/fs/ext4/ext4.h
-===================================================================
---- linux-3.10.0-229.1.2.fc21.x86_64.orig/fs/ext4/ext4.h
-+++ linux-3.10.0-229.1.2.fc21.x86_64/fs/ext4/ext4.h
-@@ -27,6 +27,7 @@
- #include <linux/mutex.h>
- #include <linux/timer.h>
- #include <linux/wait.h>
-+#include <linux/htree_lock.h>
- #include <linux/blockgroup_lock.h>
- #include <linux/percpu_counter.h>
- #include <linux/ratelimit.h>
-@@ -821,6 +822,9 @@ struct ext4_inode_info {
-       __u32   i_dtime;
-       ext4_fsblk_t    i_file_acl;
-+      /* following fields for parallel directory operations -bzzz */
-+      struct semaphore i_append_sem;
-+
-       /*
-        * i_block_group is the number of the block group which contains
-        * this file's inode.  Constant across the lifetime of the inode,
-@@ -1846,6 +1850,71 @@ struct dx_hash_info
-  */
- #define HASH_NB_ALWAYS                1
-+/* assume name-hash is protected by upper layer */
-+#define EXT4_HTREE_LOCK_HASH  0
-+
-+enum ext4_pdo_lk_types {
-+#if EXT4_HTREE_LOCK_HASH
-+      EXT4_LK_HASH,
-+#endif
-+      EXT4_LK_DX,             /* index block */
-+      EXT4_LK_DE,             /* directory entry block */
-+      EXT4_LK_SPIN,           /* spinlock */
-+      EXT4_LK_MAX,
-+};
-+
-+/* read-only bit */
-+#define EXT4_LB_RO(b)         (1 << (b))
-+/* read + write, high bits for writer */
-+#define EXT4_LB_RW(b)         ((1 << (b)) | (1 << (EXT4_LK_MAX + (b))))
-+
-+enum ext4_pdo_lock_bits {
-+      /* DX lock bits */
-+      EXT4_LB_DX_RO           = EXT4_LB_RO(EXT4_LK_DX),
-+      EXT4_LB_DX              = EXT4_LB_RW(EXT4_LK_DX),
-+      /* DE lock bits */
-+      EXT4_LB_DE_RO           = EXT4_LB_RO(EXT4_LK_DE),
-+      EXT4_LB_DE              = EXT4_LB_RW(EXT4_LK_DE),
-+      /* DX spinlock bits */
-+      EXT4_LB_SPIN_RO         = EXT4_LB_RO(EXT4_LK_SPIN),
-+      EXT4_LB_SPIN            = EXT4_LB_RW(EXT4_LK_SPIN),
-+      /* accurate searching */
-+      EXT4_LB_EXACT           = EXT4_LB_RO(EXT4_LK_MAX << 1),
-+};
-+
-+enum ext4_pdo_lock_opc {
-+      /* external */
-+      EXT4_HLOCK_READDIR      = (EXT4_LB_DE_RO | EXT4_LB_DX_RO),
-+      EXT4_HLOCK_LOOKUP       = (EXT4_LB_DE_RO | EXT4_LB_SPIN_RO |
-+                                 EXT4_LB_EXACT),
-+      EXT4_HLOCK_DEL          = (EXT4_LB_DE | EXT4_LB_SPIN_RO |
-+                                 EXT4_LB_EXACT),
-+      EXT4_HLOCK_ADD          = (EXT4_LB_DE | EXT4_LB_SPIN_RO),
-+
-+      /* internal */
-+      EXT4_HLOCK_LOOKUP_SAFE  = (EXT4_LB_DE_RO | EXT4_LB_DX_RO |
-+                                 EXT4_LB_EXACT),
-+      EXT4_HLOCK_DEL_SAFE     = (EXT4_LB_DE | EXT4_LB_DX_RO | EXT4_LB_EXACT),
-+      EXT4_HLOCK_SPLIT        = (EXT4_LB_DE | EXT4_LB_DX | EXT4_LB_SPIN),
-+};
-+
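A hedged worked example of how these lock bits compose, assuming EXT4_HTREE_LOCK_HASH stays 0 (so EXT4_LK_DX = 0, EXT4_LK_DE = 1, EXT4_LK_SPIN = 2, EXT4_LK_MAX = 3):

	/*
	 * EXT4_LB_DE_RO     = 1 << 1              = 0x02  (reader bit)
	 * EXT4_LB_DE        = 0x02 | 1 << (3 + 1) = 0x12  (reader + writer bits)
	 * EXT4_LB_EXACT     = 1 << (3 << 1)       = 0x40
	 * EXT4_HLOCK_LOOKUP = EXT4_LB_DE_RO | EXT4_LB_SPIN_RO | EXT4_LB_EXACT
	 *                   = 0x02 | 0x04 | 0x40  = 0x46
	 *
	 * namei.c recovers the child-lock level from such a mask with
	 * ffz(~lmask), e.g. ffz(~EXT4_LB_DE) == 1 == EXT4_LK_DE.
	 */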
-+extern struct htree_lock_head *ext4_htree_lock_head_alloc(unsigned hbits);
-+#define ext4_htree_lock_head_free(lhead)      htree_lock_head_free(lhead)
-+
-+extern struct htree_lock *ext4_htree_lock_alloc(void);
-+#define ext4_htree_lock_free(lck)             htree_lock_free(lck)
-+
-+extern void ext4_htree_lock(struct htree_lock *lck,
-+                          struct htree_lock_head *lhead,
-+                          struct inode *dir, unsigned flags);
-+#define ext4_htree_unlock(lck)                  htree_unlock(lck)
-+
-+extern struct buffer_head *__ext4_find_entry(struct inode *dir,
-+                                      const struct qstr *d_name,
-+                                      struct ext4_dir_entry_2 **res_dir,
-+                                      int *inlined, struct htree_lock *lck);
-+extern int __ext4_add_entry(handle_t *handle, struct dentry *dentry,
-+                    struct inode *inode, struct htree_lock *lck);
- /*
-  * Describe an inode's exact location on disk and in memory
-@@ -2088,9 +2157,17 @@ void ext4_insert_dentry(struct inode *in
-                       const char *name, int namelen, void *data);
- static inline void ext4_update_dx_flag(struct inode *inode)
- {
-+      /* Disable it for ldiskfs, because going from a DX directory to
-+       * a non-DX directory while it is in use will completely break
-+       * the htree-locking.
-+       * If we really want to support this operation in the future,
-+       * we need to exclusively lock the directory at here which will
-+       * increase complexity of code */
-+#if 0
-       if (!EXT4_HAS_COMPAT_FEATURE(inode->i_sb,
-                                    EXT4_FEATURE_COMPAT_DIR_INDEX))
-               ext4_clear_inode_flag(inode, EXT4_INODE_INDEX);
-+#endif
- }
- static unsigned char ext4_filetype_table[] = {
-       DT_UNKNOWN, DT_REG, DT_DIR, DT_CHR, DT_BLK, DT_FIFO, DT_SOCK, DT_LNK
-Index: linux-3.10.0-229.1.2.fc21.x86_64/fs/ext4/namei.c
-===================================================================
---- linux-3.10.0-229.1.2.fc21.x86_64.orig/fs/ext4/namei.c
-+++ linux-3.10.0-229.1.2.fc21.x86_64/fs/ext4/namei.c
-@@ -53,6 +53,7 @@ struct buffer_head *ext4_append(handle_t
-                                       ext4_lblk_t *block)
- {
-       struct buffer_head *bh;
-+      struct ext4_inode_info *ei = EXT4_I(inode);
-       int err = 0;
-       if (unlikely(EXT4_SB(inode->i_sb)->s_max_dir_size_kb &&
-@@ -60,15 +61,22 @@ struct buffer_head *ext4_append(handle_t
-                     EXT4_SB(inode->i_sb)->s_max_dir_size_kb)))
-               return ERR_PTR(-ENOSPC);
-+      /* with parallel dir operations all appends
-+      * have to be serialized -bzzz */
-+      down(&ei->i_append_sem);
-+
-       *block = inode->i_size >> inode->i_sb->s_blocksize_bits;
-       bh = ext4_bread(handle, inode, *block, 1, &err);
--      if (!bh)
-+      if (!bh) {
-+              up(&ei->i_append_sem);
-               return ERR_PTR(err);
-+      }
-       inode->i_size += inode->i_sb->s_blocksize;
-       EXT4_I(inode)->i_disksize = inode->i_size;
-       BUFFER_TRACE(bh, "get_write_access");
-       err = ext4_journal_get_write_access(handle, bh);
-+      up(&ei->i_append_sem);
-       if (err) {
-               brelse(bh);
-               ext4_std_error(inode->i_sb, err);
-@@ -246,7 +254,7 @@ static struct dx_frame *dx_probe(const s
-                                struct inode *dir,
-                                struct dx_hash_info *hinfo,
-                                struct dx_frame *frame,
--                               int *err);
-+                               struct htree_lock *lck, int *err);
- static void dx_release(struct dx_frame *frames);
- static int dx_make_map(struct ext4_dir_entry_2 *de, unsigned blocksize,
-                      struct dx_hash_info *hinfo, struct dx_map_entry map[]);
-@@ -259,13 +267,13 @@ static void dx_insert_block(struct dx_fr
- static int ext4_htree_next_block(struct inode *dir, __u32 hash,
-                                struct dx_frame *frame,
-                                struct dx_frame *frames,
--                               __u32 *start_hash);
-+                               __u32 *start_hash, struct htree_lock *lck);
- static struct buffer_head * ext4_dx_find_entry(struct inode *dir,
-               const struct qstr *d_name,
-               struct ext4_dir_entry_2 **res_dir,
--              int *err);
-+              struct htree_lock *lck, int *err);
- static int ext4_dx_add_entry(handle_t *handle, struct dentry *dentry,
--                           struct inode *inode);
-+                           struct inode *inode, struct htree_lock *lck);
- /* checksumming functions */
- void initialize_dirent_tail(struct ext4_dir_entry_tail *t,
-@@ -668,6 +676,227 @@ struct stats dx_show_entries(struct dx_h
- }
- #endif /* DX_DEBUG */
-+/* private data for htree_lock */
-+struct ext4_dir_lock_data {
-+      unsigned                ld_flags;  /* bits-map for lock types */
-+      unsigned                ld_count;  /* # entries of the last DX block */
-+      struct dx_entry         ld_at_entry; /* copy of leaf dx_entry */
-+      struct dx_entry         *ld_at;    /* position of leaf dx_entry */
-+};
-+
-+#define ext4_htree_lock_data(l)       ((struct ext4_dir_lock_data *)(l)->lk_private)
-+#define ext4_find_entry(dir, name, dirent, inline) \
-+                      __ext4_find_entry(dir, name, dirent, inline, NULL)
-+#define ext4_add_entry(handle, dentry, inode) \
-+                      __ext4_add_entry(handle, dentry, inode, NULL)
-+
-+/* NB: ext4_lblk_t is 32 bits so we use high bits to identify invalid blk */
-+#define EXT4_HTREE_NODE_CHANGED       (0xcafeULL << 32)
-+
-+static void ext4_htree_event_cb(void *target, void *event)
-+{
-+      u64 *block = (u64 *)target;
-+
-+      if (*block == dx_get_block((struct dx_entry *)event))
-+              *block = EXT4_HTREE_NODE_CHANGED;
-+}
-+
-+struct htree_lock_head *ext4_htree_lock_head_alloc(unsigned hbits)
-+{
-+      struct htree_lock_head *lhead;
-+
-+      lhead = htree_lock_head_alloc(EXT4_LK_MAX, hbits, 0);
-+      if (lhead != NULL) {
-+              htree_lock_event_attach(lhead, EXT4_LK_SPIN, HTREE_EVENT_WR,
-+                                      ext4_htree_event_cb);
-+      }
-+      return lhead;
-+}
-+EXPORT_SYMBOL(ext4_htree_lock_head_alloc);
-+
-+struct htree_lock *ext4_htree_lock_alloc(void)
-+{
-+      return htree_lock_alloc(EXT4_LK_MAX,
-+                              sizeof(struct ext4_dir_lock_data));
-+}
-+EXPORT_SYMBOL(ext4_htree_lock_alloc);
-+
-+static htree_lock_mode_t ext4_htree_mode(unsigned flags)
-+{
-+      switch (flags) {
-+      default: /* 0 or unknown flags require EX lock */
-+              return HTREE_LOCK_EX;
-+      case EXT4_HLOCK_READDIR:
-+              return HTREE_LOCK_PR;
-+      case EXT4_HLOCK_LOOKUP:
-+              return HTREE_LOCK_CR;
-+      case EXT4_HLOCK_DEL:
-+      case EXT4_HLOCK_ADD:
-+              return HTREE_LOCK_CW;
-+      }
-+}
-+
-+/* return PR for read-only operations, otherwise return EX */
-+static inline htree_lock_mode_t ext4_htree_safe_mode(unsigned flags)
-+{
-+      int writer = (flags & EXT4_LB_DE) == EXT4_LB_DE;
-+
-+      /* 0 requires EX lock */
-+      return (flags == 0 || writer) ? HTREE_LOCK_EX : HTREE_LOCK_PR;
-+}
-+
-+static int ext4_htree_safe_locked(struct htree_lock *lck)
-+{
-+      int writer;
-+
-+      if (lck == NULL || lck->lk_mode == HTREE_LOCK_EX)
-+              return 1;
-+
-+      writer = (ext4_htree_lock_data(lck)->ld_flags & EXT4_LB_DE) ==
-+               EXT4_LB_DE;
-+      if (writer) /* all readers & writers are excluded? */
-+              return lck->lk_mode == HTREE_LOCK_EX;
-+
-+      /* all writers are excluded? */
-+      return lck->lk_mode == HTREE_LOCK_PR ||
-+             lck->lk_mode == HTREE_LOCK_PW ||
-+             lck->lk_mode == HTREE_LOCK_EX;
-+}
-+
-+/* relock htree_lock with EX mode if it's a change operation, otherwise
-+ * relock it with PR mode. It's a noop if PDO is disabled. */
-+static void ext4_htree_safe_relock(struct htree_lock *lck)
-+{
-+      if (!ext4_htree_safe_locked(lck)) {
-+              unsigned flags = ext4_htree_lock_data(lck)->ld_flags;
-+
-+              htree_change_lock(lck, ext4_htree_safe_mode(flags));
-+      }
-+}
-+
-+void ext4_htree_lock(struct htree_lock *lck, struct htree_lock_head *lhead,
-+                   struct inode *dir, unsigned flags)
-+{
-+      htree_lock_mode_t mode = is_dx(dir) ? ext4_htree_mode(flags) :
-+                                            ext4_htree_safe_mode(flags);
-+
-+      ext4_htree_lock_data(lck)->ld_flags = flags;
-+      htree_lock(lck, lhead, mode);
-+      if (!is_dx(dir))
-+              ext4_htree_safe_relock(lck); /* make sure it's safe locked */
-+}
-+EXPORT_SYMBOL(ext4_htree_lock);
-+
-+static int ext4_htree_node_lock(struct htree_lock *lck, struct dx_entry *at,
-+                              unsigned lmask, int wait, void *ev)
-+{
-+      u32     key = (at == NULL) ? 0 : dx_get_block(at);
-+      u32     mode;
-+
-+      /* NOOP if htree is well protected or caller doesn't require the lock */
-+      if (ext4_htree_safe_locked(lck) ||
-+         !(ext4_htree_lock_data(lck)->ld_flags & lmask))
-+              return 1;
-+
-+      mode = (ext4_htree_lock_data(lck)->ld_flags & lmask) == lmask ?
-+              HTREE_LOCK_PW : HTREE_LOCK_PR;
-+      while (1) {
-+              if (htree_node_lock_try(lck, mode, key, ffz(~lmask), wait, ev))
-+                      return 1;
-+              if (!(lmask & EXT4_LB_SPIN)) /* not a spinlock */
-+                      return 0;
-+              cpu_relax(); /* spin until granted */
-+      }
-+}
-+
-+static int ext4_htree_node_locked(struct htree_lock *lck, unsigned lmask)
-+{
-+      return ext4_htree_safe_locked(lck) ||
-+             htree_node_is_granted(lck, ffz(~lmask));
-+}
-+
-+static void ext4_htree_node_unlock(struct htree_lock *lck,
-+                                 unsigned lmask, void *buf)
-+{
-+      /* NB: it's safe to call multiple times, even if it's not locked */
-+      if (!ext4_htree_safe_locked(lck) &&
-+           htree_node_is_granted(lck, ffz(~lmask)))
-+              htree_node_unlock(lck, ffz(~lmask), buf);
-+}
-+
-+#define ext4_htree_dx_lock(lck, key)          \
-+      ext4_htree_node_lock(lck, key, EXT4_LB_DX, 1, NULL)
-+#define ext4_htree_dx_lock_try(lck, key)      \
-+      ext4_htree_node_lock(lck, key, EXT4_LB_DX, 0, NULL)
-+#define ext4_htree_dx_unlock(lck)             \
-+      ext4_htree_node_unlock(lck, EXT4_LB_DX, NULL)
-+#define ext4_htree_dx_locked(lck)             \
-+      ext4_htree_node_locked(lck, EXT4_LB_DX)
-+
-+static void ext4_htree_dx_need_lock(struct htree_lock *lck)
-+{
-+      struct ext4_dir_lock_data *ld;
-+
-+      if (ext4_htree_safe_locked(lck))
-+              return;
-+
-+      ld = ext4_htree_lock_data(lck);
-+      switch (ld->ld_flags) {
-+      default:
-+              return;
-+      case EXT4_HLOCK_LOOKUP:
-+              ld->ld_flags = EXT4_HLOCK_LOOKUP_SAFE;
-+              return;
-+      case EXT4_HLOCK_DEL:
-+              ld->ld_flags = EXT4_HLOCK_DEL_SAFE;
-+              return;
-+      case EXT4_HLOCK_ADD:
-+              ld->ld_flags = EXT4_HLOCK_SPLIT;
-+              return;
-+      }
-+}
-+
-+#define ext4_htree_de_lock(lck, key)          \
-+      ext4_htree_node_lock(lck, key, EXT4_LB_DE, 1, NULL)
-+#define ext4_htree_de_unlock(lck)             \
-+      ext4_htree_node_unlock(lck, EXT4_LB_DE, NULL)
-+
-+#define ext4_htree_spin_lock(lck, key, event) \
-+      ext4_htree_node_lock(lck, key, EXT4_LB_SPIN, 0, event)
-+#define ext4_htree_spin_unlock(lck)           \
-+      ext4_htree_node_unlock(lck, EXT4_LB_SPIN, NULL)
-+#define ext4_htree_spin_unlock_listen(lck, p) \
-+      ext4_htree_node_unlock(lck, EXT4_LB_SPIN, p)
-+
-+static void ext4_htree_spin_stop_listen(struct htree_lock *lck)
-+{
-+      if (!ext4_htree_safe_locked(lck) &&
-+          htree_node_is_listening(lck, ffz(~EXT4_LB_SPIN)))
-+              htree_node_stop_listen(lck, ffz(~EXT4_LB_SPIN));
-+}
-+
-+enum {
-+      DX_HASH_COL_IGNORE,     /* ignore collision while probing frames */
-+      DX_HASH_COL_YES,        /* there is collision and it does matter */
-+      DX_HASH_COL_NO,         /* there is no collision */
-+};
-+
-+static int dx_probe_hash_collision(struct htree_lock *lck,
-+                                 struct dx_entry *entries,
-+                                 struct dx_entry *at, u32 hash)
-+{
-+      if (!(lck && ext4_htree_lock_data(lck)->ld_flags & EXT4_LB_EXACT)) {
-+              return DX_HASH_COL_IGNORE; /* don't care about collision */
-+
-+      } else if (at == entries + dx_get_count(entries) - 1) {
-+              return DX_HASH_COL_IGNORE; /* not in any leaf of this DX */
-+
-+      } else { /* hash collision? */
-+              return ((dx_get_hash(at + 1) & ~1) == hash) ?
-+                      DX_HASH_COL_YES : DX_HASH_COL_NO;
-+      }
-+}
-+
- /*
-  * Probe for a directory leaf block to search.
-  *
-@@ -679,10 +908,11 @@ struct stats dx_show_entries(struct dx_h
-  */
- static struct dx_frame *
- dx_probe(const struct qstr *d_name, struct inode *dir,
--       struct dx_hash_info *hinfo, struct dx_frame *frame_in, int *err)
-+       struct dx_hash_info *hinfo, struct dx_frame *frame_in,
-+       struct htree_lock *lck, int *err)
- {
-       unsigned count, indirect;
--      struct dx_entry *at, *entries, *p, *q, *m;
-+      struct dx_entry *at, *entries, *p, *q, *m, *dx = NULL;
-       struct dx_root_info *info;
-       struct buffer_head *bh;
-       struct dx_frame *frame = frame_in;
-@@ -750,8 +980,15 @@ dx_probe(const struct qstr *d_name, stru
-       dxtrace(printk("Look up %x", hash));
-       while (1)
-       {
-+              if (indirect == 0) { /* the last index level */
-+                      /* NB: ext4_htree_dx_lock() could be noop if
-+                       * DX-lock flag is not set for current operation */
-+                      ext4_htree_dx_lock(lck, dx);
-+                      ext4_htree_spin_lock(lck, dx, NULL);
-+              }
-               count = dx_get_count(entries);
--              if (!count || count > dx_get_limit(entries)) {
-+              if (count == 0 || count > dx_get_limit(entries)) {
-+                      ext4_htree_spin_unlock(lck); /* release spin */
-                       ext4_warning(dir->i_sb,
-                                    "dx entry: no count or count > limit");
-                       brelse(bh);
-@@ -792,7 +1029,70 @@ dx_probe(const struct qstr *d_name, stru
-               frame->bh = bh;
-               frame->entries = entries;
-               frame->at = at;
--              if (!indirect--) return frame;
-+
-+              if (indirect == 0) { /* the last index level */
-+                      struct ext4_dir_lock_data *ld;
-+                      u64 myblock;
-+
-+                      /* By default we only lock DE-block, however, we will
-+                       * also lock the last level DX-block if:
-+                       * a) there is hash collision
-+                       *    we will set DX-lock flag (a few lines below)
-+                       *    and retry to lock the DX-block
-+                       *    see detail in dx_probe_hash_collision()
-+                       * b) it's a retry from splitting
-+                       *    we need to lock the last level DX-block so nobody
-+                       *    else can split any leaf blocks under the same
-+                       *    DX-block, see detail in ext4_dx_add_entry()
-+                       */
-+                      if (ext4_htree_dx_locked(lck)) {
-+                              /* DX-block is locked, just lock DE-block
-+                               * and return */
-+                              ext4_htree_spin_unlock(lck);
-+                              if (!ext4_htree_safe_locked(lck))
-+                                      ext4_htree_de_lock(lck, frame->at);
-+                              return frame;
-+                      }
-+                      /* it's pdirop and no DX lock */
-+                      if (dx_probe_hash_collision(lck, entries, at, hash) ==
-+                          DX_HASH_COL_YES) {
-+                              /* found hash collision, set DX-lock flag
-+                               * and retry to obtain DX-lock */
-+                              ext4_htree_spin_unlock(lck);
-+                              ext4_htree_dx_need_lock(lck);
-+                              continue;
-+                      }
-+                      ld = ext4_htree_lock_data(lck);
-+                      /* because I don't lock DX, @at can't be trusted
-+                       * after I release the spinlock, so I have to save it */
-+                      ld->ld_at = at;
-+                      ld->ld_at_entry = *at;
-+                      ld->ld_count = dx_get_count(entries);
-+
-+                      frame->at = &ld->ld_at_entry;
-+                      myblock = dx_get_block(at);
-+
-+                      /* NB: ordering locking */
-+                      ext4_htree_spin_unlock_listen(lck, &myblock);
-+                      /* another thread can split this DE-block because:
-+                       * a) I don't have a lock on the DE-block yet
-+                       * b) I released the spinlock on the DX-block
-+                       * if that happens I can detect it by listening for a
-+                       * split event on this DE-block */
-+                      ext4_htree_de_lock(lck, frame->at);
-+                      ext4_htree_spin_stop_listen(lck);
-+
-+                      if (myblock == EXT4_HTREE_NODE_CHANGED) {
-+                              /* someone split this DE-block before
-+                               * I locked it; I need to retry and lock
-+                               * the valid DE-block */
-+                              ext4_htree_de_unlock(lck);
-+                              continue;
-+                      }
-+                      return frame;
-+              }
-+              dx = at;
-+              indirect--;
-               bh = ext4_read_dirblock(dir, dx_get_block(at), INDEX);
-               if (IS_ERR(bh)) {
-                       *err = PTR_ERR(bh);
-@@ -860,7 +1160,7 @@ static void dx_release (struct dx_frame
- static int ext4_htree_next_block(struct inode *dir, __u32 hash,
-                                struct dx_frame *frame,
-                                struct dx_frame *frames,
--                               __u32 *start_hash)
-+                               __u32 *start_hash, struct htree_lock *lck)
- {
-       struct dx_frame *p;
-       struct buffer_head *bh;
-@@ -875,12 +1175,22 @@ static int ext4_htree_next_block(struct
-        * this loop, num_frames indicates the number of interior
-        * nodes need to be read.
-        */
-+      ext4_htree_de_unlock(lck);
-       while (1) {
--              if (++(p->at) < p->entries + dx_get_count(p->entries))
--                      break;
-+              if (num_frames > 0 || ext4_htree_dx_locked(lck)) {
-+                      /* num_frames > 0 :
-+                       *   DX block
-+                       * ext4_htree_dx_locked:
-+                       *   frame->at is reliable pointer returned by dx_probe,
-+                       *   otherwise dx_probe already knew no collision */
-+                      if (++(p->at) < p->entries + dx_get_count(p->entries))
-+                              break;
-+              }
-               if (p == frames)
-                       return 0;
-               num_frames++;
-+              if (num_frames == 1)
-+                      ext4_htree_dx_unlock(lck);
-               p--;
-       }
-@@ -903,6 +1213,13 @@ static int ext4_htree_next_block(struct
-        * block so no check is necessary
-        */
-       while (num_frames--) {
-+              if (num_frames == 0) {
-+                      /* it's not always necessary, we just don't want to
-+                       * detect hash collision again */
-+                      ext4_htree_dx_need_lock(lck);
-+                      ext4_htree_dx_lock(lck, p->at);
-+              }
-+
-               bh = ext4_read_dirblock(dir, dx_get_block(p->at), INDEX);
-               if (IS_ERR(bh))
-                       return PTR_ERR(bh);
-@@ -911,6 +1228,7 @@ static int ext4_htree_next_block(struct
-               p->bh = bh;
-               p->at = p->entries = ((struct dx_node *) bh->b_data)->entries;
-       }
-+      ext4_htree_de_lock(lck, p->at);
-       return 1;
- }
-@@ -1013,10 +1331,10 @@ int ext4_htree_fill_tree(struct file *di
-       }
-       hinfo.hash = start_hash;
-       hinfo.minor_hash = 0;
--      frame = dx_probe(NULL, dir, &hinfo, frames, &err);
-+      /* assume it's PR locked */
-+      frame = dx_probe(NULL, dir, &hinfo, frames, NULL, &err);
-       if (!frame)
-               return err;
--
-       /* Add '.' and '..' from the htree header */
-       if (!start_hash && !start_minor_hash) {
-               de = (struct ext4_dir_entry_2 *) frames[0].bh->b_data;
-@@ -1043,7 +1361,7 @@ int ext4_htree_fill_tree(struct file *di
-               count += ret;
-               hashval = ~0;
-               ret = ext4_htree_next_block(dir, HASH_NB_ALWAYS,
--                                          frame, frames, &hashval);
-+                                          frame, frames, &hashval, NULL);
-               *next_hash = hashval;
-               if (ret < 0) {
-                       err = ret;
-@@ -1236,10 +1554,10 @@ static int is_dx_internal_node(struct in
-  * The returned buffer_head has ->b_count elevated.  The caller is expected
-  * to brelse() it when appropriate.
-  */
--static struct buffer_head * ext4_find_entry (struct inode *dir,
-+struct buffer_head *__ext4_find_entry(struct inode *dir,
-                                       const struct qstr *d_name,
-                                       struct ext4_dir_entry_2 **res_dir,
--                                      int *inlined)
-+                                      int *inlined, struct htree_lock *lck)
- {
-       struct super_block *sb;
-       struct buffer_head *bh_use[NAMEI_RA_SIZE];
-@@ -1283,7 +1601,7 @@ static struct buffer_head * ext4_find_en
-               goto restart;
-       }
-       if (is_dx(dir)) {
--              bh = ext4_dx_find_entry(dir, d_name, res_dir, &err);
-+              bh = ext4_dx_find_entry(dir, d_name, res_dir, lck, &err);
-               /*
-                * On success, or if the error was file not found,
-                * return.  Otherwise, fall back to doing a search the
-@@ -1297,6 +1615,7 @@ static struct buffer_head * ext4_find_en
-                       return bh;
-               dxtrace(printk(KERN_DEBUG "ext4_find_entry: dx failed, "
-                              "falling back\n"));
-+              ext4_htree_safe_relock(lck);
-       }
-       nblocks = dir->i_size >> EXT4_BLOCK_SIZE_BITS(sb);
-       start = EXT4_I(dir)->i_dir_start_lookup;
-@@ -1389,9 +1708,12 @@ cleanup_and_exit:
-               brelse(bh_use[ra_ptr]);
-       return ret;
- }
-+EXPORT_SYMBOL(__ext4_find_entry);
--static struct buffer_head * ext4_dx_find_entry(struct inode *dir, const struct qstr *d_name,
--                     struct ext4_dir_entry_2 **res_dir, int *err)
-+static struct buffer_head *ext4_dx_find_entry(struct inode *dir,
-+                              const struct qstr *d_name,
-+                              struct ext4_dir_entry_2 **res_dir,
-+                              struct htree_lock *lck, int *err)
- {
-       struct super_block * sb = dir->i_sb;
-       struct dx_hash_info     hinfo;
-@@ -1400,7 +1722,7 @@ static struct buffer_head * ext4_dx_find
-       ext4_lblk_t block;
-       int retval;
--      if (!(frame = dx_probe(d_name, dir, &hinfo, frames, err)))
-+      if (!(frame = dx_probe(d_name, dir, &hinfo, frames, lck, err)))
-               return NULL;
-       do {
-               block = dx_get_block(frame->at);
-@@ -1424,7 +1746,7 @@ static struct buffer_head * ext4_dx_find
-               /* Check to see if we should continue to search */
-               retval = ext4_htree_next_block(dir, hinfo.hash, frame,
--                                             frames, NULL);
-+                                             frames, NULL, lck);
-               if (retval < 0) {
-                       ext4_warning(sb,
-                            "error reading index page in directory #%lu",
-@@ -1583,8 +1905,9 @@ static struct ext4_dir_entry_2* dx_pack_
-  * Returns pointer to de in block into which the new entry will be inserted.
-  */
- static struct ext4_dir_entry_2 *do_split(handle_t *handle, struct inode *dir,
--                      struct buffer_head **bh,struct dx_frame *frame,
--                      struct dx_hash_info *hinfo, int *error)
-+                      struct buffer_head **bh, struct dx_frame *frames,
-+                      struct dx_frame *frame, struct dx_hash_info *hinfo,
-+                      struct htree_lock *lck, int *error)
- {
-       unsigned blocksize = dir->i_sb->s_blocksize;
-       unsigned count, continued;
-@@ -1647,7 +1970,14 @@ static struct ext4_dir_entry_2 *do_split
-                                       hash2, split, count-split));
-       /* Fancy dance to stay within two buffers */
--      de2 = dx_move_dirents(data1, data2, map + split, count - split, blocksize);
-+      if (hinfo->hash < hash2) {
-+              de2 = dx_move_dirents(data1, data2, map + split,
-+                                    count - split, blocksize);
-+      } else {
-+              /* make sure we will add entry to the same block which
-+               * we have already locked */
-+              de2 = dx_move_dirents(data1, data2, map, split, blocksize);
-+      }
-       de = dx_pack_dirents(data1, blocksize);
-       de->rec_len = ext4_rec_len_to_disk(data1 + (blocksize - csum_size) -
-                                          (char *) de,
-@@ -1666,13 +1996,21 @@ static struct ext4_dir_entry_2 *do_split
-       dxtrace(dx_show_leaf (hinfo, (struct ext4_dir_entry_2 *) data1, blocksize, 1));
-       dxtrace(dx_show_leaf (hinfo, (struct ext4_dir_entry_2 *) data2, blocksize, 1));
--      /* Which block gets the new entry? */
--      if (hinfo->hash >= hash2)
--      {
--              swap(*bh, bh2);
--              de = de2;
-+      ext4_htree_spin_lock(lck, frame > frames ? (frame - 1)->at : NULL,
-+                           frame->at); /* notify block is being split */
-+      if (hinfo->hash < hash2) {
-+              dx_insert_block(frame, hash2 + continued, newblock);
-+
-+      } else {
-+              /* switch block number */
-+              dx_insert_block(frame, hash2 + continued,
-+                              dx_get_block(frame->at));
-+              dx_set_block(frame->at, newblock);
-+              (frame->at)++;
-       }
--      dx_insert_block(frame, hash2 + continued, newblock);
-+      ext4_htree_spin_unlock(lck);
-+      ext4_htree_dx_unlock(lck);
-+
-       err = ext4_handle_dirty_dirent_node(handle, dir, bh2);
-       if (err)
-               goto journal_error;
-@@ -1945,7 +2283,7 @@ static int make_indexed_dir(handle_t *ha
-       ext4_handle_dirty_dx_node(handle, dir, frame->bh);
-       ext4_handle_dirty_dirent_node(handle, dir, bh);
--      de = do_split(handle,dir, &bh, frame, &hinfo, &retval);
-+      de = do_split(handle, dir, &bh, frames, frame, &hinfo, NULL, &retval);
-       if (!de) {
-               /*
-                * Even if the block split failed, we have to properly write
-@@ -2051,8 +2389,8 @@ out:
-  * may not sleep between calling this and putting something into
-  * the entry, as someone else might have used it while you slept.
-  */
--static int ext4_add_entry(handle_t *handle, struct dentry *dentry,
--                        struct inode *inode)
-+int __ext4_add_entry(handle_t *handle, struct dentry *dentry,
-+                    struct inode *inode, struct htree_lock *lck)
- {
-       struct inode *dir = dentry->d_parent->d_inode;
-       struct buffer_head *bh;
-@@ -2087,9 +2425,10 @@ static int ext4_add_entry(handle_t *hand
-               if (dentry->d_name.len == 2 &&
-                   memcmp(dentry->d_name.name, "..", 2) == 0)
-                       return ext4_update_dotdot(handle, dentry, inode);
--              retval = ext4_dx_add_entry(handle, dentry, inode);
-+              retval = ext4_dx_add_entry(handle, dentry, inode, lck);
-               if (!retval || (retval != ERR_BAD_DX_DIR))
-                       goto out;
-+              ext4_htree_safe_relock(lck);
-               ext4_clear_inode_flag(dir, EXT4_INODE_INDEX);
-               dx_fallback++;
-               ext4_mark_inode_dirty(handle, dir);
-@@ -2129,12 +2468,13 @@ static int ext4_add_entry(handle_t *hand
-               ext4_set_inode_state(inode, EXT4_STATE_NEWENTRY);
-       return retval;
- }
-+EXPORT_SYMBOL(__ext4_add_entry);
- /*
-  * Returns 0 for success, or a negative error value
-  */
- static int ext4_dx_add_entry(handle_t *handle, struct dentry *dentry,
--                           struct inode *inode)
-+                           struct inode *inode, struct htree_lock *lck)
- {
-       struct dx_frame frames[EXT4_HTREE_LEVEL], *frame;
-       struct dx_entry *entries, *at;
-@@ -2148,7 +2488,7 @@ static int ext4_dx_add_entry(handle_t *h
- again:
-       restart = 0;
--      frame = dx_probe(&dentry->d_name, dir, &hinfo, frames, &err);
-+      frame = dx_probe(&dentry->d_name, dir, &hinfo, frames, lck, &err);
-       if (!frame)
-               return err;
-       entries = frame->entries;
-@@ -2178,6 +2518,11 @@ again:
-               struct dx_node *node2;
-               struct buffer_head *bh2;
-+              if (!ext4_htree_safe_locked(lck)) { /* retry with EX lock */
-+                      ext4_htree_safe_relock(lck);
-+                      restart = 1;
-+                      goto cleanup;
-+              }
-               while (frame > frames) {
-                       if (dx_get_count((frame - 1)->entries) <
-                           dx_get_limit((frame - 1)->entries)) {
-@@ -2277,16 +2622,43 @@ again:
-                       restart = 1;
-                       goto journal_error;
-               }
-+      } else if (!ext4_htree_dx_locked(lck)) {
-+              struct ext4_dir_lock_data *ld = ext4_htree_lock_data(lck);
-+
-+              /* not well protected, require DX lock */
-+              ext4_htree_dx_need_lock(lck);
-+              at = frame > frames ? (frame - 1)->at : NULL;
-+
-+              /* NB: no risk of deadlock because it's just a try.
-+               *
-+               * NB: we check ld_count twice, the first time before
-+               * taking the DX lock, the second time after holding it.
-+               *
-+               * NB: we never free blocks for directories so far, which
-+               * means the value returned by dx_get_count() should equal
-+               * ld->ld_count if nobody has split any DE-block under @at,
-+               * and ld->ld_at still points to a valid dx_entry. */
-+              if ((ld->ld_count != dx_get_count(entries)) ||
-+                  !ext4_htree_dx_lock_try(lck, at) ||
-+                  (ld->ld_count != dx_get_count(entries))) {
-+                      restart = 1;
-+                      goto cleanup;
-+              }
-+              /* OK, I've got DX lock and nothing changed */
-+              frame->at = ld->ld_at;
-       }
--      de = do_split(handle, dir, &bh, frame, &hinfo, &err);
-+      de = do_split(handle, dir, &bh, frames, frame, &hinfo, lck, &err);
-       if (!de)
-               goto cleanup;
-+
-       err = add_dirent_to_buf(handle, dentry, inode, de, bh);
-       goto cleanup;
- journal_error:
-       ext4_std_error(dir->i_sb, err); /* this is a no-op if err == 0 */
- cleanup:
-+      ext4_htree_dx_unlock(lck);
-+      ext4_htree_de_unlock(lck);
-       brelse(bh);
-       dx_release(frames);
-       /* @restart is true means htree-path has been changed, we need to
-Index: linux-3.10.0-229.1.2.fc21.x86_64/fs/ext4/super.c
-===================================================================
---- linux-3.10.0-229.1.2.fc21.x86_64.orig/fs/ext4/super.c
-+++ linux-3.10.0-229.1.2.fc21.x86_64/fs/ext4/super.c
-@@ -875,5 +875,6 @@ static struct inode *ext4_alloc_inode(st
-       ei->vfs_inode.i_version = 1;
-+      sema_init(&ei->i_append_sem, 1);
-       INIT_LIST_HEAD(&ei->i_prealloc_list);
-       spin_lock_init(&ei->i_prealloc_lock);
-       ext4_es_init_tree(&ei->i_es_tree);
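
Note: the dx_probe() changes in the pdirop patch above only escalate to a DX lock
when dx_probe_hash_collision() sees that the next dx_entry carries the same hash
once its low "continued" bit is masked off, i.e. entries with that hash may spill
into the next leaf block. A minimal user-space sketch of that check follows;
struct demo_dx_entry and demo_hash_collision() are illustrative stand-ins, not
kernel code.

    #include <stdint.h>
    #include <stdio.h>

    /* stand-in for the on-disk dx_entry: a hash plus a leaf block number */
    struct demo_dx_entry {
            uint32_t hash;   /* low bit may mark a "continued" collision chain */
            uint32_t block;
    };

    /* mirrors dx_probe_hash_collision(): the collision only matters when the
     * entry *after* the probed one carries the same hash with the low bit
     * ignored, i.e. entries with this hash may continue in the next leaf */
    static int demo_hash_collision(const struct demo_dx_entry *entries,
                                   int count, int at, uint32_t hash)
    {
            if (at == count - 1)
                    return 0;       /* probed entry is the last leaf of this DX */
            return (entries[at + 1].hash & ~1U) == hash;
    }

    int main(void)
    {
            struct demo_dx_entry entries[] = {
                    { 0x1000, 10 }, { 0x2000, 11 }, { 0x2001, 12 }, { 0x3000, 13 },
            };

            /* hash 0x2000 lands at index 1; entry 2 holds 0x2001, whose masked
             * value is also 0x2000, so dx_probe() would take the DX lock */
            printf("collision at 0x2000: %d\n",
                   demo_hash_collision(entries, 4, 1, 0x2000));
            /* hash 0x1000 has no neighbour with the same hash: no DX lock */
            printf("collision at 0x1000: %d\n",
                   demo_hash_collision(entries, 4, 0, 0x1000));
            return 0;
    }
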
diff --git a/ldiskfs/kernel_patches/patches/sles12/ext4-prealloc.patch b/ldiskfs/kernel_patches/patches/sles12/ext4-prealloc.patch
deleted file mode 100644 (file)
index 5de7cef..0000000
+++ /dev/null
@@ -1,399 +0,0 @@
-Index: linux-3.12.39-47.1/fs/ext4/ext4.h
-===================================================================
---- linux-3.12.39-47.1.orig/fs/ext4/ext4.h
-+++ linux-3.12.39-47.1/fs/ext4/ext4.h
-@@ -1251,11 +1251,14 @@ struct ext4_sb_info {
-       /* tunables */
-       unsigned long s_stripe;
--      unsigned int s_mb_stream_request;
-+      unsigned long s_mb_small_req;
-+      unsigned long s_mb_large_req;
-       unsigned int s_mb_max_to_scan;
-       unsigned int s_mb_min_to_scan;
-       unsigned int s_mb_stats;
-       unsigned int s_mb_order2_reqs;
-+      unsigned long *s_mb_prealloc_table;
-+      unsigned long s_mb_prealloc_table_size;
-       unsigned int s_mb_group_prealloc;
-       unsigned int s_max_dir_size_kb;
-       /* where last allocation was done - for stream allocation */
-Index: linux-3.12.39-47.1/fs/ext4/mballoc.c
-===================================================================
---- linux-3.12.39-47.1.orig/fs/ext4/mballoc.c
-+++ linux-3.12.39-47.1/fs/ext4/mballoc.c
-@@ -1847,6 +1847,26 @@ int ext4_mb_find_by_goal(struct ext4_all
-       return 0;
- }
-+static int ext4_mb_prealloc_table_add(struct ext4_sb_info *sbi, int value)
-+{
-+      int i;
-+
-+      if (value > (sbi->s_clusters_per_group - 1 - 1 - sbi->s_itb_per_group))
-+              return -1;
-+
-+      for (i = 0; i < sbi->s_mb_prealloc_table_size; i++) {
-+              if (sbi->s_mb_prealloc_table[i] == 0) {
-+                      sbi->s_mb_prealloc_table[i] = value;
-+                      return 0;
-+              }
-+
-+              /* values must be added in increasing order */
-+              if (value <= sbi->s_mb_prealloc_table[i])
-+                      return -1;
-+      }
-+      return -1;
-+}
-+
- /*
-  * The routine scans buddy structures (not bitmap!) from given order
-  * to max order and tries to find big enough chunk to satisfy the req
-@@ -2285,6 +2304,90 @@ static const struct seq_operations ext4_
-       .show   = ext4_mb_seq_groups_show,
- };
-+#define EXT4_MB_PREALLOC_TABLE          "prealloc_table"
-+
-+static ssize_t ext4_mb_prealloc_table_proc_write(struct file *file,
-+                                           const char __user *buf,
-+                                           size_t cnt, loff_t *pos)
-+{
-+      struct ext4_sb_info *sbi = EXT4_SB(PDE_DATA(file_inode(file)));
-+      unsigned long value;
-+      unsigned long prev = 0;
-+      char str[128];
-+      char *cur;
-+      char *end;
-+      unsigned long *new_table;
-+      int num = 0;
-+      int i = 0;
-+
-+      if (cnt >= sizeof(str))
-+              return -EINVAL;
-+      if (copy_from_user(str, buf, cnt))
-+              return -EFAULT;
-+
-+      num = 0;
-+      cur = str;
-+      end = str + cnt;
-+      while (cur < end) {
-+              while ((cur < end) && (*cur == ' '))
-+                      cur++;
-+              value = simple_strtol(cur, &cur, 0);
-+              if (value == 0)
-+                      break;
-+              if (value <= prev)
-+                      return -EINVAL;
-+              prev = value;
-+              num++;
-+      }
-+
-+      new_table = kmalloc(num * sizeof(*new_table), GFP_KERNEL);
-+      if (new_table == NULL)
-+              return -ENOMEM;
-+      kfree(sbi->s_mb_prealloc_table);
-+      memset(new_table, 0, num * sizeof(*new_table));
-+      sbi->s_mb_prealloc_table = new_table;
-+      sbi->s_mb_prealloc_table_size = num;
-+      cur = str;
-+      end = str + cnt;
-+      while (cur < end && i < num) {
-+              while (cur < end && *cur == ' ')
-+                      cur++;
-+              value = simple_strtol(cur, &cur, 0);
-+              if (ext4_mb_prealloc_table_add(sbi, value) == 0)
-+                      ++i;
-+      }
-+      if (i != num)
-+              sbi->s_mb_prealloc_table_size = i;
-+
-+      return cnt;
-+}
-+
-+static int mb_prealloc_table_seq_show(struct seq_file *m, void *v)
-+{
-+      struct ext4_sb_info *sbi = EXT4_SB(m->private);
-+      int i;
-+
-+      for (i = 0; i < sbi->s_mb_prealloc_table_size; i++)
-+              seq_printf(m, "%ld ", sbi->s_mb_prealloc_table[i]);
-+      seq_printf(m, "\n");
-+
-+      return 0;
-+}
-+
-+static int mb_prealloc_table_seq_open(struct inode *inode, struct file *file)
-+{
-+      return single_open(file, mb_prealloc_table_seq_show, PDE_DATA(inode));
-+}
-+
-+static const struct file_operations ext4_mb_prealloc_seq_fops = {
-+      .owner   = THIS_MODULE,
-+      .open    = mb_prealloc_table_seq_open,
-+      .read    = seq_read,
-+      .llseek  = seq_lseek,
-+      .release = single_release,
-+      .write   = ext4_mb_prealloc_table_proc_write,
-+};
-+
- static int ext4_mb_seq_groups_open(struct inode *inode, struct file *file)
- {
-       struct super_block *sb = PDE_DATA(inode);
-@@ -2550,7 +2657,7 @@ static int ext4_groupinfo_create_slab(si
- int ext4_mb_init(struct super_block *sb)
- {
-       struct ext4_sb_info *sbi = EXT4_SB(sb);
--      unsigned i, j;
-+      unsigned i, j, k, l;
-       unsigned offset, offset_incr;
-       unsigned max;
-       int ret;
-@@ -2595,7 +2702,6 @@ int ext4_mb_init(struct super_block *sb)
-       sbi->s_mb_max_to_scan = MB_DEFAULT_MAX_TO_SCAN;
-       sbi->s_mb_min_to_scan = MB_DEFAULT_MIN_TO_SCAN;
-       sbi->s_mb_stats = MB_DEFAULT_STATS;
--      sbi->s_mb_stream_request = MB_DEFAULT_STREAM_THRESHOLD;
-       sbi->s_mb_order2_reqs = MB_DEFAULT_ORDER2_REQS;
-       /*
-        * The default group preallocation is 512, which for 4k block
-@@ -2619,9 +2725,47 @@ int ext4_mb_init(struct super_block *sb)
-        * RAID stripe size so that preallocations don't fragment
-        * the stripes.
-        */
--      if (sbi->s_stripe > 1) {
--              sbi->s_mb_group_prealloc = roundup(
--                      sbi->s_mb_group_prealloc, sbi->s_stripe);
-+
-+      if (sbi->s_stripe == 0) {
-+              sbi->s_mb_prealloc_table_size = 10;
-+              i = sbi->s_mb_prealloc_table_size * sizeof(unsigned long);
-+              sbi->s_mb_prealloc_table = kmalloc(i, GFP_NOFS);
-+              if (sbi->s_mb_prealloc_table == NULL) {
-+                      ret = -ENOMEM;
-+                      goto out;
-+              }
-+              memset(sbi->s_mb_prealloc_table, 0, i);
-+
-+              for (k = 0, l = 4; k <= 9; ++k, l *= 2) {
-+                      if (ext4_mb_prealloc_table_add(sbi, l) < 0) {
-+                              sbi->s_mb_prealloc_table_size = k;
-+                              break;
-+                      }
-+              }
-+
-+              sbi->s_mb_small_req = 256;
-+              sbi->s_mb_large_req = 1024;
-+              sbi->s_mb_group_prealloc = 512;
-+      } else {
-+              sbi->s_mb_prealloc_table_size = 3;
-+              i = sbi->s_mb_prealloc_table_size * sizeof(unsigned long);
-+              sbi->s_mb_prealloc_table = kmalloc(i, GFP_NOFS);
-+              if (sbi->s_mb_prealloc_table == NULL) {
-+                      ret = -ENOMEM;
-+                      goto out;
-+              }
-+              memset(sbi->s_mb_prealloc_table, 0, i);
-+
-+              for (k = 0, l = sbi->s_stripe; k <= 2; ++k, l *= 2) {
-+                      if (ext4_mb_prealloc_table_add(sbi, l) < 0) {
-+                              sbi->s_mb_prealloc_table_size = k;
-+                              break;
-+                      }
-+              }
-+
-+              sbi->s_mb_small_req = sbi->s_stripe;
-+              sbi->s_mb_large_req = sbi->s_stripe * 8;
-+              sbi->s_mb_group_prealloc = sbi->s_stripe * 4;
-       }
-       sbi->s_locality_groups = alloc_percpu(struct ext4_locality_group);
-@@ -2627,9 +2769,13 @@ int ext4_mb_init(struct super_block *sb)
-       if (ret != 0)
-               goto out_free_locality_groups;
--      if (sbi->s_proc)
-+      if (sbi->s_proc) {
-               proc_create_data("mb_groups", S_IRUGO, sbi->s_proc,
-                                &ext4_mb_seq_groups_fops, sb);
-+              proc_create_data(EXT4_MB_PREALLOC_TABLE, S_IFREG | S_IRUGO |
-+                               S_IWUSR, sbi->s_proc,
-+                               &ext4_mb_prealloc_seq_fops, sb);
-+      }
-       return 0;
-@@ -2639,6 +2785,7 @@ out_free_locality_groups:
- out_free_groupinfo_slab:
-       ext4_groupinfo_destroy_slabs();
- out:
-+      kfree(sbi->s_mb_prealloc_table);
-       kfree(sbi->s_mb_offsets);
-       sbi->s_mb_offsets = NULL;
-       kfree(sbi->s_mb_maxs);
-@@ -2673,8 +2820,10 @@ int ext4_mb_release(struct super_block *
-       struct ext4_sb_info *sbi = EXT4_SB(sb);
-       struct kmem_cache *cachep = get_groupinfo_cache(sb->s_blocksize_bits);
--      if (sbi->s_proc)
-+      if (sbi->s_proc) {
-               remove_proc_entry("mb_groups", sbi->s_proc);
-+              remove_proc_entry(EXT4_MB_PREALLOC_TABLE, sbi->s_proc);
-+      }
-       if (sbi->s_group_info) {
-               for (i = 0; i < ngroups; i++) {
-@@ -2985,9 +3134,9 @@ ext4_mb_normalize_request(struct ext4_al
-                               struct ext4_allocation_request *ar)
- {
-       struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb);
--      int bsbits, max;
-+      int bsbits, i, wind;
-       ext4_lblk_t end;
--      loff_t size, start_off;
-+      loff_t size;
-       loff_t orig_size __maybe_unused;
-       ext4_lblk_t start;
-       struct ext4_inode_info *ei = EXT4_I(ac->ac_inode);
-@@ -3020,50 +3169,34 @@ ext4_mb_normalize_request(struct ext4_al
-       size = size << bsbits;
-       if (size < i_size_read(ac->ac_inode))
-               size = i_size_read(ac->ac_inode);
--      orig_size = size;
-+      size = (size + ac->ac_sb->s_blocksize - 1) >> bsbits;
--      /* max size of free chunks */
--      max = 2 << bsbits;
-+      start = wind = 0;
--#define NRL_CHECK_SIZE(req, size, max, chunk_size)    \
--              (req <= (size) || max <= (chunk_size))
-+      /* let's choose preallocation window depending on file size */
-+      for (i = 0; i < sbi->s_mb_prealloc_table_size; i++) {
-+              if (size <= sbi->s_mb_prealloc_table[i]) {
-+                      wind = sbi->s_mb_prealloc_table[i];
-+                      break;
-+              }
-+      }
-+      size = wind;
--      /* first, try to predict filesize */
--      /* XXX: should this table be tunable? */
--      start_off = 0;
--      if (size <= 16 * 1024) {
--              size = 16 * 1024;
--      } else if (size <= 32 * 1024) {
--              size = 32 * 1024;
--      } else if (size <= 64 * 1024) {
--              size = 64 * 1024;
--      } else if (size <= 128 * 1024) {
--              size = 128 * 1024;
--      } else if (size <= 256 * 1024) {
--              size = 256 * 1024;
--      } else if (size <= 512 * 1024) {
--              size = 512 * 1024;
--      } else if (size <= 1024 * 1024) {
--              size = 1024 * 1024;
--      } else if (NRL_CHECK_SIZE(size, 4 * 1024 * 1024, max, 2 * 1024)) {
--              start_off = ((loff_t)ac->ac_o_ex.fe_logical >>
--                                              (21 - bsbits)) << 21;
--              size = 2 * 1024 * 1024;
--      } else if (NRL_CHECK_SIZE(size, 8 * 1024 * 1024, max, 4 * 1024)) {
--              start_off = ((loff_t)ac->ac_o_ex.fe_logical >>
--                                                      (22 - bsbits)) << 22;
--              size = 4 * 1024 * 1024;
--      } else if (NRL_CHECK_SIZE(ac->ac_o_ex.fe_len,
--                                      (8<<20)>>bsbits, max, 8 * 1024)) {
--              start_off = ((loff_t)ac->ac_o_ex.fe_logical >>
--                                                      (23 - bsbits)) << 23;
--              size = 8 * 1024 * 1024;
--      } else {
--              start_off = (loff_t)ac->ac_o_ex.fe_logical << bsbits;
--              size      = ac->ac_o_ex.fe_len << bsbits;
-+      if (wind == 0) {
-+              __u64 tstart, tend;
-+              /* file is quite large, so we preallocate with
-+               * the biggest configured window with regard to
-+               * the logical offset */
-+              wind = sbi->s_mb_prealloc_table[i - 1];
-+              tstart = ac->ac_o_ex.fe_logical;
-+              do_div(tstart, wind);
-+              start = tstart * wind;
-+              tend = ac->ac_o_ex.fe_logical + ac->ac_o_ex.fe_len - 1;
-+              do_div(tend, wind);
-+              tend = tend * wind + wind;
-+              size = tend - start;
-       }
--      size = size >> bsbits;
--      start = start_off >> bsbits;
-+      orig_size = size;
-       /* don't cover already allocated blocks in selected range */
-       if (ar->pleft && start <= ar->lleft) {
-@@ -3139,7 +3272,6 @@ ext4_mb_normalize_request(struct ext4_al
-       }
-       BUG_ON(start + size <= ac->ac_o_ex.fe_logical &&
-                       start > ac->ac_o_ex.fe_logical);
--      BUG_ON(size <= 0 || size > EXT4_BLOCKS_PER_GROUP(ac->ac_sb));
-       /* now prepare goal request */
-@@ -4105,11 +4237,19 @@ static void ext4_mb_group_or_file(struct
-       /* don't use group allocation for large files */
-       size = max(size, isize);
--      if (size > sbi->s_mb_stream_request) {
-+      if ((ac->ac_o_ex.fe_len >= sbi->s_mb_small_req) ||
-+          (size >= sbi->s_mb_large_req)) {
-               ac->ac_flags |= EXT4_MB_STREAM_ALLOC;
-               return;
-       }
-+      /*
-+       * request is so large that we don't care about
-+       * streaming - it outweighs any possible seek
-+       */
-+      if (ac->ac_o_ex.fe_len >= sbi->s_mb_large_req)
-+              return;
-+
-       BUG_ON(ac->ac_lg != NULL);
-       /*
-        * locality group prealloc space are per cpu. The reason for having
-Index: linux-3.12.39-47.1/fs/ext4/super.c
-===================================================================
---- linux-3.12.39-47.1.orig/fs/ext4/super.c
-+++ linux-3.12.39-47.1/fs/ext4/super.c
-@@ -2592,7 +2592,8 @@ EXT4_RW_ATTR_SBI_UI(mb_stats, s_mb_stats
- EXT4_RW_ATTR_SBI_UI(mb_max_to_scan, s_mb_max_to_scan);
- EXT4_RW_ATTR_SBI_UI(mb_min_to_scan, s_mb_min_to_scan);
- EXT4_RW_ATTR_SBI_UI(mb_order2_req, s_mb_order2_reqs);
--EXT4_RW_ATTR_SBI_UI(mb_stream_req, s_mb_stream_request);
-+EXT4_RW_ATTR_SBI_UI(mb_small_req, s_mb_small_req);
-+EXT4_RW_ATTR_SBI_UI(mb_large_req, s_mb_large_req);
- EXT4_RW_ATTR_SBI_UI(mb_group_prealloc, s_mb_group_prealloc);
- EXT4_DEPRECATED_ATTR(max_writeback_mb_bump, 128);
- EXT4_RW_ATTR_SBI_UI(extent_max_zeroout_kb, s_extent_max_zeroout_kb);
-@@ -2609,7 +2610,8 @@ static struct attribute *ext4_attrs[] =
-       ATTR_LIST(mb_max_to_scan),
-       ATTR_LIST(mb_min_to_scan),
-       ATTR_LIST(mb_order2_req),
--      ATTR_LIST(mb_stream_req),
-+      ATTR_LIST(mb_small_req),
-+      ATTR_LIST(mb_large_req),
-       ATTR_LIST(mb_group_prealloc),
-       ATTR_LIST(max_writeback_mb_bump),
-       ATTR_LIST(extent_max_zeroout_kb),
-Index: linux-3.12.39-47.1/fs/ext4/inode.c
-===================================================================
---- linux-3.12.39-47.1.orig/fs/ext4/inode.c
-+++ linux-3.12.39-47.1/fs/ext4/inode.c
-@@ -2457,6 +2457,9 @@ static int ext4_writepages(struct addres
-               ext4_journal_stop(handle);
-       }
-+      if (wbc->nr_to_write < sbi->s_mb_small_req)
-+              wbc->nr_to_write = sbi->s_mb_small_req;
-+
-       if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX)
-               range_whole = 1;
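
Note: the reworked ext4_mb_normalize_request() above picks the smallest
prealloc_table window that covers the current file size, and for files larger
than every table entry it aligns the request to the biggest window instead. A
small user-space sketch of that selection follows; the table values mirror the
non-striped defaults built in ext4_mb_init() (4..2048 blocks) and pick_window()
is an illustrative name, not part of the patch.

    #include <stdio.h>

    /* default table built when s_stripe == 0: 4, 8, ..., 2048 blocks */
    static const unsigned long prealloc_table[] =
            { 4, 8, 16, 32, 64, 128, 256, 512, 1024, 2048 };
    static const int table_size =
            sizeof(prealloc_table) / sizeof(prealloc_table[0]);

    /* smallest configured window that covers the file size, in blocks;
     * 0 means "bigger than every window" */
    static unsigned long pick_window(unsigned long size_in_blocks)
    {
            int i;

            for (i = 0; i < table_size; i++)
                    if (size_in_blocks <= prealloc_table[i])
                            return prealloc_table[i];
            return 0;       /* caller falls back to the largest window, aligned */
    }

    int main(void)
    {
            /* a 100-block file gets a 128-block window; a 5000-block file
             * exceeds the table, so the kernel code would align the request
             * to the largest (2048-block) window instead */
            printf("100 blocks  -> window %lu\n", pick_window(100));
            printf("5000 blocks -> window %lu\n", pick_window(5000));
            return 0;
    }
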
diff --git a/ldiskfs/kernel_patches/patches/sles12sp1/ext4-attach-jinode-in-writepages.patch b/ldiskfs/kernel_patches/patches/sles12sp1/ext4-attach-jinode-in-writepages.patch
deleted file mode 100644 (file)
index 7a66a1b..0000000
+++ /dev/null
@@ -1,31 +0,0 @@
---- l1/fs/ext4/inode.c 2016-08-23 23:38:23.238091149 +0800
-+++ l2/fs/ext4/inode.c 2017-01-24 04:04:45.100092653 +0800
-@@ -2683,6 +2683,9 @@ static int ext4_writepages(struct addres
-               mpd.last_page = wbc->range_end >> PAGE_CACHE_SHIFT;
-       }
-+      ret = ext4_inode_attach_jinode(inode);
-+      if (ret)
-+              goto out_writepages;
-       mpd.inode = inode;
-       mpd.wbc = wbc;
-       ext4_io_submit_init(&mpd.io_submit, wbc);
-@@ -4015,6 +4015,7 @@ int ext4_inode_attach_jinode(struct inod
-               jbd2_free_inode(jinode);
-       return 0;
- }
-+EXPORT_SYMBOL(ext4_inode_attach_jinode);
- /*
-  * ext4_truncate()
---- l1/fs/ext4/ext4.h  2017-01-24 04:21:28.989470587 +0800
-+++ l2/fs/ext4/ext4.h  2017-01-24 04:32:45.589800986 +0800
-@@ -2462,7 +2462,8 @@ extern int ext4_group_add_blocks(handle_
- extern int ext4_trim_fs(struct super_block *, struct fstrim_range *);
- /* inode.c */
-+#define HAVE_LDISKFS_INFO_JINODE
- struct buffer_head *ext4_getblk(handle_t *, struct inode *,
-                                               ext4_lblk_t, int, int *);
- struct buffer_head *ext4_bread(handle_t *, struct inode *,
-                                               ext4_lblk_t, int, int *);
diff --git a/ldiskfs/kernel_patches/patches/sles12sp1/ext4-large-dir.patch b/ldiskfs/kernel_patches/patches/sles12sp1/ext4-large-dir.patch
deleted file mode 100644 (file)
index bfe50f0..0000000
+++ /dev/null
@@ -1,356 +0,0 @@
-This INCOMPAT_LARGEDIR feature allows larger directories
-to be created in ldiskfs, both with directory sizes over
-2GB and a maximum htree depth of 3 instead of the
-current limit of 2. These features are needed in order
-to exceed the current limit of approximately 10M entries
-in a single directory.
-
-Index: linux-3.10.0-229.1.2.fc21.x86_64/fs/ext4/ext4.h
-===================================================================
---- linux-3.10.0-229.1.2.fc21.x86_64.orig/fs/ext4/ext4.h
-+++ linux-3.10.0-229.1.2.fc21.x86_64/fs/ext4/ext4.h
-@@ -1585,7 +1585,8 @@ static inline void ext4_clear_state_flag
-                                        EXT4_FEATURE_INCOMPAT_EA_INODE| \
-                                        EXT4_FEATURE_INCOMPAT_MMP |    \
-                                        EXT4_FEATURE_INCOMPAT_DIRDATA| \
--                                       EXT4_FEATURE_INCOMPAT_INLINE_DATA)
-+                                       EXT4_FEATURE_INCOMPAT_INLINE_DATA| \
-+                                       EXT4_FEATURE_INCOMPAT_LARGEDIR)
- #define EXT4_FEATURE_RO_COMPAT_SUPP   (EXT4_FEATURE_RO_COMPAT_SPARSE_SUPER| \
-                                        EXT4_FEATURE_RO_COMPAT_LARGE_FILE| \
-                                        EXT4_FEATURE_RO_COMPAT_GDT_CSUM| \
-@@ -1999,6 +2000,9 @@ struct mmpd_data {
- # define NORET_TYPE   /**/
- # define ATTRIB_NORET __attribute__((noreturn))
- # define NORET_AND    noreturn,
-+/* htree levels for ext4 */
-+#define EXT4_HTREE_LEVEL_COMPAT 2
-+#define EXT4_HTREE_LEVEL      3
- struct ext4_xattr_ino_array {
-       unsigned int xia_count;         /* # of used item in the array */
-@@ -2472,13 +2476,16 @@ static inline void ext4_r_blocks_count_s
-       es->s_r_blocks_count_hi = cpu_to_le32(blk >> 32);
- }
--static inline loff_t ext4_isize(struct ext4_inode *raw_inode)
-+static inline loff_t ext4_isize(struct super_block *sb,
-+                              struct ext4_inode *raw_inode)
- {
--      if (S_ISREG(le16_to_cpu(raw_inode->i_mode)))
-+      if (S_ISREG(le16_to_cpu(raw_inode->i_mode)) ||
-+          (EXT4_HAS_INCOMPAT_FEATURE(sb, EXT4_FEATURE_INCOMPAT_LARGEDIR) &&
-+          S_ISDIR(le16_to_cpu(raw_inode->i_mode))))
-               return ((loff_t)le32_to_cpu(raw_inode->i_size_high) << 32) |
-                       le32_to_cpu(raw_inode->i_size_lo);
--      else
--              return (loff_t) le32_to_cpu(raw_inode->i_size_lo);
-+
-+      return (loff_t)le32_to_cpu(raw_inode->i_size_lo);
- }
- static inline void ext4_isize_set(struct ext4_inode *raw_inode, loff_t i_size)
-Index: linux-3.10.0-229.1.2.fc21.x86_64/fs/ext4/namei.c
-===================================================================
---- linux-3.10.0-229.1.2.fc21.x86_64.orig/fs/ext4/namei.c
-+++ linux-3.10.0-229.1.2.fc21.x86_64/fs/ext4/namei.c
-@@ -513,7 +513,14 @@ struct dx_root_info * dx_get_dx_info(str
- static inline ext4_lblk_t dx_get_block(struct dx_entry *entry)
- {
--      return le32_to_cpu(entry->block) & 0x00ffffff;
-+      return le32_to_cpu(entry->block) & 0x0fffffff;
-+}
-+
-+static inline int
-+ext4_dir_htree_level(struct super_block *sb)
-+{
-+      return EXT4_HAS_INCOMPAT_FEATURE(sb, EXT4_FEATURE_INCOMPAT_LARGEDIR) ?
-+              EXT4_HTREE_LEVEL : EXT4_HTREE_LEVEL_COMPAT;
- }
- static inline void dx_set_block(struct dx_entry *entry, ext4_lblk_t value)
-@@ -681,7 +688,7 @@ dx_probe(const struct qstr *d_name, stru
-       struct dx_frame *frame = frame_in;
-       u32 hash;
--      frame->bh = NULL;
-+      memset(frame_in, 0, EXT4_HTREE_LEVEL * sizeof(frame_in[0]));
-       bh = ext4_read_dirblock(dir, 0, INDEX);
-       if (IS_ERR(bh)) {
-               *err = PTR_ERR(bh);
-@@ -714,10 +721,15 @@ dx_probe(const struct qstr *d_name, stru
-       }
-       indirect = info->indirect_levels;
--      if (indirect > 1) {
--              ext4_warning(dir->i_sb,
--                           "inode #%lu: unimplemented hash depth %u",
--                           dir->i_ino, info->indirect_levels);
-+      if (indirect >= ext4_dir_htree_level(dir->i_sb)) {
-+              ext4_warning(dir->i_sb,
-+                           "inode #%lu: comm %s: htree depth %#06x exceeds max depth %u",
-+                           dir->i_ino, current->comm, indirect,
-+                           ext4_dir_htree_level(dir->i_sb));
-+              if (ext4_dir_htree_level(dir->i_sb) < EXT4_HTREE_LEVEL) {
-+                      ext4_warning(dir->i_sb, "Enable large directory "
-+                                              "feature to access it");
-+              }
-               brelse(bh);
-               *err = ERR_BAD_DX_DIR;
-               goto fail;
-@@ -812,13 +826,18 @@ fail:
- static void dx_release (struct dx_frame *frames)
- {
-       struct dx_root_info *info;
-+      int i;
-+
-       if (frames[0].bh == NULL)
-               return;
-       info = dx_get_dx_info((struct ext4_dir_entry_2 *)frames[0].bh->b_data);
--      if (info->indirect_levels)
--              brelse(frames[1].bh);
--      brelse(frames[0].bh);
-+      for (i = 0; i <= info->indirect_levels; i++) {
-+              if (frames[i].bh == NULL)
-+                      break;
-+              brelse(frames[i].bh);
-+              frames[i].bh = NULL;
-+      }
- }
- /*
-@@ -960,7 +979,7 @@ int ext4_htree_fill_tree(struct file *di
- {
-       struct dx_hash_info hinfo;
-       struct ext4_dir_entry_2 *de;
--      struct dx_frame frames[2], *frame;
-+      struct dx_frame frames[EXT4_HTREE_LEVEL], *frame;
-       struct inode *dir;
-       ext4_lblk_t block;
-       int count = 0;
-@@ -1376,7 +1395,7 @@ static struct buffer_head * ext4_dx_find
- {
-       struct super_block * sb = dir->i_sb;
-       struct dx_hash_info     hinfo;
--      struct dx_frame frames[2], *frame;
-+      struct dx_frame frames[EXT4_HTREE_LEVEL], *frame;
-       struct buffer_head *bh;
-       ext4_lblk_t block;
-       int retval;
-@@ -1832,7 +1851,7 @@ static int make_indexed_dir(handle_t *ha
-       const char      *name = dentry->d_name.name;
-       int             namelen = dentry->d_name.len;
-       struct buffer_head *bh2;
--      struct dx_frame frames[2], *frame;
-+      struct dx_frame frames[EXT4_HTREE_LEVEL], *frame;
-       struct dx_entry *entries;
-       struct ext4_dir_entry_2 *de, *de2, *dot_de, *dotdot_de;
-       struct ext4_dir_entry_tail *t;
-@@ -2117,15 +2136,18 @@ static int ext4_add_entry(handle_t *hand
- static int ext4_dx_add_entry(handle_t *handle, struct dentry *dentry,
-                            struct inode *inode)
- {
--      struct dx_frame frames[2], *frame;
-+      struct dx_frame frames[EXT4_HTREE_LEVEL], *frame;
-       struct dx_entry *entries, *at;
-       struct dx_hash_info hinfo;
-       struct buffer_head *bh;
-       struct inode *dir = dentry->d_parent->d_inode;
-       struct super_block *sb = dir->i_sb;
-       struct ext4_dir_entry_2 *de;
-+      int restart;
-       int err;
-+again:
-+      restart = 0;
-       frame = dx_probe(&dentry->d_name, dir, &hinfo, frames, &err);
-       if (!frame)
-               return err;
-@@ -2138,33 +2160,48 @@ static int ext4_dx_add_entry(handle_t *h
-               goto cleanup;
-       }
--      BUFFER_TRACE(bh, "get_write_access");
--      err = ext4_journal_get_write_access(handle, bh);
--      if (err)
--              goto journal_error;
--
-       err = add_dirent_to_buf(handle, dentry, inode, NULL, bh);
-       if (err != -ENOSPC)
-               goto cleanup;
-+      err = 0;
-       /* Block full, should compress but for now just split */
-       dxtrace(printk(KERN_DEBUG "using %u of %u node entries\n",
-                      dx_get_count(entries), dx_get_limit(entries)));
-       /* Need to split index? */
-       if (dx_get_count(entries) == dx_get_limit(entries)) {
-               ext4_lblk_t newblock;
--              unsigned icount = dx_get_count(entries);
--              int levels = frame - frames;
-+              int levels = frame - frames + 1;
-+              unsigned icount;
-+              int add_level = 1;
-               struct dx_entry *entries2;
-               struct dx_node *node2;
-               struct buffer_head *bh2;
--              if (levels && (dx_get_count(frames->entries) ==
--                             dx_get_limit(frames->entries))) {
--                      ext4_warning(sb, "Directory index full!");
-+              while (frame > frames) {
-+                      if (dx_get_count((frame - 1)->entries) <
-+                          dx_get_limit((frame - 1)->entries)) {
-+                              add_level = 0;
-+                              break;
-+                      }
-+                      frame--; /* split higher index block */
-+                      at = frame->at;
-+                      entries = frame->entries;
-+                      restart = 1;
-+              }
-+              if (add_level && levels == ext4_dir_htree_level(sb)) {
-+                      ext4_warning(sb, "inode %lu: comm %s: index %u: reached max htree level %u",
-+                                       dir->i_ino, current->comm, levels,
-+                                       ext4_dir_htree_level(sb));
-+                      if (ext4_dir_htree_level(sb) < EXT4_HTREE_LEVEL) {
-+                              ext4_warning(sb, "Large directory feature is "
-+                                               "not enabled on this "
-+                                               "filesystem");
-+                      }
-                       err = -ENOSPC;
-                       goto cleanup;
-               }
-+              icount = dx_get_count(entries);
-               bh2 = ext4_append(handle, dir, &newblock);
-               if (IS_ERR(bh2)) {
-                       err = PTR_ERR(bh2);
-@@ -2179,7 +2216,7 @@ static int ext4_dx_add_entry(handle_t *h
-               err = ext4_journal_get_write_access(handle, frame->bh);
-               if (err)
-                       goto journal_error;
--              if (levels) {
-+              if (!add_level) {
-                       unsigned icount1 = icount/2, icount2 = icount - icount1;
-                       unsigned hash2 = dx_get_hash(entries + icount1);
-                       dxtrace(printk(KERN_DEBUG "Split index %i/%i\n",
-@@ -2187,7 +2224,7 @@ static int ext4_dx_add_entry(handle_t *h
-                       BUFFER_TRACE(frame->bh, "get_write_access"); /* index root */
-                       err = ext4_journal_get_write_access(handle,
--                                                           frames[0].bh);
-+                                                          (frame - 1)->bh);
-                       if (err)
-                               goto journal_error;
-@@ -2203,19 +2240,27 @@ static int ext4_dx_add_entry(handle_t *h
-                               frame->entries = entries = entries2;
-                               swap(frame->bh, bh2);
-                       }
--                      dx_insert_block(frames + 0, hash2, newblock);
--                      dxtrace(dx_show_index("node", frames[1].entries));
-+                      dx_insert_block(frame - 1, hash2, newblock);
-+                      dxtrace(dx_show_index("node", frame->entries));
-                       dxtrace(dx_show_index("node",
--                             ((struct dx_node *) bh2->b_data)->entries));
-+                             ((struct dx_node *)bh2->b_data)->entries));
-                       err = ext4_handle_dirty_dx_node(handle, dir, bh2);
-                       if (err)
-                               goto journal_error;
-                       brelse (bh2);
-+                      err = ext4_handle_dirty_dx_node(handle, dir,
-+                                                 (frame - 1)->bh);
-+                      if (err)
-+                              goto journal_error;
-+                      if (restart) {
-+                              err = ext4_handle_dirty_dx_node(handle, dir,
-+                                                         frame->bh);
-+                              goto journal_error;
-+                      }
-               } else {
-                       struct dx_root_info *info;
--                      dxtrace(printk(KERN_DEBUG
--                                     "Creating second level index...\n"));
--                      memcpy((char *) entries2, (char *) entries,
-+
-+                      memcpy((char *)entries2, (char *)entries,
-                              icount * sizeof(struct dx_entry));
-                       dx_set_limit(entries2, dx_node_limit(dir));
-@@ -2224,22 +2267,17 @@ static int ext4_dx_add_entry(handle_t *h
-                       dx_set_block(entries + 0, newblock);
-                       info = dx_get_dx_info((struct ext4_dir_entry_2*)
-                                             frames[0].bh->b_data);
--                      info->indirect_levels = 1;
--
--                      /* Add new access path frame */
--                      frame = frames + 1;
--                      frame->at = at = at - entries + entries2;
--                      frame->entries = entries = entries2;
--                      frame->bh = bh2;
--                      err = ext4_journal_get_write_access(handle,
--                                                           frame->bh);
-+                      info->indirect_levels += 1;
-+                      dxtrace(printk(KERN_DEBUG
-+                                     "Creating %d level index...\n",
-+                                     info->indirect_levels));
-+                      err = ext4_handle_dirty_dx_node(handle, dir, frame->bh);
-                       if (err)
-                               goto journal_error;
--              }
--              err = ext4_handle_dirty_dx_node(handle, dir, frames[0].bh);
--              if (err) {
--                      ext4_std_error(inode->i_sb, err);
--                      goto cleanup;
-+                      err = ext4_handle_dirty_dx_node(handle, dir, bh2);
-+                      brelse(bh2);
-+                      restart = 1;
-+                      goto journal_error;
-               }
-       }
-       de = do_split(handle, dir, &bh, frame, &hinfo, &err);
-@@ -2249,10 +2285,14 @@ static int ext4_dx_add_entry(handle_t *h
-       goto cleanup;
- journal_error:
--      ext4_std_error(dir->i_sb, err);
-+      ext4_std_error(dir->i_sb, err); /* this is a no-op if err == 0 */
- cleanup:
-       brelse(bh);
-       dx_release(frames);
-+      /* @restart is true means htree-path has been changed, we need to
-+       * repeat dx_probe() to find out valid htree-path */
-+      if (restart && err == 0)
-+              goto again;
-       return err;
- }
-Index: linux-3.10.0-229.1.2.fc21.x86_64/fs/ext4/inode.c
-===================================================================
---- linux-3.10.0-229.1.2.fc21.x86_64.orig/fs/ext4/inode.c
-+++ linux-3.10.0-229.1.2.fc21.x86_64/fs/ext4/inode.c
-@@ -4056,12 +4056,12 @@ struct inode *ext4_iget(struct super_blo
-       if (EXT4_HAS_INCOMPAT_FEATURE(sb, EXT4_FEATURE_INCOMPAT_64BIT))
-               ei->i_file_acl |=
-                       ((__u64)le16_to_cpu(raw_inode->i_file_acl_high)) << 32;
--      inode->i_size = ext4_isize(raw_inode);
-+      inode->i_size = ext4_isize(sb, raw_inode);
-       if ((size = i_size_read(inode)) < 0) {
-               EXT4_ERROR_INODE(inode, "bad i_size value: %lld", size);
-               ret = -EIO;
-               goto bad_inode;
-       }
-       ei->i_disksize = inode->i_size;
- #ifdef CONFIG_QUOTA
-       ei->i_reserved_quota = 0;
-@@ -4306,7 +4306,7 @@ static int ext4_do_update_inode(handle_t
-               raw_inode->i_file_acl_high =
-                       cpu_to_le16(ei->i_file_acl >> 32);
-       raw_inode->i_file_acl_lo = cpu_to_le32(ei->i_file_acl);
--      if (ei->i_disksize != ext4_isize(raw_inode)) {
-+      if (ei->i_disksize != ext4_isize(inode->i_sb, raw_inode)) {
-               ext4_isize_set(raw_inode, ei->i_disksize);
-               need_datasync = 1;
-       }
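
Note: the "approximately 10M entries" figure in the patch description above
follows from how many leaf blocks a two-level htree can index. A rough
user-space estimate is sketched below; the 4 KiB block size, dx entry counts,
average dirent size and fill factor are illustrative assumptions, not values
taken from the patch.

    #include <stdio.h>

    int main(void)
    {
            /* assumptions for a 4 KiB block size, ignoring checksum tails */
            const double root_entries = 508;    /* ~ (4096 - 32) / 8 */
            const double node_entries = 511;    /* ~ (4096 - 8) / 8  */
            const double avg_dirent   = 64;     /* 8-byte header + padded name */
            const double leaf_fill    = 0.75;   /* leaves ~3/4 full after splits */

            double per_leaf    = 4096.0 / avg_dirent * leaf_fill;
            double two_level   = root_entries * node_entries * per_leaf;
            double three_level = two_level * node_entries;

            /* two levels land in the ~10^7 range, matching the ~10M figure;
             * the third level allowed by INCOMPAT_LARGEDIR multiplies that
             * by roughly another node_entries */
            printf("2-level htree : ~%.1e entries\n", two_level);
            printf("3-level htree : ~%.1e entries\n", three_level);
            return 0;
    }
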
diff --git a/ldiskfs/kernel_patches/patches/sles12sp1/ext4-lookup-dotdot.patch b/ldiskfs/kernel_patches/patches/sles12sp1/ext4-lookup-dotdot.patch
deleted file mode 100644 (file)
index c6f0371..0000000
+++ /dev/null
@@ -1,38 +0,0 @@
-Index: linux-3.10.9-200.fc17.x86_64/fs/ext4/namei.c
-===================================================================
---- linux-3.10.9-200.fc17.x86_64.orig/fs/ext4/namei.c
-+++ linux-3.10.9-200.fc17.x86_64/fs/ext4/namei.c
-@@ -1438,6 +1438,33 @@ static struct dentry *ext4_lookup(struct
-                       return ERR_PTR(-EIO);
-               }
-       }
-+      /* ".." shouldn't go into the dcache, to preserve the dcache hierarchy;
-+       * otherwise we'd get the parent being a child of its actual child.
-+       * see bug 10458 for details -bzzz */
-+      if (inode && (dentry->d_name.name[0] == '.' &&
-+                    (dentry->d_name.len == 1 || (dentry->d_name.len == 2 &&
-+                                           dentry->d_name.name[1] == '.')))) {
-+              struct dentry *goal = NULL;
-+
-+              /* first, look for an existing dentry - any one is good */
-+              goal = d_find_any_alias(inode);
-+              if (goal == NULL) {
-+                      spin_lock(&dentry->d_lock);
-+                      /* there is no alias, we need to make current dentry:
-+                       *  a) inaccessible for __d_lookup()
-+                       *  b) inaccessible for iopen */
-+                      J_ASSERT(hlist_unhashed(&dentry->d_u.d_alias));
-+                      dentry->d_flags |= DCACHE_NFSFS_RENAMED;
-+                      /* this is d_instantiate() ... */
-+                      hlist_add_head(&dentry->d_u.d_alias, &inode->i_dentry);
-+                      dentry->d_inode = inode;
-+                      spin_unlock(&dentry->d_lock);
-+              }
-+              if (goal)
-+                      iput(inode);
-+              return goal;
-+      }
-+
-       return d_splice_alias(inode, dentry);
- }
diff --git a/ldiskfs/kernel_patches/patches/ubuntu14+16/ext4-data-in-dirent-001.patch b/ldiskfs/kernel_patches/patches/ubuntu14+16/ext4-data-in-dirent-001.patch
deleted file mode 100644 (file)
index de5dd19..0000000
+++ /dev/null
@@ -1,829 +0,0 @@
-diff --git a/fs/ext4/dir.c b/fs/ext4/dir.c
-index 1d1bca7..df2a96d 100644
---- a/fs/ext4/dir.c
-+++ b/fs/ext4/dir.c
-@@ -67,11 +67,11 @@ int __ext4_check_dir_entry(const char *function, unsigned int line,
-       const int rlen = ext4_rec_len_from_disk(de->rec_len,
-                                               dir->i_sb->s_blocksize);
--      if (unlikely(rlen < EXT4_DIR_REC_LEN(1)))
-+      if (unlikely(rlen < __EXT4_DIR_REC_LEN(1)))
-               error_msg = "rec_len is smaller than minimal";
-       else if (unlikely(rlen % 4 != 0))
-               error_msg = "rec_len % 4 != 0";
--      else if (unlikely(rlen < EXT4_DIR_REC_LEN(de->name_len)))
-+      else if (unlikely(rlen < EXT4_DIR_REC_LEN(de)))
-               error_msg = "rec_len is too small for name_len";
-       else if (unlikely(((char *) de - buf) + rlen > size))
-               error_msg = "directory entry across range";
-@@ -205,7 +205,7 @@ static int ext4_readdir(struct file *file, struct dir_context *ctx)
-                                * failure will be detected in the
-                                * dirent test below. */
-                               if (ext4_rec_len_from_disk(de->rec_len,
--                                      sb->s_blocksize) < EXT4_DIR_REC_LEN(1))
-+                                  sb->s_blocksize) < __EXT4_DIR_REC_LEN(1))
-                                       break;
-                               i += ext4_rec_len_from_disk(de->rec_len,
-                                                           sb->s_blocksize);
-@@ -424,12 +424,17 @@ int ext4_htree_store_dirent(struct file *dir_file, __u32 hash,
-       struct fname *fname, *new_fn;
-       struct dir_private_info *info;
-       int len;
-+      int extra_data = 0;
-       info = dir_file->private_data;
-       p = &info->root.rb_node;
-       /* Create and allocate the fname structure */
--      len = sizeof(struct fname) + ent_name->len + 1;
-+      if (dirent->file_type & EXT4_DIRENT_LUFID)
-+              extra_data = ext4_get_dirent_data_len(dirent);
-+
-+      len = sizeof(struct fname) + ent_name->len + extra_data + 1;
-+
-       new_fn = kzalloc(len, GFP_KERNEL);
-       if (!new_fn)
-               return -ENOMEM;
-@@ -438,7 +443,7 @@ int ext4_htree_store_dirent(struct file *dir_file, __u32 hash,
-       new_fn->inode = le32_to_cpu(dirent->inode);
-       new_fn->name_len = ent_name->len;
-       new_fn->file_type = dirent->file_type;
--      memcpy(new_fn->name, ent_name->name, ent_name->len);
-+      memcpy(new_fn->name, ent_name->name, ent_name->len + extra_data);
-       new_fn->name[ent_name->len] = 0;
-       while (*p) {
-@@ -621,7 +626,7 @@ int ext4_check_all_de(struct inode *dir, struct buffer_head *bh, void *buf,
-               if (ext4_check_dir_entry(dir, NULL, de, bh,
-                                        buf, buf_size, offset))
-                       return -EFSCORRUPTED;
--              nlen = EXT4_DIR_REC_LEN(de->name_len);
-+              nlen = EXT4_DIR_REC_LEN(de);
-               rlen = ext4_rec_len_from_disk(de->rec_len, buf_size);
-               de = (struct ext4_dir_entry_2 *)((char *)de + rlen);
-               offset += rlen;
-diff --git a/fs/ext4/ext4.h b/fs/ext4/ext4.h
-index 613538c..10a2a86 100644
---- a/fs/ext4/ext4.h
-+++ b/fs/ext4/ext4.h
-@@ -1069,6 +1069,7 @@ struct ext4_inode_info {
- #define EXT4_MOUNT_POSIX_ACL          0x08000 /* POSIX Access Control Lists */
- #define EXT4_MOUNT_NO_AUTO_DA_ALLOC   0x10000 /* No auto delalloc mapping */
- #define EXT4_MOUNT_BARRIER            0x20000 /* Use block barriers */
-+#define EXT4_MOUNT_DIRDATA            0x40000 /* Data in directory entries*/
- #define EXT4_MOUNT_QUOTA              0x80000 /* Some quota option set */
- #define EXT4_MOUNT_USRQUOTA           0x100000 /* "old" user quota */
- #define EXT4_MOUNT_GRPQUOTA           0x200000 /* "old" group quota */
-@@ -1781,6 +1782,7 @@ EXT4_FEATURE_INCOMPAT_FUNCS(encrypt,             ENCRYPT)
-                                        EXT4_FEATURE_INCOMPAT_64BIT| \
-                                        EXT4_FEATURE_INCOMPAT_FLEX_BG| \
-                                        EXT4_FEATURE_INCOMPAT_MMP | \
-+                                       EXT4_FEATURE_INCOMPAT_DIRDATA| \
-                                        EXT4_FEATURE_INCOMPAT_INLINE_DATA | \
-                                        EXT4_FEATURE_INCOMPAT_ENCRYPT | \
-                                        EXT4_FEATURE_INCOMPAT_CSUM_SEED)
-@@ -1937,6 +1939,43 @@ struct ext4_dir_entry_tail {
- #define EXT4_FT_SYMLINK               7
- #define EXT4_FT_MAX           8
-+#define EXT4_FT_MASK          0xf
-+
-+#if EXT4_FT_MAX > EXT4_FT_MASK
-+#error "conflicting EXT4_FT_MAX and EXT4_FT_MASK"
-+#endif
-+
-+/*
-+ * d_type has 4 unused high bits, so it can flag four kinds of extra data.
-+ * These data items (e.g. Lustre data, the high 32 bits of a 64-bit inode
-+ * number) are stored, in flag order, after the file name in the ext4 dirent.
-+ */
-+/*
-+ * This flag is added to d_type if the ext4 dirent has extra data after the
-+ * filename. The data length is variable and is stored in the first byte of
-+ * the data, which starts after the filename's NUL byte.
-+ * This is used by Lustre FS.
-+ */
-+#define EXT4_DIRENT_LUFID             0x10
-+
-+#define EXT4_LUFID_MAGIC    0xAD200907UL
-+struct ext4_dentry_param {
-+      __u32  edp_magic;       /* EXT4_LUFID_MAGIC */
-+      char   edp_len;         /* size of edp_data in bytes */
-+      char   edp_data[0];     /* packed array of data */
-+} __packed;
-+
-+static inline unsigned char *ext4_dentry_get_data(struct super_block *sb,
-+                                                struct ext4_dentry_param *p)
-+
-+{
-+      if (!EXT4_HAS_INCOMPAT_FEATURE(sb, EXT4_FEATURE_INCOMPAT_DIRDATA))
-+              return NULL;
-+      if (p && p->edp_magic == EXT4_LUFID_MAGIC)
-+              return &p->edp_len;
-+      else
-+              return NULL;
-+}
- #define EXT4_FT_DIR_CSUM      0xDE
-@@ -1947,8 +1986,11 @@ struct ext4_dir_entry_tail {
-  */
- #define EXT4_DIR_PAD                  4
- #define EXT4_DIR_ROUND                        (EXT4_DIR_PAD - 1)
--#define EXT4_DIR_REC_LEN(name_len)    (((name_len) + 8 + EXT4_DIR_ROUND) & \
-+#define __EXT4_DIR_REC_LEN(name_len)  (((name_len) + 8 + EXT4_DIR_ROUND) & \
-                                        ~EXT4_DIR_ROUND)
-+#define EXT4_DIR_REC_LEN(de)          (__EXT4_DIR_REC_LEN((de)->name_len +\
-+                                      ext4_get_dirent_data_len(de)))
-+
- #define EXT4_MAX_REC_LEN              ((1<<16)-1)
- /*
-@@ -2407,12 +2449,12 @@ extern int ext4_find_dest_de(struct inode *dir, struct inode *inode,
-                            struct buffer_head *bh,
-                            void *buf, int buf_size,
-                            struct ext4_filename *fname,
--                           struct ext4_dir_entry_2 **dest_de);
-+                           struct ext4_dir_entry_2 **dest_de, int *dlen);
- int ext4_insert_dentry(struct inode *dir,
-                      struct inode *inode,
-                      struct ext4_dir_entry_2 *de,
-                      int buf_size,
--                     struct ext4_filename *fname);
-+                     struct ext4_filename *fname, void *data);
- static inline void ext4_update_dx_flag(struct inode *inode)
- {
-       if (!ext4_has_feature_dir_index(inode->i_sb))
-@@ -2424,10 +2466,17 @@ static unsigned char ext4_filetype_table[] = {
- static inline  unsigned char get_dtype(struct super_block *sb, int filetype)
- {
--      if (!ext4_has_feature_filetype(sb) || filetype >= EXT4_FT_MAX)
-+      int fl_index = filetype & EXT4_FT_MASK;
-+
-+      if (!ext4_has_feature_filetype(sb) || fl_index >= EXT4_FT_MAX)
-               return DT_UNKNOWN;
--      return ext4_filetype_table[filetype];
-+      if (!test_opt(sb, DIRDATA))
-+              return ext4_filetype_table[fl_index];
-+
-+      return (ext4_filetype_table[fl_index]) |
-+              (filetype & EXT4_DIRENT_LUFID);
-+
- }
- extern int ext4_check_all_de(struct inode *dir, struct buffer_head *bh,
-                            void *buf, int buf_size);
-@@ -2575,6 +2624,8 @@ extern struct inode *ext4_create_inode(handle_t *handle,
- extern int ext4_delete_entry(handle_t *handle, struct inode * dir,
-                            struct ext4_dir_entry_2 *de_del,
-                            struct buffer_head *bh);
-+extern int ext4_add_dot_dotdot(handle_t *handle, struct inode *dir,
-+                             struct inode *inode, const void *, const void *);
- extern int ext4_htree_fill_tree(struct file *dir_file, __u32 start_hash,
-                               __u32 start_minor_hash, __u32 *next_hash);
- extern int ext4_search_dir(struct buffer_head *bh,
-@@ -3292,6 +3343,36 @@ extern struct mutex ext4__aio_mutex[EXT4_WQ_HASH_SZ];
- extern int ext4_resize_begin(struct super_block *sb);
- extern void ext4_resize_end(struct super_block *sb);
-+/*
-+ * Compute the total directory entry data length.
-+ * This includes the filename and an implicit NUL terminator (always present),
-+ * and optional extensions.  Each extension has a bit set in the high 4 bits of
-+ * de->file_type, and the extension length is the first byte in each entry.
-+ */
-+static inline int ext4_get_dirent_data_len(struct ext4_dir_entry_2 *de)
-+{
-+      char *len = de->name + de->name_len + 1 /* NUL terminator */;
-+      int dlen = 0;
-+      __u8 extra_data_flags = (de->file_type & ~EXT4_FT_MASK) >> 4;
-+      struct ext4_dir_entry_tail *t = (struct ext4_dir_entry_tail *)de;
-+
-+      if (!t->det_reserved_zero1 &&
-+          le16_to_cpu(t->det_rec_len) ==
-+              sizeof(struct ext4_dir_entry_tail) &&
-+          !t->det_reserved_zero2 &&
-+          t->det_reserved_ft == EXT4_FT_DIR_CSUM)
-+              return 0;
-+
-+      while (extra_data_flags) {
-+              if (extra_data_flags & 1) {
-+                      dlen += *len + (dlen == 0);
-+                      len += *len;
-+              }
-+              extra_data_flags >>= 1;
-+      }
-+      return dlen;
-+}
-+
- #endif        /* __KERNEL__ */
- #define EFSBADCRC     EBADMSG         /* Bad CRC detected */
-diff --git a/fs/ext4/inline.c b/fs/ext4/inline.c
-index d884989..53e8a88 100644
---- a/fs/ext4/inline.c
-+++ b/fs/ext4/inline.c
-@@ -1005,7 +1005,7 @@ static int ext4_add_dirent_to_inline(handle_t *handle,
-       struct ext4_dir_entry_2 *de;
-       err = ext4_find_dest_de(dir, inode, iloc->bh, inline_start,
--                              inline_size, fname, &de);
-+                              inline_size, fname, &de, NULL);
-       if (err)
-               return err;
-@@ -1013,7 +1013,7 @@ static int ext4_add_dirent_to_inline(handle_t *handle,
-       err = ext4_journal_get_write_access(handle, iloc->bh);
-       if (err)
-               return err;
--      ext4_insert_dentry(dir, inode, de, inline_size, fname);
-+      ext4_insert_dentry(dir, inode, de, inline_size, fname, NULL);
-       ext4_show_inline_dir(dir, iloc->bh, inline_start, inline_size);
-@@ -1083,7 +1083,7 @@ static int ext4_update_inline_dir(handle_t *handle, struct inode *dir,
-       int old_size = EXT4_I(dir)->i_inline_size - EXT4_MIN_INLINE_DATA_SIZE;
-       int new_size = get_max_inline_xattr_value_size(dir, iloc);
--      if (new_size - old_size <= EXT4_DIR_REC_LEN(1))
-+      if (new_size - old_size <= __EXT4_DIR_REC_LEN(1))
-               return -ENOSPC;
-       ret = ext4_update_inline_data(handle, dir,
-@@ -1366,7 +1366,7 @@ int htree_inlinedir_to_tree(struct file *dir_file,
-                       fake.name_len = 1;
-                       strcpy(fake.name, ".");
-                       fake.rec_len = ext4_rec_len_to_disk(
--                                              EXT4_DIR_REC_LEN(fake.name_len),
-+                                              EXT4_DIR_REC_LEN(&fake),
-                                               inline_size);
-                       ext4_set_de_type(inode->i_sb, &fake, S_IFDIR);
-                       de = &fake;
-@@ -1376,7 +1376,7 @@ int htree_inlinedir_to_tree(struct file *dir_file,
-                       fake.name_len = 2;
-                       strcpy(fake.name, "..");
-                       fake.rec_len = ext4_rec_len_to_disk(
--                                              EXT4_DIR_REC_LEN(fake.name_len),
-+                                              EXT4_DIR_REC_LEN(&fake),
-                                               inline_size);
-                       ext4_set_de_type(inode->i_sb, &fake, S_IFDIR);
-                       de = &fake;
-@@ -1474,8 +1474,8 @@ int ext4_read_inline_dir(struct file *file,
-        * So we will use extra_offset and extra_size to indicate them
-        * during the inline dir iteration.
-        */
--      dotdot_offset = EXT4_DIR_REC_LEN(1);
--      dotdot_size = dotdot_offset + EXT4_DIR_REC_LEN(2);
-+      dotdot_offset = __EXT4_DIR_REC_LEN(1);
-+      dotdot_size = dotdot_offset + __EXT4_DIR_REC_LEN(2);
-       extra_offset = dotdot_size - EXT4_INLINE_DOTDOT_SIZE;
-       extra_size = extra_offset + inline_size;
-@@ -1510,7 +1510,7 @@ int ext4_read_inline_dir(struct file *file,
-                        * failure will be detected in the
-                        * dirent test below. */
-                       if (ext4_rec_len_from_disk(de->rec_len, extra_size)
--                              < EXT4_DIR_REC_LEN(1))
-+                              < __EXT4_DIR_REC_LEN(1))
-                               break;
-                       i += ext4_rec_len_from_disk(de->rec_len,
-                                                   extra_size);
-diff --git a/fs/ext4/namei.c b/fs/ext4/namei.c
-index e90dd58..11bc299 100644
---- a/fs/ext4/namei.c
-+++ b/fs/ext4/namei.c
-@@ -241,7 +241,8 @@ static unsigned dx_get_count(struct dx_entry *entries);
- static unsigned dx_get_limit(struct dx_entry *entries);
- static void dx_set_count(struct dx_entry *entries, unsigned value);
- static void dx_set_limit(struct dx_entry *entries, unsigned value);
--static unsigned dx_root_limit(struct inode *dir, unsigned infosize);
-+static inline unsigned dx_root_limit(struct inode *dir,
-+              struct ext4_dir_entry_2 *dot_de, unsigned infosize);
- static unsigned dx_node_limit(struct inode *dir);
- static struct dx_frame *dx_probe(struct ext4_filename *fname,
-                                struct inode *dir,
-@@ -384,22 +385,23 @@ static struct dx_countlimit *get_dx_coun
- {
-       struct ext4_dir_entry *dp;
-       struct dx_root_info *root;
--      int count_offset;
-+      int count_offset, dot_rec_len, dotdot_rec_len;
-       if (le16_to_cpu(dirent->rec_len) == EXT4_BLOCK_SIZE(inode->i_sb))
-               count_offset = 8;
--      else if (le16_to_cpu(dirent->rec_len) == 12) {
--              dp = (struct ext4_dir_entry *)(((void *)dirent) + 12);
-+      else {
-+              dot_rec_len = le16_to_cpu(dirent->rec_len);
-+              dp = (struct ext4_dir_entry *)(((void *)dirent) + dot_rec_len);
-               if (le16_to_cpu(dp->rec_len) !=
--                  EXT4_BLOCK_SIZE(inode->i_sb) - 12)
-+                  EXT4_BLOCK_SIZE(inode->i_sb) - dot_rec_len)
-                       return NULL;
--              root = (struct dx_root_info *)(((void *)dp + 12));
-+              dotdot_rec_len = EXT4_DIR_REC_LEN((struct ext4_dir_entry_2 *)dp);
-+              root = (struct dx_root_info *)(((void *)dp + dotdot_rec_len));
-               if (root->reserved_zero ||
-                   root->info_length != sizeof(struct dx_root_info))
-                       return NULL;
--              count_offset = 32;
--      } else
--              return NULL;
-+              count_offset = 8 + dot_rec_len + dotdot_rec_len;
-+      }
-       if (offset)
-               *offset = count_offset;
-@@ -504,11 +505,12 @@ ext4_next_entry(struct ext4_dir_entry_2 *p, unsigned long blocksize)
-  */
- struct dx_root_info *dx_get_dx_info(struct ext4_dir_entry_2 *de)
- {
-+      BUG_ON(de->name_len != 1);
-       /* get dotdot first */
--      de = (struct ext4_dir_entry_2 *)((char *)de + EXT4_DIR_REC_LEN(1));
-+      de = (struct ext4_dir_entry_2 *)((char *)de + EXT4_DIR_REC_LEN(de));
-       /* dx root info is after dotdot entry */
--      de = (struct ext4_dir_entry_2 *)((char *)de + EXT4_DIR_REC_LEN(2));
-+      de = (struct ext4_dir_entry_2 *)((char *)de + EXT4_DIR_REC_LEN(de));
-       return (struct dx_root_info *)de;
- }
-@@ -553,10 +555,16 @@ static inline void dx_set_limit(struct dx_entry *entries, unsigned value)
-       ((struct dx_countlimit *) entries)->limit = cpu_to_le16(value);
- }
--static inline unsigned dx_root_limit(struct inode *dir, unsigned infosize)
-+static inline unsigned dx_root_limit(struct inode *dir,
-+              struct ext4_dir_entry_2 *dot_de, unsigned infosize)
- {
--      unsigned entry_space = dir->i_sb->s_blocksize - EXT4_DIR_REC_LEN(1) -
--              EXT4_DIR_REC_LEN(2) - infosize;
-+      struct ext4_dir_entry_2 *dotdot_de;
-+      unsigned entry_space;
-+
-+      BUG_ON(dot_de->name_len != 1);
-+      dotdot_de = ext4_next_entry(dot_de, dir->i_sb->s_blocksize);
-+      entry_space = dir->i_sb->s_blocksize - EXT4_DIR_REC_LEN(dot_de) -
-+                       EXT4_DIR_REC_LEN(dotdot_de) - infosize;
-       if (ext4_has_metadata_csum(dir->i_sb))
-               entry_space -= sizeof(struct dx_tail);
-@@ -565,7 +573,7 @@ static inline unsigned dx_root_limit(struct inode *dir, unsigned infosize)
- static inline unsigned dx_node_limit(struct inode *dir)
- {
--      unsigned entry_space = dir->i_sb->s_blocksize - EXT4_DIR_REC_LEN(0);
-+      unsigned entry_space = dir->i_sb->s_blocksize - __EXT4_DIR_REC_LEN(0);
-       if (ext4_has_metadata_csum(dir->i_sb))
-               entry_space -= sizeof(struct dx_tail);
-@@ -674,7 +682,7 @@ static struct stats dx_show_leaf(struct inode *dir,
-                                      (unsigned) ((char *) de - base));
- #endif
-                       }
--                      space += EXT4_DIR_REC_LEN(de->name_len);
-+                      space += EXT4_DIR_REC_LEN(de);
-                       names++;
-               }
-               de = ext4_next_entry(de, size);
-@@ -775,11 +783,14 @@ dx_probe(struct ext4_filename *fname, struct inode *dir,
-       entries = (struct dx_entry *)(((char *)info) + info->info_length);
--      if (dx_get_limit(entries) != dx_root_limit(dir,
--                                                 info->info_length)) {
-+      if (dx_get_limit(entries) !=
-+          dx_root_limit(dir, (struct ext4_dir_entry_2 *)frame->bh->b_data,
-+                        info->info_length)) {
-               ext4_warning_inode(dir, "dx entry: limit %u != root limit %u",
-                                  dx_get_limit(entries),
--                                 dx_root_limit(dir, info->info_length));
-+                                 dx_root_limit(dir,
-+                                        (struct ext4_dir_entry_2 *)frame->bh->b_data,
-+                                        info->info_length));
-               goto fail;
-       }
-@@ -963,7 +974,7 @@ static int htree_dirblock_to_tree(struct file *dir_file,
-       de = (struct ext4_dir_entry_2 *) bh->b_data;
-       top = (struct ext4_dir_entry_2 *) ((char *) de +
-                                          dir->i_sb->s_blocksize -
--                                         EXT4_DIR_REC_LEN(0));
-+                                         __EXT4_DIR_REC_LEN(0));
- #ifdef CONFIG_EXT4_FS_ENCRYPTION
-       /* Check if the directory is encrypted */
-       if (ext4_encrypted_inode(dir)) {
-@@ -1665,7 +1676,7 @@ dx_move_dirents(char *from, char *to, struct dx_map_entry *map, int count,
-       while (count--) {
-               struct ext4_dir_entry_2 *de = (struct ext4_dir_entry_2 *)
-                                               (from + (map->offs<<2));
--              rec_len = EXT4_DIR_REC_LEN(de->name_len);
-+              rec_len = EXT4_DIR_REC_LEN(de);
-               memcpy (to, de, rec_len);
-               ((struct ext4_dir_entry_2 *) to)->rec_len =
-                               ext4_rec_len_to_disk(rec_len, blocksize);
-@@ -1689,7 +1700,7 @@ static struct ext4_dir_entry_2* dx_pack_dirents(char *base, unsigned blocksize)
-       while ((char*)de < base + blocksize) {
-               next = ext4_next_entry(de, blocksize);
-               if (de->inode && de->name_len) {
--                      rec_len = EXT4_DIR_REC_LEN(de->name_len);
-+                      rec_len = EXT4_DIR_REC_LEN(de);
-                       if (de > to)
-                               memmove(to, de, rec_len);
-                       to->rec_len = ext4_rec_len_to_disk(rec_len, blocksize);
-@@ -1820,15 +1831,17 @@ int ext4_find_dest_de(struct inode *dir, struct inode *inode,
-                     struct buffer_head *bh,
-                     void *buf, int buf_size,
-                     struct ext4_filename *fname,
--                    struct ext4_dir_entry_2 **dest_de)
-+                    struct ext4_dir_entry_2 **dest_de, int *dlen)
- {
-       struct ext4_dir_entry_2 *de;
--      unsigned short reclen = EXT4_DIR_REC_LEN(fname_len(fname));
-+      unsigned short reclen = __EXT4_DIR_REC_LEN(fname_len(fname)) +
-+                                                (dlen ? *dlen : 0);
-       int nlen, rlen;
-       unsigned int offset = 0;
-       char *top;
-       int res;
-+      dlen ? *dlen = 0 : 0; /* default set to 0 */
-       de = (struct ext4_dir_entry_2 *)buf;
-       top = buf + buf_size - reclen;
-       while ((char *) de <= top) {
-@@ -1845,10 +1858,26 @@ int ext4_find_dest_de(struct inode *dir, struct inode *inode,
-                       res = -EEXIST;
-                       goto return_result;
-               }
--              nlen = EXT4_DIR_REC_LEN(de->name_len);
-+              nlen = EXT4_DIR_REC_LEN(de);
-               rlen = ext4_rec_len_from_disk(de->rec_len, buf_size);
-               if ((de->inode ? rlen - nlen : rlen) >= reclen)
-                       break;
-+              /* Then for dotdot entries, check for the smaller space
-+               * required for just the entry, no FID */
-+              if (fname_len(fname) == 2 && memcmp(fname_name(fname), "..", 2) == 0) {
-+                      if ((de->inode ? rlen - nlen : rlen) >=
-+                          __EXT4_DIR_REC_LEN(fname_len(fname))) {
-+                              /* set dlen = 1 to indicate there is not
-+                               * enough space to store the FID */
-+                              dlen ? *dlen = 1 : 0;
-+                              break;
-+                      }
-+                      /* The new ".." entry must be written over the
-+                       * previous ".." entry, which is the first
-+                       * entry traversed by this scan. If it doesn't
-+                       * fit, something is badly wrong, so -EIO. */
-+                      return -EIO;
-+              }
-               de = (struct ext4_dir_entry_2 *)((char *)de + rlen);
-               offset += rlen;
-       }
-@@ -1867,12 +1896,12 @@ int ext4_insert_dentry(struct inode *dir,
-                      struct inode *inode,
-                      struct ext4_dir_entry_2 *de,
-                      int buf_size,
--                     struct ext4_filename *fname)
-+                     struct ext4_filename *fname, void *data)
- {
-       int nlen, rlen;
--      nlen = EXT4_DIR_REC_LEN(de->name_len);
-+      nlen = EXT4_DIR_REC_LEN(de);
-       rlen = ext4_rec_len_from_disk(de->rec_len, buf_size);
-       if (de->inode) {
-               struct ext4_dir_entry_2 *de1 =
-@@ -1886,6 +1915,11 @@ int ext4_insert_dentry(struct inode *dir,
-       ext4_set_de_type(inode->i_sb, de, inode->i_mode);
-       de->name_len = fname_len(fname);
-       memcpy(de->name, fname_name(fname), fname_len(fname));
-+      if (data) {
-+              de->name[fname_len(fname)] = 0;
-+              memcpy(&de->name[fname_len(fname) + 1], data, *(char *)data);
-+              de->file_type |= EXT4_DIRENT_LUFID;
-+      }
-       return 0;
- }
-@@ -1900,18 +1934,23 @@ int ext4_insert_dentry(struct inode *dir,
- static int add_dirent_to_buf(handle_t *handle, struct ext4_filename *fname,
-                            struct inode *dir,
-                            struct inode *inode, struct ext4_dir_entry_2 *de,
--                           struct buffer_head *bh)
-+                           struct buffer_head *bh, struct dentry *dentry)
- {
-       unsigned int    blocksize = dir->i_sb->s_blocksize;
-       int             csum_size = 0;
--      int             err;
-+      int             err, dlen = 0;
-+      unsigned char   *data;
-+      data = ext4_dentry_get_data(inode->i_sb, (struct ext4_dentry_param *)
-+                                              dentry->d_fsdata);
-       if (ext4_has_metadata_csum(inode->i_sb))
-               csum_size = sizeof(struct ext4_dir_entry_tail);
-       if (!de) {
-+              if (data)
-+                      dlen = (*data) + 1;
-               err = ext4_find_dest_de(dir, inode, bh, bh->b_data,
--                                      blocksize - csum_size, fname, &de);
-+                                      blocksize - csum_size, fname, &de, &dlen);
-               if (err)
-                       return err;
-       }
-@@ -1924,7 +1963,10 @@ static int add_dirent_to_buf(handle_t *handle, struct ext4_filename *fname,
-       /* By now the buffer is marked for journaling. Due to crypto operations,
-        * the following function call may fail */
--      err = ext4_insert_dentry(dir, inode, de, blocksize, fname);
-+      /* If writing the short form of "dotdot", don't add the data section */
-+      if (dlen == 1)
-+              data = NULL;
-+      err = ext4_insert_dentry(dir, inode, de, blocksize, fname, data);
-       if (err < 0)
-               return err;
-@@ -2036,7 +2078,8 @@ static int make_indexed_dir(handle_t *handle, struct ext4_filename *fname,
-       dx_set_block(entries, 1);
-       dx_set_count(entries, 1);
--      dx_set_limit(entries, dx_root_limit(dir, sizeof(*dx_info)));
-+      dx_set_limit(entries, dx_root_limit(dir,
-+                                       dot_de, sizeof(*dx_info)));
-       /* Initialize as for dx_probe */
-       fname->hinfo.hash_version = dx_info->hash_version;
-@@ -2064,7 +2107,7 @@ static int make_indexed_dir(handle_t *handle, struct ext4_filename *fname,
-               goto out_frames;
-       }
--      retval = add_dirent_to_buf(handle, fname, dir, inode, de, bh2);
-+      retval = add_dirent_to_buf(handle, fname, dir, inode, de, bh2, dentry);
- out_frames:
-       /*
-        * Even if the block split failed, we have to properly write
-@@ -2086,6 +2129,8 @@ static int ext4_update_dotdot(handle_t *handle, struct dentry *dentry,
-       struct buffer_head *dir_block;
-       struct ext4_dir_entry_2 *de;
-       int len, journal = 0, err = 0;
-+      int dlen = 0;
-+      char *data;
-       if (IS_ERR(handle))
-               return PTR_ERR(handle);
-@@ -2103,19 +2148,24 @@ static int ext4_update_dotdot(handle_t *handle, struct dentry *dentry,
-       /* the first item must be "." */
-       assert(de->name_len == 1 && de->name[0] == '.');
-       len = le16_to_cpu(de->rec_len);
--      assert(len >= EXT4_DIR_REC_LEN(1));
--      if (len > EXT4_DIR_REC_LEN(1)) {
-+      assert(len >= __EXT4_DIR_REC_LEN(1));
-+      if (len > __EXT4_DIR_REC_LEN(1)) {
-               BUFFER_TRACE(dir_block, "get_write_access");
-               err = ext4_journal_get_write_access(handle, dir_block);
-               if (err)
-                       goto out_journal;
-               journal = 1;
--              de->rec_len = cpu_to_le16(EXT4_DIR_REC_LEN(1));
-+              de->rec_len = cpu_to_le16(EXT4_DIR_REC_LEN(de));
-       }
--      len -= EXT4_DIR_REC_LEN(1);
--      assert(len == 0 || len >= EXT4_DIR_REC_LEN(2));
-+      len -= EXT4_DIR_REC_LEN(de);
-+      data = ext4_dentry_get_data(dir->i_sb,
-+                      (struct ext4_dentry_param *)dentry->d_fsdata);
-+      if (data)
-+              dlen = *data + 1;
-+      assert(len == 0 || len >= __EXT4_DIR_REC_LEN(2 + dlen));
-+
-       de = (struct ext4_dir_entry_2 *)
-                       ((char *) de + le16_to_cpu(de->rec_len));
-       if (!journal) {
-@@ -2129,10 +2179,15 @@ static int ext4_update_dotdot(handle_t *handle, struct dentry *dentry,
-       if (len > 0)
-               de->rec_len = cpu_to_le16(len);
-       else
--              assert(le16_to_cpu(de->rec_len) >= EXT4_DIR_REC_LEN(2));
-+              assert(le16_to_cpu(de->rec_len) >= __EXT4_DIR_REC_LEN(2));
-       de->name_len = 2;
-       strcpy(de->name, "..");
--      ext4_set_de_type(dir->i_sb, de, S_IFDIR);
-+      if (data != NULL && ext4_get_dirent_data_len(de) >= dlen) {
-+              de->name[2] = 0;
-+              memcpy(&de->name[2 + 1], data, *data);
-+              ext4_set_de_type(dir->i_sb, de, S_IFDIR);
-+              de->file_type |= EXT4_DIRENT_LUFID;
-+      }
- out_journal:
-       if (journal) {
-@@ -2214,7 +2269,7 @@ static int ext4_add_entry(handle_t *handle, struct dentry *dentry,
-                       goto out;
-               }
-               retval = add_dirent_to_buf(handle, &fname, dir, inode,
--                                         NULL, bh);
-+                                         NULL, bh, dentry);
-               if (retval != -ENOSPC)
-                       goto out;
-@@ -2242,7 +2297,7 @@ static int ext4_add_entry(handle_t *handle, struct dentry *dentry,
-               initialize_dirent_tail(t, blocksize);
-       }
--      retval = add_dirent_to_buf(handle, &fname, dir, inode, de, bh);
-+      retval = add_dirent_to_buf(handle, &fname, dir, inode, de, bh, dentry);
- out:
-       ext4_fname_free_filename(&fname);
-       brelse(bh);
-@@ -2282,7 +2337,7 @@ static int ext4_dx_add_entry(handle_t *handle, struct ext4_filename *fname,
-       if (err)
-               goto journal_error;
--      err = add_dirent_to_buf(handle, fname, dir, inode, NULL, bh);
-+      err = add_dirent_to_buf(handle, fname, dir, inode, NULL, bh, dentry);
-       if (err != -ENOSPC)
-               goto cleanup;
-@@ -2386,7 +2441,7 @@ static int ext4_dx_add_entry(handle_t *handle, struct ext4_filename *fname,
-               err = PTR_ERR(de);
-               goto cleanup;
-       }
--      err = add_dirent_to_buf(handle, fname, dir, inode, de, bh);
-+      err = add_dirent_to_buf(handle, fname, dir, inode, de, bh, dentry);
-       goto cleanup;
- journal_error:
-@@ -2661,37 +2716,70 @@ err_unlock_inode:
-       return err;
- }
-+struct tp_block {
-+      struct inode *inode;
-+      void *data1;
-+      void *data2;
-+};
-+
- struct ext4_dir_entry_2 *ext4_init_dot_dotdot(struct inode *inode,
-                         struct ext4_dir_entry_2 *de,
-                         int blocksize, int csum_size,
-                         unsigned int parent_ino, int dotdot_real_len)
- {
-+      void *data1 = NULL, *data2 = NULL;
-+      int dot_reclen = 0;
-+
-+      if (dotdot_real_len == 10) {
-+              struct tp_block *tpb = (struct tp_block *)inode;
-+              data1 = tpb->data1;
-+              data2 = tpb->data2;
-+              inode = tpb->inode;
-+              dotdot_real_len = 0;
-+      }
-       de->inode = cpu_to_le32(inode->i_ino);
-       de->name_len = 1;
--      de->rec_len = ext4_rec_len_to_disk(EXT4_DIR_REC_LEN(de->name_len),
--                                         blocksize);
-       strcpy(de->name, ".");
-       ext4_set_de_type(inode->i_sb, de, S_IFDIR);
-+      /* get packed fid data*/
-+      data1 = ext4_dentry_get_data(inode->i_sb,
-+                              (struct ext4_dentry_param *) data1);
-+      if (data1) {
-+              de->name[1] = 0;
-+              memcpy(&de->name[2], data1, *(char *) data1);
-+              de->file_type |= EXT4_DIRENT_LUFID;
-+      }
-+      de->rec_len = cpu_to_le16(EXT4_DIR_REC_LEN(de));
-+      dot_reclen = cpu_to_le16(de->rec_len);
-       de = ext4_next_entry(de, blocksize);
-       de->inode = cpu_to_le32(parent_ino);
-       de->name_len = 2;
-+      strcpy(de->name, "..");
-+      ext4_set_de_type(inode->i_sb, de, S_IFDIR);
-+      data2 = ext4_dentry_get_data(inode->i_sb,
-+                      (struct ext4_dentry_param *) data2);
-+      if (data2) {
-+              de->name[2] = 0;
-+              memcpy(&de->name[3], data2, *(char *) data2);
-+              de->file_type |= EXT4_DIRENT_LUFID;
-+      }
-       if (!dotdot_real_len)
-               de->rec_len = ext4_rec_len_to_disk(blocksize -
--                                      (csum_size + EXT4_DIR_REC_LEN(1)),
-+                                      (csum_size + dot_reclen),
-                                       blocksize);
-       else
-               de->rec_len = ext4_rec_len_to_disk(
--                              EXT4_DIR_REC_LEN(de->name_len), blocksize);
--      strcpy(de->name, "..");
--      ext4_set_de_type(inode->i_sb, de, S_IFDIR);
-+                              EXT4_DIR_REC_LEN(de), blocksize);
-       return ext4_next_entry(de, blocksize);
- }
- static int ext4_init_new_dir(handle_t *handle, struct inode *dir,
--                           struct inode *inode)
-+                           struct inode *inode,
-+                           const void *data1, const void *data2)
- {
-+      struct tp_block param;
-       struct buffer_head *dir_block = NULL;
-       struct ext4_dir_entry_2 *de;
-       struct ext4_dir_entry_tail *t;
-@@ -2716,7 +2804,11 @@ static int ext4_init_new_dir(handle_t *handle, struct inode *dir,
-       if (IS_ERR(dir_block))
-               return PTR_ERR(dir_block);
-       de = (struct ext4_dir_entry_2 *)dir_block->b_data;
--      ext4_init_dot_dotdot(inode, de, blocksize, csum_size, dir->i_ino, 0);
-+      param.inode = inode;
-+      param.data1 = (void *)data1;
-+      param.data2 = (void *)data2;
-+      ext4_init_dot_dotdot((struct inode *)(&param), de, blocksize,
-+                           csum_size, dir->i_ino, 10);
-       set_nlink(inode, 2);
-       if (csum_size) {
-               t = EXT4_DIRENT_TAIL(dir_block->b_data, blocksize);
-@@ -2733,6 +2825,29 @@ out:
-       return err;
- }
-+/* Initialize @inode as a subdirectory of @dir, and add the
-+ * "." and ".." entries into the first directory block. */
-+int ext4_add_dot_dotdot(handle_t *handle, struct inode *dir,
-+                      struct inode *inode,
-+                      const void *data1, const void *data2)
-+{
-+      int rc;
-+
-+      if (IS_ERR(handle))
-+              return PTR_ERR(handle);
-+
-+      if (IS_DIRSYNC(dir))
-+              ext4_handle_sync(handle);
-+
-+      inode->i_op = &ext4_dir_inode_operations;
-+      inode->i_fop = &ext4_dir_operations;
-+      rc = ext4_init_new_dir(handle, dir, inode, data1, data2);
-+      if (!rc)
-+              rc = ext4_mark_inode_dirty(handle, inode);
-+      return rc;
-+}
-+EXPORT_SYMBOL(ext4_add_dot_dotdot);
-+
- static int ext4_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode)
- {
-       handle_t *handle;
-@@ -2759,7 +2874,7 @@ retry:
-       inode->i_op = &ext4_dir_inode_operations;
-       inode->i_fop = &ext4_dir_operations;
--      err = ext4_init_new_dir(handle, dir, inode);
-+      err = ext4_init_new_dir(handle, dir, inode, NULL, NULL);
-       if (err)
-               goto out_clear_inode;
-       err = ext4_mark_inode_dirty(handle, inode);
-@@ -2811,7 +2926,7 @@ int ext4_empty_dir(struct inode *inode)
-       }
-       sb = inode->i_sb;
--      if (inode->i_size < EXT4_DIR_REC_LEN(1) + EXT4_DIR_REC_LEN(2)) {
-+      if (inode->i_size < __EXT4_DIR_REC_LEN(1) + __EXT4_DIR_REC_LEN(2)) {
-               EXT4_ERROR_INODE(inode, "invalid size");
-               return 1;
-       }
-diff --git a/fs/ext4/super.c b/fs/ext4/super.c
-index 7715539..f48b36e 100644
---- a/fs/ext4/super.c
-+++ b/fs/ext4/super.c
-@@ -1159,7 +1159,7 @@ enum {
-       Opt_data_err_abort, Opt_data_err_ignore, Opt_test_dummy_encryption,
-       Opt_usrjquota, Opt_grpjquota, Opt_offusrjquota, Opt_offgrpjquota,
-       Opt_jqfmt_vfsold, Opt_jqfmt_vfsv0, Opt_jqfmt_vfsv1, Opt_quota,
--      Opt_noquota, Opt_barrier, Opt_nobarrier, Opt_err,
-+      Opt_noquota, Opt_barrier, Opt_nobarrier, Opt_err, Opt_dirdata,
-       Opt_usrquota, Opt_grpquota, Opt_i_version, Opt_dax,
-       Opt_stripe, Opt_delalloc, Opt_nodelalloc, Opt_mblk_io_submit,
-       Opt_lazytime, Opt_nolazytime,
-@@ -1230,6 +1230,7 @@ static const match_table_t tokens = {
-       {Opt_lazytime, "lazytime"},
-       {Opt_nolazytime, "nolazytime"},
-       {Opt_nodelalloc, "nodelalloc"},
-+      {Opt_dirdata, "dirdata"},
-       {Opt_removed, "mblk_io_submit"},
-       {Opt_removed, "nomblk_io_submit"},
-       {Opt_block_validity, "block_validity"},
-@@ -1444,6 +1445,7 @@ static const struct mount_opts {
-       {Opt_usrjquota, 0, MOPT_Q},
-       {Opt_grpjquota, 0, MOPT_Q},
-       {Opt_offusrjquota, 0, MOPT_Q},
-+      {Opt_dirdata, EXT4_MOUNT_DIRDATA, MOPT_SET},
-       {Opt_offgrpjquota, 0, MOPT_Q},
-       {Opt_jqfmt_vfsold, QFMT_VFS_OLD, MOPT_QFMT},
-       {Opt_jqfmt_vfsv0, QFMT_VFS_V0, MOPT_QFMT},
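
For readers skimming this removal: both copies of the data-in-dirent patch implement the same on-disk layout, where an entry flagged with EXT4_DIRENT_LUFID carries, after the name's NUL byte, a payload whose first byte records its own length, and EXT4_DIR_REC_LEN(de) is widened accordingly so directory scans step over that payload. The user-space sketch below only illustrates that sizing arithmetic; it is not ext4 or Lustre code, the helper names base_rec_len/dirdata_rec_len are invented for this example, and the 17-byte payload used in main() is an assumed example value.

/* Minimal standalone sketch of the dirent sizing introduced by the patch
 * above (illustrative only; helper names are invented for this example). */
#include <stdio.h>

#define DIR_PAD         4               /* mirrors EXT4_DIR_PAD   */
#define DIR_ROUND       (DIR_PAD - 1)   /* mirrors EXT4_DIR_ROUND */

/* Like __EXT4_DIR_REC_LEN(): 8-byte fixed dirent header + name, 4-byte aligned. */
static unsigned int base_rec_len(unsigned int name_len)
{
        return (name_len + 8 + DIR_ROUND) & ~DIR_ROUND;
}

/* Like EXT4_DIR_REC_LEN(de) with EXT4_DIRENT_LUFID set: the name is followed
 * by a NUL byte and then payload_len bytes of packed data (whose first byte
 * is the length itself), so the "name" space grows by 1 + payload_len. */
static unsigned int dirdata_rec_len(unsigned int name_len,
                                    unsigned int payload_len)
{
        unsigned int extra = payload_len ? 1 + payload_len : 0;

        return base_rec_len(name_len + extra);
}

int main(void)
{
        /* "foo" without dirdata vs. with an assumed 17-byte payload
         * (1 length byte + 16 bytes of packed data). */
        printf("plain dirent:   %u bytes\n", dirdata_rec_len(3, 0));    /* 12 */
        printf("dirdata dirent: %u bytes\n", dirdata_rec_len(3, 17));   /* 32 */
        return 0;
}

This widened record length is also why the patch switches __ext4_check_dir_entry(), dx_move_dirents(), dx_pack_dirents() and similar call sites from the name-length-only macro to EXT4_DIR_REC_LEN(de): entry validation and block compaction must account for the hidden payload, not just the visible name.
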
diff --git a/ldiskfs/kernel_patches/patches/ubuntu14+16/ext4-data-in-dirent.patch b/ldiskfs/kernel_patches/patches/ubuntu14+16/ext4-data-in-dirent.patch
deleted file mode 100644 (file)
index 65c9196..0000000
+++ /dev/null
@@ -1,829 +0,0 @@
-diff --git a/fs/ext4/dir.c b/fs/ext4/dir.c
-index 1d1bca7..df2a96d 100644
---- a/fs/ext4/dir.c
-+++ b/fs/ext4/dir.c
-@@ -67,11 +67,11 @@ int __ext4_check_dir_entry(const char *function, unsigned int line,
-       const int rlen = ext4_rec_len_from_disk(de->rec_len,
-                                               dir->i_sb->s_blocksize);
--      if (unlikely(rlen < EXT4_DIR_REC_LEN(1)))
-+      if (unlikely(rlen < __EXT4_DIR_REC_LEN(1)))
-               error_msg = "rec_len is smaller than minimal";
-       else if (unlikely(rlen % 4 != 0))
-               error_msg = "rec_len % 4 != 0";
--      else if (unlikely(rlen < EXT4_DIR_REC_LEN(de->name_len)))
-+      else if (unlikely(rlen < EXT4_DIR_REC_LEN(de)))
-               error_msg = "rec_len is too small for name_len";
-       else if (unlikely(((char *) de - buf) + rlen > size))
-               error_msg = "directory entry across range";
-@@ -205,7 +205,7 @@ static int ext4_readdir(struct file *file, struct dir_context *ctx)
-                                * failure will be detected in the
-                                * dirent test below. */
-                               if (ext4_rec_len_from_disk(de->rec_len,
--                                      sb->s_blocksize) < EXT4_DIR_REC_LEN(1))
-+                                  sb->s_blocksize) < __EXT4_DIR_REC_LEN(1))
-                                       break;
-                               i += ext4_rec_len_from_disk(de->rec_len,
-                                                           sb->s_blocksize);
-@@ -424,12 +424,17 @@ int ext4_htree_store_dirent(struct file *dir_file, __u32 hash,
-       struct fname *fname, *new_fn;
-       struct dir_private_info *info;
-       int len;
-+      int extra_data = 0;
-       info = dir_file->private_data;
-       p = &info->root.rb_node;
-       /* Create and allocate the fname structure */
--      len = sizeof(struct fname) + ent_name->len + 1;
-+      if (dirent->file_type & EXT4_DIRENT_LUFID)
-+              extra_data = ext4_get_dirent_data_len(dirent);
-+
-+      len = sizeof(struct fname) + ent_name->len + extra_data + 1;
-+
-       new_fn = kzalloc(len, GFP_KERNEL);
-       if (!new_fn)
-               return -ENOMEM;
-@@ -438,7 +443,7 @@ int ext4_htree_store_dirent(struct file *dir_file, __u32 hash,
-       new_fn->inode = le32_to_cpu(dirent->inode);
-       new_fn->name_len = ent_name->len;
-       new_fn->file_type = dirent->file_type;
--      memcpy(new_fn->name, ent_name->name, ent_name->len);
-+      memcpy(new_fn->name, ent_name->name, ent_name->len + extra_data);
-       new_fn->name[ent_name->len] = 0;
-       while (*p) {
-@@ -621,7 +626,7 @@ int ext4_check_all_de(struct inode *dir, struct buffer_head *bh, void *buf,
-               if (ext4_check_dir_entry(dir, NULL, de, bh,
-                                        buf, buf_size, offset))
-                       return -EFSCORRUPTED;
--              nlen = EXT4_DIR_REC_LEN(de->name_len);
-+              nlen = EXT4_DIR_REC_LEN(de);
-               rlen = ext4_rec_len_from_disk(de->rec_len, buf_size);
-               de = (struct ext4_dir_entry_2 *)((char *)de + rlen);
-               offset += rlen;
-diff --git a/fs/ext4/ext4.h b/fs/ext4/ext4.h
-index 613538c..10a2a86 100644
---- a/fs/ext4/ext4.h
-+++ b/fs/ext4/ext4.h
-@@ -1069,6 +1069,7 @@ struct ext4_inode_info {
- #define EXT4_MOUNT_POSIX_ACL          0x08000 /* POSIX Access Control Lists */
- #define EXT4_MOUNT_NO_AUTO_DA_ALLOC   0x10000 /* No auto delalloc mapping */
- #define EXT4_MOUNT_BARRIER            0x20000 /* Use block barriers */
-+#define EXT4_MOUNT_DIRDATA            0x40000 /* Data in directory entries*/
- #define EXT4_MOUNT_QUOTA              0x80000 /* Some quota option set */
- #define EXT4_MOUNT_USRQUOTA           0x100000 /* "old" user quota */
- #define EXT4_MOUNT_GRPQUOTA           0x200000 /* "old" group quota */
-@@ -1781,6 +1782,7 @@ EXT4_FEATURE_INCOMPAT_FUNCS(encrypt,             ENCRYPT)
-                                        EXT4_FEATURE_INCOMPAT_64BIT| \
-                                        EXT4_FEATURE_INCOMPAT_FLEX_BG| \
-                                        EXT4_FEATURE_INCOMPAT_MMP | \
-+                                       EXT4_FEATURE_INCOMPAT_DIRDATA| \
-                                        EXT4_FEATURE_INCOMPAT_INLINE_DATA | \
-                                        EXT4_FEATURE_INCOMPAT_ENCRYPT | \
-                                        EXT4_FEATURE_INCOMPAT_CSUM_SEED)
-@@ -1937,6 +1939,43 @@ struct ext4_dir_entry_tail {
- #define EXT4_FT_SYMLINK               7
- #define EXT4_FT_MAX           8
-+#define EXT4_FT_MASK          0xf
-+
-+#if EXT4_FT_MAX > EXT4_FT_MASK
-+#error "conflicting EXT4_FT_MAX and EXT4_FT_MASK"
-+#endif
-+
-+/*
-+ * d_type has 4 unused high bits, so it can flag four kinds of extra data.
-+ * These data items (e.g. Lustre data, the high 32 bits of a 64-bit inode
-+ * number) are stored, in flag order, after the file name in the ext4 dirent.
-+ */
-+/*
-+ * This flag is added to d_type if the ext4 dirent has extra data after the
-+ * filename. The data length is variable and is stored in the first byte of
-+ * the data, which starts after the filename's NUL byte.
-+ * This is used by Lustre FS.
-+ */
-+#define EXT4_DIRENT_LUFID             0x10
-+
-+#define EXT4_LUFID_MAGIC    0xAD200907UL
-+struct ext4_dentry_param {
-+      __u32  edp_magic;       /* EXT4_LUFID_MAGIC */
-+      char   edp_len;         /* size of edp_data in bytes */
-+      char   edp_data[0];     /* packed array of data */
-+} __packed;
-+
-+static inline unsigned char *ext4_dentry_get_data(struct super_block *sb,
-+                                                struct ext4_dentry_param *p)
-+
-+{
-+      if (!EXT4_HAS_INCOMPAT_FEATURE(sb, EXT4_FEATURE_INCOMPAT_DIRDATA))
-+              return NULL;
-+      if (p && p->edp_magic == EXT4_LUFID_MAGIC)
-+              return &p->edp_len;
-+      else
-+              return NULL;
-+}
- #define EXT4_FT_DIR_CSUM      0xDE
-@@ -1947,8 +1986,11 @@ struct ext4_dir_entry_tail {
-  */
- #define EXT4_DIR_PAD                  4
- #define EXT4_DIR_ROUND                        (EXT4_DIR_PAD - 1)
--#define EXT4_DIR_REC_LEN(name_len)    (((name_len) + 8 + EXT4_DIR_ROUND) & \
-+#define __EXT4_DIR_REC_LEN(name_len)  (((name_len) + 8 + EXT4_DIR_ROUND) & \
-                                        ~EXT4_DIR_ROUND)
-+#define EXT4_DIR_REC_LEN(de)          (__EXT4_DIR_REC_LEN((de)->name_len +\
-+                                      ext4_get_dirent_data_len(de)))
-+
- #define EXT4_MAX_REC_LEN              ((1<<16)-1)
- /*
-@@ -2407,12 +2449,12 @@ extern int ext4_find_dest_de(struct inode *dir, struct inode *inode,
-                            struct buffer_head *bh,
-                            void *buf, int buf_size,
-                            struct ext4_filename *fname,
--                           struct ext4_dir_entry_2 **dest_de);
-+                           struct ext4_dir_entry_2 **dest_de, int *dlen);
- int ext4_insert_dentry(struct inode *dir,
-                      struct inode *inode,
-                      struct ext4_dir_entry_2 *de,
-                      int buf_size,
--                     struct ext4_filename *fname);
-+                     struct ext4_filename *fname, void *data);
- static inline void ext4_update_dx_flag(struct inode *inode)
- {
-       if (!ext4_has_feature_dir_index(inode->i_sb))
-@@ -2424,10 +2466,17 @@ static unsigned char ext4_filetype_table[] = {
- static inline  unsigned char get_dtype(struct super_block *sb, int filetype)
- {
--      if (!ext4_has_feature_filetype(sb) || filetype >= EXT4_FT_MAX)
-+      int fl_index = filetype & EXT4_FT_MASK;
-+
-+      if (!ext4_has_feature_filetype(sb) || fl_index >= EXT4_FT_MAX)
-               return DT_UNKNOWN;
--      return ext4_filetype_table[filetype];
-+      if (!test_opt(sb, DIRDATA))
-+              return ext4_filetype_table[fl_index];
-+
-+      return (ext4_filetype_table[fl_index]) |
-+              (filetype & EXT4_DIRENT_LUFID);
-+
- }
- extern int ext4_check_all_de(struct inode *dir, struct buffer_head *bh,
-                            void *buf, int buf_size);
-@@ -2575,6 +2624,8 @@ extern struct inode *ext4_create_inode(handle_t *handle,
- extern int ext4_delete_entry(handle_t *handle, struct inode * dir,
-                            struct ext4_dir_entry_2 *de_del,
-                            struct buffer_head *bh);
-+extern int ext4_add_dot_dotdot(handle_t *handle, struct inode *dir,
-+                             struct inode *inode, const void *, const void *);
- extern int ext4_htree_fill_tree(struct file *dir_file, __u32 start_hash,
-                               __u32 start_minor_hash, __u32 *next_hash);
- extern int ext4_search_dir(struct buffer_head *bh,
-@@ -3292,6 +3343,36 @@ extern struct mutex ext4__aio_mutex[EXT4_WQ_HASH_SZ];
- extern int ext4_resize_begin(struct super_block *sb);
- extern void ext4_resize_end(struct super_block *sb);
-+/*
-+ * Compute the total directory entry data length.
-+ * This includes the filename and an implicit NUL terminator (always present),
-+ * and optional extensions.  Each extension has a bit set in the high 4 bits of
-+ * de->file_type, and the extension length is the first byte in each entry.
-+ */
-+static inline int ext4_get_dirent_data_len(struct ext4_dir_entry_2 *de)
-+{
-+      char *len = de->name + de->name_len + 1 /* NUL terminator */;
-+      int dlen = 0;
-+      __u8 extra_data_flags = (de->file_type & ~EXT4_FT_MASK) >> 4;
-+      struct ext4_dir_entry_tail *t = (struct ext4_dir_entry_tail *)de;
-+
-+      if (!t->det_reserved_zero1 &&
-+          le16_to_cpu(t->det_rec_len) ==
-+              sizeof(struct ext4_dir_entry_tail) &&
-+          !t->det_reserved_zero2 &&
-+          t->det_reserved_ft == EXT4_FT_DIR_CSUM)
-+              return 0;
-+
-+      while (extra_data_flags) {
-+              if (extra_data_flags & 1) {
-+                      dlen += *len + (dlen == 0);
-+                      len += *len;
-+              }
-+              extra_data_flags >>= 1;
-+      }
-+      return dlen;
-+}
-+
- #endif        /* __KERNEL__ */
- #define EFSBADCRC     EBADMSG         /* Bad CRC detected */
-diff --git a/fs/ext4/inline.c b/fs/ext4/inline.c
-index d884989..53e8a88 100644
---- a/fs/ext4/inline.c
-+++ b/fs/ext4/inline.c
-@@ -1005,7 +1005,7 @@ static int ext4_add_dirent_to_inline(handle_t *handle,
-       struct ext4_dir_entry_2 *de;
-       err = ext4_find_dest_de(dir, inode, iloc->bh, inline_start,
--                              inline_size, fname, &de);
-+                              inline_size, fname, &de, NULL);
-       if (err)
-               return err;
-@@ -1013,7 +1013,7 @@ static int ext4_add_dirent_to_inline(handle_t *handle,
-       err = ext4_journal_get_write_access(handle, iloc->bh);
-       if (err)
-               return err;
--      ext4_insert_dentry(dir, inode, de, inline_size, fname);
-+      ext4_insert_dentry(dir, inode, de, inline_size, fname, NULL);
-       ext4_show_inline_dir(dir, iloc->bh, inline_start, inline_size);
-@@ -1083,7 +1083,7 @@ static int ext4_update_inline_dir(handle_t *handle, struct inode *dir,
-       int old_size = EXT4_I(dir)->i_inline_size - EXT4_MIN_INLINE_DATA_SIZE;
-       int new_size = get_max_inline_xattr_value_size(dir, iloc);
--      if (new_size - old_size <= EXT4_DIR_REC_LEN(1))
-+      if (new_size - old_size <= __EXT4_DIR_REC_LEN(1))
-               return -ENOSPC;
-       ret = ext4_update_inline_data(handle, dir,
-@@ -1366,7 +1366,7 @@ int htree_inlinedir_to_tree(struct file *dir_file,
-                       fake.name_len = 1;
-                       strcpy(fake.name, ".");
-                       fake.rec_len = ext4_rec_len_to_disk(
--                                              EXT4_DIR_REC_LEN(fake.name_len),
-+                                              EXT4_DIR_REC_LEN(&fake),
-                                               inline_size);
-                       ext4_set_de_type(inode->i_sb, &fake, S_IFDIR);
-                       de = &fake;
-@@ -1376,7 +1376,7 @@ int htree_inlinedir_to_tree(struct file *dir_file,
-                       fake.name_len = 2;
-                       strcpy(fake.name, "..");
-                       fake.rec_len = ext4_rec_len_to_disk(
--                                              EXT4_DIR_REC_LEN(fake.name_len),
-+                                              EXT4_DIR_REC_LEN(&fake),
-                                               inline_size);
-                       ext4_set_de_type(inode->i_sb, &fake, S_IFDIR);
-                       de = &fake;
-@@ -1474,8 +1474,8 @@ int ext4_read_inline_dir(struct file *file,
-        * So we will use extra_offset and extra_size to indicate them
-        * during the inline dir iteration.
-        */
--      dotdot_offset = EXT4_DIR_REC_LEN(1);
--      dotdot_size = dotdot_offset + EXT4_DIR_REC_LEN(2);
-+      dotdot_offset = __EXT4_DIR_REC_LEN(1);
-+      dotdot_size = dotdot_offset + __EXT4_DIR_REC_LEN(2);
-       extra_offset = dotdot_size - EXT4_INLINE_DOTDOT_SIZE;
-       extra_size = extra_offset + inline_size;
-@@ -1510,7 +1510,7 @@ int ext4_read_inline_dir(struct file *file,
-                        * failure will be detected in the
-                        * dirent test below. */
-                       if (ext4_rec_len_from_disk(de->rec_len, extra_size)
--                              < EXT4_DIR_REC_LEN(1))
-+                              < __EXT4_DIR_REC_LEN(1))
-                               break;
-                       i += ext4_rec_len_from_disk(de->rec_len,
-                                                   extra_size);
-diff --git a/fs/ext4/namei.c b/fs/ext4/namei.c
-index 73d73fb..f6465b6 100644
---- a/fs/ext4/namei.c
-+++ b/fs/ext4/namei.c
-@@ -241,7 +241,8 @@ static unsigned dx_get_count(struct dx_entry *entries);
- static unsigned dx_get_limit(struct dx_entry *entries);
- static void dx_set_count(struct dx_entry *entries, unsigned value);
- static void dx_set_limit(struct dx_entry *entries, unsigned value);
--static unsigned dx_root_limit(struct inode *dir, unsigned infosize);
-+static inline unsigned dx_root_limit(struct inode *dir,
-+              struct ext4_dir_entry_2 *dot_de, unsigned infosize);
- static unsigned dx_node_limit(struct inode *dir);
- static struct dx_frame *dx_probe(struct ext4_filename *fname,
-                                struct inode *dir,
-@@ -384,22 +385,23 @@ static struct dx_countlimit *get_dx_coun
- {
-       struct ext4_dir_entry *dp;
-       struct dx_root_info *root;
--      int count_offset;
-+      int count_offset, dot_rec_len, dotdot_rec_len;
-       if (le16_to_cpu(dirent->rec_len) == EXT4_BLOCK_SIZE(inode->i_sb))
-               count_offset = 8;
--      else if (le16_to_cpu(dirent->rec_len) == 12) {
--              dp = (struct ext4_dir_entry *)(((void *)dirent) + 12);
-+      else {
-+              dot_rec_len = le16_to_cpu(dirent->rec_len);
-+              dp = (struct ext4_dir_entry *)(((void *)dirent) + dot_rec_len);
-               if (le16_to_cpu(dp->rec_len) !=
--                  EXT4_BLOCK_SIZE(inode->i_sb) - 12)
-+                  EXT4_BLOCK_SIZE(inode->i_sb) - dot_rec_len)
-                       return NULL;
--              root = (struct dx_root_info *)(((void *)dp + 12));
-+              dotdot_rec_len = EXT4_DIR_REC_LEN((struct ext4_dir_entry_2 *)dp);
-+              root = (struct dx_root_info *)(((void *)dp + dotdot_rec_len));
-               if (root->reserved_zero ||
-                   root->info_length != sizeof(struct dx_root_info))
-                       return NULL;
--              count_offset = 32;
--      } else
--              return NULL;
-+              count_offset = 8 + dot_rec_len + dotdot_rec_len;
-+      }
-       if (offset)
-               *offset = count_offset;
-@@ -504,11 +505,12 @@ ext4_next_entry(struct ext4_dir_entry_2 *p, unsigned long blocksize)
-  */
- struct dx_root_info *dx_get_dx_info(struct ext4_dir_entry_2 *de)
- {
-+      BUG_ON(de->name_len != 1);
-       /* get dotdot first */
--      de = (struct ext4_dir_entry_2 *)((char *)de + EXT4_DIR_REC_LEN(1));
-+      de = (struct ext4_dir_entry_2 *)((char *)de + EXT4_DIR_REC_LEN(de));
-       /* dx root info is after dotdot entry */
--      de = (struct ext4_dir_entry_2 *)((char *)de + EXT4_DIR_REC_LEN(2));
-+      de = (struct ext4_dir_entry_2 *)((char *)de + EXT4_DIR_REC_LEN(de));
-       return (struct dx_root_info *)de;
- }
-@@ -553,10 +555,16 @@ static inline void dx_set_limit(struct dx_entry *entries, unsigned value)
-       ((struct dx_countlimit *) entries)->limit = cpu_to_le16(value);
- }
--static inline unsigned dx_root_limit(struct inode *dir, unsigned infosize)
-+static inline unsigned dx_root_limit(struct inode *dir,
-+              struct ext4_dir_entry_2 *dot_de, unsigned infosize)
- {
--      unsigned entry_space = dir->i_sb->s_blocksize - EXT4_DIR_REC_LEN(1) -
--              EXT4_DIR_REC_LEN(2) - infosize;
-+      struct ext4_dir_entry_2 *dotdot_de;
-+      unsigned entry_space;
-+
-+      BUG_ON(dot_de->name_len != 1);
-+      dotdot_de = ext4_next_entry(dot_de, dir->i_sb->s_blocksize);
-+      entry_space = dir->i_sb->s_blocksize - EXT4_DIR_REC_LEN(dot_de) -
-+                       EXT4_DIR_REC_LEN(dotdot_de) - infosize;
-       if (ext4_has_metadata_csum(dir->i_sb))
-               entry_space -= sizeof(struct dx_tail);
-@@ -565,7 +573,7 @@ static inline unsigned dx_root_limit(struct inode *dir, unsigned infosize)
- static inline unsigned dx_node_limit(struct inode *dir)
- {
--      unsigned entry_space = dir->i_sb->s_blocksize - EXT4_DIR_REC_LEN(0);
-+      unsigned entry_space = dir->i_sb->s_blocksize - __EXT4_DIR_REC_LEN(0);
-       if (ext4_has_metadata_csum(dir->i_sb))
-               entry_space -= sizeof(struct dx_tail);
-@@ -674,7 +682,7 @@ static struct stats dx_show_leaf(struct inode *dir,
-                                      (unsigned) ((char *) de - base));
- #endif
-                       }
--                      space += EXT4_DIR_REC_LEN(de->name_len);
-+                      space += EXT4_DIR_REC_LEN(de);
-                       names++;
-               }
-               de = ext4_next_entry(de, size);
-@@ -775,11 +783,14 @@ dx_probe(struct ext4_filename *fname, struct inode *dir,
-       entries = (struct dx_entry *)(((char *)info) + info->info_length);
--      if (dx_get_limit(entries) != dx_root_limit(dir,
--                                                 info->info_length)) {
-+      if (dx_get_limit(entries) !=
-+          dx_root_limit(dir, (struct ext4_dir_entry_2 *)frame->bh->b_data,
-+                        info->info_length)) {
-               ext4_warning_inode(dir, "dx entry: limit %u != root limit %u",
-                                  dx_get_limit(entries),
--                                 dx_root_limit(dir, info->info_length));
-+                                 dx_root_limit(dir,
-+                                        (struct ext4_dir_entry_2 *)frame->bh->b_data,
-+                                        info->info_length));
-               goto fail;
-       }
-@@ -963,7 +974,7 @@ static int htree_dirblock_to_tree(struct file *dir_file,
-       de = (struct ext4_dir_entry_2 *) bh->b_data;
-       top = (struct ext4_dir_entry_2 *) ((char *) de +
-                                          dir->i_sb->s_blocksize -
--                                         EXT4_DIR_REC_LEN(0));
-+                                         __EXT4_DIR_REC_LEN(0));
- #ifdef CONFIG_EXT4_FS_ENCRYPTION
-       /* Check if the directory is encrypted */
-       if (ext4_encrypted_inode(dir)) {
-@@ -1665,7 +1676,7 @@ dx_move_dirents(char *from, char *to, struct dx_map_entry *map, int count,
-       while (count--) {
-               struct ext4_dir_entry_2 *de = (struct ext4_dir_entry_2 *)
-                                               (from + (map->offs<<2));
--              rec_len = EXT4_DIR_REC_LEN(de->name_len);
-+              rec_len = EXT4_DIR_REC_LEN(de);
-               memcpy (to, de, rec_len);
-               ((struct ext4_dir_entry_2 *) to)->rec_len =
-                               ext4_rec_len_to_disk(rec_len, blocksize);
-@@ -1689,7 +1700,7 @@ static struct ext4_dir_entry_2* dx_pack_dirents(char *base, unsigned blocksize)
-       while ((char*)de < base + blocksize) {
-               next = ext4_next_entry(de, blocksize);
-               if (de->inode && de->name_len) {
--                      rec_len = EXT4_DIR_REC_LEN(de->name_len);
-+                      rec_len = EXT4_DIR_REC_LEN(de);
-                       if (de > to)
-                               memmove(to, de, rec_len);
-                       to->rec_len = ext4_rec_len_to_disk(rec_len, blocksize);
-@@ -1820,15 +1831,17 @@ int ext4_find_dest_de(struct inode *dir, struct inode *inode,
-                     struct buffer_head *bh,
-                     void *buf, int buf_size,
-                     struct ext4_filename *fname,
--                    struct ext4_dir_entry_2 **dest_de)
-+                    struct ext4_dir_entry_2 **dest_de, int *dlen)
- {
-       struct ext4_dir_entry_2 *de;
--      unsigned short reclen = EXT4_DIR_REC_LEN(fname_len(fname));
-+      unsigned short reclen = __EXT4_DIR_REC_LEN(fname_len(fname)) +
-+                                                (dlen ? *dlen : 0);
-       int nlen, rlen;
-       unsigned int offset = 0;
-       char *top;
-       int res;
-+      dlen ? *dlen = 0 : 0; /* default set to 0 */
-       de = (struct ext4_dir_entry_2 *)buf;
-       top = buf + buf_size - reclen;
-       while ((char *) de <= top) {
-@@ -1845,10 +1858,26 @@ int ext4_find_dest_de(struct inode *dir, struct inode *inode,
-                       res = -EEXIST;
-                       goto return_result;
-               }
--              nlen = EXT4_DIR_REC_LEN(de->name_len);
-+              nlen = EXT4_DIR_REC_LEN(de);
-               rlen = ext4_rec_len_from_disk(de->rec_len, buf_size);
-               if ((de->inode ? rlen - nlen : rlen) >= reclen)
-                       break;
-+              /* Then for dotdot entries, check for the smaller space
-+               * required for just the entry, no FID */
-+              if (fname_len(fname) == 2 && memcmp(fname_name(fname), "..", 2) == 0) {
-+                      if ((de->inode ? rlen - nlen : rlen) >=
-+                          __EXT4_DIR_REC_LEN(fname_len(fname))) {
-+                              /* set dlen = 1 to indicate there is not
-+                               * enough space to store the FID */
-+                              dlen ? *dlen = 1 : 0;
-+                              break;
-+                      }
-+                      /* The new ".." entry must be written over the
-+                       * previous ".." entry, which is the first
-+                       * entry traversed by this scan. If it doesn't
-+                       * fit, something is badly wrong, so -EIO. */
-+                      return -EIO;
-+              }
-               de = (struct ext4_dir_entry_2 *)((char *)de + rlen);
-               offset += rlen;
-       }
-@@ -1867,12 +1896,12 @@ int ext4_insert_dentry(struct inode *dir,
-                      struct inode *inode,
-                      struct ext4_dir_entry_2 *de,
-                      int buf_size,
--                     struct ext4_filename *fname)
-+                     struct ext4_filename *fname, void *data)
- {
-       int nlen, rlen;
--      nlen = EXT4_DIR_REC_LEN(de->name_len);
-+      nlen = EXT4_DIR_REC_LEN(de);
-       rlen = ext4_rec_len_from_disk(de->rec_len, buf_size);
-       if (de->inode) {
-               struct ext4_dir_entry_2 *de1 =
-@@ -1886,6 +1915,11 @@ int ext4_insert_dentry(struct inode *dir,
-       ext4_set_de_type(inode->i_sb, de, inode->i_mode);
-       de->name_len = fname_len(fname);
-       memcpy(de->name, fname_name(fname), fname_len(fname));
-+      if (data) {
-+              de->name[fname_len(fname)] = 0;
-+              memcpy(&de->name[fname_len(fname) + 1], data, *(char *)data);
-+              de->file_type |= EXT4_DIRENT_LUFID;
-+      }
-       return 0;
- }
-@@ -1900,18 +1934,23 @@ int ext4_insert_dentry(struct inode *dir,
- static int add_dirent_to_buf(handle_t *handle, struct ext4_filename *fname,
-                            struct inode *dir,
-                            struct inode *inode, struct ext4_dir_entry_2 *de,
--                           struct buffer_head *bh)
-+                           struct buffer_head *bh, struct dentry *dentry)
- {
-       unsigned int    blocksize = dir->i_sb->s_blocksize;
-       int             csum_size = 0;
--      int             err;
-+      int             err, dlen = 0;
-+      unsigned char   *data;
-+      data = ext4_dentry_get_data(inode->i_sb, (struct ext4_dentry_param *)
-+                                              dentry->d_fsdata);
-       if (ext4_has_metadata_csum(inode->i_sb))
-               csum_size = sizeof(struct ext4_dir_entry_tail);
-       if (!de) {
-+              if (data)
-+                      dlen = (*data) + 1;
-               err = ext4_find_dest_de(dir, inode, bh, bh->b_data,
--                                      blocksize - csum_size, fname, &de);
-+                                      blocksize - csum_size, fname, &de, &dlen);
-               if (err)
-                       return err;
-       }
-@@ -1924,7 +1963,10 @@ static int add_dirent_to_buf(handle_t *handle, struct ext4_filename *fname,
-       /* By now the buffer is marked for journaling. Due to crypto operations,
-        * the following function call may fail */
--      err = ext4_insert_dentry(dir, inode, de, blocksize, fname);
-+      /* If writing the short form of "dotdot", don't add the data section */
-+      if (dlen == 1)
-+              data = NULL;
-+      err = ext4_insert_dentry(dir, inode, de, blocksize, fname, data);
-       if (err < 0)
-               return err;
-@@ -2036,7 +2078,8 @@ static int make_indexed_dir(handle_t *handle, struct ext4_filename *fname,
-       dx_set_block(entries, 1);
-       dx_set_count(entries, 1);
--      dx_set_limit(entries, dx_root_limit(dir, sizeof(*dx_info)));
-+      dx_set_limit(entries, dx_root_limit(dir,
-+                                       dot_de, sizeof(*dx_info)));
-       /* Initialize as for dx_probe */
-       fname->hinfo.hash_version = dx_info->hash_version;
-@@ -2066,7 +2109,7 @@ static int make_indexed_dir(handle_t *handle, struct ext4_filename *fname,
-       }
-       dx_release(frames);
--      retval = add_dirent_to_buf(handle, fname, dir, inode, de, bh);
-+      retval = add_dirent_to_buf(handle, fname, dir, inode, de, bh, dentry);
-       brelse(bh);
-       return retval;
- out_frames:
-@@ -2088,6 +2131,8 @@ static int ext4_update_dotdot(handle_t *handle, struct dentry *dentry,
-       struct buffer_head *dir_block;
-       struct ext4_dir_entry_2 *de;
-       int len, journal = 0, err = 0;
-+      int dlen = 0;
-+      char *data;
-       if (IS_ERR(handle))
-               return PTR_ERR(handle);
-@@ -2105,19 +2150,24 @@ static int ext4_update_dotdot(handle_t *handle, struct dentry *dentry,
-       /* the first item must be "." */
-       assert(de->name_len == 1 && de->name[0] == '.');
-       len = le16_to_cpu(de->rec_len);
--      assert(len >= EXT4_DIR_REC_LEN(1));
--      if (len > EXT4_DIR_REC_LEN(1)) {
-+      assert(len >= __EXT4_DIR_REC_LEN(1));
-+      if (len > __EXT4_DIR_REC_LEN(1)) {
-               BUFFER_TRACE(dir_block, "get_write_access");
-               err = ext4_journal_get_write_access(handle, dir_block);
-               if (err)
-                       goto out_journal;
-               journal = 1;
--              de->rec_len = cpu_to_le16(EXT4_DIR_REC_LEN(1));
-+              de->rec_len = cpu_to_le16(EXT4_DIR_REC_LEN(de));
-       }
--      len -= EXT4_DIR_REC_LEN(1);
--      assert(len == 0 || len >= EXT4_DIR_REC_LEN(2));
-+      len -= EXT4_DIR_REC_LEN(de);
-+      data = ext4_dentry_get_data(dir->i_sb,
-+                      (struct ext4_dentry_param *)dentry->d_fsdata);
-+      if (data)
-+              dlen = *data + 1;
-+      assert(len == 0 || len >= __EXT4_DIR_REC_LEN(2 + dlen));
-+
-       de = (struct ext4_dir_entry_2 *)
-                       ((char *) de + le16_to_cpu(de->rec_len));
-       if (!journal) {
-@@ -2131,10 +2181,15 @@ static int ext4_update_dotdot(handle_t *handle, struct dentry *dentry,
-       if (len > 0)
-               de->rec_len = cpu_to_le16(len);
-       else
--              assert(le16_to_cpu(de->rec_len) >= EXT4_DIR_REC_LEN(2));
-+              assert(le16_to_cpu(de->rec_len) >= __EXT4_DIR_REC_LEN(2));
-       de->name_len = 2;
-       strcpy(de->name, "..");
--      ext4_set_de_type(dir->i_sb, de, S_IFDIR);
-+      if (data != NULL && ext4_get_dirent_data_len(de) >= dlen) {
-+              de->name[2] = 0;
-+              memcpy(&de->name[2 + 1], data, *data);
-+              ext4_set_de_type(dir->i_sb, de, S_IFDIR);
-+              de->file_type |= EXT4_DIRENT_LUFID;
-+      }
- out_journal:
-       if (journal) {
-@@ -2216,7 +2271,7 @@ static int ext4_add_entry(handle_t *handle, struct dentry *dentry,
-                       goto out;
-               }
-               retval = add_dirent_to_buf(handle, &fname, dir, inode,
--                                         NULL, bh);
-+                                         NULL, bh, dentry);
-               if (retval != -ENOSPC)
-                       goto out;
-@@ -2244,7 +2299,7 @@ static int ext4_add_entry(handle_t *handle, struct dentry *dentry,
-               initialize_dirent_tail(t, blocksize);
-       }
--      retval = add_dirent_to_buf(handle, &fname, dir, inode, de, bh);
-+      retval = add_dirent_to_buf(handle, &fname, dir, inode, de, bh, dentry);
- out:
-       ext4_fname_free_filename(&fname);
-       brelse(bh);
-@@ -2284,7 +2339,7 @@ static int ext4_dx_add_entry(handle_t *handle, struct ext4_filename *fname,
-       if (err)
-               goto journal_error;
--      err = add_dirent_to_buf(handle, fname, dir, inode, NULL, bh);
-+      err = add_dirent_to_buf(handle, fname, dir, inode, NULL, bh, dentry);
-       if (err != -ENOSPC)
-               goto cleanup;
-@@ -2388,7 +2443,7 @@ static int ext4_dx_add_entry(handle_t *handle, struct ext4_filename *fname,
-               err = PTR_ERR(de);
-               goto cleanup;
-       }
--      err = add_dirent_to_buf(handle, fname, dir, inode, de, bh);
-+      err = add_dirent_to_buf(handle, fname, dir, inode, de, bh, dentry);
-       goto cleanup;
- journal_error:
-@@ -2663,37 +2718,70 @@ err_unlock_inode:
-       return err;
- }
-+struct tp_block {
-+      struct inode *inode;
-+      void *data1;
-+      void *data2;
-+};
-+
- struct ext4_dir_entry_2 *ext4_init_dot_dotdot(struct inode *inode,
-                         struct ext4_dir_entry_2 *de,
-                         int blocksize, int csum_size,
-                         unsigned int parent_ino, int dotdot_real_len)
- {
-+      void *data1 = NULL, *data2 = NULL;
-+      int dot_reclen = 0;
-+
-+      if (dotdot_real_len == 10) {
-+              struct tp_block *tpb = (struct tp_block *)inode;
-+              data1 = tpb->data1;
-+              data2 = tpb->data2;
-+              inode = tpb->inode;
-+              dotdot_real_len = 0;
-+      }
-       de->inode = cpu_to_le32(inode->i_ino);
-       de->name_len = 1;
--      de->rec_len = ext4_rec_len_to_disk(EXT4_DIR_REC_LEN(de->name_len),
--                                         blocksize);
-       strcpy(de->name, ".");
-       ext4_set_de_type(inode->i_sb, de, S_IFDIR);
-+      /* get packed fid data*/
-+      data1 = ext4_dentry_get_data(inode->i_sb,
-+                              (struct ext4_dentry_param *) data1);
-+      if (data1) {
-+              de->name[1] = 0;
-+              memcpy(&de->name[2], data1, *(char *) data1);
-+              de->file_type |= EXT4_DIRENT_LUFID;
-+      }
-+      de->rec_len = cpu_to_le16(EXT4_DIR_REC_LEN(de));
-+      dot_reclen = cpu_to_le16(de->rec_len);
-       de = ext4_next_entry(de, blocksize);
-       de->inode = cpu_to_le32(parent_ino);
-       de->name_len = 2;
-+      strcpy(de->name, "..");
-+      ext4_set_de_type(inode->i_sb, de, S_IFDIR);
-+      data2 = ext4_dentry_get_data(inode->i_sb,
-+                      (struct ext4_dentry_param *) data2);
-+      if (data2) {
-+              de->name[2] = 0;
-+              memcpy(&de->name[3], data2, *(char *) data2);
-+              de->file_type |= EXT4_DIRENT_LUFID;
-+      }
-       if (!dotdot_real_len)
-               de->rec_len = ext4_rec_len_to_disk(blocksize -
--                                      (csum_size + EXT4_DIR_REC_LEN(1)),
-+                                      (csum_size + dot_reclen),
-                                       blocksize);
-       else
-               de->rec_len = ext4_rec_len_to_disk(
--                              EXT4_DIR_REC_LEN(de->name_len), blocksize);
--      strcpy(de->name, "..");
--      ext4_set_de_type(inode->i_sb, de, S_IFDIR);
-+                              EXT4_DIR_REC_LEN(de), blocksize);
-       return ext4_next_entry(de, blocksize);
- }
- static int ext4_init_new_dir(handle_t *handle, struct inode *dir,
--                           struct inode *inode)
-+                           struct inode *inode,
-+                           const void *data1, const void *data2)
- {
-+      struct tp_block param;
-       struct buffer_head *dir_block = NULL;
-       struct ext4_dir_entry_2 *de;
-       struct ext4_dir_entry_tail *t;
-@@ -2718,7 +2806,11 @@ static int ext4_init_new_dir(handle_t *handle, struct inode *dir,
-       if (IS_ERR(dir_block))
-               return PTR_ERR(dir_block);
-       de = (struct ext4_dir_entry_2 *)dir_block->b_data;
--      ext4_init_dot_dotdot(inode, de, blocksize, csum_size, dir->i_ino, 0);
-+      param.inode = inode;
-+      param.data1 = (void *)data1;
-+      param.data2 = (void *)data2;
-+      ext4_init_dot_dotdot((struct inode *)(&param), de, blocksize,
-+                           csum_size, dir->i_ino, 10);
-       set_nlink(inode, 2);
-       if (csum_size) {
-               t = EXT4_DIRENT_TAIL(dir_block->b_data, blocksize);
-@@ -2735,6 +2827,29 @@ out:
-       return err;
- }
-+/* Initialize @inode as a subdirectory of @dir, and add the
-+ * "." and ".." entries into the first directory block. */
-+int ext4_add_dot_dotdot(handle_t *handle, struct inode *dir,
-+                      struct inode *inode,
-+                      const void *data1, const void *data2)
-+{
-+      int rc;
-+
-+      if (IS_ERR(handle))
-+              return PTR_ERR(handle);
-+
-+      if (IS_DIRSYNC(dir))
-+              ext4_handle_sync(handle);
-+
-+      inode->i_op = &ext4_dir_inode_operations;
-+      inode->i_fop = &ext4_dir_operations;
-+      rc = ext4_init_new_dir(handle, dir, inode, data1, data2);
-+      if (!rc)
-+              rc = ext4_mark_inode_dirty(handle, inode);
-+      return rc;
-+}
-+EXPORT_SYMBOL(ext4_add_dot_dotdot);
-+
- static int ext4_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode)
- {
-       handle_t *handle;
-@@ -2761,7 +2876,7 @@ retry:
-       inode->i_op = &ext4_dir_inode_operations;
-       inode->i_fop = &ext4_dir_operations;
--      err = ext4_init_new_dir(handle, dir, inode);
-+      err = ext4_init_new_dir(handle, dir, inode, NULL, NULL);
-       if (err)
-               goto out_clear_inode;
-       err = ext4_mark_inode_dirty(handle, inode);
-@@ -2813,7 +2928,7 @@ int ext4_empty_dir(struct inode *inode)
-       }
-       sb = inode->i_sb;
--      if (inode->i_size < EXT4_DIR_REC_LEN(1) + EXT4_DIR_REC_LEN(2)) {
-+      if (inode->i_size < __EXT4_DIR_REC_LEN(1) + __EXT4_DIR_REC_LEN(2)) {
-               EXT4_ERROR_INODE(inode, "invalid size");
-               return 1;
-       }
-diff --git a/fs/ext4/super.c b/fs/ext4/super.c
-index 7715539..f48b36e 100644
---- a/fs/ext4/super.c
-+++ b/fs/ext4/super.c
-@@ -1159,7 +1159,7 @@ enum {
-       Opt_data_err_abort, Opt_data_err_ignore, Opt_test_dummy_encryption,
-       Opt_usrjquota, Opt_grpjquota, Opt_offusrjquota, Opt_offgrpjquota,
-       Opt_jqfmt_vfsold, Opt_jqfmt_vfsv0, Opt_jqfmt_vfsv1, Opt_quota,
--      Opt_noquota, Opt_barrier, Opt_nobarrier, Opt_err,
-+      Opt_noquota, Opt_barrier, Opt_nobarrier, Opt_err, Opt_dirdata,
-       Opt_usrquota, Opt_grpquota, Opt_i_version, Opt_dax,
-       Opt_stripe, Opt_delalloc, Opt_nodelalloc, Opt_mblk_io_submit,
-       Opt_lazytime, Opt_nolazytime,
-@@ -1230,6 +1230,7 @@ static const match_table_t tokens = {
-       {Opt_lazytime, "lazytime"},
-       {Opt_nolazytime, "nolazytime"},
-       {Opt_nodelalloc, "nodelalloc"},
-+      {Opt_dirdata, "dirdata"},
-       {Opt_removed, "mblk_io_submit"},
-       {Opt_removed, "nomblk_io_submit"},
-       {Opt_block_validity, "block_validity"},
-@@ -1444,6 +1445,7 @@ static const struct mount_opts {
-       {Opt_usrjquota, 0, MOPT_Q},
-       {Opt_grpjquota, 0, MOPT_Q},
-       {Opt_offusrjquota, 0, MOPT_Q},
-+      {Opt_dirdata, EXT4_MOUNT_DIRDATA, MOPT_SET},
-       {Opt_offgrpjquota, 0, MOPT_Q},
-       {Opt_jqfmt_vfsold, QFMT_VFS_OLD, MOPT_QFMT},
-       {Opt_jqfmt_vfsv0, QFMT_VFS_V0, MOPT_QFMT},
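The ubuntu14+16 ext4-data-in-dirent patch deleted above is the piece that let ldiskfs pack a length-prefixed payload (the Lustre FID) behind a directory entry's name, gated by the new "dirdata" mount option; that is why EXT4_DIR_REC_LEN() becomes entry-dependent in these hunks while __EXT4_DIR_REC_LEN() keeps the name-only arithmetic. The userspace sketch below only mirrors that record-length arithmetic, assuming the usual ext4 4-byte rounding; the helper names and the 17-byte payload are illustrative, not the kernel macros or the real FID encoding.

    /* Illustrative sketch of the dirdata record-length arithmetic. */
    #include <stdio.h>

    /* name-only record: 8-byte dirent header + name, rounded up to 4 bytes */
    static unsigned int rec_len_name_only(unsigned int name_len)
    {
            return (name_len + 8 + 3) & ~3u;
    }

    /* with dirdata: a NUL terminator plus a length-prefixed payload
     * (e.g. the packed Lustre FID) follows the name in the same record */
    static unsigned int rec_len_with_data(unsigned int name_len,
                                          unsigned int data_len)
    {
            return rec_len_name_only(name_len + 1 + data_len);
    }

    int main(void)
    {
            printf("\"..\" without payload: %u bytes\n", rec_len_name_only(2));
            printf("\"..\" with a 17-byte payload: %u bytes\n",
                   rec_len_with_data(2, 17));
            return 0;
    }

Run on its own this prints 12 and 28 bytes, which is the extra room ext4_find_dest_de() has to find (or give up on, in the short-form ".." case) before ext4_insert_dentry() copies the payload in.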
diff --git a/ldiskfs/kernel_patches/patches/ubuntu14+16/ext4-disable-mb-cache-001.patch b/ldiskfs/kernel_patches/patches/ubuntu14+16/ext4-disable-mb-cache-001.patch
deleted file mode 100644 (file)
index 758fd04..0000000
+++ /dev/null
@@ -1,131 +0,0 @@
-diff --git a/fs/ext4/ext4.h b/fs/ext4/ext4.h
-index f256696..d7a3413 100644
---- a/fs/ext4/ext4.h
-+++ b/fs/ext4/ext4.h
-@@ -1047,6 +1047,7 @@ struct ext4_inode_info {
- /*
-  * Mount flags set via mount options or defaults
-  */
-+#define EXT4_MOUNT_NO_MBCACHE         0x00001 /* Disable mbcache */
- #define EXT4_MOUNT_GRPID              0x00004 /* Create files with directory's group */
- #define EXT4_MOUNT_DEBUG              0x00008 /* Some debugging messages */
- #define EXT4_MOUNT_ERRORS_CONT                0x00010 /* Continue on errors */
-diff --git a/fs/ext4/super.c b/fs/ext4/super.c
-index e286670..97e5e32 100644
---- a/fs/ext4/super.c
-+++ b/fs/ext4/super.c
-@@ -1168,6 +1168,7 @@ enum {
-       Opt_nomblk_io_submit, Opt_block_validity, Opt_noblock_validity,
-       Opt_inode_readahead_blks, Opt_journal_ioprio,
-       Opt_dioread_nolock, Opt_dioread_lock,
-+      Opt_no_mbcache,
-       Opt_discard, Opt_nodiscard, Opt_init_itable, Opt_noinit_itable,
-       Opt_max_dir_size_kb, Opt_nojournal_checksum,
- };
-@@ -1247,6 +1248,7 @@ static const match_table_t tokens = {
-       {Opt_discard, "discard"},
-       {Opt_nodiscard, "nodiscard"},
-       {Opt_init_itable, "init_itable=%u"},
-+      {Opt_no_mbcache, "no_mbcache"},
-       {Opt_init_itable, "init_itable"},
-       {Opt_noinit_itable, "noinit_itable"},
-       {Opt_max_dir_size_kb, "max_dir_size_kb=%u"},
-@@ -1410,6 +1412,7 @@ static const struct mount_opts {
-       {Opt_noauto_da_alloc, EXT4_MOUNT_NO_AUTO_DA_ALLOC, MOPT_SET},
-       {Opt_auto_da_alloc, EXT4_MOUNT_NO_AUTO_DA_ALLOC, MOPT_CLEAR},
-       {Opt_noinit_itable, EXT4_MOUNT_INIT_INODE_TABLE, MOPT_CLEAR},
-+      {Opt_no_mbcache, EXT4_MOUNT_NO_MBCACHE, MOPT_SET},
-       {Opt_commit, 0, MOPT_GTE0},
-       {Opt_max_batch_time, 0, MOPT_GTE0},
-       {Opt_min_batch_time, 0, MOPT_GTE0},
-diff --git a/fs/ext4/xattr.c b/fs/ext4/xattr.c
-index 22ba197..d7e225e 100644
---- a/fs/ext4/xattr.c
-+++ b/fs/ext4/xattr.c
-@@ -80,7 +80,7 @@
- # define ea_bdebug(bh, fmt, ...)      no_printk(fmt, ##__VA_ARGS__)
- #endif
--static void ext4_xattr_cache_insert(struct mb_cache *, struct buffer_head *);
-+static void _ext4_xattr_cache_insert(struct mb_cache *, struct buffer_head *);
- static struct buffer_head *ext4_xattr_cache_find(struct inode *,
-                                                struct ext4_xattr_header *,
-                                                struct mb_cache_entry **);
-@@ -397,7 +397,8 @@ bad_block:
-               error = -EFSCORRUPTED;
-               goto cleanup;
-       }
--      ext4_xattr_cache_insert(ext4_mb_cache, bh);
-+      if (!test_opt(inode->i_sb, NO_MBCACHE))
-+              _ext4_xattr_cache_insert(ext4_mb_cache, bh);
-       entry = BFIRST(bh);
-       error = ext4_xattr_find_entry(&entry, name_index, name, bh->b_size, 1,
-                                     inode);
-@@ -561,7 +562,8 @@ ext4_xattr_block_list(struct dentry *dentry, char *buffer, size_t buffer_size)
-               error = -EFSCORRUPTED;
-               goto cleanup;
-       }
--      ext4_xattr_cache_insert(ext4_mb_cache, bh);
-+      if (!test_opt(inode->i_sb, NO_MBCACHE))
-+              _ext4_xattr_cache_insert(ext4_mb_cache, bh);
-       error = ext4_xattr_list_entries(dentry, BFIRST(bh), buffer, buffer_size);
- cleanup:
-@@ -669,7 +671,7 @@ ext4_xattr_release_block(handle_t *handle, struct inode *inode,
-       lock_buffer(bh);
-       if (BHDR(bh)->h_refcount == cpu_to_le32(1)) {
-               ea_bdebug(bh, "refcount now=0; freeing");
--              if (ce)
-+              if (ce && !test_opt(inode->i_sb, NO_MBCACHE))
-                       mb_cache_entry_free(ce);
-               get_bh(bh);
-               unlock_buffer(bh);
-@@ -1086,7 +1088,7 @@ ext4_xattr_block_set(handle_t *handle, struct inode *inode,
-               lock_buffer(bs->bh);
-               if (header(s->base)->h_refcount == cpu_to_le32(1)) {
--                      if (ce) {
-+                      if (ce && !test_opt(inode->i_sb, NO_MBCACHE)) {
-                               mb_cache_entry_free(ce);
-                               ce = NULL;
-                       }
-@@ -1096,8 +1098,9 @@ ext4_xattr_block_set(handle_t *handle, struct inode *inode,
-                               if (!IS_LAST_ENTRY(s->first))
-                                       ext4_xattr_rehash(header(s->base),
-                                                         s->here);
--                              ext4_xattr_cache_insert(ext4_mb_cache,
--                                      bs->bh);
-+                              if (!test_opt(inode->i_sb, NO_MBCACHE))
-+                                      _ext4_xattr_cache_insert(ext4_mb_cache,
-+                                                              bs->bh);
-                       }
-                       ext4_xattr_block_csum_set(inode, bs->bh);
-                       unlock_buffer(bs->bh);
-@@ -1231,7 +1234,8 @@ getblk_failed:
-                       ext4_xattr_block_csum_set(inode, new_bh);
-                       set_buffer_uptodate(new_bh);
-                       unlock_buffer(new_bh);
--                      ext4_xattr_cache_insert(ext4_mb_cache, new_bh);
-+                      if (!test_opt(inode->i_sb, NO_MBCACHE))
-+                              _ext4_xattr_cache_insert(ext4_mb_cache, new_bh);
-                       error = ext4_handle_dirty_metadata(handle, inode,
-                                                          new_bh);
-                       if (error)
-@@ -2024,7 +2028,7 @@ ext4_xattr_put_super(struct super_block *sb)
-  * Returns 0, or a negative error number on failure.
-  */
- static void
--ext4_xattr_cache_insert(struct mb_cache *ext4_mb_cache, struct buffer_head *bh)
-+_ext4_xattr_cache_insert(struct mb_cache *ext4_mb_cache, struct buffer_head *bh)
- {
-       __u32 hash = le32_to_cpu(BHDR(bh)->h_hash);
-       struct mb_cache_entry *ce;
-@@ -2103,6 +2107,8 @@ ext4_xattr_cache_find(struct inode *inode, struct ext4_xattr_header *header,
-       struct mb_cache_entry *ce;
-       struct mb_cache *ext4_mb_cache = EXT4_GET_MB_CACHE(inode);
-+      if (test_opt(inode->i_sb, NO_MBCACHE))
-+              return NULL;
-       if (!header->h_hash)
-               return NULL;  /* never share */
-       ea_idebug(inode, "looking for cached blocks [%x]", (int)hash);
diff --git a/ldiskfs/kernel_patches/patches/ubuntu14+16/ext4-disable-mb-cache.patch b/ldiskfs/kernel_patches/patches/ubuntu14+16/ext4-disable-mb-cache.patch
deleted file mode 100644 (file)
index bdae7ff..0000000
+++ /dev/null
@@ -1,131 +0,0 @@
-diff --git a/fs/ext4/ext4.h b/fs/ext4/ext4.h
-index 217fdcc..9abdbde 100644
---- a/fs/ext4/ext4.h
-+++ b/fs/ext4/ext4.h
-@@ -1046,6 +1046,7 @@ struct ext4_inode_info {
- /*
-  * Mount flags set via mount options or defaults
-  */
-+#define EXT4_MOUNT_NO_MBCACHE         0x00001 /* Disable mbcache */
- #define EXT4_MOUNT_GRPID              0x00004 /* Create files with directory's group */
- #define EXT4_MOUNT_DEBUG              0x00008 /* Some debugging messages */
- #define EXT4_MOUNT_ERRORS_CONT                0x00010 /* Continue on errors */
-diff --git a/fs/ext4/super.c b/fs/ext4/super.c
-index f48b36e..02fe65b 100644
---- a/fs/ext4/super.c
-+++ b/fs/ext4/super.c
-@@ -1166,6 +1166,7 @@ enum {
-       Opt_nomblk_io_submit, Opt_block_validity, Opt_noblock_validity,
-       Opt_inode_readahead_blks, Opt_journal_ioprio,
-       Opt_dioread_nolock, Opt_dioread_lock,
-+      Opt_no_mbcache,
-       Opt_discard, Opt_nodiscard, Opt_init_itable, Opt_noinit_itable,
-       Opt_max_dir_size_kb, Opt_nojournal_checksum,
- };
-@@ -1245,6 +1246,7 @@ static const match_table_t tokens = {
-       {Opt_discard, "discard"},
-       {Opt_nodiscard, "nodiscard"},
-       {Opt_init_itable, "init_itable=%u"},
-+      {Opt_no_mbcache, "no_mbcache"},
-       {Opt_init_itable, "init_itable"},
-       {Opt_noinit_itable, "noinit_itable"},
-       {Opt_max_dir_size_kb, "max_dir_size_kb=%u"},
-@@ -1408,6 +1410,7 @@ static const struct mount_opts {
-       {Opt_noauto_da_alloc, EXT4_MOUNT_NO_AUTO_DA_ALLOC, MOPT_SET},
-       {Opt_auto_da_alloc, EXT4_MOUNT_NO_AUTO_DA_ALLOC, MOPT_CLEAR},
-       {Opt_noinit_itable, EXT4_MOUNT_INIT_INODE_TABLE, MOPT_CLEAR},
-+      {Opt_no_mbcache, EXT4_MOUNT_NO_MBCACHE, MOPT_SET},
-       {Opt_commit, 0, MOPT_GTE0},
-       {Opt_max_batch_time, 0, MOPT_GTE0},
-       {Opt_min_batch_time, 0, MOPT_GTE0},
-diff --git a/fs/ext4/xattr.c b/fs/ext4/xattr.c
-index b69145b..0e0557e 100644
---- a/fs/ext4/xattr.c
-+++ b/fs/ext4/xattr.c
-@@ -80,7 +80,7 @@
- # define ea_bdebug(bh, fmt, ...)      no_printk(fmt, ##__VA_ARGS__)
- #endif
--static void ext4_xattr_cache_insert(struct mb_cache *, struct buffer_head *);
-+static void _ext4_xattr_cache_insert(struct mb_cache *, struct buffer_head *);
- static struct buffer_head *ext4_xattr_cache_find(struct inode *,
-                                                struct ext4_xattr_header *,
-                                                struct mb_cache_entry **);
-@@ -402,7 +402,8 @@ bad_block:
-               error = -EFSCORRUPTED;
-               goto cleanup;
-       }
--      ext4_xattr_cache_insert(ext4_mb_cache, bh);
-+      if (!test_opt(inode->i_sb, NO_MBCACHE))
-+              _ext4_xattr_cache_insert(ext4_mb_cache, bh);
-       entry = BFIRST(bh);
-       error = ext4_xattr_find_entry(&entry, name_index, name, bh->b_size, 1,
-                                     inode);
-@@ -566,7 +567,8 @@ ext4_xattr_block_list(struct dentry *dentry, char *buffer, size_t buffer_size)
-               error = -EFSCORRUPTED;
-               goto cleanup;
-       }
--      ext4_xattr_cache_insert(ext4_mb_cache, bh);
-+      if (!test_opt(inode->i_sb, NO_MBCACHE))
-+              _ext4_xattr_cache_insert(ext4_mb_cache, bh);
-       error = ext4_xattr_list_entries(dentry, BFIRST(bh), buffer, buffer_size);
- cleanup:
-@@ -674,7 +676,7 @@ ext4_xattr_release_block(handle_t *handle, struct inode *inode,
-       lock_buffer(bh);
-       if (BHDR(bh)->h_refcount == cpu_to_le32(1)) {
-               ea_bdebug(bh, "refcount now=0; freeing");
--              if (ce)
-+              if (ce && !test_opt(inode->i_sb, NO_MBCACHE))
-                       mb_cache_entry_free(ce);
-               get_bh(bh);
-               unlock_buffer(bh);
-@@ -1091,7 +1093,7 @@ ext4_xattr_block_set(handle_t *handle, struct inode *inode,
-               lock_buffer(bs->bh);
-               if (header(s->base)->h_refcount == cpu_to_le32(1)) {
--                      if (ce) {
-+                      if (ce && !test_opt(inode->i_sb, NO_MBCACHE)) {
-                               mb_cache_entry_free(ce);
-                               ce = NULL;
-                       }
-@@ -1101,8 +1103,9 @@ ext4_xattr_block_set(handle_t *handle, struct inode *inode,
-                               if (!IS_LAST_ENTRY(s->first))
-                                       ext4_xattr_rehash(header(s->base),
-                                                         s->here);
--                              ext4_xattr_cache_insert(ext4_mb_cache,
--                                      bs->bh);
-+                              if (!test_opt(inode->i_sb, NO_MBCACHE))
-+                                      _ext4_xattr_cache_insert(ext4_mb_cache,
-+                                                              bs->bh);
-                       }
-                       unlock_buffer(bs->bh);
-                       if (error == -EFSCORRUPTED)
-@@ -1233,7 +1236,8 @@ getblk_failed:
-                       memcpy(new_bh->b_data, s->base, new_bh->b_size);
-                       set_buffer_uptodate(new_bh);
-                       unlock_buffer(new_bh);
--                      ext4_xattr_cache_insert(ext4_mb_cache, new_bh);
-+                      if (!test_opt(inode->i_sb, NO_MBCACHE))
-+                              _ext4_xattr_cache_insert(ext4_mb_cache, new_bh);
-                       error = ext4_handle_dirty_xattr_block(handle,
-                                                             inode, new_bh);
-                       if (error)
-@@ -2026,7 +2030,7 @@ ext4_xattr_put_super(struct super_block *sb)
-  * Returns 0, or a negative error number on failure.
-  */
- static void
--ext4_xattr_cache_insert(struct mb_cache *ext4_mb_cache, struct buffer_head *bh)
-+_ext4_xattr_cache_insert(struct mb_cache *ext4_mb_cache, struct buffer_head *bh)
- {
-       __u32 hash = le32_to_cpu(BHDR(bh)->h_hash);
-       struct mb_cache_entry *ce;
-@@ -2105,6 +2109,8 @@ ext4_xattr_cache_find(struct inode *inode, struct ext4_xattr_header *header,
-       struct mb_cache_entry *ce;
-       struct mb_cache *ext4_mb_cache = EXT4_GET_MB_CACHE(inode);
-+      if (test_opt(inode->i_sb, NO_MBCACHE))
-+              return NULL;
-       if (!header->h_hash)
-               return NULL;  /* never share */
-       ea_idebug(inode, "looking for cached blocks [%x]", (int)hash);
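Both no_mbcache patches deleted above (the -001 file is a variant of the same change for a different Ubuntu 4.4 kernel revision) introduce a single mount flag, EXT4_MOUNT_NO_MBCACHE, and then guard every mbcache insert, lookup and free in fs/ext4/xattr.c with a test_opt() check, the motivation being that xattr blocks on Lustre targets are rarely shared, so hashing them into the cache mostly costs CPU and lock traffic. A minimal sketch of that gate pattern follows; the struct and function names around the flag are invented for illustration, and only EXT4_MOUNT_NO_MBCACHE and its value come from the patch.

    #include <stdio.h>

    #define EXT4_MOUNT_NO_MBCACHE 0x00001   /* bit defined by the patch */

    struct fake_sbi { unsigned long s_mount_opt; };  /* stand-in for ext4_sb_info */

    static int no_mbcache(const struct fake_sbi *sbi)
    {
            return (sbi->s_mount_opt & EXT4_MOUNT_NO_MBCACHE) != 0;
    }

    int main(void)
    {
            struct fake_sbi sbi = { .s_mount_opt = EXT4_MOUNT_NO_MBCACHE };

            if (no_mbcache(&sbi))
                    puts("no_mbcache set: skip the mbcache for this xattr block");
            else
                    puts("default: hash the xattr block into the mbcache for sharing");
            return 0;
    }

Note that the patches repeat this check at each caller of _ext4_xattr_cache_insert() rather than moving it inside the helper, which keeps the hunks small but is also why the same three-line pattern shows up throughout the diff above.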
diff --git a/ldiskfs/kernel_patches/patches/ubuntu14+16/ext4-large-dir-001.patch b/ldiskfs/kernel_patches/patches/ubuntu14+16/ext4-large-dir-001.patch
deleted file mode 100644 (file)
index 2dcf94c..0000000
+++ /dev/null
@@ -1,339 +0,0 @@
-diff --git a/fs/ext4/ext4.h b/fs/ext4/ext4.h
-index 7db2188..0242856 100644
---- a/fs/ext4/ext4.h
-+++ b/fs/ext4/ext4.h
-@@ -1789,6 +1789,7 @@ EXT4_FEATURE_INCOMPAT_FUNCS(encrypt,             ENCRYPT)
-                                        EXT4_FEATURE_INCOMPAT_MMP | \
-                                        EXT4_FEATURE_INCOMPAT_DIRDATA| \
-                                        EXT4_FEATURE_INCOMPAT_INLINE_DATA | \
-+                                       EXT4_FEATURE_INCOMPAT_LARGEDIR | \
-                                        EXT4_FEATURE_INCOMPAT_ENCRYPT | \
-                                        EXT4_FEATURE_INCOMPAT_CSUM_SEED)
- #define EXT4_FEATURE_RO_COMPAT_SUPP   (EXT4_FEATURE_RO_COMPAT_SPARSE_SUPER| \
-@@ -2262,6 +2263,9 @@ struct mmpd_data {
- # define NORET_TYPE   /**/
- # define ATTRIB_NORET __attribute__((noreturn))
- # define NORET_AND    noreturn,
-+/* htree levels for ext4 */
-+#define EXT4_HTREE_LEVEL_COMPAT 2
-+#define EXT4_HTREE_LEVEL      3
- struct ext4_xattr_ino_array {
-       unsigned int xia_count;         /* # of used item in the array */
-@@ -2883,13 +2887,16 @@ static inline void ext4_r_blocks_count_set(struct ext4_super_block *es,
-       es->s_r_blocks_count_hi = cpu_to_le32(blk >> 32);
- }
--static inline loff_t ext4_isize(struct ext4_inode *raw_inode)
-+static inline loff_t ext4_isize(struct super_block *sb,
-+                              struct ext4_inode *raw_inode)
- {
--      if (S_ISREG(le16_to_cpu(raw_inode->i_mode)))
-+      if (S_ISREG(le16_to_cpu(raw_inode->i_mode)) ||
-+          (EXT4_HAS_INCOMPAT_FEATURE(sb, EXT4_FEATURE_INCOMPAT_LARGEDIR) &&
-+          S_ISDIR(le16_to_cpu(raw_inode->i_mode))))
-               return ((loff_t)le32_to_cpu(raw_inode->i_size_high) << 32) |
-                       le32_to_cpu(raw_inode->i_size_lo);
--      else
--              return (loff_t) le32_to_cpu(raw_inode->i_size_lo);
-+
-+      return (loff_t)le32_to_cpu(raw_inode->i_size_lo);
- }
- static inline void ext4_isize_set(struct ext4_inode *raw_inode, loff_t i_size)
-diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c
-index 08c0cba..44e3ad4 100644
---- a/fs/ext4/inode.c
-+++ b/fs/ext4/inode.c
-@@ -4305,7 +4305,7 @@ struct inode *ext4_iget(struct super_block *sb, unsigned long ino)
-       if (ext4_has_feature_64bit(sb))
-               ei->i_file_acl |=
-                       ((__u64)le16_to_cpu(raw_inode->i_file_acl_high)) << 32;
--      inode->i_size = ext4_isize(raw_inode);
-+      inode->i_size = ext4_isize(sb, raw_inode);
-       if ((size = i_size_read(inode)) < 0) {
-               EXT4_ERROR_INODE(inode, "bad i_size value: %lld", size);
-               ret = -EFSCORRUPTED;
-@@ -4627,7 +4627,7 @@ static int ext4_do_update_inode(handle_t *handle,
-               raw_inode->i_file_acl_high =
-                       cpu_to_le16(ei->i_file_acl >> 32);
-       raw_inode->i_file_acl_lo = cpu_to_le32(ei->i_file_acl);
--      if (ei->i_disksize != ext4_isize(raw_inode)) {
-+      if (ei->i_disksize != ext4_isize(inode->i_sb, raw_inode)) {
-               ext4_isize_set(raw_inode, ei->i_disksize);
-               need_datasync = 1;
-       }
-diff --git a/fs/ext4/namei.c b/fs/ext4/namei.c
-index 11bc299..2543b8f 100644
---- a/fs/ext4/namei.c
-+++ b/fs/ext4/namei.c
-@@ -517,7 +517,14 @@ struct dx_root_info *dx_get_dx_info(struct ext4_dir_entry_2 *de)
- static inline ext4_lblk_t dx_get_block(struct dx_entry *entry)
- {
--      return le32_to_cpu(entry->block) & 0x00ffffff;
-+      return le32_to_cpu(entry->block) & 0x0fffffff;
-+}
-+
-+static inline int
-+ext4_dir_htree_level(struct super_block *sb)
-+{
-+      return EXT4_HAS_INCOMPAT_FEATURE(sb, EXT4_FEATURE_INCOMPAT_LARGEDIR) ?
-+              EXT4_HTREE_LEVEL : EXT4_HTREE_LEVEL_COMPAT;
- }
- static inline void dx_set_block(struct dx_entry *entry, ext4_lblk_t value)
-@@ -746,6 +753,7 @@ dx_probe(struct ext4_filename *fname, struct inode *dir,
-       struct dx_frame *ret_err = ERR_PTR(ERR_BAD_DX_DIR);
-       u32 hash;
-+      memset(frame_in, 0, EXT4_HTREE_LEVEL * sizeof(frame_in[0]));
-       frame->bh = ext4_read_dirblock(dir, 0, INDEX);
-       if (IS_ERR(frame->bh))
-               return (struct dx_frame *) frame->bh;
-@@ -775,9 +783,13 @@ dx_probe(struct ext4_filename *fname, struct inode *dir,
-       }
-       indirect = info->indirect_levels;
--      if (indirect > 1) {
--              ext4_warning_inode(dir, "Unimplemented hash depth: %#06x",
--                                 info->indirect_levels);
-+      if (indirect >= ext4_dir_htree_level(dir->i_sb)) {
-+              ext4_warning_inode(dir, "htree depth: %#06x exceed max depth %u",
-+                                 indirect, ext4_dir_htree_level(dir->i_sb));
-+              if (ext4_dir_htree_level(dir->i_sb) < EXT4_HTREE_LEVEL) {
-+                      ext4_warning(dir->i_sb, "Enable large directory "
-+                                              "feature to access it");
-+              }
-               goto fail;
-       }
-@@ -867,12 +879,20 @@ fail:
- static void dx_release(struct dx_frame *frames)
- {
-+      int i;
-+      struct dx_root_info *info;
-+
-       if (frames[0].bh == NULL)
-               return;
--      if (((struct dx_root *)frames[0].bh->b_data)->info.indirect_levels)
--              brelse(frames[1].bh);
--      brelse(frames[0].bh);
-+      for (i = 0, info = dx_get_dx_info((struct ext4_dir_entry_2 *)frames[0].bh->b_data);
-+           i <= info->indirect_levels;
-+           i++) {
-+              if (frames[i].bh == NULL)
-+                      break;
-+              brelse(frames[i].bh);
-+              frames[i].bh = NULL;
-+      }
- }
- /*
-@@ -1055,7 +1075,7 @@ int ext4_htree_fill_tree(struct file *dir_file, __u32 start_hash,
- {
-       struct dx_hash_info hinfo;
-       struct ext4_dir_entry_2 *de;
--      struct dx_frame frames[2], *frame;
-+      struct dx_frame frames[EXT4_HTREE_LEVEL], *frame;
-       struct inode *dir;
-       ext4_lblk_t block;
-       int count = 0;
-@@ -1514,7 +1534,7 @@ static struct buffer_head * ext4_dx_find_entry(struct inode *dir,
-                       struct ext4_dir_entry_2 **res_dir)
- {
-       struct super_block * sb = dir->i_sb;
--      struct dx_frame frames[2], *frame;
-+      struct dx_frame frames[EXT4_HTREE_LEVEL], *frame;
-       const struct qstr *d_name = fname->usr_fname;
-       struct buffer_head *bh;
-       ext4_lblk_t block;
-@@ -2002,7 +2022,7 @@ static int make_indexed_dir(handle_t *handle, struct ext4_filename *fname,
- {
-       struct inode    *dir = d_inode(dentry->d_parent);
-       struct buffer_head *bh2;
--      struct dx_frame frames[2], *frame;
-+      struct dx_frame frames[EXT4_HTREE_LEVEL], *frame;
-       struct dx_entry *entries;
-       struct ext4_dir_entry_2 *de, *de2, *dot_de, *dotdot_de;
-       struct ext4_dir_entry_tail *t;
-@@ -2312,14 +2332,17 @@ out:
- static int ext4_dx_add_entry(handle_t *handle, struct ext4_filename *fname,
-                            struct dentry *dentry, struct inode *inode)
- {
--      struct dx_frame frames[2], *frame;
-+      struct dx_frame frames[EXT4_HTREE_LEVEL], *frame;
-       struct dx_entry *entries, *at;
-       struct buffer_head *bh;
-       struct inode *dir = d_inode(dentry->d_parent);
-       struct super_block *sb = dir->i_sb;
-       struct ext4_dir_entry_2 *de;
-+      int restart;
-       int err;
-+again:
-+      restart = 0;
-       frame = dx_probe(fname, dir, NULL, frames);
-       if (IS_ERR(frame))
-               return PTR_ERR(frame);
-@@ -2332,33 +2355,48 @@ static int ext4_dx_add_entry(handle_t *handle, struct ext4_filename *fname,
-               goto cleanup;
-       }
--      BUFFER_TRACE(bh, "get_write_access");
--      err = ext4_journal_get_write_access(handle, bh);
--      if (err)
--              goto journal_error;
--
-       err = add_dirent_to_buf(handle, fname, dir, inode, NULL, bh, dentry);
-       if (err != -ENOSPC)
-               goto cleanup;
-+      err = 0;
-       /* Block full, should compress but for now just split */
-       dxtrace(printk(KERN_DEBUG "using %u of %u node entries\n",
-                      dx_get_count(entries), dx_get_limit(entries)));
-       /* Need to split index? */
-       if (dx_get_count(entries) == dx_get_limit(entries)) {
-               ext4_lblk_t newblock;
--              unsigned icount = dx_get_count(entries);
--              int levels = frame - frames;
-+              int levels = frame - frames + 1;
-+              unsigned icount;
-+              int add_level = 1;
-               struct dx_entry *entries2;
-               struct dx_node *node2;
-               struct buffer_head *bh2;
--              if (levels && (dx_get_count(frames->entries) ==
--                             dx_get_limit(frames->entries))) {
--                      ext4_warning_inode(dir, "Directory index full!");
-+              while (frame > frames) {
-+                      if (dx_get_count((frame - 1)->entries) <
-+                          dx_get_limit((frame - 1)->entries)) {
-+                              add_level = 0;
-+                              break;
-+                      }
-+                      frame--; /* split higher index block */
-+                      at = frame->at;
-+                      entries = frame->entries;
-+                      restart = 1;
-+              }
-+              if (add_level && levels == ext4_dir_htree_level(sb)) {
-+                      ext4_warning(sb, "inode %lu: comm %s: index %u: reach max htree level %u",
-+                                       dir->i_ino, current->comm, levels,
-+                                       ext4_dir_htree_level(sb));
-+                      if (ext4_dir_htree_level(sb) < EXT4_HTREE_LEVEL) {
-+                              ext4_warning(sb, "Large directory feature is"
-+                                               "not enabled on this "
-+                                               "filesystem");
-+                      }
-                       err = -ENOSPC;
-                       goto cleanup;
-               }
-+              icount = dx_get_count(entries);
-               bh2 = ext4_append(handle, dir, &newblock);
-               if (IS_ERR(bh2)) {
-                       err = PTR_ERR(bh2);
-@@ -2373,7 +2411,7 @@ static int ext4_dx_add_entry(handle_t *handle, struct ext4_filename *fname,
-               err = ext4_journal_get_write_access(handle, frame->bh);
-               if (err)
-                       goto journal_error;
--              if (levels) {
-+              if (!add_level) {
-                       unsigned icount1 = icount/2, icount2 = icount - icount1;
-                       unsigned hash2 = dx_get_hash(entries + icount1);
-                       dxtrace(printk(KERN_DEBUG "Split index %i/%i\n",
-@@ -2381,7 +2419,7 @@ static int ext4_dx_add_entry(handle_t *handle, struct ext4_filename *fname,
-                       BUFFER_TRACE(frame->bh, "get_write_access"); /* index root */
-                       err = ext4_journal_get_write_access(handle,
--                                                           frames[0].bh);
-+                                                          (frame - 1)->bh);
-                       if (err)
-                               goto journal_error;
-@@ -2397,19 +2435,27 @@ static int ext4_dx_add_entry(handle_t *handle, struct ext4_filename *fname,
-                               frame->entries = entries = entries2;
-                               swap(frame->bh, bh2);
-                       }
--                      dx_insert_block(frames + 0, hash2, newblock);
--                      dxtrace(dx_show_index("node", frames[1].entries));
-+                      dx_insert_block(frame - 1, hash2, newblock);
-+                      dxtrace(dx_show_index("node", frame->entries));
-                       dxtrace(dx_show_index("node",
--                             ((struct dx_node *) bh2->b_data)->entries));
-+                             ((struct dx_node *)bh2->b_data)->entries));
-                       err = ext4_handle_dirty_dx_node(handle, dir, bh2);
-                       if (err)
-                               goto journal_error;
-                       brelse (bh2);
-+                      err = ext4_handle_dirty_dx_node(handle, dir,
-+                                                 (frame - 1)->bh);
-+                      if (err)
-+                              goto journal_error;
-+                      if (restart) {
-+                              err = ext4_handle_dirty_dx_node(handle, dir,
-+                                                         frame->bh);
-+                              goto journal_error;
-+                      }
-               } else {
-                       struct dx_root_info *info;
--                      dxtrace(printk(KERN_DEBUG
--                                     "Creating second level index...\n"));
--                      memcpy((char *) entries2, (char *) entries,
-+
-+                      memcpy((char *)entries2, (char *)entries,
-                              icount * sizeof(struct dx_entry));
-                       dx_set_limit(entries2, dx_node_limit(dir));
-@@ -2418,22 +2462,17 @@ static int ext4_dx_add_entry(handle_t *handle, struct ext4_filename *fname,
-                       dx_set_block(entries + 0, newblock);
-                       info = dx_get_dx_info((struct ext4_dir_entry_2 *)
-                                             frames[0].bh->b_data);
--                      info->indirect_levels = 1;
--
--                      /* Add new access path frame */
--                      frame = frames + 1;
--                      frame->at = at = at - entries + entries2;
--                      frame->entries = entries = entries2;
--                      frame->bh = bh2;
--                      err = ext4_journal_get_write_access(handle,
--                                                           frame->bh);
-+                      info->indirect_levels += 1;
-+                      dxtrace(printk(KERN_DEBUG
-+                                     "Creating %d level index...\n",
-+                                     info->indirect_levels));
-+                      err = ext4_handle_dirty_dx_node(handle, dir, frame->bh);
-                       if (err)
-                               goto journal_error;
--              }
--              err = ext4_handle_dirty_dx_node(handle, dir, frames[0].bh);
--              if (err) {
--                      ext4_std_error(inode->i_sb, err);
--                      goto cleanup;
-+                      err = ext4_handle_dirty_dx_node(handle, dir, bh2);
-+                      brelse(bh2);
-+                      restart = 1;
-+                      goto journal_error;
-               }
-       }
-       de = do_split(handle, dir, &bh, frame, &fname->hinfo);
-@@ -2446,10 +2486,14 @@ static int ext4_dx_add_entry(handle_t *handle, struct ext4_filename *fname,
-       goto cleanup;
- journal_error:
--      ext4_std_error(dir->i_sb, err);
-+      ext4_std_error(dir->i_sb, err); /* this is a no-op if err == 0 */
- cleanup:
-       brelse(bh);
-       dx_release(frames);
-+      /* @restart is true means htree-path has been changed, we need to
-+       * repeat dx_probe() to find out valid htree-path */
-+      if (restart && err == 0)
-+              goto again;
-       return err;
- }
diff --git a/ldiskfs/kernel_patches/patches/ubuntu14+16/ext4-large-dir.patch b/ldiskfs/kernel_patches/patches/ubuntu14+16/ext4-large-dir.patch
deleted file mode 100644 (file)
index 607a875..0000000
+++ /dev/null
@@ -1,339 +0,0 @@
-diff --git a/fs/ext4/ext4.h b/fs/ext4/ext4.h
-index 5b6ec8f..2d22f1a 100644
---- a/fs/ext4/ext4.h
-+++ b/fs/ext4/ext4.h
-@@ -1788,6 +1788,7 @@ EXT4_FEATURE_INCOMPAT_FUNCS(encrypt,             ENCRYPT)
-                                        EXT4_FEATURE_INCOMPAT_MMP | \
-                                        EXT4_FEATURE_INCOMPAT_DIRDATA| \
-                                        EXT4_FEATURE_INCOMPAT_INLINE_DATA | \
-+                                       EXT4_FEATURE_INCOMPAT_LARGEDIR | \
-                                        EXT4_FEATURE_INCOMPAT_ENCRYPT | \
-                                        EXT4_FEATURE_INCOMPAT_CSUM_SEED)
- #define EXT4_FEATURE_RO_COMPAT_SUPP   (EXT4_FEATURE_RO_COMPAT_SPARSE_SUPER| \
-@@ -2261,6 +2262,9 @@ struct mmpd_data {
- # define NORET_TYPE   /**/
- # define ATTRIB_NORET __attribute__((noreturn))
- # define NORET_AND    noreturn,
-+/* htree levels for ext4 */
-+#define EXT4_HTREE_LEVEL_COMPAT 2
-+#define EXT4_HTREE_LEVEL      3
- struct ext4_xattr_ino_array {
-       unsigned int xia_count;         /* # of used item in the array */
-@@ -2882,13 +2886,16 @@ static inline void ext4_r_blocks_count_set(struct ext4_super_block *es,
-       es->s_r_blocks_count_hi = cpu_to_le32(blk >> 32);
- }
--static inline loff_t ext4_isize(struct ext4_inode *raw_inode)
-+static inline loff_t ext4_isize(struct super_block *sb,
-+                              struct ext4_inode *raw_inode)
- {
--      if (S_ISREG(le16_to_cpu(raw_inode->i_mode)))
-+      if (S_ISREG(le16_to_cpu(raw_inode->i_mode)) ||
-+          (EXT4_HAS_INCOMPAT_FEATURE(sb, EXT4_FEATURE_INCOMPAT_LARGEDIR) &&
-+          S_ISDIR(le16_to_cpu(raw_inode->i_mode))))
-               return ((loff_t)le32_to_cpu(raw_inode->i_size_high) << 32) |
-                       le32_to_cpu(raw_inode->i_size_lo);
--      else
--              return (loff_t) le32_to_cpu(raw_inode->i_size_lo);
-+
-+      return (loff_t)le32_to_cpu(raw_inode->i_size_lo);
- }
- static inline void ext4_isize_set(struct ext4_inode *raw_inode, loff_t i_size)
-diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c
-index 3034ceb..7c24ae1 100644
---- a/fs/ext4/inode.c
-+++ b/fs/ext4/inode.c
-@@ -4304,7 +4304,7 @@ struct inode *ext4_iget(struct super_block *sb, unsigned long ino)
-       if (ext4_has_feature_64bit(sb))
-               ei->i_file_acl |=
-                       ((__u64)le16_to_cpu(raw_inode->i_file_acl_high)) << 32;
--      inode->i_size = ext4_isize(raw_inode);
-+      inode->i_size = ext4_isize(sb, raw_inode);
-       ei->i_disksize = inode->i_size;
- #ifdef CONFIG_QUOTA
-       ei->i_reserved_quota = 0;
-@@ -4621,7 +4621,7 @@ static int ext4_do_update_inode(handle_t *handle,
-               raw_inode->i_file_acl_high =
-                       cpu_to_le16(ei->i_file_acl >> 32);
-       raw_inode->i_file_acl_lo = cpu_to_le32(ei->i_file_acl);
--      if (ei->i_disksize != ext4_isize(raw_inode)) {
-+      if (ei->i_disksize != ext4_isize(inode->i_sb, raw_inode)) {
-               ext4_isize_set(raw_inode, ei->i_disksize);
-               need_datasync = 1;
-       }
-diff --git a/fs/ext4/namei.c b/fs/ext4/namei.c
-index f6465b6..3f70bca 100644
---- a/fs/ext4/namei.c
-+++ b/fs/ext4/namei.c
-@@ -517,7 +517,14 @@ struct dx_root_info *dx_get_dx_info(struct ext4_dir_entry_2 *de)
- static inline ext4_lblk_t dx_get_block(struct dx_entry *entry)
- {
--      return le32_to_cpu(entry->block) & 0x00ffffff;
-+      return le32_to_cpu(entry->block) & 0x0fffffff;
-+}
-+
-+static inline int
-+ext4_dir_htree_level(struct super_block *sb)
-+{
-+      return EXT4_HAS_INCOMPAT_FEATURE(sb, EXT4_FEATURE_INCOMPAT_LARGEDIR) ?
-+              EXT4_HTREE_LEVEL : EXT4_HTREE_LEVEL_COMPAT;
- }
- static inline void dx_set_block(struct dx_entry *entry, ext4_lblk_t value)
-@@ -746,6 +753,7 @@ dx_probe(struct ext4_filename *fname, struct inode *dir,
-       struct dx_frame *ret_err = ERR_PTR(ERR_BAD_DX_DIR);
-       u32 hash;
-+      memset(frame_in, 0, EXT4_HTREE_LEVEL * sizeof(frame_in[0]));
-       frame->bh = ext4_read_dirblock(dir, 0, INDEX);
-       if (IS_ERR(frame->bh))
-               return (struct dx_frame *) frame->bh;
-@@ -775,9 +783,13 @@ dx_probe(struct ext4_filename *fname, struct inode *dir,
-       }
-       indirect = info->indirect_levels;
--      if (indirect > 1) {
--              ext4_warning_inode(dir, "Unimplemented hash depth: %#06x",
--                                 info->indirect_levels);
-+      if (indirect >= ext4_dir_htree_level(dir->i_sb)) {
-+              ext4_warning_inode(dir, "htree depth: %#06x exceed max depth %u",
-+                                 indirect, ext4_dir_htree_level(dir->i_sb));
-+              if (ext4_dir_htree_level(dir->i_sb) < EXT4_HTREE_LEVEL) {
-+                      ext4_warning(dir->i_sb, "Enable large directory "
-+                                              "feature to access it");
-+              }
-               goto fail;
-       }
-@@ -867,12 +879,20 @@ fail:
- static void dx_release(struct dx_frame *frames)
- {
-+      int i;
-+      struct dx_root_info *info;
-+
-       if (frames[0].bh == NULL)
-               return;
--      if (((struct dx_root *)frames[0].bh->b_data)->info.indirect_levels)
--              brelse(frames[1].bh);
--      brelse(frames[0].bh);
-+      for (i = 0, info = dx_get_dx_info((struct ext4_dir_entry_2 *)frames[0].bh->b_data);
-+           i <= info->indirect_levels;
-+           i++) {
-+              if (frames[i].bh == NULL)
-+                      break;
-+              brelse(frames[i].bh);
-+              frames[i].bh = NULL;
-+      }
- }
- /*
-@@ -1055,7 +1075,7 @@ int ext4_htree_fill_tree(struct file *dir_file, __u32 start_hash,
- {
-       struct dx_hash_info hinfo;
-       struct ext4_dir_entry_2 *de;
--      struct dx_frame frames[2], *frame;
-+      struct dx_frame frames[EXT4_HTREE_LEVEL], *frame;
-       struct inode *dir;
-       ext4_lblk_t block;
-       int count = 0;
-@@ -1514,7 +1534,7 @@ static struct buffer_head * ext4_dx_find_entry(struct inode *dir,
-                       struct ext4_dir_entry_2 **res_dir)
- {
-       struct super_block * sb = dir->i_sb;
--      struct dx_frame frames[2], *frame;
-+      struct dx_frame frames[EXT4_HTREE_LEVEL], *frame;
-       const struct qstr *d_name = fname->usr_fname;
-       struct buffer_head *bh;
-       ext4_lblk_t block;
-@@ -2002,7 +2022,7 @@ static int make_indexed_dir(handle_t *handle, struct ext4_filename *fname,
- {
-       struct inode    *dir = d_inode(dentry->d_parent);
-       struct buffer_head *bh2;
--      struct dx_frame frames[2], *frame;
-+      struct dx_frame frames[EXT4_HTREE_LEVEL], *frame;
-       struct dx_entry *entries;
-       struct ext4_dir_entry_2 *de, *de2, *dot_de, *dotdot_de;
-       struct ext4_dir_entry_tail *t;
-@@ -2314,14 +2334,17 @@ out:
- static int ext4_dx_add_entry(handle_t *handle, struct ext4_filename *fname,
-                            struct dentry *dentry, struct inode *inode)
- {
--      struct dx_frame frames[2], *frame;
-+      struct dx_frame frames[EXT4_HTREE_LEVEL], *frame;
-       struct dx_entry *entries, *at;
-       struct buffer_head *bh;
-       struct inode *dir = d_inode(dentry->d_parent);
-       struct super_block *sb = dir->i_sb;
-       struct ext4_dir_entry_2 *de;
-+      int restart;
-       int err;
-+again:
-+      restart = 0;
-       frame = dx_probe(fname, dir, NULL, frames);
-       if (IS_ERR(frame))
-               return PTR_ERR(frame);
-@@ -2334,33 +2357,48 @@ static int ext4_dx_add_entry(handle_t *handle, struct ext4_filename *fname,
-               goto cleanup;
-       }
--      BUFFER_TRACE(bh, "get_write_access");
--      err = ext4_journal_get_write_access(handle, bh);
--      if (err)
--              goto journal_error;
--
-       err = add_dirent_to_buf(handle, fname, dir, inode, NULL, bh, dentry);
-       if (err != -ENOSPC)
-               goto cleanup;
-+      err = 0;
-       /* Block full, should compress but for now just split */
-       dxtrace(printk(KERN_DEBUG "using %u of %u node entries\n",
-                      dx_get_count(entries), dx_get_limit(entries)));
-       /* Need to split index? */
-       if (dx_get_count(entries) == dx_get_limit(entries)) {
-               ext4_lblk_t newblock;
--              unsigned icount = dx_get_count(entries);
--              int levels = frame - frames;
-+              int levels = frame - frames + 1;
-+              unsigned icount;
-+              int add_level = 1;
-               struct dx_entry *entries2;
-               struct dx_node *node2;
-               struct buffer_head *bh2;
--              if (levels && (dx_get_count(frames->entries) ==
--                             dx_get_limit(frames->entries))) {
--                      ext4_warning_inode(dir, "Directory index full!");
-+              while (frame > frames) {
-+                      if (dx_get_count((frame - 1)->entries) <
-+                          dx_get_limit((frame - 1)->entries)) {
-+                              add_level = 0;
-+                              break;
-+                      }
-+                      frame--; /* split higher index block */
-+                      at = frame->at;
-+                      entries = frame->entries;
-+                      restart = 1;
-+              }
-+              if (add_level && levels == ext4_dir_htree_level(sb)) {
-+                      ext4_warning(sb, "inode %lu: comm %s: index %u: reach max htree level %u",
-+                                       dir->i_ino, current->comm, levels,
-+                                       ext4_dir_htree_level(sb));
-+                      if (ext4_dir_htree_level(sb) < EXT4_HTREE_LEVEL) {
-+                              ext4_warning(sb, "Large directory feature is"
-+                                               "not enabled on this "
-+                                               "filesystem");
-+                      }
-                       err = -ENOSPC;
-                       goto cleanup;
-               }
-+              icount = dx_get_count(entries);
-               bh2 = ext4_append(handle, dir, &newblock);
-               if (IS_ERR(bh2)) {
-                       err = PTR_ERR(bh2);
-@@ -2375,7 +2413,7 @@ static int ext4_dx_add_entry(handle_t *handle, struct ext4_filename *fname,
-               err = ext4_journal_get_write_access(handle, frame->bh);
-               if (err)
-                       goto journal_error;
--              if (levels) {
-+              if (!add_level) {
-                       unsigned icount1 = icount/2, icount2 = icount - icount1;
-                       unsigned hash2 = dx_get_hash(entries + icount1);
-                       dxtrace(printk(KERN_DEBUG "Split index %i/%i\n",
-@@ -2383,7 +2421,7 @@ static int ext4_dx_add_entry(handle_t *handle, struct ext4_filename *fname,
-                       BUFFER_TRACE(frame->bh, "get_write_access"); /* index root */
-                       err = ext4_journal_get_write_access(handle,
--                                                           frames[0].bh);
-+                                                          (frame - 1)->bh);
-                       if (err)
-                               goto journal_error;
-@@ -2399,19 +2437,27 @@ static int ext4_dx_add_entry(handle_t *handle, struct ext4_filename *fname,
-                               frame->entries = entries = entries2;
-                               swap(frame->bh, bh2);
-                       }
--                      dx_insert_block(frames + 0, hash2, newblock);
--                      dxtrace(dx_show_index("node", frames[1].entries));
-+                      dx_insert_block(frame - 1, hash2, newblock);
-+                      dxtrace(dx_show_index("node", frame->entries));
-                       dxtrace(dx_show_index("node",
--                             ((struct dx_node *) bh2->b_data)->entries));
-+                             ((struct dx_node *)bh2->b_data)->entries));
-                       err = ext4_handle_dirty_dx_node(handle, dir, bh2);
-                       if (err)
-                               goto journal_error;
-                       brelse (bh2);
-+                      err = ext4_handle_dirty_dx_node(handle, dir,
-+                                                 (frame - 1)->bh);
-+                      if (err)
-+                              goto journal_error;
-+                      if (restart) {
-+                              err = ext4_handle_dirty_dx_node(handle, dir,
-+                                                         frame->bh);
-+                              goto journal_error;
-+                      }
-               } else {
-                       struct dx_root_info *info;
--                      dxtrace(printk(KERN_DEBUG
--                                     "Creating second level index...\n"));
--                      memcpy((char *) entries2, (char *) entries,
-+
-+                      memcpy((char *)entries2, (char *)entries,
-                              icount * sizeof(struct dx_entry));
-                       dx_set_limit(entries2, dx_node_limit(dir));
-@@ -2420,22 +2464,17 @@ static int ext4_dx_add_entry(handle_t *handle, struct ext4_filename *fname,
-                       dx_set_block(entries + 0, newblock);
-                       info = dx_get_dx_info((struct ext4_dir_entry_2 *)
-                                             frames[0].bh->b_data);
--                      info->indirect_levels = 1;
--
--                      /* Add new access path frame */
--                      frame = frames + 1;
--                      frame->at = at = at - entries + entries2;
--                      frame->entries = entries = entries2;
--                      frame->bh = bh2;
--                      err = ext4_journal_get_write_access(handle,
--                                                           frame->bh);
-+                      info->indirect_levels += 1;
-+                      dxtrace(printk(KERN_DEBUG
-+                                     "Creating %d level index...\n",
-+                                     info->indirect_levels));
-+                      err = ext4_handle_dirty_dx_node(handle, dir, frame->bh);
-                       if (err)
-                               goto journal_error;
--              }
--              err = ext4_handle_dirty_dx_node(handle, dir, frames[0].bh);
--              if (err) {
--                      ext4_std_error(inode->i_sb, err);
--                      goto cleanup;
-+                      err = ext4_handle_dirty_dx_node(handle, dir, bh2);
-+                      brelse(bh2);
-+                      restart = 1;
-+                      goto journal_error;
-               }
-       }
-       de = do_split(handle, dir, &bh, frame, &fname->hinfo);
-@@ -2447,10 +2488,14 @@ static int ext4_dx_add_entry(handle_t *handle, struct ext4_filename *fname,
-       goto cleanup;
- journal_error:
--      ext4_std_error(dir->i_sb, err);
-+      ext4_std_error(dir->i_sb, err); /* this is a no-op if err == 0 */
- cleanup:
-       brelse(bh);
-       dx_release(frames);
-+      /* @restart is true means htree-path has been changed, we need to
-+       * repeat dx_probe() to find out valid htree-path */
-+      if (restart && err == 0)
-+              goto again;
-       return err;
- }
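For reference, the ext4_dx_add_entry() hunk above handles a full index by growing the htree one level (info->indirect_levels += 1), setting restart, falling through journal_error with err == 0, and jumping back to repeat dx_probe() from the top of the function. Below is a minimal stand-alone sketch of that retry idiom only; do_insert(), its parameters and the depth limit are invented stand-ins for illustration, not ext4 code.

#include <stdio.h>

/* Hypothetical stand-in for the insert step: if the index is "full" it
 * grows the tree by one level and asks the caller to retry from scratch. */
static int do_insert(int *levels, int *restart)
{
        if (*levels < 3) {      /* pretend every level is full until depth 3 */
                (*levels)++;    /* grow the htree ... */
                *restart = 1;   /* ... and request a fresh probe */
                return 0;       /* err == 0, like the restart path above */
        }
        return 0;               /* entry inserted */
}

int main(void)
{
        int levels = 1, restart, err;
again:
        restart = 0;
        err = do_insert(&levels, &restart);
        if (restart && err == 0)
                goto again;     /* htree path changed; repeat the probe */
        printf("inserted at depth %d, err=%d\n", levels, err);
        return err;
}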
diff --git a/ldiskfs/kernel_patches/patches/ubuntu14+16/ext4-large-eas.patch b/ldiskfs/kernel_patches/patches/ubuntu14+16/ext4-large-eas.patch
deleted file mode 100644 (file)
index 9383900..0000000
+++ /dev/null
@@ -1,1068 +0,0 @@
-diff --git a/fs/ext4/ext4.h b/fs/ext4/ext4.h
-index 10a2a86..217fdcc 100644
---- a/fs/ext4/ext4.h
-+++ b/fs/ext4/ext4.h
-@@ -1781,6 +1781,7 @@ EXT4_FEATURE_INCOMPAT_FUNCS(encrypt,             ENCRYPT)
-                                        EXT4_FEATURE_INCOMPAT_EXTENTS| \
-                                        EXT4_FEATURE_INCOMPAT_64BIT| \
-                                        EXT4_FEATURE_INCOMPAT_FLEX_BG| \
-+                                       EXT4_FEATURE_INCOMPAT_EA_INODE| \
-                                        EXT4_FEATURE_INCOMPAT_MMP | \
-                                        EXT4_FEATURE_INCOMPAT_DIRDATA| \
-                                        EXT4_FEATURE_INCOMPAT_INLINE_DATA | \
-@@ -2252,6 +2259,10 @@ struct mmpd_data {
- # define ATTRIB_NORET __attribute__((noreturn))
- # define NORET_AND    noreturn,
-+struct ext4_xattr_ino_array {
-+      unsigned int xia_count;         /* # of used item in the array */
-+      unsigned int xia_inodes[0];
-+};
- /* bitmap.c */
- extern unsigned int ext4_count_free(char *bitmap, unsigned numchars);
- void ext4_inode_bitmap_csum_set(struct super_block *sb, ext4_group_t group,
-@@ -2582,6 +2593,7 @@ extern void ext4_set_inode_flags(struct inode *);
- extern void ext4_get_inode_flags(struct ext4_inode_info *);
- extern int ext4_alloc_da_blocks(struct inode *inode);
- extern void ext4_set_aops(struct inode *inode);
-+extern int ext4_meta_trans_blocks(struct inode *, int nrblocks, int chunk);
- extern int ext4_writepage_trans_blocks(struct inode *);
- extern int ext4_chunk_trans_blocks(struct inode *, int nrblocks);
- extern int ext4_zero_partial_blocks(handle_t *handle, struct inode *inode,
-diff --git a/fs/ext4/ialloc.c b/fs/ext4/ialloc.c
-index 0cccda3..43ca376 100644
---- a/fs/ext4/ialloc.c
-+++ b/fs/ext4/ialloc.c
-@@ -293,7 +293,6 @@ void ext4_free_inode(handle_t *handle, struct inode *inode)
-        * as writing the quota to disk may need the lock as well.
-        */
-       dquot_initialize(inode);
--      ext4_xattr_delete_inode(handle, inode);
-       dquot_free_inode(inode);
-       dquot_drop(inode);
-diff --git a/fs/ext4/inline.c b/fs/ext4/inline.c
-index 53e8a88..5112c5a 100644
---- a/fs/ext4/inline.c
-+++ b/fs/ext4/inline.c
-@@ -61,7 +61,7 @@ static int get_max_inline_xattr_value_size(struct inode *inode,
-       /* Compute min_offs. */
-       for (; !IS_LAST_ENTRY(entry); entry = EXT4_XATTR_NEXT(entry)) {
--              if (!entry->e_value_block && entry->e_value_size) {
-+              if (!entry->e_value_inum && entry->e_value_size) {
-                       size_t offs = le16_to_cpu(entry->e_value_offs);
-                       if (offs < min_offs)
-                               min_offs = offs;
-diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c
-index 04c5f63..3034ceb 100644
---- a/fs/ext4/inode.c
-+++ b/fs/ext4/inode.c
-@@ -139,8 +139,6 @@ static void ext4_invalidatepage(struct page *page, unsigned int offset,
-                               unsigned int length);
- static int __ext4_journalled_writepage(struct page *page, unsigned int len);
- static int ext4_bh_delay_or_unwritten(handle_t *handle, struct buffer_head *bh);
--static int ext4_meta_trans_blocks(struct inode *inode, int lblocks,
--                                int pextents);
- /*
-  * Test whether an inode is a fast symlink.
-@@ -189,6 +187,8 @@ void ext4_evict_inode(struct inode *inode)
- {
-       handle_t *handle;
-       int err;
-+      int extra_credits = 3;
-+      struct ext4_xattr_ino_array *lea_ino_array = NULL;
-       trace_ext4_evict_inode(inode);
-@@ -241,8 +241,8 @@ void ext4_evict_inode(struct inode *inode)
-        * protection against it
-        */
-       sb_start_intwrite(inode->i_sb);
--      handle = ext4_journal_start(inode, EXT4_HT_TRUNCATE,
--                                  ext4_blocks_for_truncate(inode)+3);
-+
-+      handle = ext4_journal_start(inode, EXT4_HT_TRUNCATE, extra_credits);
-       if (IS_ERR(handle)) {
-               ext4_std_error(inode->i_sb, PTR_ERR(handle));
-               /*
-@@ -254,9 +254,36 @@ void ext4_evict_inode(struct inode *inode)
-               sb_end_intwrite(inode->i_sb);
-               goto no_delete;
-       }
--
-       if (IS_SYNC(inode))
-               ext4_handle_sync(handle);
-+
-+      /*
-+       * Delete xattr inode before deleting the main inode.
-+       */
-+      err = ext4_xattr_delete_inode(handle, inode, &lea_ino_array);
-+      if (err) {
-+              ext4_warning(inode->i_sb,
-+                           "couldn't delete inode's xattr (err %d)", err);
-+              goto stop_handle;
-+      }
-+
-+      if (!IS_NOQUOTA(inode))
-+              extra_credits += 2 * EXT4_QUOTA_DEL_BLOCKS(inode->i_sb);
-+
-+      if (!ext4_handle_has_enough_credits(handle,
-+                      ext4_blocks_for_truncate(inode) + extra_credits)) {
-+              err = ext4_journal_extend(handle,
-+                      ext4_blocks_for_truncate(inode) + extra_credits);
-+              if (err > 0)
-+                      err = ext4_journal_restart(handle,
-+                      ext4_blocks_for_truncate(inode) + extra_credits);
-+              if (err != 0) {
-+                      ext4_warning(inode->i_sb,
-+                                   "couldn't extend journal (err %d)", err);
-+                      goto stop_handle;
-+              }
-+      }
-+
-       inode->i_size = 0;
-       err = ext4_mark_inode_dirty(handle, inode);
-       if (err) {
-@@ -273,10 +300,10 @@ void ext4_evict_inode(struct inode *inode)
-        * enough credits left in the handle to remove the inode from
-        * the orphan list and set the dtime field.
-        */
--      if (!ext4_handle_has_enough_credits(handle, 3)) {
--              err = ext4_journal_extend(handle, 3);
-+      if (!ext4_handle_has_enough_credits(handle, extra_credits)) {
-+              err = ext4_journal_extend(handle, extra_credits);
-               if (err > 0)
--                      err = ext4_journal_restart(handle, 3);
-+                      err = ext4_journal_restart(handle, extra_credits);
-               if (err != 0) {
-                       ext4_warning(inode->i_sb,
-                                    "couldn't extend journal (err %d)", err);
-@@ -311,8 +338,12 @@ void ext4_evict_inode(struct inode *inode)
-               ext4_clear_inode(inode);
-       else
-               ext4_free_inode(handle, inode);
-+
-       ext4_journal_stop(handle);
-       sb_end_intwrite(inode->i_sb);
-+
-+      if (lea_ino_array != NULL)
-+              ext4_xattr_inode_array_free(inode, lea_ino_array);
-       return;
- no_delete:
-       ext4_clear_inode(inode);        /* We must guarantee clearing of inode... */
-@@ -5008,7 +5039,7 @@ static int ext4_index_trans_blocks(struct inode *inode, int lblocks,
-  *
-  * Also account for superblock, inode, quota and xattr blocks
-  */
--static int ext4_meta_trans_blocks(struct inode *inode, int lblocks,
-+int ext4_meta_trans_blocks(struct inode *inode, int lblocks,
-                                 int pextents)
- {
-       ext4_group_t groups, ngroups = ext4_get_groups_count(inode->i_sb);
-diff --git a/fs/ext4/xattr.c b/fs/ext4/xattr.c
-index 263002f..b69145b 100644
---- a/fs/ext4/xattr.c
-+++ b/fs/ext4/xattr.c
-@@ -202,6 +202,7 @@ ext4_xattr_check_names(struct ext4_xattr_entry *entry, void *end,
-       while (!IS_LAST_ENTRY(entry)) {
-               if (entry->e_value_size != 0 &&
-+                  entry->e_value_inum == 0 &&
-                   (value_start + le16_to_cpu(entry->e_value_offs) <
-                    (void *)e + sizeof(__u32) ||
-                    value_start + le16_to_cpu(entry->e_value_offs) +
-@@ -234,19 +235,26 @@ ext4_xattr_check_block(struct inode *inode, struct buffer_head *bh)
- }
- static inline int
--ext4_xattr_check_entry(struct ext4_xattr_entry *entry, size_t size)
-+ext4_xattr_check_entry(struct ext4_xattr_entry *entry, size_t size,
-+                     struct inode *inode)
- {
-       size_t value_size = le32_to_cpu(entry->e_value_size);
--      if (entry->e_value_block != 0 || value_size > size ||
-+      if (!entry->e_value_inum &&
-           le16_to_cpu(entry->e_value_offs) + value_size > size)
-               return -EFSCORRUPTED;
-+      if (entry->e_value_inum &&
-+          (le32_to_cpu(entry->e_value_inum) < EXT4_FIRST_INO(inode->i_sb) ||
-+           le32_to_cpu(entry->e_value_inum) >
-+           le32_to_cpu(EXT4_SB(inode->i_sb)->s_es->s_inodes_count)))
-+              return -EFSCORRUPTED;
-       return 0;
- }
- static int
- ext4_xattr_find_entry(struct ext4_xattr_entry **pentry, int name_index,
--                    const char *name, size_t size, int sorted)
-+                    const char *name, size_t size, int sorted,
-+                    struct inode *inode)
- {
-       struct ext4_xattr_entry *entry;
-       size_t name_len;
-@@ -266,11 +274,104 @@ ext4_xattr_find_entry(struct ext4_xattr_entry **pentry, int name_index,
-                       break;
-       }
-       *pentry = entry;
--      if (!cmp && ext4_xattr_check_entry(entry, size))
-+      if (!cmp && ext4_xattr_check_entry(entry, size, inode))
-               return -EFSCORRUPTED;
-       return cmp ? -ENODATA : 0;
- }
-+/*
-+ * Read the EA value from an inode.
-+ */
-+static int
-+ext4_xattr_inode_read(struct inode *ea_inode, void *buf, size_t *size)
-+{
-+      unsigned long block = 0;
-+      struct buffer_head *bh = NULL;
-+      int blocksize;
-+      size_t csize, ret_size = 0;
-+
-+      if (*size == 0)
-+              return 0;
-+
-+      blocksize = ea_inode->i_sb->s_blocksize;
-+
-+      while (ret_size < *size) {
-+              csize = (*size - ret_size) > blocksize ? blocksize :
-+                                                      *size - ret_size;
-+              bh = ext4_bread(NULL, ea_inode, block, 0);
-+              if (IS_ERR(bh)) {
-+                      *size = ret_size;
-+                      return PTR_ERR(bh);
-+              }
-+              memcpy(buf, bh->b_data, csize);
-+              brelse(bh);
-+
-+              buf += csize;
-+              block += 1;
-+              ret_size += csize;
-+      }
-+
-+      *size = ret_size;
-+
-+      return 0;
-+}
-+
-+struct inode *ext4_xattr_inode_iget(struct inode *parent, unsigned long ea_ino, int *err)
-+{
-+      struct inode *ea_inode = NULL;
-+
-+      ea_inode = ext4_iget(parent->i_sb, ea_ino);
-+      if (IS_ERR(ea_inode) || is_bad_inode(ea_inode)) {
-+              int rc = IS_ERR(ea_inode) ? PTR_ERR(ea_inode) : 0;
-+              ext4_error(parent->i_sb, "error while reading EA inode %lu "
-+                         "/ %d %d", ea_ino, rc, is_bad_inode(ea_inode));
-+              *err = rc != 0 ? rc : -EIO;
-+              return NULL;
-+      }
-+
-+      if (EXT4_XATTR_INODE_GET_PARENT(ea_inode) != parent->i_ino ||
-+          ea_inode->i_generation != parent->i_generation) {
-+              ext4_error(parent->i_sb, "Backpointer from EA inode %lu "
-+                         "to parent invalid.", ea_ino);
-+              *err = -EINVAL;
-+              goto error;
-+      }
-+
-+      if (!(EXT4_I(ea_inode)->i_flags & EXT4_EA_INODE_FL)) {
-+              ext4_error(parent->i_sb, "EA inode %lu does not have "
-+                         "EXT4_EA_INODE_FL flag set.\n", ea_ino);
-+              *err = -EINVAL;
-+              goto error;
-+      }
-+
-+      *err = 0;
-+      return ea_inode;
-+
-+error:
-+      iput(ea_inode);
-+      return NULL;
-+}
-+
-+/*
-+ * Read the value from the EA inode.
-+ */
-+static int
-+ext4_xattr_inode_get(struct inode *inode, unsigned long ea_ino, void *buffer,
-+                   size_t *size)
-+{
-+      struct inode *ea_inode = NULL;
-+      int err;
-+
-+      ea_inode = ext4_xattr_inode_iget(inode, ea_ino, &err);
-+      if (err)
-+              return err;
-+
-+      err = ext4_xattr_inode_read(ea_inode, buffer, size);
-+      iput(ea_inode);
-+
-+      return err;
-+}
-+
- static int
- ext4_xattr_block_get(struct inode *inode, int name_index, const char *name,
-                    void *buffer, size_t buffer_size)
-@@ -303,7 +404,8 @@ bad_block:
-       }
-       ext4_xattr_cache_insert(ext4_mb_cache, bh);
-       entry = BFIRST(bh);
--      error = ext4_xattr_find_entry(&entry, name_index, name, bh->b_size, 1);
-+      error = ext4_xattr_find_entry(&entry, name_index, name, bh->b_size, 1,
-+                                    inode);
-       if (error == -EFSCORRUPTED)
-               goto bad_block;
-       if (error)
-@@ -313,8 +415,16 @@ bad_block:
-               error = -ERANGE;
-               if (size > buffer_size)
-                       goto cleanup;
--              memcpy(buffer, bh->b_data + le16_to_cpu(entry->e_value_offs),
--                     size);
-+              if (entry->e_value_inum) {
-+                      error = ext4_xattr_inode_get(inode,
-+                                           le32_to_cpu(entry->e_value_inum),
-+                                           buffer, &size);
-+                      if (error)
-+                              goto cleanup;
-+              } else {
-+                      memcpy(buffer, bh->b_data +
-+                             le16_to_cpu(entry->e_value_offs), size);
-+              }
-       }
-       error = size;
-@@ -348,7 +458,7 @@ ext4_xattr_ibody_get(struct inode *inode, int name_index, const char *name,
-       if (error)
-               goto cleanup;
-       error = ext4_xattr_find_entry(&entry, name_index, name,
--                                    end - (void *)entry, 0);
-+                                    end - (void *)entry, 0, inode);
-       if (error)
-               goto cleanup;
-       size = le32_to_cpu(entry->e_value_size);
-@@ -356,8 +466,16 @@ ext4_xattr_ibody_get(struct inode *inode, int name_index, const char *name,
-               error = -ERANGE;
-               if (size > buffer_size)
-                       goto cleanup;
--              memcpy(buffer, (void *)IFIRST(header) +
--                     le16_to_cpu(entry->e_value_offs), size);
-+              if (entry->e_value_inum) {
-+                      error = ext4_xattr_inode_get(inode,
-+                                           le32_to_cpu(entry->e_value_inum),
-+                                           buffer, &size);
-+                      if (error)
-+                              goto cleanup;
-+              } else {
-+                      memcpy(buffer, (void *)IFIRST(header) +
-+                             le16_to_cpu(entry->e_value_offs), size);
-+              }
-       }
-       error = size;
-@@ -603,7 +721,7 @@ static size_t ext4_xattr_free_space(struct ext4_xattr_entry *last,
-                                   size_t *min_offs, void *base, int *total)
- {
-       for (; !IS_LAST_ENTRY(last); last = EXT4_XATTR_NEXT(last)) {
--              if (!last->e_value_block && last->e_value_size) {
-+              if (!last->e_value_inum && last->e_value_size) {
-                       size_t offs = le16_to_cpu(last->e_value_offs);
-                       if (offs < *min_offs)
-                               *min_offs = offs;
-@@ -614,16 +732,176 @@ static size_t ext4_xattr_free_space(struct ext4_xattr_entry *last,
-       return (*min_offs - ((void *)last - base) - sizeof(__u32));
- }
-+/*
-+ * Write the value of the EA in an inode.
-+ */
- static int
--ext4_xattr_set_entry(struct ext4_xattr_info *i, struct ext4_xattr_search *s)
-+ext4_xattr_inode_write(handle_t *handle, struct inode *ea_inode,
-+                     const void *buf, int bufsize)
-+{
-+      struct buffer_head *bh = NULL;
-+      unsigned long block = 0;
-+      unsigned blocksize = ea_inode->i_sb->s_blocksize;
-+      unsigned max_blocks = (bufsize + blocksize - 1) >> ea_inode->i_blkbits;
-+      int csize, wsize = 0;
-+      int ret = 0;
-+      int retries = 0;
-+
-+retry:
-+      while (ret >= 0 && ret < max_blocks) {
-+              struct ext4_map_blocks map;
-+              map.m_lblk = block += ret;
-+              map.m_len = max_blocks -= ret;
-+
-+              ret = ext4_map_blocks(handle, ea_inode, &map,
-+                                    EXT4_GET_BLOCKS_CREATE);
-+              if (ret <= 0) {
-+                      ext4_mark_inode_dirty(handle, ea_inode);
-+                      if (ret == -ENOSPC &&
-+                          ext4_should_retry_alloc(ea_inode->i_sb, &retries)) {
-+                              ret = 0;
-+                              goto retry;
-+                      }
-+                      break;
-+              }
-+      }
-+
-+      if (ret < 0)
-+              return ret;
-+
-+      block = 0;
-+      while (wsize < bufsize) {
-+              if (bh != NULL)
-+                      brelse(bh);
-+              csize = (bufsize - wsize) > blocksize ? blocksize :
-+                                                              bufsize - wsize;
-+              bh = ext4_getblk(handle, ea_inode, block, 0);
-+              if (IS_ERR(bh)) {
-+                      ret = PTR_ERR(bh);
-+                      goto out;
-+              }
-+              ret = ext4_journal_get_write_access(handle, bh);
-+              if (ret)
-+                      goto out;
-+
-+              memcpy(bh->b_data, buf, csize);
-+              set_buffer_uptodate(bh);
-+              ext4_handle_dirty_metadata(handle, ea_inode, bh);
-+
-+              buf += csize;
-+              wsize += csize;
-+              block += 1;
-+      }
-+
-+      mutex_lock(&ea_inode->i_mutex);
-+      i_size_write(ea_inode, wsize);
-+      ext4_update_i_disksize(ea_inode, wsize);
-+      mutex_unlock(&ea_inode->i_mutex);
-+
-+      ext4_mark_inode_dirty(handle, ea_inode);
-+
-+out:
-+      brelse(bh);
-+
-+      return ret;
-+}
-+
-+/*
-+ * Create an inode to store the value of a large EA.
-+ */
-+static struct inode *
-+ext4_xattr_inode_create(handle_t *handle, struct inode *inode)
-+{
-+      struct inode *ea_inode = NULL;
-+
-+      /*
-+       * Let the next inode be the goal, so we try and allocate the EA inode
-+       * in the same group, or nearby one.
-+       */
-+      ea_inode = ext4_new_inode(handle, inode->i_sb->s_root->d_inode,
-+                                S_IFREG|0600, NULL, inode->i_ino + 1, NULL);
-+
-+      if (!IS_ERR(ea_inode)) {
-+              ea_inode->i_op = &ext4_file_inode_operations;
-+              ea_inode->i_fop = &ext4_file_operations;
-+              ext4_set_aops(ea_inode);
-+              ea_inode->i_generation = inode->i_generation;
-+              EXT4_I(ea_inode)->i_flags |= EXT4_EA_INODE_FL;
-+
-+              /*
-+               * A back-pointer from EA inode to parent inode will be useful
-+               * for e2fsck.
-+               */
-+              EXT4_XATTR_INODE_SET_PARENT(ea_inode, inode->i_ino);
-+              unlock_new_inode(ea_inode);
-+      }
-+
-+      return ea_inode;
-+}
-+
-+/*
-+ * Unlink the inode storing the value of the EA.
-+ */
-+int
-+ext4_xattr_inode_unlink(struct inode *inode, unsigned long ea_ino)
-+{
-+      struct inode *ea_inode = NULL;
-+      int err;
-+
-+      ea_inode = ext4_xattr_inode_iget(inode, ea_ino, &err);
-+      if (err)
-+              return err;
-+
-+      clear_nlink(ea_inode);
-+      iput(ea_inode);
-+
-+      return 0;
-+}
-+
-+/*
-+ * Add value of the EA in an inode.
-+ */
-+static int
-+ext4_xattr_inode_set(handle_t *handle, struct inode *inode, unsigned long *ea_ino,
-+                   const void *value, size_t value_len)
-+{
-+      struct inode *ea_inode = NULL;
-+      int err;
-+
-+      /* Create an inode for the EA value */
-+      ea_inode = ext4_xattr_inode_create(handle, inode);
-+      if (IS_ERR(ea_inode))
-+              return -1;
-+
-+      err = ext4_xattr_inode_write(handle, ea_inode, value, value_len);
-+      if (err)
-+              clear_nlink(ea_inode);
-+      else
-+              *ea_ino = ea_inode->i_ino;
-+
-+      iput(ea_inode);
-+
-+      return err;
-+}
-+
-+static int
-+ext4_xattr_set_entry(struct ext4_xattr_info *i, struct ext4_xattr_search *s,
-+                   handle_t *handle, struct inode *inode)
- {
-       struct ext4_xattr_entry *last;
-       size_t free, min_offs = s->end - s->base, name_len = strlen(i->name);
-+      int in_inode = i->in_inode;
-+
-+      if (EXT4_HAS_INCOMPAT_FEATURE(inode->i_sb,
-+               EXT4_FEATURE_INCOMPAT_EA_INODE) &&
-+          (EXT4_XATTR_SIZE(i->value_len) >
-+           EXT4_XATTR_MIN_LARGE_EA_SIZE(inode->i_sb->s_blocksize)))
-+              in_inode = 1;
-       /* Compute min_offs and last. */
-       last = s->first;
-       for (; !IS_LAST_ENTRY(last); last = EXT4_XATTR_NEXT(last)) {
--              if (!last->e_value_block && last->e_value_size) {
-+              if (!last->e_value_inum && last->e_value_size) {
-                       size_t offs = le16_to_cpu(last->e_value_offs);
-                       if (offs < min_offs)
-                               min_offs = offs;
-@@ -631,15 +909,20 @@ ext4_xattr_set_entry(struct ext4_xattr_info *i, struct ext4_xattr_search *s)
-       }
-       free = min_offs - ((void *)last - s->base) - sizeof(__u32);
-       if (!s->not_found) {
--              if (!s->here->e_value_block && s->here->e_value_size) {
-+              if (!in_inode &&
-+                  !s->here->e_value_inum && s->here->e_value_size) {
-                       size_t size = le32_to_cpu(s->here->e_value_size);
-                       free += EXT4_XATTR_SIZE(size);
-               }
-               free += EXT4_XATTR_LEN(name_len);
-       }
-       if (i->value) {
--              if (free < EXT4_XATTR_LEN(name_len) +
--                         EXT4_XATTR_SIZE(i->value_len))
-+              size_t value_len = EXT4_XATTR_SIZE(i->value_len);
-+
-+              if (in_inode)
-+                      value_len = 0;
-+
-+              if (free < EXT4_XATTR_LEN(name_len) + value_len)
-                       return -ENOSPC;
-       }
-@@ -653,7 +936,8 @@ ext4_xattr_set_entry(struct ext4_xattr_info *i, struct ext4_xattr_search *s)
-               s->here->e_name_len = name_len;
-               memcpy(s->here->e_name, i->name, name_len);
-       } else {
--              if (!s->here->e_value_block && s->here->e_value_size) {
-+              if (!s->here->e_value_inum && s->here->e_value_size &&
-+                  s->here->e_value_offs > 0) {
-                       void *first_val = s->base + min_offs;
-                       size_t offs = le16_to_cpu(s->here->e_value_offs);
-                       void *val = s->base + offs;
-@@ -687,13 +971,18 @@ ext4_xattr_set_entry(struct ext4_xattr_info *i, struct ext4_xattr_search *s)
-                       last = s->first;
-                       while (!IS_LAST_ENTRY(last)) {
-                               size_t o = le16_to_cpu(last->e_value_offs);
--                              if (!last->e_value_block &&
-+                              if (!last->e_value_inum &&
-                                   last->e_value_size && o < offs)
-                                       last->e_value_offs =
-                                               cpu_to_le16(o + size);
-                               last = EXT4_XATTR_NEXT(last);
-                       }
-               }
-+              if (s->here->e_value_inum) {
-+                      ext4_xattr_inode_unlink(inode,
-+                                      le32_to_cpu(s->here->e_value_inum));
-+                      s->here->e_value_inum = 0;
-+              }
-               if (!i->value) {
-                       /* Remove the old name. */
-                       size_t size = EXT4_XATTR_LEN(name_len);
-@@ -707,10 +996,17 @@ ext4_xattr_set_entry(struct ext4_xattr_info *i, struct ext4_xattr_search *s)
-       if (i->value) {
-               /* Insert the new value. */
-               s->here->e_value_size = cpu_to_le32(i->value_len);
--              if (i->value_len) {
-+              if (in_inode) {
-+                      unsigned long ea_ino = le32_to_cpu(s->here->e_value_inum);
-+                      ext4_xattr_inode_set(handle, inode, &ea_ino, i->value,
-+                                           i->value_len);
-+                      s->here->e_value_inum = cpu_to_le32(ea_ino);
-+                      s->here->e_value_offs = 0;
-+              } else if (i->value_len) {
-                       size_t size = EXT4_XATTR_SIZE(i->value_len);
-                       void *val = s->base + min_offs - size;
-                       s->here->e_value_offs = cpu_to_le16(min_offs - size);
-+                      s->here->e_value_inum = 0;
-                       if (i->value == EXT4_ZERO_XATTR_VALUE) {
-                               memset(val, 0, size);
-                       } else {
-@@ -760,7 +1056,7 @@ ext4_xattr_block_find(struct inode *inode, struct ext4_xattr_info *i,
-               bs->s.end = bs->bh->b_data + bs->bh->b_size;
-               bs->s.here = bs->s.first;
-               error = ext4_xattr_find_entry(&bs->s.here, i->name_index,
--                                            i->name, bs->bh->b_size, 1);
-+                                           i->name, bs->bh->b_size, 1, inode);
-               if (error && error != -ENODATA)
-                       goto cleanup;
-               bs->s.not_found = error;
-@@ -785,8 +1081,6 @@ ext4_xattr_block_set(handle_t *handle, struct inode *inode,
- #define header(x) ((struct ext4_xattr_header *)(x))
--      if (i->value && i->value_len > sb->s_blocksize)
--              return -ENOSPC;
-       if (s->base) {
-               ce = mb_cache_entry_get(ext4_mb_cache, bs->bh->b_bdev,
-                                       bs->bh->b_blocknr);
-@@ -802,7 +1096,7 @@ ext4_xattr_block_set(handle_t *handle, struct inode *inode,
-                               ce = NULL;
-                       }
-                       ea_bdebug(bs->bh, "modifying in-place");
--                      error = ext4_xattr_set_entry(i, s);
-+                      error = ext4_xattr_set_entry(i, s, handle, inode);
-                       if (!error) {
-                               if (!IS_LAST_ENTRY(s->first))
-                                       ext4_xattr_rehash(header(s->base),
-@@ -854,7 +1148,7 @@ ext4_xattr_block_set(handle_t *handle, struct inode *inode,
-               s->end = s->base + sb->s_blocksize;
-       }
--      error = ext4_xattr_set_entry(i, s);
-+      error = ext4_xattr_set_entry(i, s, handle, inode);
-       if (error == -EFSCORRUPTED)
-               goto bad_block;
-       if (error)
-@@ -998,7 +1292,7 @@ int ext4_xattr_ibody_find(struct inode *inode, struct ext4_xattr_info *i,
-               /* Find the named attribute. */
-               error = ext4_xattr_find_entry(&is->s.here, i->name_index,
-                                             i->name, is->s.end -
--                                            (void *)is->s.base, 0);
-+                                            (void *)is->s.base, 0, inode);
-               if (error && error != -ENODATA)
-                       return error;
-               is->s.not_found = error;
-@@ -1016,7 +1310,7 @@ int ext4_xattr_ibody_inline_set(handle_t *handle, struct inode *inode,
-       if (EXT4_I(inode)->i_extra_isize == 0)
-               return -ENOSPC;
--      error = ext4_xattr_set_entry(i, s);
-+      error = ext4_xattr_set_entry(i, s, handle, inode);
-       if (error) {
-               if (error == -ENOSPC &&
-                   ext4_has_inline_data(inode)) {
-@@ -1028,7 +1322,7 @@ int ext4_xattr_ibody_inline_set(handle_t *handle, struct inode *inode,
-                       error = ext4_xattr_ibody_find(inode, i, is);
-                       if (error)
-                               return error;
--                      error = ext4_xattr_set_entry(i, s);
-+                      error = ext4_xattr_set_entry(i, s, handle, inode);
-               }
-               if (error)
-                       return error;
-@@ -1054,7 +1348,7 @@ static int ext4_xattr_ibody_set(handle_t *handle, struct inode *inode,
-       if (EXT4_I(inode)->i_extra_isize == 0)
-               return -ENOSPC;
--      error = ext4_xattr_set_entry(i, s);
-+      error = ext4_xattr_set_entry(i, s, handle, inode);
-       if (error)
-               return error;
-       header = IHDR(inode, ext4_raw_inode(&is->iloc));
-@@ -1090,7 +1384,7 @@ ext4_xattr_set_handle(handle_t *handle, struct inode *inode, int name_index,
-               .name = name,
-               .value = value,
-               .value_len = value_len,
--
-+              .in_inode = 0,
-       };
-       struct ext4_xattr_ibody_find is = {
-               .s = { .not_found = -ENODATA, },
-@@ -1155,6 +1449,15 @@ ext4_xattr_set_handle(handle_t *handle, struct inode *inode, int name_index,
-                                       goto cleanup;
-                       }
-                       error = ext4_xattr_block_set(handle, inode, &i, &bs);
-+                      if (EXT4_HAS_INCOMPAT_FEATURE(inode->i_sb,
-+                                      EXT4_FEATURE_INCOMPAT_EA_INODE) &&
-+                          error == -ENOSPC) {
-+                              /* xattr not fit to block, store at external
-+                               * inode */
-+                              i.in_inode = 1;
-+                              error = ext4_xattr_ibody_set(handle, inode,
-+                                                           &i, &is);
-+                      }
-                       if (error)
-                               goto cleanup;
-                       if (!is.s.not_found) {
-@@ -1201,9 +1504,22 @@ ext4_xattr_set(struct inode *inode, int name_index, const char *name,
-              const void *value, size_t value_len, int flags)
- {
-       handle_t *handle;
-+      struct super_block *sb = inode->i_sb;
-       int error, retries = 0;
-       int credits = ext4_jbd2_credits_xattr(inode);
-+      if ((value_len >= EXT4_XATTR_MIN_LARGE_EA_SIZE(sb->s_blocksize)) &&
-+          EXT4_HAS_INCOMPAT_FEATURE(sb, EXT4_FEATURE_INCOMPAT_EA_INODE)) {
-+              int nrblocks = (value_len + sb->s_blocksize - 1) >>
-+                                      sb->s_blocksize_bits;
-+
-+              /* For new inode */
-+              credits += EXT4_SINGLEDATA_TRANS_BLOCKS(sb) + 3;
-+
-+              /* For data blocks of EA inode */
-+              credits += ext4_meta_trans_blocks(inode, nrblocks, 0);
-+      }
-+
- retry:
-       handle = ext4_journal_start(inode, EXT4_HT_XATTR, credits);
-       if (IS_ERR(handle)) {
-@@ -1215,7 +1531,7 @@ retry:
-                                             value, value_len, flags);
-               error2 = ext4_journal_stop(handle);
-               if (error == -ENOSPC &&
--                  ext4_should_retry_alloc(inode->i_sb, &retries))
-+                  ext4_should_retry_alloc(sb, &retries))
-                       goto retry;
-               if (error == 0)
-                       error = error2;
-@@ -1237,7 +1553,7 @@ static void ext4_xattr_shift_entries(struct ext4_xattr_entry *entry,
-       /* Adjust the value offsets of the entries */
-       for (; !IS_LAST_ENTRY(last); last = EXT4_XATTR_NEXT(last)) {
--              if (!last->e_value_block && last->e_value_size) {
-+              if (!last->e_value_inum && last->e_value_size) {
-                       new_offs = le16_to_cpu(last->e_value_offs) +
-                                                       value_offs_shift;
-                       BUG_ON(new_offs + le32_to_cpu(last->e_value_size)
-@@ -1484,21 +1800,135 @@ cleanup:
- }
-+#define EIA_INCR 16 /* must be 2^n */
-+#define EIA_MASK (EIA_INCR - 1)
-+/* Add the large xattr @ino into @lea_ino_array for later deletion.
-+ * If @lea_ino_array is new or full it will be grown and the old
-+ * contents copied over.
-+ */
-+static int
-+ext4_expand_ino_array(struct ext4_xattr_ino_array **lea_ino_array, __u32 ino)
-+{
-+      if (*lea_ino_array == NULL) {
-+              /*
-+               * Start with 15 inodes, so it fits into a power-of-two size.
-+               * If *lea_ino_array is NULL, this is essentially offsetof()
-+               */
-+              (*lea_ino_array) =
-+                      kmalloc(offsetof(struct ext4_xattr_ino_array,
-+                                       xia_inodes[EIA_MASK]),
-+                              GFP_NOFS);
-+              if (*lea_ino_array == NULL)
-+                      return -ENOMEM;
-+              (*lea_ino_array)->xia_count = 0;
-+      } else if (((*lea_ino_array)->xia_count & EIA_MASK) == EIA_MASK) {
-+              /* expand the array once all 15 + n * 16 slots are full */
-+              struct ext4_xattr_ino_array *new_array = NULL;
-+              int count = (*lea_ino_array)->xia_count;
-+
-+              /* if new_array is NULL, this is essentially offsetof() */
-+              new_array = kmalloc(
-+                              offsetof(struct ext4_xattr_ino_array,
-+                                       xia_inodes[count + EIA_INCR]),
-+                              GFP_NOFS);
-+              if (new_array == NULL)
-+                      return -ENOMEM;
-+              memcpy(new_array, *lea_ino_array,
-+                     offsetof(struct ext4_xattr_ino_array,
-+                              xia_inodes[count]));
-+              kfree(*lea_ino_array);
-+              *lea_ino_array = new_array;
-+      }
-+      (*lea_ino_array)->xia_inodes[(*lea_ino_array)->xia_count++] = ino;
-+      return 0;
-+}
-+
-+/**
-+ * Add xattr inode to orphan list
-+ */
-+static int
-+ext4_xattr_inode_orphan_add(handle_t *handle, struct inode *inode,
-+                      int credits, struct ext4_xattr_ino_array *lea_ino_array)
-+{
-+      struct inode *ea_inode = NULL;
-+      int idx = 0, error = 0;
-+
-+      if (lea_ino_array == NULL)
-+              return 0;
-+
-+      for (; idx < lea_ino_array->xia_count; ++idx) {
-+              if (!ext4_handle_has_enough_credits(handle, credits)) {
-+                      error = ext4_journal_extend(handle, credits);
-+                      if (error > 0)
-+                              error = ext4_journal_restart(handle, credits);
-+
-+                      if (error != 0) {
-+                              ext4_warning(inode->i_sb,
-+                                      "couldn't extend journal "
-+                                      "(err %d)", error);
-+                              return error;
-+                      }
-+              }
-+              ea_inode = ext4_xattr_inode_iget(inode,
-+                              lea_ino_array->xia_inodes[idx], &error);
-+              if (error)
-+                      continue;
-+              ext4_orphan_add(handle, ea_inode);
-+              /* the inode's i_count will be released by caller */
-+      }
-+
-+      return 0;
-+}
- /*
-  * ext4_xattr_delete_inode()
-  *
-- * Free extended attribute resources associated with this inode. This
-+ * Free extended attribute resources associated with this inode. Traverse
-+ * all entries and unlink any xattr inodes associated with this inode. This
-  * is called immediately before an inode is freed. We have exclusive
-- * access to the inode.
-+ * access to the inode. If an orphan inode is deleted it will also delete any
-+ * xattr block and all xattr inodes. They are checked by ext4_xattr_inode_iget()
-+ * to ensure they belong to the parent inode and were not deleted already.
-  */
--void
--ext4_xattr_delete_inode(handle_t *handle, struct inode *inode)
-+int
-+ext4_xattr_delete_inode(handle_t *handle, struct inode *inode,
-+                      struct ext4_xattr_ino_array **lea_ino_array)
- {
-       struct buffer_head *bh = NULL;
-+      struct ext4_xattr_ibody_header *header;
-+      struct ext4_inode *raw_inode;
-+      struct ext4_iloc iloc;
-+      struct ext4_xattr_entry *entry;
-+      int credits = 3, error = 0;
--      if (!EXT4_I(inode)->i_file_acl)
-+      if (!ext4_test_inode_state(inode, EXT4_STATE_XATTR))
-+              goto delete_external_ea;
-+
-+      error = ext4_get_inode_loc(inode, &iloc);
-+      if (error)
-+              goto cleanup;
-+      raw_inode = ext4_raw_inode(&iloc);
-+      header = IHDR(inode, raw_inode);
-+      for (entry = IFIRST(header); !IS_LAST_ENTRY(entry);
-+           entry = EXT4_XATTR_NEXT(entry)) {
-+              if (!entry->e_value_inum)
-+                      continue;
-+              if (ext4_expand_ino_array(lea_ino_array,
-+                                        entry->e_value_inum) != 0) {
-+                      brelse(iloc.bh);
-+                      goto cleanup;
-+              }
-+              entry->e_value_inum = 0;
-+      }
-+      brelse(iloc.bh);
-+
-+delete_external_ea:
-+      if (!EXT4_I(inode)->i_file_acl) {
-+              /* add xattr inode to orphan list */
-+              ext4_xattr_inode_orphan_add(handle, inode, credits,
-+                                              *lea_ino_array);
-               goto cleanup;
-+      }
-       bh = sb_bread(inode->i_sb, EXT4_I(inode)->i_file_acl);
-       if (!bh) {
-               EXT4_ERROR_INODE(inode, "block %llu read error",
-@@ -1511,11 +1941,69 @@ ext4_xattr_delete_inode(handle_t *handle, struct inode *inode)
-                                EXT4_I(inode)->i_file_acl);
-               goto cleanup;
-       }
-+
-+      for (entry = BFIRST(bh); !IS_LAST_ENTRY(entry);
-+           entry = EXT4_XATTR_NEXT(entry)) {
-+              if (!entry->e_value_inum)
-+                      continue;
-+              if (ext4_expand_ino_array(lea_ino_array,
-+                                        entry->e_value_inum) != 0)
-+                      goto cleanup;
-+              entry->e_value_inum = 0;
-+      }
-+
-+      /* add xattr inode to orphan list */
-+      error = ext4_xattr_inode_orphan_add(handle, inode, credits,
-+                                      *lea_ino_array);
-+      if (error != 0)
-+              goto cleanup;
-+
-+      if (!IS_NOQUOTA(inode))
-+              credits += 2 * EXT4_QUOTA_DEL_BLOCKS(inode->i_sb);
-+
-+      if (!ext4_handle_has_enough_credits(handle, credits)) {
-+              error = ext4_journal_extend(handle, credits);
-+              if (error > 0)
-+                      error = ext4_journal_restart(handle, credits);
-+              if (error != 0) {
-+                      ext4_warning(inode->i_sb,
-+                              "couldn't extend journal (err %d)", error);
-+                      goto cleanup;
-+              }
-+      }
-+
-       ext4_xattr_release_block(handle, inode, bh);
-       EXT4_I(inode)->i_file_acl = 0;
- cleanup:
-       brelse(bh);
-+
-+      return error;
-+}
-+
-+void
-+ext4_xattr_inode_array_free(struct inode *inode,
-+                          struct ext4_xattr_ino_array *lea_ino_array)
-+{
-+      struct inode    *ea_inode = NULL;
-+      int             idx = 0;
-+      int             err;
-+
-+      if (lea_ino_array == NULL)
-+              return;
-+
-+      for (; idx < lea_ino_array->xia_count; ++idx) {
-+              ea_inode = ext4_xattr_inode_iget(inode,
-+                              lea_ino_array->xia_inodes[idx], &err);
-+              if (err)
-+                      continue;
-+              /* for inode's i_count get from ext4_xattr_delete_inode */
-+              if (!list_empty(&EXT4_I(ea_inode)->i_orphan))
-+                      iput(ea_inode);
-+              clear_nlink(ea_inode);
-+              iput(ea_inode);
-+      }
-+      kfree(lea_ino_array);
- }
- /*
-@@ -1585,10 +2073,9 @@ ext4_xattr_cmp(struct ext4_xattr_header *header1,
-                   entry1->e_name_index != entry2->e_name_index ||
-                   entry1->e_name_len != entry2->e_name_len ||
-                   entry1->e_value_size != entry2->e_value_size ||
-+                  entry1->e_value_inum != entry2->e_value_inum ||
-                   memcmp(entry1->e_name, entry2->e_name, entry1->e_name_len))
-                       return 1;
--              if (entry1->e_value_block != 0 || entry2->e_value_block != 0)
--                      return -EFSCORRUPTED;
-               if (memcmp((char *)header1 + le16_to_cpu(entry1->e_value_offs),
-                          (char *)header2 + le16_to_cpu(entry2->e_value_offs),
-                          le32_to_cpu(entry1->e_value_size)))
-@@ -1673,7 +2160,7 @@ static inline void ext4_xattr_hash_entry(struct ext4_xattr_header *header,
-                      *name++;
-       }
--      if (entry->e_value_block == 0 && entry->e_value_size != 0) {
-+      if (!entry->e_value_inum && entry->e_value_size) {
-               __le32 *value = (__le32 *)((char *)header +
-                       le16_to_cpu(entry->e_value_offs));
-               for (n = (le32_to_cpu(entry->e_value_size) +
-diff --git a/fs/ext4/xattr.h b/fs/ext4/xattr.h
-index ddc0957..57c7ad5 100644
---- a/fs/ext4/xattr.h
-+++ b/fs/ext4/xattr.h
-@@ -43,7 +43,7 @@ struct ext4_xattr_entry {
-       __u8    e_name_len;     /* length of name */
-       __u8    e_name_index;   /* attribute name index */
-       __le16  e_value_offs;   /* offset in disk block of value */
--      __le32  e_value_block;  /* disk block attribute is stored on (n/i) */
-+      __le32  e_value_inum;   /* inode in which the value is stored */
-       __le32  e_value_size;   /* size of attribute value */
-       __le32  e_hash;         /* hash value of name and value */
-       char    e_name[0];      /* attribute name */
-@@ -68,6 +68,26 @@ struct ext4_xattr_entry {
-               EXT4_I(inode)->i_extra_isize))
- #define IFIRST(hdr) ((struct ext4_xattr_entry *)((hdr)+1))
-+/*
-+ * Link EA inode back to parent one using i_mtime field.
-+ * Extra integer type conversion added to ignore higher
-+ * bits in i_mtime.tv_sec which might be set by ext4_get()
-+ */
-+#define EXT4_XATTR_INODE_SET_PARENT(inode, inum)      \
-+do {                                                  \
-+      (inode)->i_mtime.tv_sec = inum;                 \
-+} while(0)
-+
-+#define EXT4_XATTR_INODE_GET_PARENT(inode)            \
-+((__u32)(inode)->i_mtime.tv_sec)
-+
-+/*
-+ * The minimum size of EA value when you start storing it in an external inode
-+ * size of block - size of header - size of 1 entry - 4 null bytes
-+*/
-+#define EXT4_XATTR_MIN_LARGE_EA_SIZE(b)                                       \
-+      ((b) - EXT4_XATTR_LEN(3) - sizeof(struct ext4_xattr_header) - 4)
-+
- #define BHDR(bh) ((struct ext4_xattr_header *)((bh)->b_data))
- #define ENTRY(ptr) ((struct ext4_xattr_entry *)(ptr))
- #define BFIRST(bh) ENTRY(BHDR(bh)+1)
-@@ -76,10 +96,11 @@ struct ext4_xattr_entry {
- #define EXT4_ZERO_XATTR_VALUE ((void *)-1)
- struct ext4_xattr_info {
--      int name_index;
-       const char *name;
-       const void *value;
-       size_t value_len;
-+      int name_index;
-+      int in_inode;
- };
- struct ext4_xattr_search {
-@@ -107,7 +128,14 @@ extern int ext4_xattr_get(struct inode *, int, const char *, void *, size_t);
- extern int ext4_xattr_set(struct inode *, int, const char *, const void *, size_t, int);
- extern int ext4_xattr_set_handle(handle_t *, struct inode *, int, const char *, const void *, size_t, int);
--extern void ext4_xattr_delete_inode(handle_t *, struct inode *);
-+extern struct inode *ext4_xattr_inode_iget(struct inode *parent, unsigned long ea_ino,
-+                                         int *err);
-+extern int ext4_xattr_inode_unlink(struct inode *inode, unsigned long ea_ino);
-+extern int ext4_xattr_delete_inode(handle_t *handle, struct inode *inode,
-+                                 struct ext4_xattr_ino_array **array);
-+extern void ext4_xattr_inode_array_free(struct inode *inode,
-+                                      struct ext4_xattr_ino_array *array);
-+
- extern void ext4_xattr_put_super(struct super_block *);
- extern int ext4_expand_extra_isize_ea(struct inode *inode, int new_extra_isize,
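The deleted large-eas patch above moves an xattr value into its own EA inode once it no longer fits in the xattr block, using EXT4_XATTR_MIN_LARGE_EA_SIZE() as the threshold and the EA inode's i_mtime as a back-pointer to the parent. The sketch below only reproduces that threshold arithmetic for a 4096-byte block; the 16-byte entry size and 32-byte header size are assumptions standing in for sizeof(struct ext4_xattr_entry) and sizeof(struct ext4_xattr_header), not values taken from the patch.

#include <stdio.h>

/* Assumed stand-ins for the on-disk struct sizes used by the real macros. */
#define XATTR_ENTRY_SIZE   16   /* ~ sizeof(struct ext4_xattr_entry) */
#define XATTR_HEADER_SIZE  32   /* ~ sizeof(struct ext4_xattr_header) */
#define XATTR_PAD           4
#define XATTR_ROUND        (XATTR_PAD - 1)
#define XATTR_LEN(name_len) \
        (((name_len) + XATTR_ENTRY_SIZE + XATTR_ROUND) & ~XATTR_ROUND)
/* block size - header - one 3-char entry - 4 NUL bytes, as in the hunk above */
#define MIN_LARGE_EA_SIZE(b) ((b) - XATTR_LEN(3) - XATTR_HEADER_SIZE - 4)

int main(void)
{
        printf("4K block: values of about %d bytes or more go to an EA inode\n",
               MIN_LARGE_EA_SIZE(4096));
        return 0;
}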
diff --git a/ldiskfs/kernel_patches/patches/ubuntu14+16/ext4-misc.patch b/ldiskfs/kernel_patches/patches/ubuntu14+16/ext4-misc.patch
deleted file mode 100644 (file)
index 3691150..0000000
+++ /dev/null
@@ -1,156 +0,0 @@
-diff --git a/fs/ext4/ext4.h b/fs/ext4/ext4.h
-index 574a6c9..97d3432 100644
---- a/fs/ext4/ext4.h
-+++ b/fs/ext4/ext4.h
-@@ -1581,6 +1581,8 @@ static inline int ext4_encrypted_inode(struct inode *inode)
- #define NEXT_ORPHAN(inode) EXT4_I(inode)->i_dtime
-+#define JOURNAL_START_HAS_3ARGS       1
-+
- /*
-  * Codes for operating systems
-  */
-@@ -1812,7 +1814,21 @@ static inline bool ext4_has_unknown_ext##ver##_incompat_features(struct super_bl
- EXTN_FEATURE_FUNCS(2)
- EXTN_FEATURE_FUNCS(3)
--EXTN_FEATURE_FUNCS(4)
-+static inline bool ext4_has_unknown_ext4_compat_features(struct super_block *sb)
-+{
-+      return ((EXT4_SB(sb)->s_es->s_feature_compat &
-+              cpu_to_le32(~EXT4_FEATURE_COMPAT_SUPP)) != 0);
-+}
-+static inline bool ext4_has_unknown_ext4_ro_compat_features(struct super_block *sb)
-+{
-+      return ((EXT4_SB(sb)->s_es->s_feature_ro_compat &
-+              cpu_to_le32(~EXT4_FEATURE_RO_COMPAT_SUPP)) != 0);
-+}
-+static inline bool ext4_has_unknown_ext4_incompat_features(struct super_block *sb)
-+{
-+      return ((EXT4_SB(sb)->s_es->s_feature_incompat &
-+              cpu_to_le32(~EXT4_FEATURE_INCOMPAT_SUPP)) != 0);
-+}
- static inline bool ext4_has_compat_features(struct super_block *sb)
- {
-@@ -3149,6 +3165,11 @@ struct ext4_extent;
- extern int ext4_ext_tree_init(handle_t *handle, struct inode *);
- extern int ext4_ext_writepage_trans_blocks(struct inode *, int);
-+extern struct buffer_head *ext4_read_inode_bitmap(struct super_block *sb,
-+                                                ext4_group_t block_group);
-+extern struct buffer_head *ext4_append(handle_t *handle,
-+                                     struct inode *inode,
-+                                     ext4_lblk_t *block);
- extern int ext4_ext_index_trans_blocks(struct inode *inode, int extents);
- extern int ext4_ext_map_blocks(handle_t *handle, struct inode *inode,
-                              struct ext4_map_blocks *map, int flags);
-diff --git a/fs/ext4/ialloc.c b/fs/ext4/ialloc.c
-index 38a740f..0cccda3 100644
---- a/fs/ext4/ialloc.c
-+++ b/fs/ext4/ialloc.c
-@@ -153,7 +153,7 @@ static int ext4_validate_inode_bitmap(struct super_block *sb,
-  *
-  * Return buffer_head of bitmap on success or NULL.
-  */
--static struct buffer_head *
-+struct buffer_head *
- ext4_read_inode_bitmap(struct super_block *sb, ext4_group_t block_group)
- {
-       struct ext4_group_desc *desc;
-@@ -233,6 +233,7 @@ out:
-       put_bh(bh);
-       return ERR_PTR(err);
- }
-+EXPORT_SYMBOL(ext4_read_inode_bitmap);
- /*
-  * NOTE! When we get the inode, we're the only people
-diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c
-index 296760b..04c5f63 100644
---- a/fs/ext4/inode.c
-+++ b/fs/ext4/inode.c
-@@ -5452,6 +5452,20 @@ out:
-       sb_end_pagefault(inode->i_sb);
-       return ret;
- }
-+EXPORT_SYMBOL(ext4_map_blocks);
-+EXPORT_SYMBOL(ext4_truncate);
-+EXPORT_SYMBOL(ext4_iget);
-+EXPORT_SYMBOL(ext4_bread);
-+EXPORT_SYMBOL(ext4_itable_unused_count);
-+EXPORT_SYMBOL(ext4_force_commit);
-+EXPORT_SYMBOL(ext4_mark_inode_dirty);
-+EXPORT_SYMBOL(ext4_get_group_desc);
-+EXPORT_SYMBOL(__ext4_journal_get_write_access);
-+EXPORT_SYMBOL(__ext4_journal_start_sb);
-+EXPORT_SYMBOL(__ext4_journal_stop);
-+EXPORT_SYMBOL(__ext4_handle_dirty_metadata);
-+EXPORT_SYMBOL(__ext4_std_error);
-+EXPORT_SYMBOL(ext4fs_dirhash);
- int ext4_filemap_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
- {
-diff --git a/fs/ext4/mballoc.c b/fs/ext4/mballoc.c
-index 6bcf0ca..02368cb 100644
---- a/fs/ext4/mballoc.c
-+++ b/fs/ext4/mballoc.c
-@@ -723,7 +723,6 @@ void ext4_mb_generate_buddy(struct super_block *sb,
-                               void *buddy, void *bitmap, ext4_group_t group)
- {
-       struct ext4_group_info *grp = ext4_get_group_info(sb, group);
--      struct ext4_sb_info *sbi = EXT4_SB(sb);
-       ext4_grpblk_t max = EXT4_CLUSTERS_PER_GROUP(sb);
-       ext4_grpblk_t i = 0;
-       ext4_grpblk_t first;
-diff --git a/fs/ext4/namei.c b/fs/ext4/namei.c
-index aaa388a..36635b6 100644
---- a/fs/ext4/namei.c
-+++ b/fs/ext4/namei.c
-@@ -47,7 +47,7 @@
- #define NAMEI_RA_BLOCKS  4
- #define NAMEI_RA_SIZE      (NAMEI_RA_CHUNKS * NAMEI_RA_BLOCKS)
--static struct buffer_head *ext4_append(handle_t *handle,
-+struct buffer_head *ext4_append(handle_t *handle,
-                                       struct inode *inode,
-                                       ext4_lblk_t *block)
- {
-@@ -157,6 +157,7 @@ static struct buffer_head *__ext4_read_dirblock(struct inode *inode,
-       }
-       return bh;
- }
-+EXPORT_SYMBOL(ext4_append);
- #ifndef assert
- #define assert(test) J_ASSERT(test)
-@@ -2407,7 +2408,7 @@ EXPORT_SYMBOL(ext4_delete_entry);
-  * DIR_NLINK feature is set if 1) nlinks > EXT4_LINK_MAX or 2) nlinks == 2,
-  * since this indicates that nlinks count was previously 1.
-  */
--static void ext4_inc_count(handle_t *handle, struct inode *inode)
-+void ext4_inc_count(handle_t *handle, struct inode *inode)
- {
-       inc_nlink(inode);
-       if (is_dx(inode) && inode->i_nlink > 1) {
-@@ -2418,16 +2419,18 @@ static void ext4_inc_count(handle_t *handle, struct inode *inode)
-               }
-       }
- }
-+EXPORT_SYMBOL(ext4_inc_count);
- /*
-  * If a directory had nlink == 1, then we should let it be 1. This indicates
-  * directory has >EXT4_LINK_MAX subdirs.
-  */
--static void ext4_dec_count(handle_t *handle, struct inode *inode)
-+void ext4_dec_count(handle_t *handle, struct inode *inode)
- {
-       if (!S_ISDIR(inode->i_mode) || inode->i_nlink > 2)
-               drop_nlink(inode);
- }
-+EXPORT_SYMBOL(ext4_dec_count);
- static int ext4_add_nondir(handle_t *handle,
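The ext4-misc hunk above mostly re-exports existing ext4 helpers for ldiskfs and open-codes the ext4_has_unknown_ext4_*_features() checks as "any feature bit outside the supported mask". A small self-contained illustration of that mask test follows; the SUPP_INCOMPAT value is invented for the example and is not the real EXT4_FEATURE_INCOMPAT_SUPP mask.

#include <stdint.h>
#include <stdio.h>

/* Invented mask for illustration only; not the real EXT4_FEATURE_INCOMPAT_SUPP. */
#define SUPP_INCOMPAT 0x0000f3ffu

/* The filesystem is usable only if no incompat bit outside the supported
 * mask is set, mirroring the shape of the helpers added in the hunk above. */
static int has_unknown_incompat(uint32_t on_disk)
{
        return (on_disk & ~SUPP_INCOMPAT) != 0;
}

int main(void)
{
        printf("%d\n", has_unknown_incompat(0x00010000u)); /* 1: unknown bit set */
        printf("%d\n", has_unknown_incompat(0x00000200u)); /* 0: within supported mask */
        return 0;
}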
diff --git a/ldiskfs/kernel_patches/patches/ubuntu14+16/ext4-pdirop-001.patch b/ldiskfs/kernel_patches/patches/ubuntu14+16/ext4-pdirop-001.patch
deleted file mode 100644 (file)
index e111d4b..0000000
+++ /dev/null
@@ -1,1927 +0,0 @@
-diff --git a/fs/ext4/Makefile b/fs/ext4/Makefile
-index f52cf54..3f16939 100644
---- a/fs/ext4/Makefile
-+++ b/fs/ext4/Makefile
-@@ -6,6 +6,7 @@ obj-$(CONFIG_EXT4_FS) += ext4.o
- ext4-y        := balloc.o bitmap.o dir.o file.o fsync.o ialloc.o inode.o page-io.o \
-               ioctl.o namei.o super.o symlink.o hash.o resize.o extents.o \
-+              htree_lock.o \
-               ext4_jbd2.o migrate.o mballoc.o block_validity.o move_extent.o \
-               mmp.o indirect.o extents_status.o xattr.o xattr_user.o \
-               xattr_trusted.o inline.o readpage.o sysfs.o
-diff --git a/fs/ext4/ext4.h b/fs/ext4/ext4.h
-index 2d22f1a..005c9b3 100644
---- a/fs/ext4/ext4.h
-+++ b/fs/ext4/ext4.h
-@@ -28,6 +28,7 @@
- #include <linux/timer.h>
- #include <linux/version.h>
- #include <linux/wait.h>
-+#include <linux/htree_lock.h>
- #include <linux/blockgroup_lock.h>
- #include <linux/percpu_counter.h>
- #include <linux/ratelimit.h>
-@@ -880,6 +881,9 @@ struct ext4_inode_info {
-       __u32   i_dtime;
-       ext4_fsblk_t    i_file_acl;
-+      /* following fields for parallel directory operations -bzzz */
-+      struct semaphore i_append_sem;
-+
-       /*
-        * i_block_group is the number of the block group which contains
-        * this file's inode.  Constant across the lifetime of the inode,
-@@ -2086,6 +2090,71 @@ struct dx_hash_info
-       u32             *seed;
- };
-+/* assume name-hash is protected by upper layer */
-+#define EXT4_HTREE_LOCK_HASH  0
-+
-+enum ext4_pdo_lk_types {
-+#if EXT4_HTREE_LOCK_HASH
-+      EXT4_LK_HASH,
-+#endif
-+      EXT4_LK_DX,             /* index block */
-+      EXT4_LK_DE,             /* directory entry block */
-+      EXT4_LK_SPIN,           /* spinlock */
-+      EXT4_LK_MAX,
-+};
-+
-+/* read-only bit */
-+#define EXT4_LB_RO(b)         (1 << (b))
-+/* read + write, high bits for writer */
-+#define EXT4_LB_RW(b)         ((1 << (b)) | (1 << (EXT4_LK_MAX + (b))))
-+
-+enum ext4_pdo_lock_bits {
-+      /* DX lock bits */
-+      EXT4_LB_DX_RO           = EXT4_LB_RO(EXT4_LK_DX),
-+      EXT4_LB_DX              = EXT4_LB_RW(EXT4_LK_DX),
-+      /* DE lock bits */
-+      EXT4_LB_DE_RO           = EXT4_LB_RO(EXT4_LK_DE),
-+      EXT4_LB_DE              = EXT4_LB_RW(EXT4_LK_DE),
-+      /* DX spinlock bits */
-+      EXT4_LB_SPIN_RO         = EXT4_LB_RO(EXT4_LK_SPIN),
-+      EXT4_LB_SPIN            = EXT4_LB_RW(EXT4_LK_SPIN),
-+      /* accurate searching */
-+      EXT4_LB_EXACT           = EXT4_LB_RO(EXT4_LK_MAX << 1),
-+};
-+
-+enum ext4_pdo_lock_opc {
-+      /* external */
-+      EXT4_HLOCK_READDIR      = (EXT4_LB_DE_RO | EXT4_LB_DX_RO),
-+      EXT4_HLOCK_LOOKUP       = (EXT4_LB_DE_RO | EXT4_LB_SPIN_RO |
-+                                 EXT4_LB_EXACT),
-+      EXT4_HLOCK_DEL          = (EXT4_LB_DE | EXT4_LB_SPIN_RO |
-+                                 EXT4_LB_EXACT),
-+      EXT4_HLOCK_ADD          = (EXT4_LB_DE | EXT4_LB_SPIN_RO),
-+
-+      /* internal */
-+      EXT4_HLOCK_LOOKUP_SAFE  = (EXT4_LB_DE_RO | EXT4_LB_DX_RO |
-+                                 EXT4_LB_EXACT),
-+      EXT4_HLOCK_DEL_SAFE     = (EXT4_LB_DE | EXT4_LB_DX_RO | EXT4_LB_EXACT),
-+      EXT4_HLOCK_SPLIT        = (EXT4_LB_DE | EXT4_LB_DX | EXT4_LB_SPIN),
-+};
-+
-+extern struct htree_lock_head *ext4_htree_lock_head_alloc(unsigned hbits);
-+#define ext4_htree_lock_head_free(lhead)      htree_lock_head_free(lhead)
-+
-+extern struct htree_lock *ext4_htree_lock_alloc(void);
-+#define ext4_htree_lock_free(lck)             htree_lock_free(lck)
-+
-+extern void ext4_htree_lock(struct htree_lock *lck,
-+                          struct htree_lock_head *lhead,
-+                          struct inode *dir, unsigned flags);
-+#define ext4_htree_unlock(lck)                  htree_unlock(lck)
-+
-+extern struct buffer_head *__ext4_find_entry(struct inode *dir,
-+                                      const struct qstr *d_name,
-+                                      struct ext4_dir_entry_2 **res_dir,
-+                                      int *inlined, struct htree_lock *lck);
-+extern int __ext4_add_entry(handle_t *handle, struct dentry *dentry,
-+                    struct inode *inode, struct htree_lock *lck);
- /* 32 and 64 bit signed EOF for dx directories */
- #define EXT4_HTREE_EOF_32BIT   ((1UL  << (32 - 1)) - 1)
-@@ -2475,8 +2544,16 @@ int ext4_insert_dentry(struct inode *dir,
-                      struct ext4_filename *fname, void *data);
- static inline void ext4_update_dx_flag(struct inode *inode)
- {
-+      /* Disable it for ldiskfs, because going from a DX directory to
-+       * a non-DX directory while it is in use will completely break
-+       * the htree-locking.
-+       * If we really want to support this operation in the future,
-+       * we need to exclusively lock the directory at here which will
-+       * increase complexity of code */
-+#if 0
-       if (!ext4_has_feature_dir_index(inode->i_sb))
-               ext4_clear_inode_flag(inode, EXT4_INODE_INDEX);
-+#endif
- }
- static unsigned char ext4_filetype_table[] = {
-       DT_UNKNOWN, DT_REG, DT_DIR, DT_CHR, DT_BLK, DT_FIFO, DT_SOCK, DT_LNK
-diff --git a/fs/ext4/htree_lock.c b/fs/ext4/htree_lock.c
-new file mode 100644
-index 0000000..99e7375
---- /dev/null
-+++ b/fs/ext4/htree_lock.c
-@@ -0,0 +1,891 @@
-+/*
-+ * fs/ext4/htree_lock.c
-+ *
-+ * Copyright (c) 2011, 2012, Intel Corporation.
-+ *
-+ * Author: Liang Zhen <liang@whamcloud.com>
-+ */
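-+
-+/*
-+ * Illustrative sketch of the expected call sequence (an example only,
-+ * not lifted from any particular caller): one lock head per shared
-+ * resource, one lock handle per thread.
-+ *
-+ *      lhead = htree_lock_head_alloc(depth, hbits, 0);
-+ *      lck = htree_lock_alloc(depth, 0);
-+ *      htree_lock_try(lck, lhead, HTREE_LOCK_PR, 1);
-+ *      htree_node_lock_try(lck, HTREE_LOCK_PR, key, dep, 1, NULL);
-+ *      ... access the protected tree node ...
-+ *      htree_unlock(lck);           (also releases any child locks)
-+ *      htree_lock_free(lck);
-+ *      htree_lock_head_free(lhead);
-+ */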
-+#include <linux/jbd2.h>
-+#include <linux/hash.h>
-+#include <linux/module.h>
-+#include <linux/htree_lock.h>
-+
-+enum {
-+      HTREE_LOCK_BIT_EX       = (1 << HTREE_LOCK_EX),
-+      HTREE_LOCK_BIT_PW       = (1 << HTREE_LOCK_PW),
-+      HTREE_LOCK_BIT_PR       = (1 << HTREE_LOCK_PR),
-+      HTREE_LOCK_BIT_CW       = (1 << HTREE_LOCK_CW),
-+      HTREE_LOCK_BIT_CR       = (1 << HTREE_LOCK_CR),
-+};
-+
-+enum {
-+      HTREE_LOCK_COMPAT_EX    = 0,
-+      HTREE_LOCK_COMPAT_PW    = HTREE_LOCK_COMPAT_EX | HTREE_LOCK_BIT_CR,
-+      HTREE_LOCK_COMPAT_PR    = HTREE_LOCK_COMPAT_PW | HTREE_LOCK_BIT_PR,
-+      HTREE_LOCK_COMPAT_CW    = HTREE_LOCK_COMPAT_PW | HTREE_LOCK_BIT_CW,
-+      HTREE_LOCK_COMPAT_CR    = HTREE_LOCK_COMPAT_CW | HTREE_LOCK_BIT_PR |
-+                                HTREE_LOCK_BIT_PW,
-+};
-+
-+static int htree_lock_compat[] = {
-+      [HTREE_LOCK_EX]         HTREE_LOCK_COMPAT_EX,
-+      [HTREE_LOCK_PW]         HTREE_LOCK_COMPAT_PW,
-+      [HTREE_LOCK_PR]         HTREE_LOCK_COMPAT_PR,
-+      [HTREE_LOCK_CW]         HTREE_LOCK_COMPAT_CW,
-+      [HTREE_LOCK_CR]         HTREE_LOCK_COMPAT_CR,
-+};
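-+
-+/*
-+ * htree_lock_compat[] is indexed by the requested mode; each entry is the
-+ * bitmask of modes that may be held concurrently with it.  A request is
-+ * granted by htree_lock_internal() only when every currently granted (or
-+ * blocked) mode falls within the requester's compatibility mask.
-+ */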
-+
-+/* max allowed htree-lock depth.
-+ * We only need depth=3 for ext4, although users can request a higher value. */
-+#define HTREE_LOCK_DEP_MAX    16
-+
-+#ifdef HTREE_LOCK_DEBUG
-+
-+static char *hl_name[] = {
-+      [HTREE_LOCK_EX]         "EX",
-+      [HTREE_LOCK_PW]         "PW",
-+      [HTREE_LOCK_PR]         "PR",
-+      [HTREE_LOCK_CW]         "CW",
-+      [HTREE_LOCK_CR]         "CR",
-+};
-+
-+/* lock stats */
-+struct htree_lock_node_stats {
-+      unsigned long long      blocked[HTREE_LOCK_MAX];
-+      unsigned long long      granted[HTREE_LOCK_MAX];
-+      unsigned long long      retried[HTREE_LOCK_MAX];
-+      unsigned long long      events;
-+};
-+
-+struct htree_lock_stats {
-+      struct htree_lock_node_stats    nodes[HTREE_LOCK_DEP_MAX];
-+      unsigned long long      granted[HTREE_LOCK_MAX];
-+      unsigned long long      blocked[HTREE_LOCK_MAX];
-+};
-+
-+static struct htree_lock_stats hl_stats;
-+
-+void htree_lock_stat_reset(void)
-+{
-+      memset(&hl_stats, 0, sizeof(hl_stats));
-+}
-+
-+void htree_lock_stat_print(int depth)
-+{
-+      int     i;
-+      int     j;
-+
-+      printk(KERN_DEBUG "HTREE LOCK STATS:\n");
-+      for (i = 0; i < HTREE_LOCK_MAX; i++) {
-+              printk(KERN_DEBUG "[%s]: G [%10llu], B [%10llu]\n",
-+                     hl_name[i], hl_stats.granted[i], hl_stats.blocked[i]);
-+      }
-+      for (i = 0; i < depth; i++) {
-+              printk(KERN_DEBUG "HTREE CHILD [%d] STATS:\n", i);
-+              for (j = 0; j < HTREE_LOCK_MAX; j++) {
-+                      printk(KERN_DEBUG
-+                              "[%s]: G [%10llu], B [%10llu], R [%10llu]\n",
-+                              hl_name[j], hl_stats.nodes[i].granted[j],
-+                              hl_stats.nodes[i].blocked[j],
-+                              hl_stats.nodes[i].retried[j]);
-+              }
-+      }
-+}
-+
-+#define lk_grant_inc(m)       do { hl_stats.granted[m]++; } while (0)
-+#define lk_block_inc(m)       do { hl_stats.blocked[m]++; } while (0)
-+#define ln_grant_inc(d, m)    do { hl_stats.nodes[d].granted[m]++; } while (0)
-+#define ln_block_inc(d, m)    do { hl_stats.nodes[d].blocked[m]++; } while (0)
-+#define ln_retry_inc(d, m)    do { hl_stats.nodes[d].retried[m]++; } while (0)
-+#define ln_event_inc(d)       do { hl_stats.nodes[d].events++; } while (0)
-+
-+#else /* !DEBUG */
-+
-+void htree_lock_stat_reset(void) {}
-+void htree_lock_stat_print(int depth) {}
-+
-+#define lk_grant_inc(m)             do {} while (0)
-+#define lk_block_inc(m)             do {} while (0)
-+#define ln_grant_inc(d, m)    do {} while (0)
-+#define ln_block_inc(d, m)    do {} while (0)
-+#define ln_retry_inc(d, m)    do {} while (0)
-+#define ln_event_inc(d)             do {} while (0)
-+
-+#endif /* DEBUG */
-+
-+EXPORT_SYMBOL(htree_lock_stat_reset);
-+EXPORT_SYMBOL(htree_lock_stat_print);
-+
-+#define HTREE_DEP_ROOT                  (-1)
-+
-+#define htree_spin_lock(lhead, dep)                           \
-+      bit_spin_lock((dep) + 1, &(lhead)->lh_lock)
-+#define htree_spin_unlock(lhead, dep)                         \
-+      bit_spin_unlock((dep) + 1, &(lhead)->lh_lock)
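-+
-+/*
-+ * lh_lock is used as a word of bit spinlocks: bit 0 protects the root
-+ * level (HTREE_DEP_ROOT == -1) and bit @dep + 1 protects child level @dep.
-+ */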
-+
-+#define htree_key_event_ignore(child, ln)                     \
-+      (!((child)->lc_events & (1 << (ln)->ln_mode)))
-+
-+static int
-+htree_key_list_empty(struct htree_lock_node *ln)
-+{
-+      return list_empty(&ln->ln_major_list) && list_empty(&ln->ln_minor_list);
-+}
-+
-+static void
-+htree_key_list_del_init(struct htree_lock_node *ln)
-+{
-+      struct htree_lock_node *tmp = NULL;
-+
-+      if (!list_empty(&ln->ln_minor_list)) {
-+              tmp = list_entry(ln->ln_minor_list.next,
-+                               struct htree_lock_node, ln_minor_list);
-+              list_del_init(&ln->ln_minor_list);
-+      }
-+
-+      if (list_empty(&ln->ln_major_list))
-+              return;
-+
-+      if (tmp == NULL) { /* not on minor key list */
-+              list_del_init(&ln->ln_major_list);
-+      } else {
-+              BUG_ON(!list_empty(&tmp->ln_major_list));
-+              list_replace_init(&ln->ln_major_list, &tmp->ln_major_list);
-+      }
-+}
-+
-+static void
-+htree_key_list_replace_init(struct htree_lock_node *old,
-+                          struct htree_lock_node *new)
-+{
-+      if (!list_empty(&old->ln_major_list))
-+              list_replace_init(&old->ln_major_list, &new->ln_major_list);
-+
-+      if (!list_empty(&old->ln_minor_list))
-+              list_replace_init(&old->ln_minor_list, &new->ln_minor_list);
-+}
-+
-+static void
-+htree_key_event_enqueue(struct htree_lock_child *child,
-+                      struct htree_lock_node *ln, int dep, void *event)
-+{
-+      struct htree_lock_node *tmp;
-+
-+      /* NB: ALWAYS called holding lhead::lh_lock(dep) */
-+      BUG_ON(ln->ln_mode == HTREE_LOCK_NL);
-+      if (event == NULL || htree_key_event_ignore(child, ln))
-+              return;
-+
-+      /* shouldn't be a very long list */
-+      list_for_each_entry(tmp, &ln->ln_alive_list, ln_alive_list) {
-+              if (tmp->ln_mode == HTREE_LOCK_NL) {
-+                      ln_event_inc(dep);
-+                      if (child->lc_callback != NULL)
-+                              child->lc_callback(tmp->ln_ev_target, event);
-+              }
-+      }
-+}
-+
-+static int
-+htree_node_lock_enqueue(struct htree_lock *newlk, struct htree_lock *curlk,
-+                      unsigned dep, int wait, void *event)
-+{
-+      struct htree_lock_child *child = &newlk->lk_head->lh_children[dep];
-+      struct htree_lock_node *newln = &newlk->lk_nodes[dep];
-+      struct htree_lock_node *curln = &curlk->lk_nodes[dep];
-+
-+      /* NB: ALWAYS called holding lhead::lh_lock(dep) */
-+      /* NB: we only expect PR/PW lock modes here; only these two modes are
-+       * allowed for htree_node_lock (asserted in htree_node_lock_internal).
-+       * NL is only used for listeners; users can't request NL mode directly */
-+      if ((curln->ln_mode == HTREE_LOCK_NL) ||
-+          (curln->ln_mode != HTREE_LOCK_PW &&
-+           newln->ln_mode != HTREE_LOCK_PW)) {
-+              /* no conflict, attach it on granted list of @curlk */
-+              if (curln->ln_mode != HTREE_LOCK_NL) {
-+                      list_add(&newln->ln_granted_list,
-+                               &curln->ln_granted_list);
-+              } else {
-+                      /* replace key owner */
-+                      htree_key_list_replace_init(curln, newln);
-+              }
-+
-+              list_add(&newln->ln_alive_list, &curln->ln_alive_list);
-+              htree_key_event_enqueue(child, newln, dep, event);
-+              ln_grant_inc(dep, newln->ln_mode);
-+              return 1; /* still hold lh_lock */
-+      }
-+
-+      if (!wait) { /* can't grant and don't want to wait */
-+              ln_retry_inc(dep, newln->ln_mode);
-+              newln->ln_mode = HTREE_LOCK_INVAL;
-+              return -1; /* don't wait and just return -1 */
-+      }
-+
-+      newlk->lk_task = current;
-+      set_current_state(TASK_UNINTERRUPTIBLE);
-+      /* conflict, attach it on blocked list of curlk */
-+      list_add_tail(&newln->ln_blocked_list, &curln->ln_blocked_list);
-+      list_add(&newln->ln_alive_list, &curln->ln_alive_list);
-+      ln_block_inc(dep, newln->ln_mode);
-+
-+      htree_spin_unlock(newlk->lk_head, dep);
-+      /* wait to be given the lock */
-+      if (newlk->lk_task != NULL)
-+              schedule();
-+      /* granted, no doubt; the wake-up set me RUNNING */
-+      if (event == NULL || htree_key_event_ignore(child, newln))
-+              return 0; /* granted without lh_lock */
-+
-+      htree_spin_lock(newlk->lk_head, dep);
-+      htree_key_event_enqueue(child, newln, dep, event);
-+      return 1; /* still hold lh_lock */
-+}
-+
-+/*
-+ * get PR/PW access to a particular tree node according to @dep and @key;
-+ * it will return -1 if @wait is false and the lock can't be granted
-+ * immediately. All listeners (HTREE_LOCK_NL) on @dep with the same @key
-+ * will get @event if it's not NULL.
-+ * NB: ALWAYS called holding lhead::lh_lock
-+ */
-+static int
-+htree_node_lock_internal(struct htree_lock_head *lhead, struct htree_lock *lck,
-+                       htree_lock_mode_t mode, u32 key, unsigned dep,
-+                       int wait, void *event)
-+{
-+      LIST_HEAD(list);
-+      struct htree_lock       *tmp;
-+      struct htree_lock       *tmp2;
-+      u16                     major;
-+      u16                     minor;
-+      u8                      reverse;
-+      u8                      ma_bits;
-+      u8                      mi_bits;
-+
-+      BUG_ON(mode != HTREE_LOCK_PW && mode != HTREE_LOCK_PR);
-+      BUG_ON(htree_node_is_granted(lck, dep));
-+
-+      key = hash_long(key, lhead->lh_hbits);
-+
-+      mi_bits = lhead->lh_hbits >> 1;
-+      ma_bits = lhead->lh_hbits - mi_bits;
-+
-+      lck->lk_nodes[dep].ln_major_key = major = key & ((1U << ma_bits) - 1);
-+      lck->lk_nodes[dep].ln_minor_key = minor = key >> ma_bits;
-+      lck->lk_nodes[dep].ln_mode = mode;
-+
-+      /*
-+       * The major key list is an ordered list, so searches are started
-+       * at the end of the list that is numerically closer to major_key,
-+       * so at most half of the list will be walked (for well-distributed
-+       * keys). The list traversal aborts early if the expected key
-+       * location is passed.
-+       */
-+      reverse = (major >= (1 << (ma_bits - 1)));
-+
-+      if (reverse) {
-+              list_for_each_entry_reverse(tmp,
-+                                      &lhead->lh_children[dep].lc_list,
-+                                      lk_nodes[dep].ln_major_list) {
-+                      if (tmp->lk_nodes[dep].ln_major_key == major) {
-+                              goto search_minor;
-+
-+                      } else if (tmp->lk_nodes[dep].ln_major_key < major) {
-+                              /* attach _after_ @tmp */
-+                              list_add(&lck->lk_nodes[dep].ln_major_list,
-+                                       &tmp->lk_nodes[dep].ln_major_list);
-+                              goto out_grant_major;
-+                      }
-+              }
-+
-+              list_add(&lck->lk_nodes[dep].ln_major_list,
-+                       &lhead->lh_children[dep].lc_list);
-+              goto out_grant_major;
-+
-+      } else {
-+              list_for_each_entry(tmp, &lhead->lh_children[dep].lc_list,
-+                                  lk_nodes[dep].ln_major_list) {
-+                      if (tmp->lk_nodes[dep].ln_major_key == major) {
-+                              goto search_minor;
-+
-+                      } else if (tmp->lk_nodes[dep].ln_major_key > major) {
-+                              /* insert _before_ @tmp */
-+                              list_add_tail(&lck->lk_nodes[dep].ln_major_list,
-+                                      &tmp->lk_nodes[dep].ln_major_list);
-+                              goto out_grant_major;
-+                      }
-+              }
-+
-+              list_add_tail(&lck->lk_nodes[dep].ln_major_list,
-+                            &lhead->lh_children[dep].lc_list);
-+              goto out_grant_major;
-+      }
-+
-+ search_minor:
-+      /*
-+       * NB: the minor_key list doesn't have a "head"; @list is just a
-+       * temporary stub to help the list search, so make sure it's removed
-+       * after searching.
-+       * The minor_key list is an ordered list too.
-+       */
-+      list_add_tail(&list, &tmp->lk_nodes[dep].ln_minor_list);
-+
-+      reverse = (minor >= (1 << (mi_bits - 1)));
-+
-+      if (reverse) {
-+              list_for_each_entry_reverse(tmp2, &list,
-+                                          lk_nodes[dep].ln_minor_list) {
-+                      if (tmp2->lk_nodes[dep].ln_minor_key == minor) {
-+                              goto out_enqueue;
-+
-+                      } else if (tmp2->lk_nodes[dep].ln_minor_key < minor) {
-+                              /* attach _after_ @tmp2 */
-+                              list_add(&lck->lk_nodes[dep].ln_minor_list,
-+                                       &tmp2->lk_nodes[dep].ln_minor_list);
-+                              goto out_grant_minor;
-+                      }
-+              }
-+
-+              list_add(&lck->lk_nodes[dep].ln_minor_list, &list);
-+
-+      } else {
-+              list_for_each_entry(tmp2, &list,
-+                                  lk_nodes[dep].ln_minor_list) {
-+                      if (tmp2->lk_nodes[dep].ln_minor_key == minor) {
-+                              goto out_enqueue;
-+
-+                      } else if (tmp2->lk_nodes[dep].ln_minor_key > minor) {
-+                              /* insert _before_ @tmp2 */
-+                              list_add_tail(&lck->lk_nodes[dep].ln_minor_list,
-+                                      &tmp2->lk_nodes[dep].ln_minor_list);
-+                              goto out_grant_minor;
-+                      }
-+              }
-+
-+              list_add_tail(&lck->lk_nodes[dep].ln_minor_list, &list);
-+      }
-+
-+ out_grant_minor:
-+      if (list.next == &lck->lk_nodes[dep].ln_minor_list) {
-+              /* new lock @lck is the first one on minor_key list, which
-+               * means it has the smallest minor_key and it should
-+               * replace @tmp as minor_key owner */
-+              list_replace_init(&tmp->lk_nodes[dep].ln_major_list,
-+                                &lck->lk_nodes[dep].ln_major_list);
-+      }
-+      /* remove the temporary head */
-+      list_del(&list);
-+
-+ out_grant_major:
-+      ln_grant_inc(dep, lck->lk_nodes[dep].ln_mode);
-+      return 1; /* granted with holding lh_lock */
-+
-+ out_enqueue:
-+      list_del(&list); /* remove temporary head */
-+      return htree_node_lock_enqueue(lck, tmp2, dep, wait, event);
-+}
-+
-+/*
-+ * release the key of @lck at level @dep, and grant any blocked locks.
-+ * The caller will still listen on @key if @event is not NULL, which means
-+ * the caller can see an event (via event_cb) when any lock with
-+ * the same key at level @dep is granted.
-+ * NB: ALWAYS called holding lhead::lh_lock
-+ * NB: listener will not block anyone because listening mode is HTREE_LOCK_NL
-+ */
-+static void
-+htree_node_unlock_internal(struct htree_lock_head *lhead,
-+                         struct htree_lock *curlk, unsigned dep, void *event)
-+{
-+      struct htree_lock_node  *curln = &curlk->lk_nodes[dep];
-+      struct htree_lock       *grtlk = NULL;
-+      struct htree_lock_node  *grtln;
-+      struct htree_lock       *poslk;
-+      struct htree_lock       *tmplk;
-+
-+      if (!htree_node_is_granted(curlk, dep))
-+              return;
-+
-+      if (!list_empty(&curln->ln_granted_list)) {
-+              /* there is another granted lock */
-+              grtlk = list_entry(curln->ln_granted_list.next,
-+                                 struct htree_lock,
-+                                 lk_nodes[dep].ln_granted_list);
-+              list_del_init(&curln->ln_granted_list);
-+      }
-+
-+      if (grtlk == NULL && !list_empty(&curln->ln_blocked_list)) {
-+              /*
-+               * @curlk is the only granted lock, so we confirmed:
-+               * a) curln is key owner (attached on major/minor_list),
-+               *    so if there is any blocked lock, it should be attached
-+               *    on curln->ln_blocked_list
-+               * b) we can always grant the first blocked lock
-+               */
-+              grtlk = list_entry(curln->ln_blocked_list.next,
-+                                 struct htree_lock,
-+                                 lk_nodes[dep].ln_blocked_list);
-+              BUG_ON(grtlk->lk_task == NULL);
-+              wake_up_process(grtlk->lk_task);
-+      }
-+
-+      if (event != NULL &&
-+          lhead->lh_children[dep].lc_events != HTREE_EVENT_DISABLE) {
-+              curln->ln_ev_target = event;
-+              curln->ln_mode = HTREE_LOCK_NL; /* listen! */
-+      } else {
-+              curln->ln_mode = HTREE_LOCK_INVAL;
-+      }
-+
-+      if (grtlk == NULL) { /* I must be the only one locking this key */
-+              struct htree_lock_node *tmpln;
-+
-+              BUG_ON(htree_key_list_empty(curln));
-+
-+              if (curln->ln_mode == HTREE_LOCK_NL) /* listening */
-+                      return;
-+
-+              /* not listening */
-+              if (list_empty(&curln->ln_alive_list)) { /* no more listener */
-+                      htree_key_list_del_init(curln);
-+                      return;
-+              }
-+
-+              tmpln = list_entry(curln->ln_alive_list.next,
-+                                 struct htree_lock_node, ln_alive_list);
-+
-+              BUG_ON(tmpln->ln_mode != HTREE_LOCK_NL);
-+
-+              htree_key_list_replace_init(curln, tmpln);
-+              list_del_init(&curln->ln_alive_list);
-+
-+              return;
-+      }
-+
-+      /* have a granted lock */
-+      grtln = &grtlk->lk_nodes[dep];
-+      if (!list_empty(&curln->ln_blocked_list)) {
-+              /* only key owner can be on both lists */
-+              BUG_ON(htree_key_list_empty(curln));
-+
-+              if (list_empty(&grtln->ln_blocked_list)) {
-+                      list_add(&grtln->ln_blocked_list,
-+                               &curln->ln_blocked_list);
-+              }
-+              list_del_init(&curln->ln_blocked_list);
-+      }
-+      /*
-+       * NB: this is the tricky part:
-+       * We have only two modes for child locks (PR and PW); also, only
-+       * the owner of the key (attached on the major/minor_list) can be on
-+       * both the blocked_list and the granted_list, so @grtlk must be one
-+       * of these two cases:
-+       *
-+       * a) @grtlk is taken from the granted_list, which means we've
-+       *    granted more than one lock, so @grtlk has to be PR; the first
-+       *    blocked lock must be PW and we can't grant it at all.
-+       *    So even if @grtlk is not the owner of the key (empty
-+       *    blocked_list), we don't care because we can't grant any lock.
-+       * b) we just granted a new lock taken from the head of the blocked
-+       *    list; it should be the first granted lock, and it should be
-+       *    the first one linked on the blocked_list.
-+       *
-+       * Either way, we get the correct result by iterating the
-+       * blocked_list of @grtlk, and don't have to bother figuring out
-+       * the owner of the current key.
-+       */
-+      list_for_each_entry_safe(poslk, tmplk, &grtln->ln_blocked_list,
-+                               lk_nodes[dep].ln_blocked_list) {
-+              if (grtlk->lk_nodes[dep].ln_mode == HTREE_LOCK_PW ||
-+                  poslk->lk_nodes[dep].ln_mode == HTREE_LOCK_PW)
-+                      break;
-+              /* grant all readers */
-+              list_del_init(&poslk->lk_nodes[dep].ln_blocked_list);
-+              list_add(&poslk->lk_nodes[dep].ln_granted_list,
-+                       &grtln->ln_granted_list);
-+
-+              BUG_ON(poslk->lk_task == NULL);
-+              wake_up_process(poslk->lk_task);
-+      }
-+
-+      /* if @curln is the owner of this key, replace it with @grtln */
-+      if (!htree_key_list_empty(curln))
-+              htree_key_list_replace_init(curln, grtln);
-+
-+      if (curln->ln_mode == HTREE_LOCK_INVAL)
-+              list_del_init(&curln->ln_alive_list);
-+}
-+
-+/*
-+ * it's just a wrapper of htree_node_lock_internal; it returns 1 when granted
-+ * and 0 only if @wait is false and the lock can't be granted immediately
-+ */
-+int
-+htree_node_lock_try(struct htree_lock *lck, htree_lock_mode_t mode,
-+                  u32 key, unsigned dep, int wait, void *event)
-+{
-+      struct htree_lock_head *lhead = lck->lk_head;
-+      int rc;
-+
-+      BUG_ON(dep >= lck->lk_depth);
-+      BUG_ON(lck->lk_mode == HTREE_LOCK_INVAL);
-+
-+      htree_spin_lock(lhead, dep);
-+      rc = htree_node_lock_internal(lhead, lck, mode, key, dep, wait, event);
-+      if (rc != 0)
-+              htree_spin_unlock(lhead, dep);
-+      return rc >= 0;
-+}
-+EXPORT_SYMBOL(htree_node_lock_try);
-+
-+/* wrapper of htree_node_unlock_internal */
-+void
-+htree_node_unlock(struct htree_lock *lck, unsigned dep, void *event)
-+{
-+      struct htree_lock_head *lhead = lck->lk_head;
-+
-+      BUG_ON(dep >= lck->lk_depth);
-+      BUG_ON(lck->lk_mode == HTREE_LOCK_INVAL);
-+
-+      htree_spin_lock(lhead, dep);
-+      htree_node_unlock_internal(lhead, lck, dep, event);
-+      htree_spin_unlock(lhead, dep);
-+}
-+EXPORT_SYMBOL(htree_node_unlock);
-+
-+/* stop listening on child-lock level @dep */
-+void
-+htree_node_stop_listen(struct htree_lock *lck, unsigned dep)
-+{
-+      struct htree_lock_node *ln = &lck->lk_nodes[dep];
-+      struct htree_lock_node *tmp;
-+
-+      BUG_ON(htree_node_is_granted(lck, dep));
-+      BUG_ON(!list_empty(&ln->ln_blocked_list));
-+      BUG_ON(!list_empty(&ln->ln_granted_list));
-+
-+      if (!htree_node_is_listening(lck, dep))
-+              return;
-+
-+      htree_spin_lock(lck->lk_head, dep);
-+      ln->ln_mode = HTREE_LOCK_INVAL;
-+      ln->ln_ev_target = NULL;
-+
-+      if (htree_key_list_empty(ln)) { /* not owner */
-+              list_del_init(&ln->ln_alive_list);
-+              goto out;
-+      }
-+
-+      /* I'm the owner... */
-+      if (list_empty(&ln->ln_alive_list)) { /* no more listener */
-+              htree_key_list_del_init(ln);
-+              goto out;
-+      }
-+
-+      tmp = list_entry(ln->ln_alive_list.next,
-+                       struct htree_lock_node, ln_alive_list);
-+
-+      BUG_ON(tmp->ln_mode != HTREE_LOCK_NL);
-+      htree_key_list_replace_init(ln, tmp);
-+      list_del_init(&ln->ln_alive_list);
-+ out:
-+      htree_spin_unlock(lck->lk_head, dep);
-+}
-+EXPORT_SYMBOL(htree_node_stop_listen);
-+
-+/* release all child-locks if we have any */
-+static void
-+htree_node_release_all(struct htree_lock *lck)
-+{
-+      int     i;
-+
-+      for (i = 0; i < lck->lk_depth; i++) {
-+              if (htree_node_is_granted(lck, i))
-+                      htree_node_unlock(lck, i, NULL);
-+              else if (htree_node_is_listening(lck, i))
-+                      htree_node_stop_listen(lck, i);
-+      }
-+}
-+
-+/*
-+ * obtain the htree lock; the caller may block inside if there is a
-+ * conflict with any granted or blocked lock and @wait is true.
-+ * NB: ALWAYS called holding lhead::lh_lock
-+ */
-+static int
-+htree_lock_internal(struct htree_lock *lck, int wait)
-+{
-+      struct htree_lock_head *lhead = lck->lk_head;
-+      int     granted = 0;
-+      int     blocked = 0;
-+      int     i;
-+
-+      for (i = 0; i < HTREE_LOCK_MAX; i++) {
-+              if (lhead->lh_ngranted[i] != 0)
-+                      granted |= 1 << i;
-+              if (lhead->lh_nblocked[i] != 0)
-+                      blocked |= 1 << i;
-+      }
-+      if ((htree_lock_compat[lck->lk_mode] & granted) != granted ||
-+          (htree_lock_compat[lck->lk_mode] & blocked) != blocked) {
-+              /* block the current lock even if it only conflicts with another
-+               * blocked lock, so locks like EX won't starve */
-+              if (!wait)
-+                      return -1;
-+              lhead->lh_nblocked[lck->lk_mode]++;
-+              lk_block_inc(lck->lk_mode);
-+
-+              lck->lk_task = current;
-+              list_add_tail(&lck->lk_blocked_list, &lhead->lh_blocked_list);
-+
-+retry:
-+              set_current_state(TASK_UNINTERRUPTIBLE);
-+              htree_spin_unlock(lhead, HTREE_DEP_ROOT);
-+              /* wait to be given the lock */
-+              if (lck->lk_task != NULL)
-+                      schedule();
-+              /* granted, no doubt; the wake-up set me RUNNING.
-+               * Since the thread could have been woken up spuriously,
-+               * we need to check again whether the lock was granted. */
-+              if (!list_empty(&lck->lk_blocked_list)) {
-+                      htree_spin_lock(lhead, HTREE_DEP_ROOT);
-+                      if (list_empty(&lck->lk_blocked_list)) {
-+                              htree_spin_unlock(lhead, HTREE_DEP_ROOT);
-+                              return 0;
-+                      }
-+                      goto retry;
-+              }
-+              return 0; /* without lh_lock */
-+      }
-+      lhead->lh_ngranted[lck->lk_mode]++;
-+      lk_grant_inc(lck->lk_mode);
-+      return 1;
-+}
-+
-+/* release htree lock. NB: ALWAYS called holding lhead::lh_lock */
-+static void
-+htree_unlock_internal(struct htree_lock *lck)
-+{
-+      struct htree_lock_head *lhead = lck->lk_head;
-+      struct htree_lock *tmp;
-+      struct htree_lock *tmp2;
-+      int granted = 0;
-+      int i;
-+
-+      BUG_ON(lhead->lh_ngranted[lck->lk_mode] == 0);
-+
-+      lhead->lh_ngranted[lck->lk_mode]--;
-+      lck->lk_mode = HTREE_LOCK_INVAL;
-+
-+      for (i = 0; i < HTREE_LOCK_MAX; i++) {
-+              if (lhead->lh_ngranted[i] != 0)
-+                      granted |= 1 << i;
-+      }
-+      list_for_each_entry_safe(tmp, tmp2,
-+                               &lhead->lh_blocked_list, lk_blocked_list) {
-+              /* conflict with any granted lock? */
-+              if ((htree_lock_compat[tmp->lk_mode] & granted) != granted)
-+                      break;
-+
-+              list_del_init(&tmp->lk_blocked_list);
-+
-+              BUG_ON(lhead->lh_nblocked[tmp->lk_mode] == 0);
-+
-+              lhead->lh_nblocked[tmp->lk_mode]--;
-+              lhead->lh_ngranted[tmp->lk_mode]++;
-+              granted |= 1 << tmp->lk_mode;
-+
-+              BUG_ON(tmp->lk_task == NULL);
-+              wake_up_process(tmp->lk_task);
-+      }
-+}
-+
-+/* wrapper of htree_lock_internal and the exported interface.
-+ * It always returns 1 with the lock granted if @wait is true; it can return 0
-+ * if @wait is false and the locking request can't be granted immediately */
-+int
-+htree_lock_try(struct htree_lock *lck, struct htree_lock_head *lhead,
-+             htree_lock_mode_t mode, int wait)
-+{
-+      int     rc;
-+
-+      BUG_ON(lck->lk_depth > lhead->lh_depth);
-+      BUG_ON(lck->lk_head != NULL);
-+      BUG_ON(lck->lk_task != NULL);
-+
-+      lck->lk_head = lhead;
-+      lck->lk_mode = mode;
-+
-+      htree_spin_lock(lhead, HTREE_DEP_ROOT);
-+      rc = htree_lock_internal(lck, wait);
-+      if (rc != 0)
-+              htree_spin_unlock(lhead, HTREE_DEP_ROOT);
-+      return rc >= 0;
-+}
-+EXPORT_SYMBOL(htree_lock_try);
-+
-+/* wrapper of htree_unlock_internal and the exported interface.
-+ * It releases all htree_node locks and the htree_lock itself */
-+void
-+htree_unlock(struct htree_lock *lck)
-+{
-+      BUG_ON(lck->lk_head == NULL);
-+      BUG_ON(lck->lk_mode == HTREE_LOCK_INVAL);
-+
-+      htree_node_release_all(lck);
-+
-+      htree_spin_lock(lck->lk_head, HTREE_DEP_ROOT);
-+      htree_unlock_internal(lck);
-+      htree_spin_unlock(lck->lk_head, HTREE_DEP_ROOT);
-+      lck->lk_head = NULL;
-+      lck->lk_task = NULL;
-+}
-+EXPORT_SYMBOL(htree_unlock);
-+
-+/* change lock mode */
-+void
-+htree_change_mode(struct htree_lock *lck, htree_lock_mode_t mode)
-+{
-+      BUG_ON(lck->lk_mode == HTREE_LOCK_INVAL);
-+      lck->lk_mode = mode;
-+}
-+EXPORT_SYMBOL(htree_change_mode);
-+
-+/* release the htree lock, and lock it again with a new mode.
-+ * This function first releases all htree_node locks and the htree_lock,
-+ * then tries to regain the htree_lock with the new @mode.
-+ * It always returns 1 with the lock granted if @wait is true; it can return 0
-+ * if @wait is false and the locking request can't be granted immediately */
-+int
-+htree_change_lock_try(struct htree_lock *lck, htree_lock_mode_t mode, int wait)
-+{
-+      struct htree_lock_head *lhead = lck->lk_head;
-+      int rc;
-+
-+      BUG_ON(lhead == NULL);
-+      BUG_ON(lck->lk_mode == mode);
-+      BUG_ON(lck->lk_mode == HTREE_LOCK_INVAL || mode == HTREE_LOCK_INVAL);
-+
-+      htree_node_release_all(lck);
-+
-+      htree_spin_lock(lhead, HTREE_DEP_ROOT);
-+      htree_unlock_internal(lck);
-+      lck->lk_mode = mode;
-+      rc = htree_lock_internal(lck, wait);
-+      if (rc != 0)
-+              htree_spin_unlock(lhead, HTREE_DEP_ROOT);
-+      return rc >= 0;
-+}
-+EXPORT_SYMBOL(htree_change_lock_try);
-+
-+/* create an htree_lock head with @depth levels (number of child locks);
-+ * it is a per-resource structure */
-+struct htree_lock_head *
-+htree_lock_head_alloc(unsigned depth, unsigned hbits, unsigned priv)
-+{
-+      struct htree_lock_head *lhead;
-+      int  i;
-+
-+      if (depth > HTREE_LOCK_DEP_MAX) {
-+              printk(KERN_ERR "%d is larger than max htree_lock depth %d\n",
-+                      depth, HTREE_LOCK_DEP_MAX);
-+              return NULL;
-+      }
-+
-+      lhead = kzalloc(offsetof(struct htree_lock_head,
-+                               lh_children[depth]) + priv, GFP_NOFS);
-+      if (lhead == NULL)
-+              return NULL;
-+
-+      if (hbits < HTREE_HBITS_MIN)
-+              lhead->lh_hbits = HTREE_HBITS_MIN;
-+      else if (hbits > HTREE_HBITS_MAX)
-+              lhead->lh_hbits = HTREE_HBITS_MAX;
-+      else
-+              lhead->lh_hbits = hbits;
-+
-+      lhead->lh_lock = 0;
-+      lhead->lh_depth = depth;
-+      INIT_LIST_HEAD(&lhead->lh_blocked_list);
-+      if (priv > 0) {
-+              lhead->lh_private = (void *)lhead +
-+                      offsetof(struct htree_lock_head, lh_children[depth]);
-+      }
-+
-+      for (i = 0; i < depth; i++) {
-+              INIT_LIST_HEAD(&lhead->lh_children[i].lc_list);
-+              lhead->lh_children[i].lc_events = HTREE_EVENT_DISABLE;
-+      }
-+      return lhead;
-+}
-+EXPORT_SYMBOL(htree_lock_head_alloc);
-+
-+/* free the htree_lock head */
-+void
-+htree_lock_head_free(struct htree_lock_head *lhead)
-+{
-+      int     i;
-+
-+      BUG_ON(!list_empty(&lhead->lh_blocked_list));
-+      for (i = 0; i < lhead->lh_depth; i++)
-+              BUG_ON(!list_empty(&lhead->lh_children[i].lc_list));
-+      kfree(lhead);
-+}
-+EXPORT_SYMBOL(htree_lock_head_free);
-+
-+/* register event callback for @events of child-lock at level @dep */
-+void
-+htree_lock_event_attach(struct htree_lock_head *lhead, unsigned dep,
-+                      unsigned events, htree_event_cb_t callback)
-+{
-+      BUG_ON(lhead->lh_depth <= dep);
-+      lhead->lh_children[dep].lc_events = events;
-+      lhead->lh_children[dep].lc_callback = callback;
-+}
-+EXPORT_SYMBOL(htree_lock_event_attach);
-+
-+/* allocate an htree_lock, which is a per-thread structure; @pbytes is
-+ * extra space reserved as private data for the caller */
-+struct htree_lock *
-+htree_lock_alloc(unsigned depth, unsigned pbytes)
-+{
-+      struct htree_lock *lck;
-+      int i = offsetof(struct htree_lock, lk_nodes[depth]);
-+
-+      if (depth > HTREE_LOCK_DEP_MAX) {
-+              printk(KERN_ERR "%d is larger than max htree_lock depth %d\n",
-+                      depth, HTREE_LOCK_DEP_MAX);
-+              return NULL;
-+      }
-+      lck = kzalloc(i + pbytes, GFP_NOFS);
-+      if (lck == NULL)
-+              return NULL;
-+
-+      if (pbytes != 0)
-+              lck->lk_private = (void *)lck + i;
-+      lck->lk_mode = HTREE_LOCK_INVAL;
-+      lck->lk_depth = depth;
-+      INIT_LIST_HEAD(&lck->lk_blocked_list);
-+
-+      for (i = 0; i < depth; i++) {
-+              struct htree_lock_node *node = &lck->lk_nodes[i];
-+
-+              node->ln_mode = HTREE_LOCK_INVAL;
-+              INIT_LIST_HEAD(&node->ln_major_list);
-+              INIT_LIST_HEAD(&node->ln_minor_list);
-+              INIT_LIST_HEAD(&node->ln_alive_list);
-+              INIT_LIST_HEAD(&node->ln_blocked_list);
-+              INIT_LIST_HEAD(&node->ln_granted_list);
-+      }
-+
-+      return lck;
-+}
-+EXPORT_SYMBOL(htree_lock_alloc);
-+
-+/* free htree_lock node */
-+void
-+htree_lock_free(struct htree_lock *lck)
-+{
-+      BUG_ON(lck->lk_mode != HTREE_LOCK_INVAL);
-+      kfree(lck);
-+}
-+EXPORT_SYMBOL(htree_lock_free);
-diff --git a/fs/ext4/namei.c b/fs/ext4/namei.c
-index 2543b8f..e70b61a 100644
---- a/fs/ext4/namei.c
-+++ b/fs/ext4/namei.c
-@@ -52,6 +52,7 @@ struct buffer_head *ext4_append(handle_t *handle,
-                                       ext4_lblk_t *block)
- {
-       struct buffer_head *bh;
-+      struct ext4_inode_info *ei = EXT4_I(inode);
-       int err;
-       if (unlikely(EXT4_SB(inode->i_sb)->s_max_dir_size_kb &&
-@@ -59,15 +60,22 @@ struct buffer_head *ext4_append(handle_t *handle,
-                     EXT4_SB(inode->i_sb)->s_max_dir_size_kb)))
-               return ERR_PTR(-ENOSPC);
-+      /* with parallel dir operations all appends
-+       * have to be serialized -bzzz */
-+      down(&ei->i_append_sem);
-+
-       *block = inode->i_size >> inode->i_sb->s_blocksize_bits;
-       bh = ext4_bread(handle, inode, *block, EXT4_GET_BLOCKS_CREATE);
--      if (IS_ERR(bh))
-+      if (IS_ERR(bh)) {
-+              up(&ei->i_append_sem);
-               return bh;
-+      }
-       inode->i_size += inode->i_sb->s_blocksize;
-       EXT4_I(inode)->i_disksize = inode->i_size;
-       BUFFER_TRACE(bh, "get_write_access");
-       err = ext4_journal_get_write_access(handle, bh);
-+      up(&ei->i_append_sem);
-       if (err) {
-               brelse(bh);
-               ext4_std_error(inode->i_sb, err);
-@@ -247,7 +255,8 @@ static unsigned dx_node_limit(struct inode *dir);
- static struct dx_frame *dx_probe(struct ext4_filename *fname,
-                                struct inode *dir,
-                                struct dx_hash_info *hinfo,
--                               struct dx_frame *frame);
-+                               struct dx_frame *frame,
-+                               struct htree_lock *lck);
- static void dx_release(struct dx_frame *frames);
- static int dx_make_map(struct inode *dir, struct ext4_dir_entry_2 *de,
-                      unsigned blocksize, struct dx_hash_info *hinfo,
-@@ -261,12 +270,13 @@ static void dx_insert_block(struct dx_frame *frame,
- static int ext4_htree_next_block(struct inode *dir, __u32 hash,
-                                struct dx_frame *frame,
-                                struct dx_frame *frames,
--                               __u32 *start_hash);
-+                               __u32 *start_hash, struct htree_lock *lck);
- static struct buffer_head * ext4_dx_find_entry(struct inode *dir,
-               struct ext4_filename *fname,
--              struct ext4_dir_entry_2 **res_dir);
-+              struct ext4_dir_entry_2 **res_dir, struct htree_lock *lck);
- static int ext4_dx_add_entry(handle_t *handle, struct ext4_filename *fname,
--                           struct dentry *dentry, struct inode *inode);
-+                           struct dentry *dentry, struct inode *inode,
-+                           struct htree_lock *lck);
- /* checksumming functions */
- void initialize_dirent_tail(struct ext4_dir_entry_tail *t,
-@@ -733,6 +743,227 @@ struct stats dx_show_entries(struct dx_hash_info *hinfo, struct inode *dir,
- }
- #endif /* DX_DEBUG */
-+/* private data for htree_lock */
-+struct ext4_dir_lock_data {
-+      unsigned                ld_flags;  /* bits-map for lock types */
-+      unsigned                ld_count;  /* # entries of the last DX block */
-+      struct dx_entry         ld_at_entry; /* copy of leaf dx_entry */
-+      struct dx_entry         *ld_at;    /* position of leaf dx_entry */
-+};
-+
-+#define ext4_htree_lock_data(l)       ((struct ext4_dir_lock_data *)(l)->lk_private)
-+#define ext4_find_entry(dir, name, dirent, inline) \
-+                      __ext4_find_entry(dir, name, dirent, inline, NULL)
-+#define ext4_add_entry(handle, dentry, inode) \
-+                      __ext4_add_entry(handle, dentry, inode, NULL)
-+
-+/* NB: ext4_lblk_t is 32 bits so we use high bits to identify invalid blk */
-+#define EXT4_HTREE_NODE_CHANGED       (0xcafeULL << 32)
-+
-+static void ext4_htree_event_cb(void *target, void *event)
-+{
-+      u64 *block = (u64 *)target;
-+
-+      if (*block == dx_get_block((struct dx_entry *)event))
-+              *block = EXT4_HTREE_NODE_CHANGED;
-+}
-+
-+struct htree_lock_head *ext4_htree_lock_head_alloc(unsigned hbits)
-+{
-+      struct htree_lock_head *lhead;
-+
-+      lhead = htree_lock_head_alloc(EXT4_LK_MAX, hbits, 0);
-+      if (lhead != NULL) {
-+              htree_lock_event_attach(lhead, EXT4_LK_SPIN, HTREE_EVENT_WR,
-+                                      ext4_htree_event_cb);
-+      }
-+      return lhead;
-+}
-+EXPORT_SYMBOL(ext4_htree_lock_head_alloc);
-+
-+struct htree_lock *ext4_htree_lock_alloc(void)
-+{
-+      return htree_lock_alloc(EXT4_LK_MAX,
-+                              sizeof(struct ext4_dir_lock_data));
-+}
-+EXPORT_SYMBOL(ext4_htree_lock_alloc);
-+
-+static htree_lock_mode_t ext4_htree_mode(unsigned flags)
-+{
-+      switch (flags) {
-+      default: /* 0 or unknown flags require EX lock */
-+              return HTREE_LOCK_EX;
-+      case EXT4_HLOCK_READDIR:
-+              return HTREE_LOCK_PR;
-+      case EXT4_HLOCK_LOOKUP:
-+              return HTREE_LOCK_CR;
-+      case EXT4_HLOCK_DEL:
-+      case EXT4_HLOCK_ADD:
-+              return HTREE_LOCK_CW;
-+      }
-+}
-+
-+/* return PR for read-only operations, otherwise return EX */
-+static inline htree_lock_mode_t ext4_htree_safe_mode(unsigned flags)
-+{
-+      int writer = (flags & EXT4_LB_DE) == EXT4_LB_DE;
-+
-+      /* 0 requires EX lock */
-+      return (flags == 0 || writer) ? HTREE_LOCK_EX : HTREE_LOCK_PR;
-+}
-+
-+static int ext4_htree_safe_locked(struct htree_lock *lck)
-+{
-+      int writer;
-+
-+      if (lck == NULL || lck->lk_mode == HTREE_LOCK_EX)
-+              return 1;
-+
-+      writer = (ext4_htree_lock_data(lck)->ld_flags & EXT4_LB_DE) ==
-+               EXT4_LB_DE;
-+      if (writer) /* all readers & writers are excluded? */
-+              return lck->lk_mode == HTREE_LOCK_EX;
-+
-+      /* all writers are excluded? */
-+      return lck->lk_mode == HTREE_LOCK_PR ||
-+             lck->lk_mode == HTREE_LOCK_PW ||
-+             lck->lk_mode == HTREE_LOCK_EX;
-+}
-+
-+/* relock htree_lock with EX mode if it's a change operation, otherwise
-+ * relock it with PR mode. It's a noop if PDO is disabled. */
-+static void ext4_htree_safe_relock(struct htree_lock *lck)
-+{
-+      if (!ext4_htree_safe_locked(lck)) {
-+              unsigned flags = ext4_htree_lock_data(lck)->ld_flags;
-+
-+              htree_change_lock(lck, ext4_htree_safe_mode(flags));
-+      }
-+}
-+
-+void ext4_htree_lock(struct htree_lock *lck, struct htree_lock_head *lhead,
-+                   struct inode *dir, unsigned flags)
-+{
-+      htree_lock_mode_t mode = is_dx(dir) ? ext4_htree_mode(flags) :
-+                                            ext4_htree_safe_mode(flags);
-+
-+      ext4_htree_lock_data(lck)->ld_flags = flags;
-+      htree_lock(lck, lhead, mode);
-+      if (!is_dx(dir))
-+              ext4_htree_safe_relock(lck); /* make sure it's safe locked */
-+}
-+EXPORT_SYMBOL(ext4_htree_lock);
-+
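-+/* Lock one child level selected by @lmask: ffz(~lmask) turns the lowest
-+ * set bit of @lmask back into the level index (EXT4_LK_DX/DE/SPIN).
-+ * PW is taken when ld_flags contains both the read and write bits of
-+ * @lmask, PR otherwise; the spin-type lock busy-waits until granted. */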
-+static int ext4_htree_node_lock(struct htree_lock *lck, struct dx_entry *at,
-+                              unsigned lmask, int wait, void *ev)
-+{
-+      u32     key = (at == NULL) ? 0 : dx_get_block(at);
-+      u32     mode;
-+
-+      /* NOOP if htree is well protected or caller doesn't require the lock */
-+      if (ext4_htree_safe_locked(lck) ||
-+         !(ext4_htree_lock_data(lck)->ld_flags & lmask))
-+              return 1;
-+
-+      mode = (ext4_htree_lock_data(lck)->ld_flags & lmask) == lmask ?
-+              HTREE_LOCK_PW : HTREE_LOCK_PR;
-+      while (1) {
-+              if (htree_node_lock_try(lck, mode, key, ffz(~lmask), wait, ev))
-+                      return 1;
-+              if (!(lmask & EXT4_LB_SPIN)) /* not a spinlock */
-+                      return 0;
-+              cpu_relax(); /* spin until granted */
-+      }
-+}
-+
-+static int ext4_htree_node_locked(struct htree_lock *lck, unsigned lmask)
-+{
-+      return ext4_htree_safe_locked(lck) ||
-+             htree_node_is_granted(lck, ffz(~lmask));
-+}
-+
-+static void ext4_htree_node_unlock(struct htree_lock *lck,
-+                                 unsigned lmask, void *buf)
-+{
-+      /* NB: it's safe to call this multiple times, even if it's not locked */
-+      if (!ext4_htree_safe_locked(lck) &&
-+           htree_node_is_granted(lck, ffz(~lmask)))
-+              htree_node_unlock(lck, ffz(~lmask), buf);
-+}
-+
-+#define ext4_htree_dx_lock(lck, key)          \
-+      ext4_htree_node_lock(lck, key, EXT4_LB_DX, 1, NULL)
-+#define ext4_htree_dx_lock_try(lck, key)      \
-+      ext4_htree_node_lock(lck, key, EXT4_LB_DX, 0, NULL)
-+#define ext4_htree_dx_unlock(lck)             \
-+      ext4_htree_node_unlock(lck, EXT4_LB_DX, NULL)
-+#define ext4_htree_dx_locked(lck)             \
-+      ext4_htree_node_locked(lck, EXT4_LB_DX)
-+
-+static void ext4_htree_dx_need_lock(struct htree_lock *lck)
-+{
-+      struct ext4_dir_lock_data *ld;
-+
-+      if (ext4_htree_safe_locked(lck))
-+              return;
-+
-+      ld = ext4_htree_lock_data(lck);
-+      switch (ld->ld_flags) {
-+      default:
-+              return;
-+      case EXT4_HLOCK_LOOKUP:
-+              ld->ld_flags = EXT4_HLOCK_LOOKUP_SAFE;
-+              return;
-+      case EXT4_HLOCK_DEL:
-+              ld->ld_flags = EXT4_HLOCK_DEL_SAFE;
-+              return;
-+      case EXT4_HLOCK_ADD:
-+              ld->ld_flags = EXT4_HLOCK_SPLIT;
-+              return;
-+      }
-+}
-+
-+#define ext4_htree_de_lock(lck, key)          \
-+      ext4_htree_node_lock(lck, key, EXT4_LB_DE, 1, NULL)
-+#define ext4_htree_de_unlock(lck)             \
-+      ext4_htree_node_unlock(lck, EXT4_LB_DE, NULL)
-+
-+#define ext4_htree_spin_lock(lck, key, event) \
-+      ext4_htree_node_lock(lck, key, EXT4_LB_SPIN, 0, event)
-+#define ext4_htree_spin_unlock(lck)           \
-+      ext4_htree_node_unlock(lck, EXT4_LB_SPIN, NULL)
-+#define ext4_htree_spin_unlock_listen(lck, p) \
-+      ext4_htree_node_unlock(lck, EXT4_LB_SPIN, p)
-+
-+static void ext4_htree_spin_stop_listen(struct htree_lock *lck)
-+{
-+      if (!ext4_htree_safe_locked(lck) &&
-+          htree_node_is_listening(lck, ffz(~EXT4_LB_SPIN)))
-+              htree_node_stop_listen(lck, ffz(~EXT4_LB_SPIN));
-+}
-+
-+enum {
-+      DX_HASH_COL_IGNORE,     /* ignore collision while probing frames */
-+      DX_HASH_COL_YES,        /* there is a collision and it does matter */
-+      DX_HASH_COL_NO,         /* there is no collision */
-+};
-+
-+static int dx_probe_hash_collision(struct htree_lock *lck,
-+                                 struct dx_entry *entries,
-+                                 struct dx_entry *at, u32 hash)
-+{
-+      if (!(lck && ext4_htree_lock_data(lck)->ld_flags & EXT4_LB_EXACT)) {
-+              return DX_HASH_COL_IGNORE; /* don't care about collision */
-+
-+      } else if (at == entries + dx_get_count(entries) - 1) {
-+              return DX_HASH_COL_IGNORE; /* not in any leaf of this DX */
-+
-+      } else { /* hash collision? */
-+              return ((dx_get_hash(at + 1) & ~1) == hash) ?
-+                      DX_HASH_COL_YES : DX_HASH_COL_NO;
-+      }
-+}
-+
- /*
-  * Probe for a directory leaf block to search.
-  *
-@@ -744,10 +975,11 @@ struct stats dx_show_entries(struct dx_hash_info *hinfo, struct inode *dir,
-  */
- static struct dx_frame *
- dx_probe(struct ext4_filename *fname, struct inode *dir,
--       struct dx_hash_info *hinfo, struct dx_frame *frame_in)
-+       struct dx_hash_info *hinfo, struct dx_frame *frame_in,
-+       struct htree_lock *lck)
- {
-       unsigned count, indirect;
--      struct dx_entry *at, *entries, *p, *q, *m;
-+      struct dx_entry *at, *entries, *p, *q, *m, *dx = NULL;
-       struct dx_root_info *info;
-       struct dx_frame *frame = frame_in;
-       struct dx_frame *ret_err = ERR_PTR(ERR_BAD_DX_DIR);
-@@ -808,8 +1040,15 @@ dx_probe(struct ext4_filename *fname, struct inode *dir,
-       dxtrace(printk("Look up %x", hash));
-       while (1) {
-+              if (indirect == 0) { /* the last index level */
-+                      /* NB: ext4_htree_dx_lock() could be a noop if the
-+                       * DX-lock flag is not set for the current operation */
-+                      ext4_htree_dx_lock(lck, dx);
-+                      ext4_htree_spin_lock(lck, dx, NULL);
-+              }
-               count = dx_get_count(entries);
--              if (!count || count > dx_get_limit(entries)) {
-+              if (count == 0 || count > dx_get_limit(entries)) {
-+                      ext4_htree_spin_unlock(lck); /* release spin */
-                       ext4_warning_inode(dir,
-                                          "dx entry: count %u beyond limit %u",
-                                          count, dx_get_limit(entries));
-@@ -847,8 +1086,70 @@ dx_probe(struct ext4_filename *fname, struct inode *dir,
-                              dx_get_block(at)));
-               frame->entries = entries;
-               frame->at = at;
--              if (!indirect--)
-+
-+              if (indirect == 0) { /* the last index level */
-+                      struct ext4_dir_lock_data *ld;
-+                      u64 myblock;
-+
-+                      /* By default we only lock the DE-block; however, we
-+                       * will also lock the last-level DX-block if:
-+                       * a) there is a hash collision
-+                       *    we will set the DX-lock flag (a few lines below)
-+                       *    and retry to lock the DX-block,
-+                       *    see details in dx_probe_hash_collision()
-+                       * b) it's a retry from splitting
-+                       *    we need to lock the last-level DX-block so nobody
-+                       *    else can split any leaf blocks under the same
-+                       *    DX-block, see details in ext4_dx_add_entry()
-+                       */
-+                      if (ext4_htree_dx_locked(lck)) {
-+                              /* DX-block is locked, just lock DE-block
-+                               * and return */
-+                              ext4_htree_spin_unlock(lck);
-+                              if (!ext4_htree_safe_locked(lck))
-+                                      ext4_htree_de_lock(lck, frame->at);
-+                              return frame;
-+                      }
-+                      /* it's pdirop and no DX lock */
-+                      if (dx_probe_hash_collision(lck, entries, at, hash) ==
-+                          DX_HASH_COL_YES) {
-+                              /* found a hash collision, set the DX-lock flag
-+                               * and retry to obtain the DX-lock */
-+                              ext4_htree_spin_unlock(lck);
-+                              ext4_htree_dx_need_lock(lck);
-+                              continue;
-+                      }
-+                      ld = ext4_htree_lock_data(lck);
-+                      /* because I don't hold the DX lock, @at can't be
-+                       * trusted after I release the spinlock, so save it */
-+                      ld->ld_at = at;
-+                      ld->ld_at_entry = *at;
-+                      ld->ld_count = dx_get_count(entries);
-+
-+                      frame->at = &ld->ld_at_entry;
-+                      myblock = dx_get_block(at);
-+
-+                      /* NB: lock ordering matters here */
-+                      ext4_htree_spin_unlock_listen(lck, &myblock);
-+                      /* another thread can split this DE-block because:
-+                       * a) I don't hold the lock for the DE-block yet
-+                       * b) I released the spinlock on the DX-block
-+                       * If that happens I can detect it by listening for a
-+                       * split event on this DE-block */
-+                      ext4_htree_de_lock(lck, frame->at);
-+                      ext4_htree_spin_stop_listen(lck);
-+
-+                      if (myblock == EXT4_HTREE_NODE_CHANGED) {
-+                              /* someone split this DE-block before
-+                               * I locked it; I need to retry and lock
-+                               * a valid DE-block */
-+                              ext4_htree_de_unlock(lck);
-+                              continue;
-+                      }
-                       return frame;
-+              }
-+              dx = at;
-+              indirect--;
-               frame++;
-               frame->bh = ext4_read_dirblock(dir, dx_get_block(at), INDEX);
-               if (IS_ERR(frame->bh)) {
-@@ -915,7 +1216,7 @@ static void dx_release(struct dx_frame *frames)
- static int ext4_htree_next_block(struct inode *dir, __u32 hash,
-                                struct dx_frame *frame,
-                                struct dx_frame *frames,
--                               __u32 *start_hash)
-+                               __u32 *start_hash, struct htree_lock *lck)
- {
-       struct dx_frame *p;
-       struct buffer_head *bh;
-@@ -930,12 +1231,22 @@ static int ext4_htree_next_block(struct inode *dir, __u32 hash,
-        * this loop, num_frames indicates the number of interior
-        * nodes need to be read.
-        */
-+      ext4_htree_de_unlock(lck);
-       while (1) {
--              if (++(p->at) < p->entries + dx_get_count(p->entries))
--                      break;
-+              if (num_frames > 0 || ext4_htree_dx_locked(lck)) {
-+                      /* num_frames > 0 :
-+                       *   DX block
-+                       * ext4_htree_dx_locked:
-+                       *   frame->at is a reliable pointer returned by dx_probe,
-+                       *   otherwise dx_probe already knew no collision */
-+                      if (++(p->at) < p->entries + dx_get_count(p->entries))
-+                              break;
-+              }
-               if (p == frames)
-                       return 0;
-               num_frames++;
-+              if (num_frames == 1)
-+                      ext4_htree_dx_unlock(lck);
-               p--;
-       }
-@@ -958,6 +1269,13 @@ static int ext4_htree_next_block(struct inode *dir, __u32 hash,
-        * block so no check is necessary
-        */
-       while (num_frames--) {
-+              if (num_frames == 0) {
-+                      /* it's not always necessary, we just don't want to
-+                       * detect hash collision again */
-+                      ext4_htree_dx_need_lock(lck);
-+                      ext4_htree_dx_lock(lck, p->at);
-+              }
-+
-               bh = ext4_read_dirblock(dir, dx_get_block(p->at), INDEX);
-               if (IS_ERR(bh))
-                       return PTR_ERR(bh);
-@@ -966,6 +1284,7 @@ static int ext4_htree_next_block(struct inode *dir, __u32 hash,
-               p->bh = bh;
-               p->at = p->entries = ((struct dx_node *) bh->b_data)->entries;
-       }
-+      ext4_htree_de_lock(lck, p->at);
-       return 1;
- }
-@@ -1110,10 +1429,10 @@ int ext4_htree_fill_tree(struct file *dir_file, __u32 start_hash,
-       }
-       hinfo.hash = start_hash;
-       hinfo.minor_hash = 0;
--      frame = dx_probe(NULL, dir, &hinfo, frames);
-+      /* assume it's PR locked */
-+      frame = dx_probe(NULL, dir, &hinfo, frames, NULL);
-       if (IS_ERR(frame))
-               return PTR_ERR(frame);
--
-       /* Add '.' and '..' from the htree header */
-       if (!start_hash && !start_minor_hash) {
-               de = (struct ext4_dir_entry_2 *) frames[0].bh->b_data;
-@@ -1148,7 +1467,7 @@ int ext4_htree_fill_tree(struct file *dir_file, __u32 start_hash,
-               count += ret;
-               hashval = ~0;
-               ret = ext4_htree_next_block(dir, HASH_NB_ALWAYS,
--                                          frame, frames, &hashval);
-+                                          frame, frames, &hashval, NULL);
-               *next_hash = hashval;
-               if (ret < 0) {
-                       err = ret;
-@@ -1372,10 +1691,10 @@ static int is_dx_internal_node(struct inode *dir, ext4_lblk_t block,
-  * The returned buffer_head has ->b_count elevated.  The caller is expected
-  * to brelse() it when appropriate.
-  */
--static struct buffer_head * ext4_find_entry (struct inode *dir,
-+struct buffer_head *__ext4_find_entry(struct inode *dir,
-                                       const struct qstr *d_name,
-                                       struct ext4_dir_entry_2 **res_dir,
--                                      int *inlined)
-+                                      int *inlined, struct htree_lock *lck)
- {
-       struct super_block *sb;
-       struct buffer_head *bh_use[NAMEI_RA_SIZE];
-@@ -1423,7 +1742,7 @@ static struct buffer_head * ext4_find_entry (struct inode *dir,
-               goto restart;
-       }
-       if (is_dx(dir)) {
--              ret = ext4_dx_find_entry(dir, &fname, res_dir);
-+              ret = ext4_dx_find_entry(dir, &fname, res_dir, lck);
-               /*
-                * On success, or if the error was file not found,
-                * return.  Otherwise, fall back to doing a search the
-@@ -1433,6 +1752,7 @@ static struct buffer_head * ext4_find_entry (struct inode *dir,
-                       goto cleanup_and_exit;
-               dxtrace(printk(KERN_DEBUG "ext4_find_entry: dx failed, "
-                              "falling back\n"));
-+              ext4_htree_safe_relock(lck);
-       }
-       nblocks = dir->i_size >> EXT4_BLOCK_SIZE_BITS(sb);
-       start = EXT4_I(dir)->i_dir_start_lookup;
-@@ -1528,10 +1848,12 @@ cleanup_and_exit:
-       ext4_fname_free_filename(&fname);
-       return ret;
- }
-+EXPORT_SYMBOL(__ext4_find_entry);
- static struct buffer_head * ext4_dx_find_entry(struct inode *dir,
-                       struct ext4_filename *fname,
--                      struct ext4_dir_entry_2 **res_dir)
-+                      struct ext4_dir_entry_2 **res_dir,
-+                      struct htree_lock *lck)
- {
-       struct super_block * sb = dir->i_sb;
-       struct dx_frame frames[EXT4_HTREE_LEVEL], *frame;
-@@ -1543,7 +1865,7 @@ static struct buffer_head * ext4_dx_find_entry(struct inode *dir,
- #ifdef CONFIG_EXT4_FS_ENCRYPTION
-       *res_dir = NULL;
- #endif
--      frame = dx_probe(fname, dir, NULL, frames);
-+      frame = dx_probe(fname, dir, NULL, frames, lck);
-       if (IS_ERR(frame))
-               return (struct buffer_head *) frame;
-       do {
-@@ -1565,7 +1887,7 @@ static struct buffer_head * ext4_dx_find_entry(struct inode *dir,
-               /* Check to see if we should continue to search */
-               retval = ext4_htree_next_block(dir, fname->hinfo.hash, frame,
--                                             frames, NULL);
-+                                             frames, NULL, lck);
-               if (retval < 0) {
-                       ext4_warning_inode(dir,
-                               "error %d reading directory index block",
-@@ -1738,8 +2060,9 @@ static struct ext4_dir_entry_2* dx_pack_dirents(char *base, unsigned blocksize)
-  * Returns pointer to de in block into which the new entry will be inserted.
-  */
- static struct ext4_dir_entry_2 *do_split(handle_t *handle, struct inode *dir,
--                      struct buffer_head **bh,struct dx_frame *frame,
--                      struct dx_hash_info *hinfo)
-+                      struct buffer_head **bh, struct dx_frame *frames,
-+                      struct dx_frame *frame, struct dx_hash_info *hinfo,
-+                      struct htree_lock *lck)
- {
-       unsigned blocksize = dir->i_sb->s_blocksize;
-       unsigned count, continued;
-@@ -1801,8 +2124,14 @@ static struct ext4_dir_entry_2 *do_split(handle_t *handle, struct inode *dir,
-                                       hash2, split, count-split));
-       /* Fancy dance to stay within two buffers */
--      de2 = dx_move_dirents(data1, data2, map + split, count - split,
--                            blocksize);
-+      if (hinfo->hash < hash2) {
-+              de2 = dx_move_dirents(data1, data2, map + split,
-+                                    count - split, blocksize);
-+      } else {
-+              /* make sure we will add entry to the same block which
-+               * we have already locked */
-+              de2 = dx_move_dirents(data1, data2, map, split, blocksize);
-+      }
-       de = dx_pack_dirents(data1, blocksize);
-       de->rec_len = ext4_rec_len_to_disk(data1 + (blocksize - csum_size) -
-                                          (char *) de,
-@@ -1823,12 +2152,21 @@ static struct ext4_dir_entry_2 *do_split(handle_t *handle, struct inode *dir,
-       dxtrace(dx_show_leaf(dir, hinfo, (struct ext4_dir_entry_2 *) data2,
-                       blocksize, 1));
--      /* Which block gets the new entry? */
--      if (hinfo->hash >= hash2) {
--              swap(*bh, bh2);
--              de = de2;
-+      ext4_htree_spin_lock(lck, frame > frames ? (frame - 1)->at : NULL,
-+                           frame->at); /* notify block is being split */
-+      if (hinfo->hash < hash2) {
-+              dx_insert_block(frame, hash2 + continued, newblock);
-+
-+      } else {
-+              /* switch block number */
-+              dx_insert_block(frame, hash2 + continued,
-+                              dx_get_block(frame->at));
-+              dx_set_block(frame->at, newblock);
-+              (frame->at)++;
-       }
--      dx_insert_block(frame, hash2 + continued, newblock);
-+      ext4_htree_spin_unlock(lck);
-+      ext4_htree_dx_unlock(lck);
-+
-       err = ext4_handle_dirty_dirent_node(handle, dir, bh2);
-       if (err)
-               goto journal_error;
-@@ -2121,7 +2459,7 @@ static int make_indexed_dir(handle_t *handle, struct ext4_filename *fname,
-       if (retval)
-               goto out_frames;        
--      de = do_split(handle,dir, &bh2, frame, &fname->hinfo);
-+      de = do_split(handle,dir, &bh2, frames, frame, &fname->hinfo, NULL);
-       if (IS_ERR(de)) {
-               retval = PTR_ERR(de);
-               goto out_frames;
-@@ -2231,8 +2569,8 @@ out:
-  * may not sleep between calling this and putting something into
-  * the entry, as someone else might have used it while you slept.
-  */
--static int ext4_add_entry(handle_t *handle, struct dentry *dentry,
--                        struct inode *inode)
-+int __ext4_add_entry(handle_t *handle, struct dentry *dentry,
-+                    struct inode *inode, struct htree_lock *lck)
- {
-       struct inode *dir = d_inode(dentry->d_parent);
-       struct buffer_head *bh = NULL;
-@@ -2273,9 +2611,10 @@ static int ext4_add_entry(handle_t *handle, struct dentry *dentry,
-               if (dentry->d_name.len == 2 &&
-                   memcmp(dentry->d_name.name, "..", 2) == 0)
-                       return ext4_update_dotdot(handle, dentry, inode);
--              retval = ext4_dx_add_entry(handle, &fname, dentry, inode);
-+              retval = ext4_dx_add_entry(handle, &fname, dentry, inode, lck);
-               if (!retval || (retval != ERR_BAD_DX_DIR))
-                       goto out;
-+              ext4_htree_safe_relock(lck);
-               ext4_clear_inode_flag(dir, EXT4_INODE_INDEX);
-               dx_fallback++;
-               ext4_mark_inode_dirty(handle, dir);
-@@ -2325,12 +2664,14 @@ out:
-               ext4_set_inode_state(inode, EXT4_STATE_NEWENTRY);
-       return retval;
- }
-+EXPORT_SYMBOL(__ext4_add_entry);
- /*
-  * Returns 0 for success, or a negative error value
-  */
- static int ext4_dx_add_entry(handle_t *handle, struct ext4_filename *fname,
--                           struct dentry *dentry, struct inode *inode)
-+                           struct dentry *dentry, struct inode *inode,
-+                           struct htree_lock *lck)
- {
-       struct dx_frame frames[EXT4_HTREE_LEVEL], *frame;
-       struct dx_entry *entries, *at;
-@@ -2343,7 +2684,7 @@ static int ext4_dx_add_entry(handle_t *handle, struct ext4_filename *fname,
- again:
-       restart = 0;
--      frame = dx_probe(fname, dir, NULL, frames);
-+      frame = dx_probe(fname, dir, NULL, frames, lck);
-       if (IS_ERR(frame))
-               return PTR_ERR(frame);
-       entries = frame->entries;
-@@ -2373,6 +2714,11 @@ again:
-               struct dx_node *node2;
-               struct buffer_head *bh2;
-+              if (!ext4_htree_safe_locked(lck)) { /* retry with EX lock */
-+                      ext4_htree_safe_relock(lck);
-+                      restart = 1;
-+                      goto cleanup;
-+              }
-               while (frame > frames) {
-                       if (dx_get_count((frame - 1)->entries) <
-                           dx_get_limit((frame - 1)->entries)) {
-@@ -2472,8 +2818,32 @@ again:
-                       restart = 1;
-                       goto journal_error;
-               }
-+      } else if (!ext4_htree_dx_locked(lck)) {
-+              struct ext4_dir_lock_data *ld = ext4_htree_lock_data(lck);
-+
-+              /* not well protected, require DX lock */
-+              ext4_htree_dx_need_lock(lck);
-+              at = frame > frames ? (frame - 1)->at : NULL;
-+
-+              /* NB: no risk of deadlock because it's just a try.
-+               *
-+               * NB: we check ld_count twice, the first time before
-+               * taking the DX lock, the second time after holding it.
-+               *
-+               * NB: we never free directory blocks so far, which
-+               * means the value returned by dx_get_count() should equal
-+               * ld->ld_count if nobody split any DE-block under @at,
-+               * and ld->ld_at still points to a valid dx_entry. */
-+              if ((ld->ld_count != dx_get_count(entries)) ||
-+                  !ext4_htree_dx_lock_try(lck, at) ||
-+                  (ld->ld_count != dx_get_count(entries))) {
-+                      restart = 1;
-+                      goto cleanup;
-+              }
-+              /* OK, I've got DX lock and nothing changed */
-+              frame->at = ld->ld_at;
-       }
--      de = do_split(handle, dir, &bh, frame, &fname->hinfo);
-+      de = do_split(handle, dir, &bh, frames, frame, &fname->hinfo, lck);
-       if (IS_ERR(de)) {
-               err = PTR_ERR(de);
-               goto cleanup;
-@@ -2484,6 +2854,8 @@ again:
- journal_error:
-       ext4_std_error(dir->i_sb, err); /* this is a no-op if err == 0 */
- cleanup:
-+      ext4_htree_dx_unlock(lck);
-+      ext4_htree_de_unlock(lck);
-       brelse(bh);
-       dx_release(frames);
-       /* @restart is true means htree-path has been changed, we need to
-diff --git a/fs/ext4/super.c b/fs/ext4/super.c
-index 02fe65b..be65ad4 100644
---- a/fs/ext4/super.c
-+++ b/fs/ext4/super.c
-@@ -896,6 +896,7 @@ static struct inode *ext4_alloc_inode(struct super_block *sb)
-       ei->vfs_inode.i_version = 1;
-       spin_lock_init(&ei->i_raw_lock);
-+      sema_init(&ei->i_append_sem, 1);
-       INIT_LIST_HEAD(&ei->i_prealloc_list);
-       spin_lock_init(&ei->i_prealloc_lock);
-       ext4_es_init_tree(&ei->i_es_tree);
-diff --git a/include/linux/htree_lock.h b/include/linux/htree_lock.h
-new file mode 100644
-index 0000000..9dc7788
---- /dev/null
-+++ b/include/linux/htree_lock.h
-@@ -0,0 +1,187 @@
-+/*
-+ * include/linux/htree_lock.h
-+ *
-+ * Copyright (c) 2011, 2012, Intel Corporation.
-+ *
-+ * Author: Liang Zhen <liang@whamcloud.com>
-+ */
-+
-+/*
-+ * htree lock
-+ *
-+ * htree_lock is an advanced lock; it supports five lock modes (a concept
-+ * taken from DLM) and it is a sleeping lock.
-+ *
-+ * most common use case is:
-+ * - create a htree_lock_head for data
-+ * - each thread (contender) creates its own htree_lock
-+ * - contender needs to call htree_lock(lock_node, mode) to protect data and
-+ *   call htree_unlock to release lock
-+ *
-+ * There is also a more complex, advanced use-case: a user can take a PW/PR
-+ * lock on a particular key, which is mostly used while the user is holding a
-+ * shared lock on the htree (CW, CR):
-+ *
-+ * htree_lock(lock_node, HTREE_LOCK_CR); lock the htree with CR
-+ * htree_node_lock(lock_node, HTREE_LOCK_PR, key...); lock @key with PR
-+ * ...
-+ * htree_node_unlock(lock_node); unlock the key
-+ *
-+ * Another tip: we can have N levels of such keys; all we need to
-+ * do is specify N levels while creating the htree_lock_head, then we can
-+ * lock/unlock a specific level by:
-+ * htree_node_lock(lock_node, mode1, key1, level1...);
-+ * do something;
-+ * htree_node_lock(lock_node, mode1, key2, level2...);
-+ * do something;
-+ * htree_node_unlock(lock_node, level2);
-+ * htree_node_unlock(lock_node, level1);
-+ *
-+ * NB: for multi-level, should be careful about locking order to avoid deadlock
-+ */
-+
-+#ifndef _LINUX_HTREE_LOCK_H
-+#define _LINUX_HTREE_LOCK_H
-+
-+#include <linux/list.h>
-+#include <linux/spinlock.h>
-+#include <linux/sched.h>
-+
-+/*
-+ * Lock Modes
-+ * more details can be found here:
-+ * http://en.wikipedia.org/wiki/Distributed_lock_manager
-+ */
-+typedef enum {
-+      HTREE_LOCK_EX   = 0, /* exclusive lock: incompatible with all others */
-+      HTREE_LOCK_PW,       /* protected write: allows only CR users */
-+      HTREE_LOCK_PR,       /* protected read: allow PR, CR users */
-+      HTREE_LOCK_CW,       /* concurrent write: allow CR, CW users */
-+      HTREE_LOCK_CR,       /* concurrent read: allow all but EX users */
-+      HTREE_LOCK_MAX,      /* number of lock modes */
-+} htree_lock_mode_t;
-+
-+#define HTREE_LOCK_NL         HTREE_LOCK_MAX
-+#define HTREE_LOCK_INVAL      0xdead10c
-+
-+enum {
-+      HTREE_HBITS_MIN         = 2,
-+      HTREE_HBITS_DEF         = 14,
-+      HTREE_HBITS_MAX         = 32,
-+};
-+
-+enum {
-+      HTREE_EVENT_DISABLE     = (0),
-+      HTREE_EVENT_RD          = (1 << HTREE_LOCK_PR),
-+      HTREE_EVENT_WR          = (1 << HTREE_LOCK_PW),
-+      HTREE_EVENT_RDWR        = (HTREE_EVENT_RD | HTREE_EVENT_WR),
-+};
-+
-+struct htree_lock;
-+
-+typedef void (*htree_event_cb_t)(void *target, void *event);
-+
-+struct htree_lock_child {
-+      struct list_head        lc_list;        /* granted list */
-+      htree_event_cb_t        lc_callback;    /* event callback */
-+      unsigned                lc_events;      /* event types */
-+};
-+
-+struct htree_lock_head {
-+      unsigned long           lh_lock;        /* bits lock */
-+      /* blocked lock list (htree_lock) */
-+      struct list_head        lh_blocked_list;
-+      /* # key levels */
-+      u16                     lh_depth;
-+      /* hash bits for key and limit number of locks */
-+      u16                     lh_hbits;
-+      /* counters for blocked locks */
-+      u16                     lh_nblocked[HTREE_LOCK_MAX];
-+      /* counters for granted locks */
-+      u16                     lh_ngranted[HTREE_LOCK_MAX];
-+      /* private data */
-+      void                    *lh_private;
-+      /* array of children locks */
-+      struct htree_lock_child lh_children[0];
-+};
-+
-+/* htree_lock_node_t is child-lock for a specific key (ln_value) */
-+struct htree_lock_node {
-+      htree_lock_mode_t       ln_mode;
-+      /* major hash key */
-+      u16                     ln_major_key;
-+      /* minor hash key */
-+      u16                     ln_minor_key;
-+      struct list_head        ln_major_list;
-+      struct list_head        ln_minor_list;
-+      /* alive list, all locks (granted, blocked, listening) are on it */
-+      struct list_head        ln_alive_list;
-+      /* blocked list */
-+      struct list_head        ln_blocked_list;
-+      /* granted list */
-+      struct list_head        ln_granted_list;
-+      void                    *ln_ev_target;
-+};
-+
-+struct htree_lock {
-+      struct task_struct      *lk_task;
-+      struct htree_lock_head  *lk_head;
-+      void                    *lk_private;
-+      unsigned                lk_depth;
-+      htree_lock_mode_t       lk_mode;
-+      struct list_head        lk_blocked_list;
-+      struct htree_lock_node  lk_nodes[0];
-+};
-+
-+/* create a lock head, which stands for a resource */
-+struct htree_lock_head *htree_lock_head_alloc(unsigned depth,
-+                                            unsigned hbits, unsigned priv);
-+/* free a lock head */
-+void htree_lock_head_free(struct htree_lock_head *lhead);
-+/* register event callback for child lock at level @depth */
-+void htree_lock_event_attach(struct htree_lock_head *lhead, unsigned depth,
-+                           unsigned events, htree_event_cb_t callback);
-+/* create a lock handle, which stands for a thread */
-+struct htree_lock *htree_lock_alloc(unsigned depth, unsigned pbytes);
-+/* free a lock handle */
-+void htree_lock_free(struct htree_lock *lck);
-+/* lock htree; when @wait is false, 0 is returned if the lock can't
-+ * be granted immediately */
-+int htree_lock_try(struct htree_lock *lck, struct htree_lock_head *lhead,
-+                 htree_lock_mode_t mode, int wait);
-+/* unlock htree */
-+void htree_unlock(struct htree_lock *lck);
-+/* unlock and relock htree with @new_mode */
-+int htree_change_lock_try(struct htree_lock *lck,
-+                        htree_lock_mode_t new_mode, int wait);
-+void htree_change_mode(struct htree_lock *lck, htree_lock_mode_t mode);
-+/* acquire the child lock (key) of the htree at level @dep; @event will be
-+ * sent to all listeners on this @key while the lock is being granted */
-+int htree_node_lock_try(struct htree_lock *lck, htree_lock_mode_t mode,
-+                      u32 key, unsigned dep, int wait, void *event);
-+/* release child lock at level @dep; this lock will listen on its key
-+ * if @event isn't NULL, and event_cb will be called against @lck while
-+ * granting any other lock at level @dep with the same key */
-+void htree_node_unlock(struct htree_lock *lck, unsigned dep, void *event);
-+/* stop listening on child lock at level @dep */
-+void htree_node_stop_listen(struct htree_lock *lck, unsigned dep);
-+/* for debug */
-+void htree_lock_stat_print(int depth);
-+void htree_lock_stat_reset(void);
-+
-+#define htree_lock(lck, lh, mode)     htree_lock_try(lck, lh, mode, 1)
-+#define htree_change_lock(lck, mode)  htree_change_lock_try(lck, mode, 1)
-+
-+#define htree_lock_mode(lck)          ((lck)->lk_mode)
-+
-+#define htree_node_lock(lck, mode, key, dep)  \
-+      htree_node_lock_try(lck, mode, key, dep, 1, NULL)
-+/* this is only safe in thread context of lock owner */
-+#define htree_node_is_granted(lck, dep)               \
-+      ((lck)->lk_nodes[dep].ln_mode != HTREE_LOCK_INVAL && \
-+       (lck)->lk_nodes[dep].ln_mode != HTREE_LOCK_NL)
-+/* this is only safe in thread context of lock owner */
-+#define htree_node_is_listening(lck, dep)     \
-+      ((lck)->lk_nodes[dep].ln_mode == HTREE_LOCK_NL)
-+
-+#endif
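
For reference, the htree_lock API added by include/linux/htree_lock.h in the
patch above is driven roughly as follows. This is a minimal, hypothetical
sketch (the example_* names are illustrative and not part of the patch),
assuming a kernel tree that carries this header; error handling is minimal.

/* hypothetical caller: one child-lock level, no private data */
#include <linux/errno.h>
#include <linux/htree_lock.h>

static struct htree_lock_head *example_lhead;

static int example_init(void)
{
        example_lhead = htree_lock_head_alloc(1, HTREE_HBITS_DEF, 0);
        return example_lhead ? 0 : -ENOMEM;
}

static void example_update(u32 key)
{
        struct htree_lock *lck = htree_lock_alloc(1, 0);

        if (lck == NULL)
                return;
        htree_lock(lck, example_lhead, HTREE_LOCK_CW); /* share the tree */
        htree_node_lock(lck, HTREE_LOCK_PW, key, 0);   /* exclusive on @key */
        /* ... modify the object identified by @key ... */
        htree_node_unlock(lck, 0, NULL);
        htree_unlock(lck);      /* also releases any remaining node locks */
        htree_lock_free(lck);
}
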
diff --git a/ldiskfs/kernel_patches/patches/ubuntu14+16/ext4-pdirop.patch b/ldiskfs/kernel_patches/patches/ubuntu14+16/ext4-pdirop.patch
deleted file mode 100644 (file)
index 81ee180..0000000
+++ /dev/null
@@ -1,1927 +0,0 @@
-diff --git a/fs/ext4/Makefile b/fs/ext4/Makefile
-index f52cf54..3f16939 100644
---- a/fs/ext4/Makefile
-+++ b/fs/ext4/Makefile
-@@ -6,6 +6,7 @@ obj-$(CONFIG_EXT4_FS) += ext4.o
- ext4-y        := balloc.o bitmap.o dir.o file.o fsync.o ialloc.o inode.o page-io.o \
-               ioctl.o namei.o super.o symlink.o hash.o resize.o extents.o \
-+              htree_lock.o \
-               ext4_jbd2.o migrate.o mballoc.o block_validity.o move_extent.o \
-               mmp.o indirect.o extents_status.o xattr.o xattr_user.o \
-               xattr_trusted.o inline.o readpage.o sysfs.o
-diff --git a/fs/ext4/ext4.h b/fs/ext4/ext4.h
-index 2d22f1a..005c9b3 100644
---- a/fs/ext4/ext4.h
-+++ b/fs/ext4/ext4.h
-@@ -28,6 +28,7 @@
- #include <linux/timer.h>
- #include <linux/version.h>
- #include <linux/wait.h>
-+#include <linux/htree_lock.h>
- #include <linux/blockgroup_lock.h>
- #include <linux/percpu_counter.h>
- #include <linux/ratelimit.h>
-@@ -880,6 +881,9 @@ struct ext4_inode_info {
-       __u32   i_dtime;
-       ext4_fsblk_t    i_file_acl;
-+      /* following fields for parallel directory operations -bzzz */
-+      struct semaphore i_append_sem;
-+
-       /*
-        * i_block_group is the number of the block group which contains
-        * this file's inode.  Constant across the lifetime of the inode,
-@@ -2086,6 +2090,71 @@ struct dx_hash_info
-       u32             *seed;
- };
-+/* assume name-hash is protected by upper layer */
-+#define EXT4_HTREE_LOCK_HASH  0
-+
-+enum ext4_pdo_lk_types {
-+#if EXT4_HTREE_LOCK_HASH
-+      EXT4_LK_HASH,
-+#endif
-+      EXT4_LK_DX,             /* index block */
-+      EXT4_LK_DE,             /* directory entry block */
-+      EXT4_LK_SPIN,           /* spinlock */
-+      EXT4_LK_MAX,
-+};
-+
-+/* read-only bit */
-+#define EXT4_LB_RO(b)         (1 << (b))
-+/* read + write, high bits for writer */
-+#define EXT4_LB_RW(b)         ((1 << (b)) | (1 << (EXT4_LK_MAX + (b))))
-+
-+enum ext4_pdo_lock_bits {
-+      /* DX lock bits */
-+      EXT4_LB_DX_RO           = EXT4_LB_RO(EXT4_LK_DX),
-+      EXT4_LB_DX              = EXT4_LB_RW(EXT4_LK_DX),
-+      /* DE lock bits */
-+      EXT4_LB_DE_RO           = EXT4_LB_RO(EXT4_LK_DE),
-+      EXT4_LB_DE              = EXT4_LB_RW(EXT4_LK_DE),
-+      /* DX spinlock bits */
-+      EXT4_LB_SPIN_RO         = EXT4_LB_RO(EXT4_LK_SPIN),
-+      EXT4_LB_SPIN            = EXT4_LB_RW(EXT4_LK_SPIN),
-+      /* accurate searching */
-+      EXT4_LB_EXACT           = EXT4_LB_RO(EXT4_LK_MAX << 1),
-+};
-+
-+enum ext4_pdo_lock_opc {
-+      /* external */
-+      EXT4_HLOCK_READDIR      = (EXT4_LB_DE_RO | EXT4_LB_DX_RO),
-+      EXT4_HLOCK_LOOKUP       = (EXT4_LB_DE_RO | EXT4_LB_SPIN_RO |
-+                                 EXT4_LB_EXACT),
-+      EXT4_HLOCK_DEL          = (EXT4_LB_DE | EXT4_LB_SPIN_RO |
-+                                 EXT4_LB_EXACT),
-+      EXT4_HLOCK_ADD          = (EXT4_LB_DE | EXT4_LB_SPIN_RO),
-+
-+      /* internal */
-+      EXT4_HLOCK_LOOKUP_SAFE  = (EXT4_LB_DE_RO | EXT4_LB_DX_RO |
-+                                 EXT4_LB_EXACT),
-+      EXT4_HLOCK_DEL_SAFE     = (EXT4_LB_DE | EXT4_LB_DX_RO | EXT4_LB_EXACT),
-+      EXT4_HLOCK_SPLIT        = (EXT4_LB_DE | EXT4_LB_DX | EXT4_LB_SPIN),
-+};
-+
-+extern struct htree_lock_head *ext4_htree_lock_head_alloc(unsigned hbits);
-+#define ext4_htree_lock_head_free(lhead)      htree_lock_head_free(lhead)
-+
-+extern struct htree_lock *ext4_htree_lock_alloc(void);
-+#define ext4_htree_lock_free(lck)             htree_lock_free(lck)
-+
-+extern void ext4_htree_lock(struct htree_lock *lck,
-+                          struct htree_lock_head *lhead,
-+                          struct inode *dir, unsigned flags);
-+#define ext4_htree_unlock(lck)                  htree_unlock(lck)
-+
-+extern struct buffer_head *__ext4_find_entry(struct inode *dir,
-+                                      const struct qstr *d_name,
-+                                      struct ext4_dir_entry_2 **res_dir,
-+                                      int *inlined, struct htree_lock *lck);
-+extern int __ext4_add_entry(handle_t *handle, struct dentry *dentry,
-+                    struct inode *inode, struct htree_lock *lck);
- /* 32 and 64 bit signed EOF for dx directories */
- #define EXT4_HTREE_EOF_32BIT   ((1UL  << (32 - 1)) - 1)
-@@ -2475,8 +2544,16 @@ int ext4_insert_dentry(struct inode *dir,
-                      struct ext4_filename *fname, void *data);
- static inline void ext4_update_dx_flag(struct inode *inode)
- {
-+      /* Disable it for ldiskfs, because going from a DX directory to
-+       * a non-DX directory while it is in use will completely break
-+       * the htree-locking.
-+       * If we really want to support this operation in the future,
-+       * we need to exclusively lock the directory here, which will
-+       * increase the complexity of the code */
-+#if 0
-       if (!ext4_has_feature_dir_index(inode->i_sb))
-               ext4_clear_inode_flag(inode, EXT4_INODE_INDEX);
-+#endif
- }
- static unsigned char ext4_filetype_table[] = {
-       DT_UNKNOWN, DT_REG, DT_DIR, DT_CHR, DT_BLK, DT_FIFO, DT_SOCK, DT_LNK
-diff --git a/fs/ext4/htree_lock.c b/fs/ext4/htree_lock.c
-new file mode 100644
-index 0000000..99e7375
---- /dev/null
-+++ b/fs/ext4/htree_lock.c
-@@ -0,0 +1,891 @@
-+/*
-+ * fs/ext4/htree_lock.c
-+ *
-+ * Copyright (c) 2011, 2012, Intel Corporation.
-+ *
-+ * Author: Liang Zhen <liang@whamcloud.com>
-+ */
-+#include <linux/jbd2.h>
-+#include <linux/hash.h>
-+#include <linux/module.h>
-+#include <linux/htree_lock.h>
-+
-+enum {
-+      HTREE_LOCK_BIT_EX       = (1 << HTREE_LOCK_EX),
-+      HTREE_LOCK_BIT_PW       = (1 << HTREE_LOCK_PW),
-+      HTREE_LOCK_BIT_PR       = (1 << HTREE_LOCK_PR),
-+      HTREE_LOCK_BIT_CW       = (1 << HTREE_LOCK_CW),
-+      HTREE_LOCK_BIT_CR       = (1 << HTREE_LOCK_CR),
-+};
-+
-+enum {
-+      HTREE_LOCK_COMPAT_EX    = 0,
-+      HTREE_LOCK_COMPAT_PW    = HTREE_LOCK_COMPAT_EX | HTREE_LOCK_BIT_CR,
-+      HTREE_LOCK_COMPAT_PR    = HTREE_LOCK_COMPAT_PW | HTREE_LOCK_BIT_PR,
-+      HTREE_LOCK_COMPAT_CW    = HTREE_LOCK_COMPAT_PW | HTREE_LOCK_BIT_CW,
-+      HTREE_LOCK_COMPAT_CR    = HTREE_LOCK_COMPAT_CW | HTREE_LOCK_BIT_PR |
-+                                HTREE_LOCK_BIT_PW,
-+};
-+
-+static int htree_lock_compat[] = {
-+      [HTREE_LOCK_EX]         HTREE_LOCK_COMPAT_EX,
-+      [HTREE_LOCK_PW]         HTREE_LOCK_COMPAT_PW,
-+      [HTREE_LOCK_PR]         HTREE_LOCK_COMPAT_PR,
-+      [HTREE_LOCK_CW]         HTREE_LOCK_COMPAT_CW,
-+      [HTREE_LOCK_CR]         HTREE_LOCK_COMPAT_CR,
-+};
-+
-+/* max allowed htree-lock depth.
-+ * We only need depth=3 for ext4 although user can have higher value. */
-+#define HTREE_LOCK_DEP_MAX    16
-+
-+#ifdef HTREE_LOCK_DEBUG
-+
-+static char *hl_name[] = {
-+      [HTREE_LOCK_EX]         "EX",
-+      [HTREE_LOCK_PW]         "PW",
-+      [HTREE_LOCK_PR]         "PR",
-+      [HTREE_LOCK_CW]         "CW",
-+      [HTREE_LOCK_CR]         "CR",
-+};
-+
-+/* lock stats */
-+struct htree_lock_node_stats {
-+      unsigned long long      blocked[HTREE_LOCK_MAX];
-+      unsigned long long      granted[HTREE_LOCK_MAX];
-+      unsigned long long      retried[HTREE_LOCK_MAX];
-+      unsigned long long      events;
-+};
-+
-+struct htree_lock_stats {
-+      struct htree_lock_node_stats    nodes[HTREE_LOCK_DEP_MAX];
-+      unsigned long long      granted[HTREE_LOCK_MAX];
-+      unsigned long long      blocked[HTREE_LOCK_MAX];
-+};
-+
-+static struct htree_lock_stats hl_stats;
-+
-+void htree_lock_stat_reset(void)
-+{
-+      memset(&hl_stats, 0, sizeof(hl_stats));
-+}
-+
-+void htree_lock_stat_print(int depth)
-+{
-+      int     i;
-+      int     j;
-+
-+      printk(KERN_DEBUG "HTREE LOCK STATS:\n");
-+      for (i = 0; i < HTREE_LOCK_MAX; i++) {
-+              printk(KERN_DEBUG "[%s]: G [%10llu], B [%10llu]\n",
-+                     hl_name[i], hl_stats.granted[i], hl_stats.blocked[i]);
-+      }
-+      for (i = 0; i < depth; i++) {
-+              printk(KERN_DEBUG "HTREE CHILD [%d] STATS:\n", i);
-+              for (j = 0; j < HTREE_LOCK_MAX; j++) {
-+                      printk(KERN_DEBUG
-+                              "[%s]: G [%10llu], B [%10llu], R [%10llu]\n",
-+                              hl_name[j], hl_stats.nodes[i].granted[j],
-+                              hl_stats.nodes[i].blocked[j],
-+                              hl_stats.nodes[i].retried[j]);
-+              }
-+      }
-+}
-+
-+#define lk_grant_inc(m)       do { hl_stats.granted[m]++; } while (0)
-+#define lk_block_inc(m)       do { hl_stats.blocked[m]++; } while (0)
-+#define ln_grant_inc(d, m)    do { hl_stats.nodes[d].granted[m]++; } while (0)
-+#define ln_block_inc(d, m)    do { hl_stats.nodes[d].blocked[m]++; } while (0)
-+#define ln_retry_inc(d, m)    do { hl_stats.nodes[d].retried[m]++; } while (0)
-+#define ln_event_inc(d)       do { hl_stats.nodes[d].events++; } while (0)
-+
-+#else /* !DEBUG */
-+
-+void htree_lock_stat_reset(void) {}
-+void htree_lock_stat_print(int depth) {}
-+
-+#define lk_grant_inc(m)             do {} while (0)
-+#define lk_block_inc(m)             do {} while (0)
-+#define ln_grant_inc(d, m)    do {} while (0)
-+#define ln_block_inc(d, m)    do {} while (0)
-+#define ln_retry_inc(d, m)    do {} while (0)
-+#define ln_event_inc(d)             do {} while (0)
-+
-+#endif /* DEBUG */
-+
-+EXPORT_SYMBOL(htree_lock_stat_reset);
-+EXPORT_SYMBOL(htree_lock_stat_print);
-+
-+#define HTREE_DEP_ROOT                  (-1)
-+
-+#define htree_spin_lock(lhead, dep)                           \
-+      bit_spin_lock((dep) + 1, &(lhead)->lh_lock)
-+#define htree_spin_unlock(lhead, dep)                         \
-+      bit_spin_unlock((dep) + 1, &(lhead)->lh_lock)
-+
-+#define htree_key_event_ignore(child, ln)                     \
-+      (!((child)->lc_events & (1 << (ln)->ln_mode)))
-+
-+static int
-+htree_key_list_empty(struct htree_lock_node *ln)
-+{
-+      return list_empty(&ln->ln_major_list) && list_empty(&ln->ln_minor_list);
-+}
-+
-+static void
-+htree_key_list_del_init(struct htree_lock_node *ln)
-+{
-+      struct htree_lock_node *tmp = NULL;
-+
-+      if (!list_empty(&ln->ln_minor_list)) {
-+              tmp = list_entry(ln->ln_minor_list.next,
-+                               struct htree_lock_node, ln_minor_list);
-+              list_del_init(&ln->ln_minor_list);
-+      }
-+
-+      if (list_empty(&ln->ln_major_list))
-+              return;
-+
-+      if (tmp == NULL) { /* not on minor key list */
-+              list_del_init(&ln->ln_major_list);
-+      } else {
-+              BUG_ON(!list_empty(&tmp->ln_major_list));
-+              list_replace_init(&ln->ln_major_list, &tmp->ln_major_list);
-+      }
-+}
-+
-+static void
-+htree_key_list_replace_init(struct htree_lock_node *old,
-+                          struct htree_lock_node *new)
-+{
-+      if (!list_empty(&old->ln_major_list))
-+              list_replace_init(&old->ln_major_list, &new->ln_major_list);
-+
-+      if (!list_empty(&old->ln_minor_list))
-+              list_replace_init(&old->ln_minor_list, &new->ln_minor_list);
-+}
-+
-+static void
-+htree_key_event_enqueue(struct htree_lock_child *child,
-+                      struct htree_lock_node *ln, int dep, void *event)
-+{
-+      struct htree_lock_node *tmp;
-+
-+      /* NB: ALWAYS called holding lhead::lh_lock(dep) */
-+      BUG_ON(ln->ln_mode == HTREE_LOCK_NL);
-+      if (event == NULL || htree_key_event_ignore(child, ln))
-+              return;
-+
-+      /* shouldn't be a very long list */
-+      list_for_each_entry(tmp, &ln->ln_alive_list, ln_alive_list) {
-+              if (tmp->ln_mode == HTREE_LOCK_NL) {
-+                      ln_event_inc(dep);
-+                      if (child->lc_callback != NULL)
-+                              child->lc_callback(tmp->ln_ev_target, event);
-+              }
-+      }
-+}
-+
-+static int
-+htree_node_lock_enqueue(struct htree_lock *newlk, struct htree_lock *curlk,
-+                      unsigned dep, int wait, void *event)
-+{
-+      struct htree_lock_child *child = &newlk->lk_head->lh_children[dep];
-+      struct htree_lock_node *newln = &newlk->lk_nodes[dep];
-+      struct htree_lock_node *curln = &curlk->lk_nodes[dep];
-+
-+      /* NB: ALWAYS called holding lhead::lh_lock(dep) */
-+      /* NB: we only expect PR/PW lock modes here, only these two modes are
-+       * allowed for htree_node_lock (asserted in htree_node_lock_internal);
-+       * NL is only used for listeners, users can't directly request NL mode */
-+      if ((curln->ln_mode == HTREE_LOCK_NL) ||
-+          (curln->ln_mode != HTREE_LOCK_PW &&
-+           newln->ln_mode != HTREE_LOCK_PW)) {
-+              /* no conflict, attach it on granted list of @curlk */
-+              if (curln->ln_mode != HTREE_LOCK_NL) {
-+                      list_add(&newln->ln_granted_list,
-+                               &curln->ln_granted_list);
-+              } else {
-+                      /* replace key owner */
-+                      htree_key_list_replace_init(curln, newln);
-+              }
-+
-+              list_add(&newln->ln_alive_list, &curln->ln_alive_list);
-+              htree_key_event_enqueue(child, newln, dep, event);
-+              ln_grant_inc(dep, newln->ln_mode);
-+              return 1; /* still hold lh_lock */
-+      }
-+
-+      if (!wait) { /* can't grant and don't want to wait */
-+              ln_retry_inc(dep, newln->ln_mode);
-+              newln->ln_mode = HTREE_LOCK_INVAL;
-+              return -1; /* don't wait and just return -1 */
-+      }
-+
-+      newlk->lk_task = current;
-+      set_current_state(TASK_UNINTERRUPTIBLE);
-+      /* conflict, attach it on blocked list of curlk */
-+      list_add_tail(&newln->ln_blocked_list, &curln->ln_blocked_list);
-+      list_add(&newln->ln_alive_list, &curln->ln_alive_list);
-+      ln_block_inc(dep, newln->ln_mode);
-+
-+      htree_spin_unlock(newlk->lk_head, dep);
-+      /* wait to be given the lock */
-+      if (newlk->lk_task != NULL)
-+              schedule();
-+      /* granted, no doubt, wake up will set me RUNNING */
-+      if (event == NULL || htree_key_event_ignore(child, newln))
-+              return 0; /* granted without lh_lock */
-+
-+      htree_spin_lock(newlk->lk_head, dep);
-+      htree_key_event_enqueue(child, newln, dep, event);
-+      return 1; /* still hold lh_lock */
-+}
-+
-+/*
-+ * get PR/PW access to particular tree-node according to @dep and @key,
-+ * it will return -1 if @wait is false and can't immediately grant this lock.
-+ * All listeners(HTREE_LOCK_NL) on @dep and with the same @key will get
-+ * @event if it's not NULL.
-+ * NB: ALWAYS called holding lhead::lh_lock
-+ */
-+static int
-+htree_node_lock_internal(struct htree_lock_head *lhead, struct htree_lock *lck,
-+                       htree_lock_mode_t mode, u32 key, unsigned dep,
-+                       int wait, void *event)
-+{
-+      LIST_HEAD(list);
-+      struct htree_lock       *tmp;
-+      struct htree_lock       *tmp2;
-+      u16                     major;
-+      u16                     minor;
-+      u8                      reverse;
-+      u8                      ma_bits;
-+      u8                      mi_bits;
-+
-+      BUG_ON(mode != HTREE_LOCK_PW && mode != HTREE_LOCK_PR);
-+      BUG_ON(htree_node_is_granted(lck, dep));
-+
-+      key = hash_long(key, lhead->lh_hbits);
-+
-+      mi_bits = lhead->lh_hbits >> 1;
-+      ma_bits = lhead->lh_hbits - mi_bits;
-+
-+      lck->lk_nodes[dep].ln_major_key = major = key & ((1U << ma_bits) - 1);
-+      lck->lk_nodes[dep].ln_minor_key = minor = key >> ma_bits;
-+      lck->lk_nodes[dep].ln_mode = mode;
-+
-+      /*
-+       * The major key list is an ordered list, so searches are started
-+       * at the end of the list that is numerically closer to major_key,
-+       * so at most half of the list will be walked (for well-distributed
-+       * keys). The list traversal aborts early if the expected key
-+       * location is passed.
-+       */
-+      reverse = (major >= (1 << (ma_bits - 1)));
-+
-+      if (reverse) {
-+              list_for_each_entry_reverse(tmp,
-+                                      &lhead->lh_children[dep].lc_list,
-+                                      lk_nodes[dep].ln_major_list) {
-+                      if (tmp->lk_nodes[dep].ln_major_key == major) {
-+                              goto search_minor;
-+
-+                      } else if (tmp->lk_nodes[dep].ln_major_key < major) {
-+                              /* attach _after_ @tmp */
-+                              list_add(&lck->lk_nodes[dep].ln_major_list,
-+                                       &tmp->lk_nodes[dep].ln_major_list);
-+                              goto out_grant_major;
-+                      }
-+              }
-+
-+              list_add(&lck->lk_nodes[dep].ln_major_list,
-+                       &lhead->lh_children[dep].lc_list);
-+              goto out_grant_major;
-+
-+      } else {
-+              list_for_each_entry(tmp, &lhead->lh_children[dep].lc_list,
-+                                  lk_nodes[dep].ln_major_list) {
-+                      if (tmp->lk_nodes[dep].ln_major_key == major) {
-+                              goto search_minor;
-+
-+                      } else if (tmp->lk_nodes[dep].ln_major_key > major) {
-+                              /* insert _before_ @tmp */
-+                              list_add_tail(&lck->lk_nodes[dep].ln_major_list,
-+                                      &tmp->lk_nodes[dep].ln_major_list);
-+                              goto out_grant_major;
-+                      }
-+              }
-+
-+              list_add_tail(&lck->lk_nodes[dep].ln_major_list,
-+                            &lhead->lh_children[dep].lc_list);
-+              goto out_grant_major;
-+      }
-+
-+ search_minor:
-+      /*
-+       * NB: minor_key list doesn't have a "head", @list is just a
-+       * temporary stub for helping list searching, make sure it's removed
-+       * after searching.
-+       * minor_key list is an ordered list too.
-+       */
-+      list_add_tail(&list, &tmp->lk_nodes[dep].ln_minor_list);
-+
-+      reverse = (minor >= (1 << (mi_bits - 1)));
-+
-+      if (reverse) {
-+              list_for_each_entry_reverse(tmp2, &list,
-+                                          lk_nodes[dep].ln_minor_list) {
-+                      if (tmp2->lk_nodes[dep].ln_minor_key == minor) {
-+                              goto out_enqueue;
-+
-+                      } else if (tmp2->lk_nodes[dep].ln_minor_key < minor) {
-+                              /* attach _after_ @tmp2 */
-+                              list_add(&lck->lk_nodes[dep].ln_minor_list,
-+                                       &tmp2->lk_nodes[dep].ln_minor_list);
-+                              goto out_grant_minor;
-+                      }
-+              }
-+
-+              list_add(&lck->lk_nodes[dep].ln_minor_list, &list);
-+
-+      } else {
-+              list_for_each_entry(tmp2, &list,
-+                                  lk_nodes[dep].ln_minor_list) {
-+                      if (tmp2->lk_nodes[dep].ln_minor_key == minor) {
-+                              goto out_enqueue;
-+
-+                      } else if (tmp2->lk_nodes[dep].ln_minor_key > minor) {
-+                              /* insert _before_ @tmp2 */
-+                              list_add_tail(&lck->lk_nodes[dep].ln_minor_list,
-+                                      &tmp2->lk_nodes[dep].ln_minor_list);
-+                              goto out_grant_minor;
-+                      }
-+              }
-+
-+              list_add_tail(&lck->lk_nodes[dep].ln_minor_list, &list);
-+      }
-+
-+ out_grant_minor:
-+      if (list.next == &lck->lk_nodes[dep].ln_minor_list) {
-+              /* new lock @lck is the first one on minor_key list, which
-+               * means it has the smallest minor_key and it should
-+               * replace @tmp as minor_key owner */
-+              list_replace_init(&tmp->lk_nodes[dep].ln_major_list,
-+                                &lck->lk_nodes[dep].ln_major_list);
-+      }
-+      /* remove the temporary head */
-+      list_del(&list);
-+
-+ out_grant_major:
-+      ln_grant_inc(dep, lck->lk_nodes[dep].ln_mode);
-+      return 1; /* granted with holding lh_lock */
-+
-+ out_enqueue:
-+      list_del(&list); /* remove temporary head */
-+      return htree_node_lock_enqueue(lck, tmp2, dep, wait, event);
-+}
-+
-+/*
-+ * release the key of @lck at level @dep, and grant any blocked locks.
-+ * caller will still listen on @key if @event is not NULL, which means
-+ * caller can see an event (by event_cb) while granting any lock with
-+ * the same key at level @dep.
-+ * NB: ALWAYS called holding lhead::lh_lock
-+ * NB: listener will not block anyone because listening mode is HTREE_LOCK_NL
-+ */
-+static void
-+htree_node_unlock_internal(struct htree_lock_head *lhead,
-+                         struct htree_lock *curlk, unsigned dep, void *event)
-+{
-+      struct htree_lock_node  *curln = &curlk->lk_nodes[dep];
-+      struct htree_lock       *grtlk = NULL;
-+      struct htree_lock_node  *grtln;
-+      struct htree_lock       *poslk;
-+      struct htree_lock       *tmplk;
-+
-+      if (!htree_node_is_granted(curlk, dep))
-+              return;
-+
-+      if (!list_empty(&curln->ln_granted_list)) {
-+              /* there is another granted lock */
-+              grtlk = list_entry(curln->ln_granted_list.next,
-+                                 struct htree_lock,
-+                                 lk_nodes[dep].ln_granted_list);
-+              list_del_init(&curln->ln_granted_list);
-+      }
-+
-+      if (grtlk == NULL && !list_empty(&curln->ln_blocked_list)) {
-+              /*
-+               * @curlk is the only granted lock, so we confirmed:
-+               * a) curln is key owner (attached on major/minor_list),
-+               *    so if there is any blocked lock, it should be attached
-+               *    on curln->ln_blocked_list
-+               * b) we always can grant the first blocked lock
-+               */
-+              grtlk = list_entry(curln->ln_blocked_list.next,
-+                                 struct htree_lock,
-+                                 lk_nodes[dep].ln_blocked_list);
-+              BUG_ON(grtlk->lk_task == NULL);
-+              wake_up_process(grtlk->lk_task);
-+      }
-+
-+      if (event != NULL &&
-+          lhead->lh_children[dep].lc_events != HTREE_EVENT_DISABLE) {
-+              curln->ln_ev_target = event;
-+              curln->ln_mode = HTREE_LOCK_NL; /* listen! */
-+      } else {
-+              curln->ln_mode = HTREE_LOCK_INVAL;
-+      }
-+
-+      if (grtlk == NULL) { /* I must be the only one locking this key */
-+              struct htree_lock_node *tmpln;
-+
-+              BUG_ON(htree_key_list_empty(curln));
-+
-+              if (curln->ln_mode == HTREE_LOCK_NL) /* listening */
-+                      return;
-+
-+              /* not listening */
-+              if (list_empty(&curln->ln_alive_list)) { /* no more listener */
-+                      htree_key_list_del_init(curln);
-+                      return;
-+              }
-+
-+              tmpln = list_entry(curln->ln_alive_list.next,
-+                                 struct htree_lock_node, ln_alive_list);
-+
-+              BUG_ON(tmpln->ln_mode != HTREE_LOCK_NL);
-+
-+              htree_key_list_replace_init(curln, tmpln);
-+              list_del_init(&curln->ln_alive_list);
-+
-+              return;
-+      }
-+
-+      /* have a granted lock */
-+      grtln = &grtlk->lk_nodes[dep];
-+      if (!list_empty(&curln->ln_blocked_list)) {
-+              /* only key owner can be on both lists */
-+              BUG_ON(htree_key_list_empty(curln));
-+
-+              if (list_empty(&grtln->ln_blocked_list)) {
-+                      list_add(&grtln->ln_blocked_list,
-+                               &curln->ln_blocked_list);
-+              }
-+              list_del_init(&curln->ln_blocked_list);
-+      }
-+      /*
-+       * NB: this is the tricky part:
-+       * We have only two modes for child-lock (PR and PW), also,
-+       * only owner of the key (attached on major/minor_list) can be on
-+       * both blocked_list and granted_list, so @grtlk must be one
-+       * of these two cases:
-+       *
-+       * a) @grtlk is taken from granted_list, which means we've granted
-+       *    more than one lock so @grtlk has to be PR, the first blocked
-+       *    lock must be PW and we can't grant it at all.
-+       *    So even @grtlk is not owner of the key (empty blocked_list),
-+       *    we don't care because we can't grant any lock.
-+       * b) we just grant a new lock which is taken from head of blocked
-+       *    list, and it should be the first granted lock, and it should
-+       *    be the first one linked on blocked_list.
-+       *
-+       * Either way, we can get the correct result by iterating the
-+       * blocked_list of @grtlk, and don't have to bother about how to
-+       * find the owner of the current key.
-+       */
-+      list_for_each_entry_safe(poslk, tmplk, &grtln->ln_blocked_list,
-+                               lk_nodes[dep].ln_blocked_list) {
-+              if (grtlk->lk_nodes[dep].ln_mode == HTREE_LOCK_PW ||
-+                  poslk->lk_nodes[dep].ln_mode == HTREE_LOCK_PW)
-+                      break;
-+              /* grant all readers */
-+              list_del_init(&poslk->lk_nodes[dep].ln_blocked_list);
-+              list_add(&poslk->lk_nodes[dep].ln_granted_list,
-+                       &grtln->ln_granted_list);
-+
-+              BUG_ON(poslk->lk_task == NULL);
-+              wake_up_process(poslk->lk_task);
-+      }
-+
-+      /* if @curln is the owner of this key, replace it with @grtln */
-+      if (!htree_key_list_empty(curln))
-+              htree_key_list_replace_init(curln, grtln);
-+
-+      if (curln->ln_mode == HTREE_LOCK_INVAL)
-+              list_del_init(&curln->ln_alive_list);
-+}
-+
-+/*
-+ * it's just a wrapper of htree_node_lock_internal; it returns 1 when granted
-+ * and 0 only if @wait is false and the lock can't be granted immediately
-+ */
-+int
-+htree_node_lock_try(struct htree_lock *lck, htree_lock_mode_t mode,
-+                  u32 key, unsigned dep, int wait, void *event)
-+{
-+      struct htree_lock_head *lhead = lck->lk_head;
-+      int rc;
-+
-+      BUG_ON(dep >= lck->lk_depth);
-+      BUG_ON(lck->lk_mode == HTREE_LOCK_INVAL);
-+
-+      htree_spin_lock(lhead, dep);
-+      rc = htree_node_lock_internal(lhead, lck, mode, key, dep, wait, event);
-+      if (rc != 0)
-+              htree_spin_unlock(lhead, dep);
-+      return rc >= 0;
-+}
-+EXPORT_SYMBOL(htree_node_lock_try);
-+
-+/* it's a wrapper of htree_node_unlock_internal */
-+void
-+htree_node_unlock(struct htree_lock *lck, unsigned dep, void *event)
-+{
-+      struct htree_lock_head *lhead = lck->lk_head;
-+
-+      BUG_ON(dep >= lck->lk_depth);
-+      BUG_ON(lck->lk_mode == HTREE_LOCK_INVAL);
-+
-+      htree_spin_lock(lhead, dep);
-+      htree_node_unlock_internal(lhead, lck, dep, event);
-+      htree_spin_unlock(lhead, dep);
-+}
-+EXPORT_SYMBOL(htree_node_unlock);
-+
-+/* stop listening on child-lock level @dep */
-+void
-+htree_node_stop_listen(struct htree_lock *lck, unsigned dep)
-+{
-+      struct htree_lock_node *ln = &lck->lk_nodes[dep];
-+      struct htree_lock_node *tmp;
-+
-+      BUG_ON(htree_node_is_granted(lck, dep));
-+      BUG_ON(!list_empty(&ln->ln_blocked_list));
-+      BUG_ON(!list_empty(&ln->ln_granted_list));
-+
-+      if (!htree_node_is_listening(lck, dep))
-+              return;
-+
-+      htree_spin_lock(lck->lk_head, dep);
-+      ln->ln_mode = HTREE_LOCK_INVAL;
-+      ln->ln_ev_target = NULL;
-+
-+      if (htree_key_list_empty(ln)) { /* not owner */
-+              list_del_init(&ln->ln_alive_list);
-+              goto out;
-+      }
-+
-+      /* I'm the owner... */
-+      if (list_empty(&ln->ln_alive_list)) { /* no more listener */
-+              htree_key_list_del_init(ln);
-+              goto out;
-+      }
-+
-+      tmp = list_entry(ln->ln_alive_list.next,
-+                       struct htree_lock_node, ln_alive_list);
-+
-+      BUG_ON(tmp->ln_mode != HTREE_LOCK_NL);
-+      htree_key_list_replace_init(ln, tmp);
-+      list_del_init(&ln->ln_alive_list);
-+ out:
-+      htree_spin_unlock(lck->lk_head, dep);
-+}
-+EXPORT_SYMBOL(htree_node_stop_listen);
-+
-+/* release all child-locks if we have any */
-+static void
-+htree_node_release_all(struct htree_lock *lck)
-+{
-+      int     i;
-+
-+      for (i = 0; i < lck->lk_depth; i++) {
-+              if (htree_node_is_granted(lck, i))
-+                      htree_node_unlock(lck, i, NULL);
-+              else if (htree_node_is_listening(lck, i))
-+                      htree_node_stop_listen(lck, i);
-+      }
-+}
-+
-+/*
-+ * obtain htree lock, it could be blocked inside if there's conflict
-+ * with any granted or blocked lock and @wait is true.
-+ * NB: ALWAYS called holding lhead::lh_lock
-+ */
-+static int
-+htree_lock_internal(struct htree_lock *lck, int wait)
-+{
-+      struct htree_lock_head *lhead = lck->lk_head;
-+      int     granted = 0;
-+      int     blocked = 0;
-+      int     i;
-+
-+      for (i = 0; i < HTREE_LOCK_MAX; i++) {
-+              if (lhead->lh_ngranted[i] != 0)
-+                      granted |= 1 << i;
-+              if (lhead->lh_nblocked[i] != 0)
-+                      blocked |= 1 << i;
-+      }
-+      if ((htree_lock_compat[lck->lk_mode] & granted) != granted ||
-+          (htree_lock_compat[lck->lk_mode] & blocked) != blocked) {
-+              /* will block the current lock even if it just conflicts with
-+               * any other blocked lock, so locks like EX won't starve */
-+              if (!wait)
-+                      return -1;
-+              lhead->lh_nblocked[lck->lk_mode]++;
-+              lk_block_inc(lck->lk_mode);
-+
-+              lck->lk_task = current;
-+              list_add_tail(&lck->lk_blocked_list, &lhead->lh_blocked_list);
-+
-+retry:
-+              set_current_state(TASK_UNINTERRUPTIBLE);
-+              htree_spin_unlock(lhead, HTREE_DEP_ROOT);
-+              /* wait to be given the lock */
-+              if (lck->lk_task != NULL)
-+                      schedule();
-+              /* granted, no doubt. wake up will set me RUNNING.
-+               * Since the thread could be woken up spuriously,
-+               * we need to check again whether the lock was granted. */
-+              if (!list_empty(&lck->lk_blocked_list)) {
-+                      htree_spin_lock(lhead, HTREE_DEP_ROOT);
-+                      if (list_empty(&lck->lk_blocked_list)) {
-+                              htree_spin_unlock(lhead, HTREE_DEP_ROOT);
-+                              return 0;
-+                      }
-+                      goto retry;
-+              }
-+              return 0; /* without lh_lock */
-+      }
-+      lhead->lh_ngranted[lck->lk_mode]++;
-+      lk_grant_inc(lck->lk_mode);
-+      return 1;
-+}
-+
-+/* release htree lock. NB: ALWAYS called holding lhead::lh_lock */
-+static void
-+htree_unlock_internal(struct htree_lock *lck)
-+{
-+      struct htree_lock_head *lhead = lck->lk_head;
-+      struct htree_lock *tmp;
-+      struct htree_lock *tmp2;
-+      int granted = 0;
-+      int i;
-+
-+      BUG_ON(lhead->lh_ngranted[lck->lk_mode] == 0);
-+
-+      lhead->lh_ngranted[lck->lk_mode]--;
-+      lck->lk_mode = HTREE_LOCK_INVAL;
-+
-+      for (i = 0; i < HTREE_LOCK_MAX; i++) {
-+              if (lhead->lh_ngranted[i] != 0)
-+                      granted |= 1 << i;
-+      }
-+      list_for_each_entry_safe(tmp, tmp2,
-+                               &lhead->lh_blocked_list, lk_blocked_list) {
-+              /* conflict with any granted lock? */
-+              if ((htree_lock_compat[tmp->lk_mode] & granted) != granted)
-+                      break;
-+
-+              list_del_init(&tmp->lk_blocked_list);
-+
-+              BUG_ON(lhead->lh_nblocked[tmp->lk_mode] == 0);
-+
-+              lhead->lh_nblocked[tmp->lk_mode]--;
-+              lhead->lh_ngranted[tmp->lk_mode]++;
-+              granted |= 1 << tmp->lk_mode;
-+
-+              BUG_ON(tmp->lk_task == NULL);
-+              wake_up_process(tmp->lk_task);
-+      }
-+}
-+
-+/* it's a wrapper of htree_lock_internal and the exported interface.
-+ * It always returns 1 with the lock granted if @wait is true; it can return 0
-+ * if @wait is false and the locking request can't be granted immediately */
-+int
-+htree_lock_try(struct htree_lock *lck, struct htree_lock_head *lhead,
-+             htree_lock_mode_t mode, int wait)
-+{
-+      int     rc;
-+
-+      BUG_ON(lck->lk_depth > lhead->lh_depth);
-+      BUG_ON(lck->lk_head != NULL);
-+      BUG_ON(lck->lk_task != NULL);
-+
-+      lck->lk_head = lhead;
-+      lck->lk_mode = mode;
-+
-+      htree_spin_lock(lhead, HTREE_DEP_ROOT);
-+      rc = htree_lock_internal(lck, wait);
-+      if (rc != 0)
-+              htree_spin_unlock(lhead, HTREE_DEP_ROOT);
-+      return rc >= 0;
-+}
-+EXPORT_SYMBOL(htree_lock_try);
-+
-+/* it's a wrapper of htree_unlock_internal and the exported interface.
-+ * It releases all htree_node_locks and the htree_lock */
-+void
-+htree_unlock(struct htree_lock *lck)
-+{
-+      BUG_ON(lck->lk_head == NULL);
-+      BUG_ON(lck->lk_mode == HTREE_LOCK_INVAL);
-+
-+      htree_node_release_all(lck);
-+
-+      htree_spin_lock(lck->lk_head, HTREE_DEP_ROOT);
-+      htree_unlock_internal(lck);
-+      htree_spin_unlock(lck->lk_head, HTREE_DEP_ROOT);
-+      lck->lk_head = NULL;
-+      lck->lk_task = NULL;
-+}
-+EXPORT_SYMBOL(htree_unlock);
-+
-+/* change lock mode */
-+void
-+htree_change_mode(struct htree_lock *lck, htree_lock_mode_t mode)
-+{
-+      BUG_ON(lck->lk_mode == HTREE_LOCK_INVAL);
-+      lck->lk_mode = mode;
-+}
-+EXPORT_SYMBOL(htree_change_mode);
-+
-+/* release the htree lock, and lock it again with a new mode.
-+ * This function will first release all htree_node_locks and the htree_lock,
-+ * then try to re-acquire the htree_lock with the new @mode.
-+ * It always returns 1 with the lock granted if @wait is true; it can return 0
-+ * if @wait is false and the locking request can't be granted immediately */
-+int
-+htree_change_lock_try(struct htree_lock *lck, htree_lock_mode_t mode, int wait)
-+{
-+      struct htree_lock_head *lhead = lck->lk_head;
-+      int rc;
-+
-+      BUG_ON(lhead == NULL);
-+      BUG_ON(lck->lk_mode == mode);
-+      BUG_ON(lck->lk_mode == HTREE_LOCK_INVAL || mode == HTREE_LOCK_INVAL);
-+
-+      htree_node_release_all(lck);
-+
-+      htree_spin_lock(lhead, HTREE_DEP_ROOT);
-+      htree_unlock_internal(lck);
-+      lck->lk_mode = mode;
-+      rc = htree_lock_internal(lck, wait);
-+      if (rc != 0)
-+              htree_spin_unlock(lhead, HTREE_DEP_ROOT);
-+      return rc >= 0;
-+}
-+EXPORT_SYMBOL(htree_change_lock_try);
-+
-+/* create a htree_lock head with @depth levels (number of child-locks),
-+ * it is a per-resource structure */
-+struct htree_lock_head *
-+htree_lock_head_alloc(unsigned depth, unsigned hbits, unsigned priv)
-+{
-+      struct htree_lock_head *lhead;
-+      int  i;
-+
-+      if (depth > HTREE_LOCK_DEP_MAX) {
-+              printk(KERN_ERR "%d is larger than max htree_lock depth %d\n",
-+                      depth, HTREE_LOCK_DEP_MAX);
-+              return NULL;
-+      }
-+
-+      lhead = kzalloc(offsetof(struct htree_lock_head,
-+                               lh_children[depth]) + priv, GFP_NOFS);
-+      if (lhead == NULL)
-+              return NULL;
-+
-+      if (hbits < HTREE_HBITS_MIN)
-+              lhead->lh_hbits = HTREE_HBITS_MIN;
-+      else if (hbits > HTREE_HBITS_MAX)
-+              lhead->lh_hbits = HTREE_HBITS_MAX;
-+
-+      lhead->lh_lock = 0;
-+      lhead->lh_depth = depth;
-+      INIT_LIST_HEAD(&lhead->lh_blocked_list);
-+      if (priv > 0) {
-+              lhead->lh_private = (void *)lhead +
-+                      offsetof(struct htree_lock_head, lh_children[depth]);
-+      }
-+
-+      for (i = 0; i < depth; i++) {
-+              INIT_LIST_HEAD(&lhead->lh_children[i].lc_list);
-+              lhead->lh_children[i].lc_events = HTREE_EVENT_DISABLE;
-+      }
-+      return lhead;
-+}
-+EXPORT_SYMBOL(htree_lock_head_alloc);
-+
-+/* free the htree_lock head */
-+void
-+htree_lock_head_free(struct htree_lock_head *lhead)
-+{
-+      int     i;
-+
-+      BUG_ON(!list_empty(&lhead->lh_blocked_list));
-+      for (i = 0; i < lhead->lh_depth; i++)
-+              BUG_ON(!list_empty(&lhead->lh_children[i].lc_list));
-+      kfree(lhead);
-+}
-+EXPORT_SYMBOL(htree_lock_head_free);
-+
-+/* register event callback for @events of child-lock at level @dep */
-+void
-+htree_lock_event_attach(struct htree_lock_head *lhead, unsigned dep,
-+                      unsigned events, htree_event_cb_t callback)
-+{
-+      BUG_ON(lhead->lh_depth <= dep);
-+      lhead->lh_children[dep].lc_events = events;
-+      lhead->lh_children[dep].lc_callback = callback;
-+}
-+EXPORT_SYMBOL(htree_lock_event_attach);
-+
-+/* allocate a htree_lock, which is a per-thread structure; @pbytes is some
-+ * extra bytes of private data for the caller */
-+struct htree_lock *
-+htree_lock_alloc(unsigned depth, unsigned pbytes)
-+{
-+      struct htree_lock *lck;
-+      int i = offsetof(struct htree_lock, lk_nodes[depth]);
-+
-+      if (depth > HTREE_LOCK_DEP_MAX) {
-+              printk(KERN_ERR "%d is larger than max htree_lock depth %d\n",
-+                      depth, HTREE_LOCK_DEP_MAX);
-+              return NULL;
-+      }
-+      lck = kzalloc(i + pbytes, GFP_NOFS);
-+      if (lck == NULL)
-+              return NULL;
-+
-+      if (pbytes != 0)
-+              lck->lk_private = (void *)lck + i;
-+      lck->lk_mode = HTREE_LOCK_INVAL;
-+      lck->lk_depth = depth;
-+      INIT_LIST_HEAD(&lck->lk_blocked_list);
-+
-+      for (i = 0; i < depth; i++) {
-+              struct htree_lock_node *node = &lck->lk_nodes[i];
-+
-+              node->ln_mode = HTREE_LOCK_INVAL;
-+              INIT_LIST_HEAD(&node->ln_major_list);
-+              INIT_LIST_HEAD(&node->ln_minor_list);
-+              INIT_LIST_HEAD(&node->ln_alive_list);
-+              INIT_LIST_HEAD(&node->ln_blocked_list);
-+              INIT_LIST_HEAD(&node->ln_granted_list);
-+      }
-+
-+      return lck;
-+}
-+EXPORT_SYMBOL(htree_lock_alloc);
-+
-+/* free htree_lock node */
-+void
-+htree_lock_free(struct htree_lock *lck)
-+{
-+      BUG_ON(lck->lk_mode != HTREE_LOCK_INVAL);
-+      kfree(lck);
-+}
-+EXPORT_SYMBOL(htree_lock_free);
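
The pdirop entry points declared in the fs/ext4/ext4.h hunk above
(ext4_htree_lock_head_alloc(), ext4_htree_lock_alloc(), ext4_htree_lock(),
__ext4_find_entry(), __ext4_add_entry()) are intended to be called from the
ldiskfs/Lustre layer. A hypothetical sketch of a lookup under the shared
EXT4_HLOCK_LOOKUP mode is shown below; pdirop_lookup() is an illustrative
name, not a function from the patch.

/* hypothetical ldiskfs-side lookup under a shared htree lock */
#include "ext4.h"

static struct buffer_head *
pdirop_lookup(struct inode *dir, struct htree_lock_head *lhead,
              const struct qstr *name, struct ext4_dir_entry_2 **res_dir)
{
        struct htree_lock *lck = ext4_htree_lock_alloc();
        struct buffer_head *bh;

        if (lck == NULL)
                return ERR_PTR(-ENOMEM);

        /* EXT4_HLOCK_LOOKUP maps to a CR lock on the whole tree, so
         * multiple lookups may run in parallel; the DE/spin lock bits it
         * carries govern the finer-grained locking done internally */
        ext4_htree_lock(lck, lhead, dir, EXT4_HLOCK_LOOKUP);
        bh = __ext4_find_entry(dir, name, res_dir, NULL, lck);
        ext4_htree_unlock(lck);
        ext4_htree_lock_free(lck);
        return bh;
}
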
-diff --git a/fs/ext4/namei.c b/fs/ext4/namei.c
-index 3f70bca..99a8da2 100644
---- a/fs/ext4/namei.c
-+++ b/fs/ext4/namei.c
-@@ -52,6 +52,7 @@ struct buffer_head *ext4_append(handle_t *handle,
-                                       ext4_lblk_t *block)
- {
-       struct buffer_head *bh;
-+      struct ext4_inode_info *ei = EXT4_I(inode);
-       int err;
-       if (unlikely(EXT4_SB(inode->i_sb)->s_max_dir_size_kb &&
-@@ -59,15 +60,22 @@ struct buffer_head *ext4_append(handle_t *handle,
-                     EXT4_SB(inode->i_sb)->s_max_dir_size_kb)))
-               return ERR_PTR(-ENOSPC);
-+      /* with parallel dir operations all appends
-+      * have to be serialized -bzzz */
-+      down(&ei->i_append_sem);
-+
-       *block = inode->i_size >> inode->i_sb->s_blocksize_bits;
-       bh = ext4_bread(handle, inode, *block, EXT4_GET_BLOCKS_CREATE);
--      if (IS_ERR(bh))
-+      if (IS_ERR(bh)) {
-+              up(&ei->i_append_sem);
-               return bh;
-+      }
-       inode->i_size += inode->i_sb->s_blocksize;
-       EXT4_I(inode)->i_disksize = inode->i_size;
-       BUFFER_TRACE(bh, "get_write_access");
-       err = ext4_journal_get_write_access(handle, bh);
-+      up(&ei->i_append_sem);
-       if (err) {
-               brelse(bh);
-               ext4_std_error(inode->i_sb, err);
-@@ -247,7 +255,8 @@ static unsigned dx_node_limit(struct inode *dir);
- static struct dx_frame *dx_probe(struct ext4_filename *fname,
-                                struct inode *dir,
-                                struct dx_hash_info *hinfo,
--                               struct dx_frame *frame);
-+                               struct dx_frame *frame,
-+                               struct htree_lock *lck);
- static void dx_release(struct dx_frame *frames);
- static int dx_make_map(struct inode *dir, struct ext4_dir_entry_2 *de,
-                      unsigned blocksize, struct dx_hash_info *hinfo,
-@@ -261,12 +270,13 @@ static void dx_insert_block(struct dx_frame *frame,
- static int ext4_htree_next_block(struct inode *dir, __u32 hash,
-                                struct dx_frame *frame,
-                                struct dx_frame *frames,
--                               __u32 *start_hash);
-+                               __u32 *start_hash, struct htree_lock *lck);
- static struct buffer_head * ext4_dx_find_entry(struct inode *dir,
-               struct ext4_filename *fname,
--              struct ext4_dir_entry_2 **res_dir);
-+              struct ext4_dir_entry_2 **res_dir, struct htree_lock *lck);
- static int ext4_dx_add_entry(handle_t *handle, struct ext4_filename *fname,
--                           struct dentry *dentry, struct inode *inode);
-+                           struct dentry *dentry, struct inode *inode,
-+                           struct htree_lock *lck);
- /* checksumming functions */
- void initialize_dirent_tail(struct ext4_dir_entry_tail *t,
-@@ -733,6 +743,227 @@ struct stats dx_show_entries(struct dx_hash_info *hinfo, struct inode *dir,
- }
- #endif /* DX_DEBUG */
-+/* private data for htree_lock */
-+struct ext4_dir_lock_data {
-+      unsigned                ld_flags;  /* bits-map for lock types */
-+      unsigned                ld_count;  /* # entries of the last DX block */
-+      struct dx_entry         ld_at_entry; /* copy of leaf dx_entry */
-+      struct dx_entry         *ld_at;    /* position of leaf dx_entry */
-+};
-+
-+#define ext4_htree_lock_data(l)       ((struct ext4_dir_lock_data *)(l)->lk_private)
-+#define ext4_find_entry(dir, name, dirent, inline) \
-+                      __ext4_find_entry(dir, name, dirent, inline, NULL)
-+#define ext4_add_entry(handle, dentry, inode) \
-+                      __ext4_add_entry(handle, dentry, inode, NULL)
-+
-+/* NB: ext4_lblk_t is 32 bits so we use high bits to identify invalid blk */
-+#define EXT4_HTREE_NODE_CHANGED       (0xcafeULL << 32)
-+
-+static void ext4_htree_event_cb(void *target, void *event)
-+{
-+      u64 *block = (u64 *)target;
-+
-+      if (*block == dx_get_block((struct dx_entry *)event))
-+              *block = EXT4_HTREE_NODE_CHANGED;
-+}
-+
-+struct htree_lock_head *ext4_htree_lock_head_alloc(unsigned hbits)
-+{
-+      struct htree_lock_head *lhead;
-+
-+      lhead = htree_lock_head_alloc(EXT4_LK_MAX, hbits, 0);
-+      if (lhead != NULL) {
-+              htree_lock_event_attach(lhead, EXT4_LK_SPIN, HTREE_EVENT_WR,
-+                                      ext4_htree_event_cb);
-+      }
-+      return lhead;
-+}
-+EXPORT_SYMBOL(ext4_htree_lock_head_alloc);
-+
-+struct htree_lock *ext4_htree_lock_alloc(void)
-+{
-+      return htree_lock_alloc(EXT4_LK_MAX,
-+                              sizeof(struct ext4_dir_lock_data));
-+}
-+EXPORT_SYMBOL(ext4_htree_lock_alloc);
-+
-+static htree_lock_mode_t ext4_htree_mode(unsigned flags)
-+{
-+      switch (flags) {
-+      default: /* 0 or unknown flags require EX lock */
-+              return HTREE_LOCK_EX;
-+      case EXT4_HLOCK_READDIR:
-+              return HTREE_LOCK_PR;
-+      case EXT4_HLOCK_LOOKUP:
-+              return HTREE_LOCK_CR;
-+      case EXT4_HLOCK_DEL:
-+      case EXT4_HLOCK_ADD:
-+              return HTREE_LOCK_CW;
-+      }
-+}
-+
-+/* return PR for read-only operations, otherwise return EX */
-+static inline htree_lock_mode_t ext4_htree_safe_mode(unsigned flags)
-+{
-+      int writer = (flags & EXT4_LB_DE) == EXT4_LB_DE;
-+
-+      /* 0 requires EX lock */
-+      return (flags == 0 || writer) ? HTREE_LOCK_EX : HTREE_LOCK_PR;
-+}
-+
-+static int ext4_htree_safe_locked(struct htree_lock *lck)
-+{
-+      int writer;
-+
-+      if (lck == NULL || lck->lk_mode == HTREE_LOCK_EX)
-+              return 1;
-+
-+      writer = (ext4_htree_lock_data(lck)->ld_flags & EXT4_LB_DE) ==
-+               EXT4_LB_DE;
-+      if (writer) /* all readers & writers are excluded? */
-+              return lck->lk_mode == HTREE_LOCK_EX;
-+
-+      /* all writers are excluded? */
-+      return lck->lk_mode == HTREE_LOCK_PR ||
-+             lck->lk_mode == HTREE_LOCK_PW ||
-+             lck->lk_mode == HTREE_LOCK_EX;
-+}
-+
-+/* relock htree_lock with EX mode if it's change operation, otherwise
-+ * relock it with PR mode. It's noop if PDO is disabled. */
-+static void ext4_htree_safe_relock(struct htree_lock *lck)
-+{
-+      if (!ext4_htree_safe_locked(lck)) {
-+              unsigned flags = ext4_htree_lock_data(lck)->ld_flags;
-+
-+              htree_change_lock(lck, ext4_htree_safe_mode(flags));
-+      }
-+}
-+
-+void ext4_htree_lock(struct htree_lock *lck, struct htree_lock_head *lhead,
-+                   struct inode *dir, unsigned flags)
-+{
-+      htree_lock_mode_t mode = is_dx(dir) ? ext4_htree_mode(flags) :
-+                                            ext4_htree_safe_mode(flags);
-+
-+      ext4_htree_lock_data(lck)->ld_flags = flags;
-+      htree_lock(lck, lhead, mode);
-+      if (!is_dx(dir))
-+              ext4_htree_safe_relock(lck); /* make sure it's safe locked */
-+}
-+EXPORT_SYMBOL(ext4_htree_lock);
-+
-+static int ext4_htree_node_lock(struct htree_lock *lck, struct dx_entry *at,
-+                              unsigned lmask, int wait, void *ev)
-+{
-+      u32     key = (at == NULL) ? 0 : dx_get_block(at);
-+      u32     mode;
-+
-+      /* NOOP if htree is well protected or caller doesn't require the lock */
-+      if (ext4_htree_safe_locked(lck) ||
-+         !(ext4_htree_lock_data(lck)->ld_flags & lmask))
-+              return 1;
-+
-+      mode = (ext4_htree_lock_data(lck)->ld_flags & lmask) == lmask ?
-+              HTREE_LOCK_PW : HTREE_LOCK_PR;
-+      while (1) {
-+              if (htree_node_lock_try(lck, mode, key, ffz(~lmask), wait, ev))
-+                      return 1;
-+              if (!(lmask & EXT4_LB_SPIN)) /* not a spinlock */
-+                      return 0;
-+              cpu_relax(); /* spin until granted */
-+      }
-+}
-+
-+static int ext4_htree_node_locked(struct htree_lock *lck, unsigned lmask)
-+{
-+      return ext4_htree_safe_locked(lck) ||
-+             htree_node_is_granted(lck, ffz(~lmask));
-+}
-+
-+static void ext4_htree_node_unlock(struct htree_lock *lck,
-+                                 unsigned lmask, void *buf)
-+{
-+      /* NB: it's safe to call mutiple times or even it's not locked */
-+      if (!ext4_htree_safe_locked(lck) &&
-+           htree_node_is_granted(lck, ffz(~lmask)))
-+              htree_node_unlock(lck, ffz(~lmask), buf);
-+}
-+
-+#define ext4_htree_dx_lock(lck, key)          \
-+      ext4_htree_node_lock(lck, key, EXT4_LB_DX, 1, NULL)
-+#define ext4_htree_dx_lock_try(lck, key)      \
-+      ext4_htree_node_lock(lck, key, EXT4_LB_DX, 0, NULL)
-+#define ext4_htree_dx_unlock(lck)             \
-+      ext4_htree_node_unlock(lck, EXT4_LB_DX, NULL)
-+#define ext4_htree_dx_locked(lck)             \
-+      ext4_htree_node_locked(lck, EXT4_LB_DX)
-+
-+static void ext4_htree_dx_need_lock(struct htree_lock *lck)
-+{
-+      struct ext4_dir_lock_data *ld;
-+
-+      if (ext4_htree_safe_locked(lck))
-+              return;
-+
-+      ld = ext4_htree_lock_data(lck);
-+      switch (ld->ld_flags) {
-+      default:
-+              return;
-+      case EXT4_HLOCK_LOOKUP:
-+              ld->ld_flags = EXT4_HLOCK_LOOKUP_SAFE;
-+              return;
-+      case EXT4_HLOCK_DEL:
-+              ld->ld_flags = EXT4_HLOCK_DEL_SAFE;
-+              return;
-+      case EXT4_HLOCK_ADD:
-+              ld->ld_flags = EXT4_HLOCK_SPLIT;
-+              return;
-+      }
-+}
-+
-+#define ext4_htree_de_lock(lck, key)          \
-+      ext4_htree_node_lock(lck, key, EXT4_LB_DE, 1, NULL)
-+#define ext4_htree_de_unlock(lck)             \
-+      ext4_htree_node_unlock(lck, EXT4_LB_DE, NULL)
-+
-+#define ext4_htree_spin_lock(lck, key, event) \
-+      ext4_htree_node_lock(lck, key, EXT4_LB_SPIN, 0, event)
-+#define ext4_htree_spin_unlock(lck)           \
-+      ext4_htree_node_unlock(lck, EXT4_LB_SPIN, NULL)
-+#define ext4_htree_spin_unlock_listen(lck, p) \
-+      ext4_htree_node_unlock(lck, EXT4_LB_SPIN, p)
-+
-+static void ext4_htree_spin_stop_listen(struct htree_lock *lck)
-+{
-+      if (!ext4_htree_safe_locked(lck) &&
-+          htree_node_is_listening(lck, ffz(~EXT4_LB_SPIN)))
-+              htree_node_stop_listen(lck, ffz(~EXT4_LB_SPIN));
-+}
-+
-+enum {
-+      DX_HASH_COL_IGNORE,     /* ignore collision while probing frames */
-+      DX_HASH_COL_YES,        /* there is collision and it does matter */
-+      DX_HASH_COL_NO,         /* there is no collision */
-+};
-+
-+static int dx_probe_hash_collision(struct htree_lock *lck,
-+                                 struct dx_entry *entries,
-+                                 struct dx_entry *at, u32 hash)
-+{
-+      if (!(lck && ext4_htree_lock_data(lck)->ld_flags & EXT4_LB_EXACT)) {
-+              return DX_HASH_COL_IGNORE; /* don't care about collision */
-+
-+      } else if (at == entries + dx_get_count(entries) - 1) {
-+              return DX_HASH_COL_IGNORE; /* not in any leaf of this DX */
-+
-+      } else { /* hash collision? */
-+              return ((dx_get_hash(at + 1) & ~1) == hash) ?
-+                      DX_HASH_COL_YES : DX_HASH_COL_NO;
-+      }
-+}
-+
- /*
-  * Probe for a directory leaf block to search.
-  *
-@@ -744,10 +975,11 @@ struct stats dx_show_entries(struct dx_hash_info *hinfo, struct inode *dir,
-  */
- static struct dx_frame *
- dx_probe(struct ext4_filename *fname, struct inode *dir,
--       struct dx_hash_info *hinfo, struct dx_frame *frame_in)
-+       struct dx_hash_info *hinfo, struct dx_frame *frame_in,
-+       struct htree_lock *lck)
- {
-       unsigned count, indirect;
--      struct dx_entry *at, *entries, *p, *q, *m;
-+      struct dx_entry *at, *entries, *p, *q, *m, *dx = NULL;
-       struct dx_root_info *info;
-       struct dx_frame *frame = frame_in;
-       struct dx_frame *ret_err = ERR_PTR(ERR_BAD_DX_DIR);
-@@ -808,8 +1040,15 @@ dx_probe(struct ext4_filename *fname, struct inode *dir,
-       dxtrace(printk("Look up %x", hash));
-       while (1) {
-+              if (indirect == 0) { /* the last index level */
-+                      /* NB: ext4_htree_dx_lock() could be noop if
-+                       * DX-lock flag is not set for current operation */
-+                      ext4_htree_dx_lock(lck, dx);
-+                      ext4_htree_spin_lock(lck, dx, NULL);
-+              }
-               count = dx_get_count(entries);
--              if (!count || count > dx_get_limit(entries)) {
-+              if (count == 0 || count > dx_get_limit(entries)) {
-+                      ext4_htree_spin_unlock(lck); /* release spin */
-                       ext4_warning_inode(dir,
-                                          "dx entry: count %u beyond limit %u",
-                                          count, dx_get_limit(entries));
-@@ -847,8 +1086,70 @@ dx_probe(struct ext4_filename *fname, struct inode *dir,
-                              dx_get_block(at)));
-               frame->entries = entries;
-               frame->at = at;
--              if (!indirect--)
-+
-+              if (indirect == 0) { /* the last index level */
-+                      struct ext4_dir_lock_data *ld;
-+                      u64 myblock;
-+
-+                      /* By default we only lock DE-block, however, we will
-+                       * also lock the last level DX-block if:
-+                       * a) there is hash collision
-+                       *    we will set DX-lock flag (a few lines below)
-+                       *    and redo to lock DX-block
-+                       *    see detail in dx_probe_hash_collision()
-+                       * b) it's a retry from splitting
-+                       *    we need to lock the last level DX-block so nobody
-+                       *    else can split any leaf blocks under the same
-+                       *    DX-block, see detail in ext4_dx_add_entry()
-+                       */
-+                      if (ext4_htree_dx_locked(lck)) {
-+                              /* DX-block is locked, just lock DE-block
-+                               * and return */
-+                              ext4_htree_spin_unlock(lck);
-+                              if (!ext4_htree_safe_locked(lck))
-+                                      ext4_htree_de_lock(lck, frame->at);
-+                              return frame;
-+                      }
-+                      /* it's pdirop and no DX lock */
-+                      if (dx_probe_hash_collision(lck, entries, at, hash) ==
-+                          DX_HASH_COL_YES) {
-+                              /* found hash collision, set DX-lock flag
-+                               * and retry to abtain DX-lock */
-+                              ext4_htree_spin_unlock(lck);
-+                              ext4_htree_dx_need_lock(lck);
-+                              continue;
-+                      }
-+                      ld = ext4_htree_lock_data(lck);
-+                      /* because I don't lock DX, so @at can't be trusted
-+                       * after I release spinlock so I have to save it */
-+                      ld->ld_at = at;
-+                      ld->ld_at_entry = *at;
-+                      ld->ld_count = dx_get_count(entries);
-+
-+                      frame->at = &ld->ld_at_entry;
-+                      myblock = dx_get_block(at);
-+
-+                      /* NB: ordering locking */
-+                      ext4_htree_spin_unlock_listen(lck, &myblock);
-+                      /* other thread can split this DE-block because:
-+                       * a) I don't have lock for the DE-block yet
-+                       * b) I released spinlock on DX-block
-+                       * if it happened I can detect it by listening
-+                       * splitting event on this DE-block */
-+                      ext4_htree_de_lock(lck, frame->at);
-+                      ext4_htree_spin_stop_listen(lck);
-+
-+                      if (myblock == EXT4_HTREE_NODE_CHANGED) {
-+                              /* someone split this DE-block before
-+                               * I locked it, I need to retry and lock
-+                               * valid DE-block */
-+                              ext4_htree_de_unlock(lck);
-+                              continue;
-+                      }
-                       return frame;
-+              }
-+              dx = at;
-+              indirect--;
-               frame++;
-               frame->bh = ext4_read_dirblock(dir, dx_get_block(at), INDEX);
-               if (IS_ERR(frame->bh)) {
-@@ -915,7 +1216,7 @@ static void dx_release(struct dx_frame *frames)
- static int ext4_htree_next_block(struct inode *dir, __u32 hash,
-                                struct dx_frame *frame,
-                                struct dx_frame *frames,
--                               __u32 *start_hash)
-+                               __u32 *start_hash, struct htree_lock *lck)
- {
-       struct dx_frame *p;
-       struct buffer_head *bh;
-@@ -930,12 +1231,22 @@ static int ext4_htree_next_block(struct inode *dir, __u32 hash,
-        * this loop, num_frames indicates the number of interior
-        * nodes need to be read.
-        */
-+      ext4_htree_de_unlock(lck);
-       while (1) {
--              if (++(p->at) < p->entries + dx_get_count(p->entries))
--                      break;
-+              if (num_frames > 0 || ext4_htree_dx_locked(lck)) {
-+                      /* num_frames > 0 :
-+                       *   DX block
-+                       * ext4_htree_dx_locked:
-+                       *   frame->at is reliable pointer returned by dx_probe,
-+                       *   otherwise dx_probe already knew no collision */
-+                      if (++(p->at) < p->entries + dx_get_count(p->entries))
-+                              break;
-+              }
-               if (p == frames)
-                       return 0;
-               num_frames++;
-+              if (num_frames == 1)
-+                      ext4_htree_dx_unlock(lck);
-               p--;
-       }
-@@ -958,6 +1269,13 @@ static int ext4_htree_next_block(struct inode *dir, __u32 hash,
-        * block so no check is necessary
-        */
-       while (num_frames--) {
-+              if (num_frames == 0) {
-+                      /* it's not always necessary, we just don't want to
-+                       * detect hash collision again */
-+                      ext4_htree_dx_need_lock(lck);
-+                      ext4_htree_dx_lock(lck, p->at);
-+              }
-+
-               bh = ext4_read_dirblock(dir, dx_get_block(p->at), INDEX);
-               if (IS_ERR(bh))
-                       return PTR_ERR(bh);
-@@ -966,6 +1284,7 @@ static int ext4_htree_next_block(struct inode *dir, __u32 hash,
-               p->bh = bh;
-               p->at = p->entries = ((struct dx_node *) bh->b_data)->entries;
-       }
-+      ext4_htree_de_lock(lck, p->at);
-       return 1;
- }
-@@ -1110,10 +1429,10 @@ int ext4_htree_fill_tree(struct file *dir_file, __u32 start_hash,
-       }
-       hinfo.hash = start_hash;
-       hinfo.minor_hash = 0;
--      frame = dx_probe(NULL, dir, &hinfo, frames);
-+      /* assume it's PR locked */
-+      frame = dx_probe(NULL, dir, &hinfo, frames, NULL);
-       if (IS_ERR(frame))
-               return PTR_ERR(frame);
--
-       /* Add '.' and '..' from the htree header */
-       if (!start_hash && !start_minor_hash) {
-               de = (struct ext4_dir_entry_2 *) frames[0].bh->b_data;
-@@ -1148,7 +1467,7 @@ int ext4_htree_fill_tree(struct file *dir_file, __u32 start_hash,
-               count += ret;
-               hashval = ~0;
-               ret = ext4_htree_next_block(dir, HASH_NB_ALWAYS,
--                                          frame, frames, &hashval);
-+                                          frame, frames, &hashval, NULL);
-               *next_hash = hashval;
-               if (ret < 0) {
-                       err = ret;
-@@ -1372,10 +1691,10 @@ static int is_dx_internal_node(struct inode *dir, ext4_lblk_t block,
-  * The returned buffer_head has ->b_count elevated.  The caller is expected
-  * to brelse() it when appropriate.
-  */
--static struct buffer_head * ext4_find_entry (struct inode *dir,
-+struct buffer_head *__ext4_find_entry(struct inode *dir,
-                                       const struct qstr *d_name,
-                                       struct ext4_dir_entry_2 **res_dir,
--                                      int *inlined)
-+                                      int *inlined, struct htree_lock *lck)
- {
-       struct super_block *sb;
-       struct buffer_head *bh_use[NAMEI_RA_SIZE];
-@@ -1423,7 +1742,7 @@ static struct buffer_head * ext4_find_entry (struct inode *dir,
-               goto restart;
-       }
-       if (is_dx(dir)) {
--              ret = ext4_dx_find_entry(dir, &fname, res_dir);
-+              ret = ext4_dx_find_entry(dir, &fname, res_dir, lck);
-               /*
-                * On success, or if the error was file not found,
-                * return.  Otherwise, fall back to doing a search the
-@@ -1433,6 +1752,7 @@ static struct buffer_head * ext4_find_entry (struct inode *dir,
-                       goto cleanup_and_exit;
-               dxtrace(printk(KERN_DEBUG "ext4_find_entry: dx failed, "
-                              "falling back\n"));
-+              ext4_htree_safe_relock(lck);
-       }
-       nblocks = dir->i_size >> EXT4_BLOCK_SIZE_BITS(sb);
-       start = EXT4_I(dir)->i_dir_start_lookup;
-@@ -1528,10 +1848,12 @@ cleanup_and_exit:
-       ext4_fname_free_filename(&fname);
-       return ret;
- }
-+EXPORT_SYMBOL(__ext4_find_entry);
- static struct buffer_head * ext4_dx_find_entry(struct inode *dir,
-                       struct ext4_filename *fname,
--                      struct ext4_dir_entry_2 **res_dir)
-+                      struct ext4_dir_entry_2 **res_dir,
-+                      struct htree_lock *lck)
- {
-       struct super_block * sb = dir->i_sb;
-       struct dx_frame frames[EXT4_HTREE_LEVEL], *frame;
-@@ -1543,7 +1865,7 @@ static struct buffer_head * ext4_dx_find_entry(struct inode *dir,
- #ifdef CONFIG_EXT4_FS_ENCRYPTION
-       *res_dir = NULL;
- #endif
--      frame = dx_probe(fname, dir, NULL, frames);
-+      frame = dx_probe(fname, dir, NULL, frames, lck);
-       if (IS_ERR(frame))
-               return (struct buffer_head *) frame;
-       do {
-@@ -1565,7 +1887,7 @@ static struct buffer_head * ext4_dx_find_entry(struct inode *dir,
-               /* Check to see if we should continue to search */
-               retval = ext4_htree_next_block(dir, fname->hinfo.hash, frame,
--                                             frames, NULL);
-+                                             frames, NULL, lck);
-               if (retval < 0) {
-                       ext4_warning_inode(dir,
-                               "error %d reading directory index block",
-@@ -1738,8 +2060,9 @@ static struct ext4_dir_entry_2* dx_pack_dirents(char *base, unsigned blocksize)
-  * Returns pointer to de in block into which the new entry will be inserted.
-  */
- static struct ext4_dir_entry_2 *do_split(handle_t *handle, struct inode *dir,
--                      struct buffer_head **bh,struct dx_frame *frame,
--                      struct dx_hash_info *hinfo)
-+                      struct buffer_head **bh, struct dx_frame *frames,
-+                      struct dx_frame *frame, struct dx_hash_info *hinfo,
-+                      struct htree_lock *lck)
- {
-       unsigned blocksize = dir->i_sb->s_blocksize;
-       unsigned count, continued;
-@@ -1801,8 +2124,14 @@ static struct ext4_dir_entry_2 *do_split(handle_t *handle, struct inode *dir,
-                                       hash2, split, count-split));
-       /* Fancy dance to stay within two buffers */
--      de2 = dx_move_dirents(data1, data2, map + split, count - split,
--                            blocksize);
-+      if (hinfo->hash < hash2) {
-+              de2 = dx_move_dirents(data1, data2, map + split,
-+                                    count - split, blocksize);
-+      } else {
-+              /* make sure we will add entry to the same block which
-+               * we have already locked */
-+              de2 = dx_move_dirents(data1, data2, map, split, blocksize);
-+      }
-       de = dx_pack_dirents(data1, blocksize);
-       de->rec_len = ext4_rec_len_to_disk(data1 + (blocksize - csum_size) -
-                                          (char *) de,
-@@ -1823,12 +2152,21 @@ static struct ext4_dir_entry_2 *do_split(handle_t *handle, struct inode *dir,
-       dxtrace(dx_show_leaf(dir, hinfo, (struct ext4_dir_entry_2 *) data2,
-                       blocksize, 1));
--      /* Which block gets the new entry? */
--      if (hinfo->hash >= hash2) {
--              swap(*bh, bh2);
--              de = de2;
-+      ext4_htree_spin_lock(lck, frame > frames ? (frame - 1)->at : NULL,
-+                           frame->at); /* notify block is being split */
-+      if (hinfo->hash < hash2) {
-+              dx_insert_block(frame, hash2 + continued, newblock);
-+
-+      } else {
-+              /* switch block number */
-+              dx_insert_block(frame, hash2 + continued,
-+                              dx_get_block(frame->at));
-+              dx_set_block(frame->at, newblock);
-+              (frame->at)++;
-       }
--      dx_insert_block(frame, hash2 + continued, newblock);
-+      ext4_htree_spin_unlock(lck);
-+      ext4_htree_dx_unlock(lck);
-+
-       err = ext4_handle_dirty_dirent_node(handle, dir, bh2);
-       if (err)
-               goto journal_error;
-@@ -2122,7 +2460,7 @@ static int make_indexed_dir(handle_t *handle, struct ext4_filename *fname,
-       if (retval)
-               goto out_frames;        
--      de = do_split(handle,dir, &bh, frame, &fname->hinfo);
-+      de = do_split(handle,dir, &bh, frames, frame, &fname->hinfo, NULL);
-       if (IS_ERR(de)) {
-               retval = PTR_ERR(de);
-               goto out_frames;
-@@ -2233,8 +2571,8 @@ out:
-  * may not sleep between calling this and putting something into
-  * the entry, as someone else might have used it while you slept.
-  */
--static int ext4_add_entry(handle_t *handle, struct dentry *dentry,
--                        struct inode *inode)
-+int __ext4_add_entry(handle_t *handle, struct dentry *dentry,
-+                    struct inode *inode, struct htree_lock *lck)
- {
-       struct inode *dir = d_inode(dentry->d_parent);
-       struct buffer_head *bh = NULL;
-@@ -2275,9 +2613,10 @@ static int ext4_add_entry(handle_t *handle, struct dentry *dentry,
-               if (dentry->d_name.len == 2 &&
-                   memcmp(dentry->d_name.name, "..", 2) == 0)
-                       return ext4_update_dotdot(handle, dentry, inode);
--              retval = ext4_dx_add_entry(handle, &fname, dentry, inode);
-+              retval = ext4_dx_add_entry(handle, &fname, dentry, inode, lck);
-               if (!retval || (retval != ERR_BAD_DX_DIR))
-                       goto out;
-+              ext4_htree_safe_relock(lck);
-               ext4_clear_inode_flag(dir, EXT4_INODE_INDEX);
-               dx_fallback++;
-               ext4_mark_inode_dirty(handle, dir);
-@@ -2327,12 +2666,14 @@ out:
-               ext4_set_inode_state(inode, EXT4_STATE_NEWENTRY);
-       return retval;
- }
-+EXPORT_SYMBOL(__ext4_add_entry);
- /*
-  * Returns 0 for success, or a negative error value
-  */
- static int ext4_dx_add_entry(handle_t *handle, struct ext4_filename *fname,
--                           struct dentry *dentry, struct inode *inode)
-+                           struct dentry *dentry, struct inode *inode,
-+                           struct htree_lock *lck)
- {
-       struct dx_frame frames[EXT4_HTREE_LEVEL], *frame;
-       struct dx_entry *entries, *at;
-@@ -2345,7 +2686,7 @@ static int ext4_dx_add_entry(handle_t *handle, struct ext4_filename *fname,
- again:
-       restart = 0;
--      frame = dx_probe(fname, dir, NULL, frames);
-+      frame = dx_probe(fname, dir, NULL, frames, lck);
-       if (IS_ERR(frame))
-               return PTR_ERR(frame);
-       entries = frame->entries;
-@@ -2375,6 +2716,11 @@ again:
-               struct dx_node *node2;
-               struct buffer_head *bh2;
-+              if (!ext4_htree_safe_locked(lck)) { /* retry with EX lock */
-+                      ext4_htree_safe_relock(lck);
-+                      restart = 1;
-+                      goto cleanup;
-+              }
-               while (frame > frames) {
-                       if (dx_get_count((frame - 1)->entries) <
-                           dx_get_limit((frame - 1)->entries)) {
-@@ -2474,8 +2820,32 @@ again:
-                       restart = 1;
-                       goto journal_error;
-               }
-+      } else if (!ext4_htree_dx_locked(lck)) {
-+              struct ext4_dir_lock_data *ld = ext4_htree_lock_data(lck);
-+
-+              /* not well protected, require DX lock */
-+              ext4_htree_dx_need_lock(lck);
-+              at = frame > frames ? (frame - 1)->at : NULL;
-+
-+              /* NB: no risk of deadlock because it's just a try.
-+               *
-+               * NB: we check ld_count for twice, the first time before
-+               * having DX lock, the second time after holding DX lock.
-+               *
-+               * NB: We never free blocks for directory so far, which
-+               * means value returned by dx_get_count() should equal to
-+               * ld->ld_count if nobody split any DE-block under @at,
-+               * and ld->ld_at still points to valid dx_entry. */
-+              if ((ld->ld_count != dx_get_count(entries)) ||
-+                  !ext4_htree_dx_lock_try(lck, at) ||
-+                  (ld->ld_count != dx_get_count(entries))) {
-+                      restart = 1;
-+                      goto cleanup;
-+              }
-+              /* OK, I've got DX lock and nothing changed */
-+              frame->at = ld->ld_at;
-       }
--      de = do_split(handle, dir, &bh, frame, &fname->hinfo);
-+      de = do_split(handle, dir, &bh, frames, frame, &fname->hinfo, lck);
-       if (IS_ERR(de)) {
-               err = PTR_ERR(de);
-               goto cleanup;
-@@ -2486,6 +2856,8 @@ again:
- journal_error:
-       ext4_std_error(dir->i_sb, err); /* this is a no-op if err == 0 */
- cleanup:
-+      ext4_htree_dx_unlock(lck);
-+      ext4_htree_de_unlock(lck);
-       brelse(bh);
-       dx_release(frames);
-       /* @restart is true means htree-path has been changed, we need to
-diff --git a/fs/ext4/super.c b/fs/ext4/super.c
-index 02fe65b..be65ad4 100644
---- a/fs/ext4/super.c
-+++ b/fs/ext4/super.c
-@@ -896,6 +896,7 @@ static struct inode *ext4_alloc_inode(struct super_block *sb)
-       ei->vfs_inode.i_version = 1;
-       spin_lock_init(&ei->i_raw_lock);
-+      sema_init(&ei->i_append_sem, 1);
-       INIT_LIST_HEAD(&ei->i_prealloc_list);
-       spin_lock_init(&ei->i_prealloc_lock);
-       ext4_es_init_tree(&ei->i_es_tree);
-diff --git a/include/linux/htree_lock.h b/include/linux/htree_lock.h
-new file mode 100644
-index 0000000..9dc7788
---- /dev/null
-+++ b/include/linux/htree_lock.h
-@@ -0,0 +1,187 @@
-+/*
-+ * include/linux/htree_lock.h
-+ *
-+ * Copyright (c) 2011, 2012, Intel Corporation.
-+ *
-+ * Author: Liang Zhen <liang@whamcloud.com>
-+ */
-+
-+/*
-+ * htree lock
-+ *
-+ * htree_lock is an advanced lock, it can support five lock modes (concept is
-+ * taken from DLM) and it's a sleeping lock.
-+ *
-+ * most common use case is:
-+ * - create a htree_lock_head for data
-+ * - each thread (contender) creates it's own htree_lock
-+ * - contender needs to call htree_lock(lock_node, mode) to protect data and
-+ *   call htree_unlock to release lock
-+ *
-+ * Also, there is advanced use-case which is more complex, user can have
-+ * PW/PR lock on particular key, it's mostly used while user holding shared
-+ * lock on the htree (CW, CR)
-+ *
-+ * htree_lock(lock_node, HTREE_LOCK_CR); lock the htree with CR
-+ * htree_node_lock(lock_node, HTREE_LOCK_PR, key...); lock @key with PR
-+ * ...
-+ * htree_node_unlock(lock_node);; unlock the key
-+ *
-+ * Another tip is, we can have N-levels of this kind of keys, all we need to
-+ * do is specifying N-levels while creating htree_lock_head, then we can
-+ * lock/unlock a specific level by:
-+ * htree_node_lock(lock_node, mode1, key1, level1...);
-+ * do something;
-+ * htree_node_lock(lock_node, mode1, key2, level2...);
-+ * do something;
-+ * htree_node_unlock(lock_node, level2);
-+ * htree_node_unlock(lock_node, level1);
-+ *
-+ * NB: for multi-level, should be careful about locking order to avoid deadlock
-+ */
-+
-+#ifndef _LINUX_HTREE_LOCK_H
-+#define _LINUX_HTREE_LOCK_H
-+
-+#include <linux/list.h>
-+#include <linux/spinlock.h>
-+#include <linux/sched.h>
-+
-+/*
-+ * Lock Modes
-+ * more details can be found here:
-+ * http://en.wikipedia.org/wiki/Distributed_lock_manager
-+ */
-+typedef enum {
-+      HTREE_LOCK_EX   = 0, /* exclusive lock: incompatible with all others */
-+      HTREE_LOCK_PW,       /* protected write: allows only CR users */
-+      HTREE_LOCK_PR,       /* protected read: allow PR, CR users */
-+      HTREE_LOCK_CW,       /* concurrent write: allow CR, CW users */
-+      HTREE_LOCK_CR,       /* concurrent read: allow all but EX users */
-+      HTREE_LOCK_MAX,      /* number of lock modes */
-+} htree_lock_mode_t;
-+
-+#define HTREE_LOCK_NL         HTREE_LOCK_MAX
-+#define HTREE_LOCK_INVAL      0xdead10c
-+
-+enum {
-+      HTREE_HBITS_MIN         = 2,
-+      HTREE_HBITS_DEF         = 14,
-+      HTREE_HBITS_MAX         = 32,
-+};
-+
-+enum {
-+      HTREE_EVENT_DISABLE     = (0),
-+      HTREE_EVENT_RD          = (1 << HTREE_LOCK_PR),
-+      HTREE_EVENT_WR          = (1 << HTREE_LOCK_PW),
-+      HTREE_EVENT_RDWR        = (HTREE_EVENT_RD | HTREE_EVENT_WR),
-+};
-+
-+struct htree_lock;
-+
-+typedef void (*htree_event_cb_t)(void *target, void *event);
-+
-+struct htree_lock_child {
-+      struct list_head        lc_list;        /* granted list */
-+      htree_event_cb_t        lc_callback;    /* event callback */
-+      unsigned                lc_events;      /* event types */
-+};
-+
-+struct htree_lock_head {
-+      unsigned long           lh_lock;        /* bits lock */
-+      /* blocked lock list (htree_lock) */
-+      struct list_head        lh_blocked_list;
-+      /* # key levels */
-+      u16                     lh_depth;
-+      /* hash bits for key and limit number of locks */
-+      u16                     lh_hbits;
-+      /* counters for blocked locks */
-+      u16                     lh_nblocked[HTREE_LOCK_MAX];
-+      /* counters for granted locks */
-+      u16                     lh_ngranted[HTREE_LOCK_MAX];
-+      /* private data */
-+      void                    *lh_private;
-+      /* array of children locks */
-+      struct htree_lock_child lh_children[0];
-+};
-+
-+/* htree_lock_node_t is child-lock for a specific key (ln_value) */
-+struct htree_lock_node {
-+      htree_lock_mode_t       ln_mode;
-+      /* major hash key */
-+      u16                     ln_major_key;
-+      /* minor hash key */
-+      u16                     ln_minor_key;
-+      struct list_head        ln_major_list;
-+      struct list_head        ln_minor_list;
-+      /* alive list, all locks (granted, blocked, listening) are on it */
-+      struct list_head        ln_alive_list;
-+      /* blocked list */
-+      struct list_head        ln_blocked_list;
-+      /* granted list */
-+      struct list_head        ln_granted_list;
-+      void                    *ln_ev_target;
-+};
-+
-+struct htree_lock {
-+      struct task_struct      *lk_task;
-+      struct htree_lock_head  *lk_head;
-+      void                    *lk_private;
-+      unsigned                lk_depth;
-+      htree_lock_mode_t       lk_mode;
-+      struct list_head        lk_blocked_list;
-+      struct htree_lock_node  lk_nodes[0];
-+};
-+
-+/* create a lock head, which stands for a resource */
-+struct htree_lock_head *htree_lock_head_alloc(unsigned depth,
-+                                            unsigned hbits, unsigned priv);
-+/* free a lock head */
-+void htree_lock_head_free(struct htree_lock_head *lhead);
-+/* register event callback for child lock at level @depth */
-+void htree_lock_event_attach(struct htree_lock_head *lhead, unsigned depth,
-+                           unsigned events, htree_event_cb_t callback);
-+/* create a lock handle, which stands for a thread */
-+struct htree_lock *htree_lock_alloc(unsigned depth, unsigned pbytes);
-+/* free a lock handle */
-+void htree_lock_free(struct htree_lock *lck);
-+/* lock htree, when @wait is true, 0 is returned if the lock can't
-+ * be granted immediately */
-+int htree_lock_try(struct htree_lock *lck, struct htree_lock_head *lhead,
-+                 htree_lock_mode_t mode, int wait);
-+/* unlock htree */
-+void htree_unlock(struct htree_lock *lck);
-+/* unlock and relock htree with @new_mode */
-+int htree_change_lock_try(struct htree_lock *lck,
-+                        htree_lock_mode_t new_mode, int wait);
-+void htree_change_mode(struct htree_lock *lck, htree_lock_mode_t mode);
-+/* require child lock (key) of htree at level @dep, @event will be sent to all
-+ * listeners on this @key while lock being granted */
-+int htree_node_lock_try(struct htree_lock *lck, htree_lock_mode_t mode,
-+                      u32 key, unsigned dep, int wait, void *event);
-+/* release child lock at level @dep, this lock will listen on it's key
-+ * if @event isn't NULL, event_cb will be called against @lck while granting
-+ * any other lock at level @dep with the same key */
-+void htree_node_unlock(struct htree_lock *lck, unsigned dep, void *event);
-+/* stop listening on child lock at level @dep */
-+void htree_node_stop_listen(struct htree_lock *lck, unsigned dep);
-+/* for debug */
-+void htree_lock_stat_print(int depth);
-+void htree_lock_stat_reset(void);
-+
-+#define htree_lock(lck, lh, mode)     htree_lock_try(lck, lh, mode, 1)
-+#define htree_change_lock(lck, mode)  htree_change_lock_try(lck, mode, 1)
-+
-+#define htree_lock_mode(lck)          ((lck)->lk_mode)
-+
-+#define htree_node_lock(lck, mode, key, dep)  \
-+      htree_node_lock_try(lck, mode, key, dep, 1, NULL)
-+/* this is only safe in thread context of lock owner */
-+#define htree_node_is_granted(lck, dep)               \
-+      ((lck)->lk_nodes[dep].ln_mode != HTREE_LOCK_INVAL && \
-+       (lck)->lk_nodes[dep].ln_mode != HTREE_LOCK_NL)
-+/* this is only safe in thread context of lock owner */
-+#define htree_node_is_listening(lck, dep)     \
-+      ((lck)->lk_nodes[dep].ln_mode == HTREE_LOCK_NL)
-+
-+#endif
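
The htree_lock.h header removed above documents its own calling convention: allocate one htree_lock_head per shared resource, one htree_lock handle per contending thread, take a shared mode on the whole tree and then a per-key mode at a given level. Below is a minimal usage sketch assembled only from the declarations shown above; the demo_* names, the depth of 2, and the key value are hypothetical and are not part of the removed patch.

/* Illustrative sketch only -- not part of the removed patch. It follows the
 * "most common use case" described in the htree_lock.h comment above and uses
 * only the declarations shown there. demo_* names, the depth of 2 and the
 * key value are hypothetical. */
#include <linux/types.h>
#include <linux/errno.h>
#include <linux/htree_lock.h>

#define DEMO_LK_LEVELS	2			/* hypothetical: two key levels */

static struct htree_lock_head *demo_lhead;	/* one head per shared resource */

static int demo_init(void)
{
	/* depth = number of key levels, hbits bounds the number of keys */
	demo_lhead = htree_lock_head_alloc(DEMO_LK_LEVELS, HTREE_HBITS_DEF, 0);
	return demo_lhead ? 0 : -ENOMEM;
}

static void demo_update(u32 key)
{
	/* each contending thread allocates its own lock handle */
	struct htree_lock *lck = htree_lock_alloc(DEMO_LK_LEVELS, 0);

	if (!lck)
		return;

	/* shared (CW) lock on the whole tree ... */
	htree_lock(lck, demo_lhead, HTREE_LOCK_CW);
	/* ... plus a PW lock on one key at level 0 to exclude other writers
	 * of that key */
	htree_node_lock(lck, HTREE_LOCK_PW, key, 0);

	/* modify the data guarded by @key here */

	htree_node_unlock(lck, 0, NULL);
	htree_unlock(lck);
	htree_lock_free(lck);
}

static void demo_fini(void)
{
	htree_lock_head_free(demo_lhead);
}

The ext4 side of the removed patch wraps these primitives in ext4_htree_lock_head_alloc(), ext4_htree_lock_alloc() and ext4_htree_lock(), which derive the htree_lock mode from the EXT4_HLOCK_* operation flags before dx_probe() and friends take the per-block DX/DE/SPIN node locks.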
diff --git a/ldiskfs/kernel_patches/series/ldiskfs-3.10-rhel7.2.series b/ldiskfs/kernel_patches/series/ldiskfs-3.10-rhel7.2.series
deleted file mode 100644
index b9813c7..0000000
--- a/ldiskfs/kernel_patches/series/ldiskfs-3.10-rhel7.2.series
+++ /dev/null
@@ -1 +0,0 @@
-rhel7/ext4-mballoc-prefetch.patch
diff --git a/ldiskfs/kernel_patches/series/ldiskfs-3.10-rhel7.3.series b/ldiskfs/kernel_patches/series/ldiskfs-3.10-rhel7.3.series
deleted file mode 100644
index b9813c7..0000000
--- a/ldiskfs/kernel_patches/series/ldiskfs-3.10-rhel7.3.series
+++ /dev/null
@@ -1 +0,0 @@
-rhel7/ext4-mballoc-prefetch.patch
diff --git a/ldiskfs/kernel_patches/series/ldiskfs-3.10-rhel7.4.series b/ldiskfs/kernel_patches/series/ldiskfs-3.10-rhel7.4.series
deleted file mode 100644
index b9813c7..0000000
--- a/ldiskfs/kernel_patches/series/ldiskfs-3.10-rhel7.4.series
+++ /dev/null
@@ -1 +0,0 @@
-rhel7/ext4-mballoc-prefetch.patch
diff --git a/ldiskfs/kernel_patches/series/ldiskfs-3.10-rhel7.5.series b/ldiskfs/kernel_patches/series/ldiskfs-3.10-rhel7.5.series
deleted file mode 100644
index b9813c7..0000000
--- a/ldiskfs/kernel_patches/series/ldiskfs-3.10-rhel7.5.series
+++ /dev/null
@@ -1 +0,0 @@
-rhel7/ext4-mballoc-prefetch.patch
diff --git a/ldiskfs/kernel_patches/series/ldiskfs-3.10-rhel7.6.series b/ldiskfs/kernel_patches/series/ldiskfs-3.10-rhel7.6.series
index fcb7484..cbace2c 100644
--- a/ldiskfs/kernel_patches/series/ldiskfs-3.10-rhel7.6.series
+++ b/ldiskfs/kernel_patches/series/ldiskfs-3.10-rhel7.6.series
@@ -39,7 +39,7 @@ rhel7.6/ext4-export-mb-stream-allocator-variables.patch
 rhel7.6/ext4-optimize-ext4_find_delalloc_range-in-nodelalloc.patch
 rhel7.6/ext4-simple-blockalloc.patch
 rhel7.6/ext4-mballoc-skip-uninit-groups-cr0.patch
-rhel7/ext4-mballoc-prefetch.patch
+rhel7.6/ext4-mballoc-prefetch.patch
 rhel7.6/ext4-track-extent-status-tree-shrinker-delay-statict.patch
 rhel7.6/ext4-remove-extent-status-procfs-files-if-journal-lo.patch
 rhel7.6/ext4-change-LRU-to-round-robin-in-extent-status-tree.patch
diff --git a/ldiskfs/kernel_patches/series/ldiskfs-3.10-rhel7.series b/ldiskfs/kernel_patches/series/ldiskfs-3.10-rhel7.series
deleted file mode 100644
index b9813c7..0000000
--- a/ldiskfs/kernel_patches/series/ldiskfs-3.10-rhel7.series
+++ /dev/null
@@ -1 +0,0 @@
-rhel7/ext4-mballoc-prefetch.patch
diff --git a/ldiskfs/kernel_patches/series/ldiskfs-3.12-sles12.series b/ldiskfs/kernel_patches/series/ldiskfs-3.12-sles12.series
deleted file mode 100644
index 671a918..0000000
--- a/ldiskfs/kernel_patches/series/ldiskfs-3.12-sles12.series
+++ /dev/null
@@ -1,25 +0,0 @@
-sles12/ext4-inode-version.patch
-rhel7.6/ext4-lookup-dotdot.patch
-rhel7.6/ext4-print-inum-in-htree-warning.patch
-sles12/ext4-prealloc.patch
-sles12/ext4-osd-iop-common.patch
-sles12/ext4-misc.patch
-rhel7.6/ext4-mballoc-extra-checks.patch
-rhel7.6/ext4-hash-indexed-dir-dotdot-update.patch
-rhel7.6/ext4-kill-dx-root.patch
-rhel7.6/ext4-mballoc-pa-free-mismatch.patch
-sles12/ext4-data-in-dirent.patch
-sles12/ext4-large-eas.patch
-sles12/ext4-disable-mb-cache.patch
-rhel7.6/ext4-nocmtime.patch
-sles12/ext4-large-dir.patch
-sles12/ext4-pdirop.patch
-rhel7.6/ext4-max-dir-size.patch
-sles12/ext4-corrupted-inode-block-bitmaps-handling-patches.patch
-rhel7.6/ext4-give-warning-with-dir-htree-growing.patch
-rhel7.6/ext4-mmp-brelse.patch
-rhel7.6/ext4-jcb-optimization.patch
-rhel7.6/ext4-export-orphan-add.patch
-rhel7.6/ext4-mmp-dont-mark-bh-dirty.patch
-rhel7.6/ext4-include-terminating-u32-in-size-of-xattr-entries-when-expanding-inodes.patch
-rhel7.6/ext4-optimize-ext4_find_delalloc_range-in-nodelalloc.patch
diff --git a/ldiskfs/kernel_patches/series/ldiskfs-3.12-sles12sp1.series b/ldiskfs/kernel_patches/series/ldiskfs-3.12-sles12sp1.series
deleted file mode 100644
index 1b95066..0000000
--- a/ldiskfs/kernel_patches/series/ldiskfs-3.12-sles12sp1.series
+++ /dev/null
@@ -1,26 +0,0 @@
-sles12/ext4-inode-version.patch
-sles12sp1/ext4-lookup-dotdot.patch
-rhel7.6/ext4-print-inum-in-htree-warning.patch
-sles12/ext4-prealloc.patch
-sles12/ext4-osd-iop-common.patch
-sles12/ext4-misc.patch
-rhel7.6/ext4-mballoc-extra-checks.patch
-rhel7.6/ext4-hash-indexed-dir-dotdot-update.patch
-rhel7.6/ext4-kill-dx-root.patch
-rhel7.6/ext4-mballoc-pa-free-mismatch.patch
-sles12/ext4-data-in-dirent.patch
-sles12/ext4-large-eas.patch
-sles12/ext4-disable-mb-cache.patch
-rhel7.6/ext4-nocmtime.patch
-sles12sp1/ext4-large-dir.patch
-sles12/ext4-pdirop.patch
-rhel7.6/ext4-max-dir-size.patch
-sles12/ext4-corrupted-inode-block-bitmaps-handling-patches.patch
-rhel7.6/ext4-give-warning-with-dir-htree-growing.patch
-rhel7.6/ext4-mmp-brelse.patch
-rhel7.6/ext4-jcb-optimization.patch
-sles12sp1/ext4-attach-jinode-in-writepages.patch
-rhel7.6/ext4-export-orphan-add.patch
-rhel7.6/ext4-mmp-dont-mark-bh-dirty.patch
-rhel7.6/ext4-include-terminating-u32-in-size-of-xattr-entries-when-expanding-inodes.patch
-rhel7.6/ext4-optimize-ext4_find_delalloc_range-in-nodelalloc.patch
diff --git a/ldiskfs/kernel_patches/series/ldiskfs-4.12-sles15.series b/ldiskfs/kernel_patches/series/ldiskfs-4.12-sles15.series
index 95acb3e..4c0e3e0 100644
--- a/ldiskfs/kernel_patches/series/ldiskfs-4.12-sles15.series
+++ b/ldiskfs/kernel_patches/series/ldiskfs-4.12-sles15.series
@@ -7,7 +7,7 @@ sles12sp2/ext4-misc.patch
 sles12sp3/ext4-mballoc-extra-checks.patch
 ubuntu18/ext4-hash-indexed-dir-dotdot-update.patch
 sles12sp2/ext4-kill-dx-root.patch
-rhel7/ext4-mballoc-pa-free-mismatch.patch
+rhel7.6/ext4-mballoc-pa-free-mismatch.patch
 linux-5.4/ext4-data-in-dirent.patch
 suse15/ext4-large-dir.patch
 suse15/ext4-large-eas.patch
@@ -20,8 +20,8 @@ ubuntu18/ext4-give-warning-with-dir-htree-growing.patch
 ubuntu18/ext4-jcb-optimization.patch
 ubuntu18/ext4-attach-jinode-in-writepages.patch
 suse15/ext4-dont-check-before-replay.patch
-rhel7.2/ext4-dont-check-in-ro.patch
-rhel7/ext4-use-GFP_NOFS-in-ext4_inode_attach_jinode.patch
-rhel7/ext4-optimize-ext4_find_delalloc_range-in-nodelalloc.patch
-rhel7/ext4-export-orphan-add.patch
+rhel7.6/ext4-dont-check-in-ro.patch
+rhel7.6/ext4-use-GFP_NOFS-in-ext4_inode_attach_jinode.patch
+rhel7.6/ext4-optimize-ext4_find_delalloc_range-in-nodelalloc.patch
+rhel7.6/ext4-export-orphan-add.patch
 sles12sp2/ext4-export-mb-stream-allocator-variables.patch
diff --git a/ldiskfs/kernel_patches/series/ldiskfs-4.4.0-45-ubuntu14+16.series b/ldiskfs/kernel_patches/series/ldiskfs-4.4.0-45-ubuntu14+16.series
deleted file mode 100644
index a3e91ea..0000000
--- a/ldiskfs/kernel_patches/series/ldiskfs-4.4.0-45-ubuntu14+16.series
+++ /dev/null
@@ -1,27 +0,0 @@
-sles12sp2/ext4-inode-version.patch
-sles12sp2/ext4-lookup-dotdot.patch
-sles12sp2/ext4-print-inum-in-htree-warning.patch
-sles12sp2/ext4-prealloc.patch
-sles12sp2/ext4-osd-iop-common.patch
-ubuntu14+16/ext4-misc.patch
-sles12sp2/ext4-mballoc-extra-checks.patch
-sles12sp2/ext4-hash-indexed-dir-dotdot-update.patch
-sles12sp2/ext4-kill-dx-root.patch
-rhel7.6/ext4-mballoc-pa-free-mismatch.patch
-ubuntu14+16/ext4-data-in-dirent.patch
-ubuntu14+16/ext4-large-eas.patch
-ubuntu14+16/ext4-disable-mb-cache.patch
-rhel7.6/ext4-nocmtime.patch
-ubuntu14+16/ext4-large-dir.patch
-ubuntu14+16/ext4-pdirop.patch
-sles12sp2/ext4-max-dir-size.patch
-sles12sp2/ext4-corrupted-inode-block-bitmaps-handling-patches.patch
-sles12sp2/ext4-give-warning-with-dir-htree-growing.patch
-sles12sp2/ext4-mmp-brelse.patch
-rhel7.6/ext4-jcb-optimization.patch
-sles12sp2/ext4-attach-jinode-in-writepages.patch
-rhel7.6/ext4-use-GFP_NOFS-in-ext4_inode_attach_jinode.patch
-rhel7.6/ext4-export-orphan-add.patch
-rhel7.6/ext4-mmp-dont-mark-bh-dirty.patch
-rhel7.6/ext4-include-terminating-u32-in-size-of-xattr-entries-when-expanding-inodes.patch
-rhel7.6/ext4-optimize-ext4_find_delalloc_range-in-nodelalloc.patch
diff --git a/ldiskfs/kernel_patches/series/ldiskfs-4.4.0-49-ubuntu14+16.series b/ldiskfs/kernel_patches/series/ldiskfs-4.4.0-49-ubuntu14+16.series
deleted file mode 100644
index 1245873..0000000
--- a/ldiskfs/kernel_patches/series/ldiskfs-4.4.0-49-ubuntu14+16.series
+++ /dev/null
@@ -1,27 +0,0 @@
-sles12sp2/ext4-inode-version.patch
-sles12sp2/ext4-lookup-dotdot.patch
-sles12sp2/ext4-print-inum-in-htree-warning.patch
-sles12sp2/ext4-prealloc.patch
-sles12sp2/ext4-osd-iop-common.patch
-ubuntu14+16/ext4-misc.patch
-sles12sp2/ext4-mballoc-extra-checks.patch
-sles12sp2/ext4-hash-indexed-dir-dotdot-update.patch
-sles12sp2/ext4-kill-dx-root.patch
-rhel7.6/ext4-mballoc-pa-free-mismatch.patch
-ubuntu14+16/ext4-data-in-dirent-001.patch
-ubuntu14+16/ext4-large-eas.patch
-ubuntu14+16/ext4-disable-mb-cache.patch
-rhel7.6/ext4-nocmtime.patch
-ubuntu14+16/ext4-large-dir.patch
-ubuntu14+16/ext4-pdirop-001.patch
-sles12sp2/ext4-max-dir-size.patch
-sles12sp2/ext4-corrupted-inode-block-bitmaps-handling-patches.patch
-sles12sp2/ext4-give-warning-with-dir-htree-growing.patch
-sles12sp2/ext4-mmp-brelse.patch
-rhel7.6/ext4-jcb-optimization.patch
-sles12sp2/ext4-attach-jinode-in-writepages.patch
-rhel7.6/ext4-use-GFP_NOFS-in-ext4_inode_attach_jinode.patch
-rhel7.6/ext4-export-orphan-add.patch
-rhel7.6/ext4-mmp-dont-mark-bh-dirty.patch
-rhel7.6/ext4-include-terminating-u32-in-size-of-xattr-entries-when-expanding-inodes.patch
-rhel7.6/ext4-optimize-ext4_find_delalloc_range-in-nodelalloc.patch
diff --git a/ldiskfs/kernel_patches/series/ldiskfs-4.4.0-62-ubuntu14+16.series b/ldiskfs/kernel_patches/series/ldiskfs-4.4.0-62-ubuntu14+16.series
deleted file mode 100644
index c76c5a1..0000000
--- a/ldiskfs/kernel_patches/series/ldiskfs-4.4.0-62-ubuntu14+16.series
+++ /dev/null
@@ -1,27 +0,0 @@
-sles12sp2/ext4-inode-version.patch
-sles12sp2/ext4-lookup-dotdot.patch
-sles12sp2/ext4-print-inum-in-htree-warning.patch
-sles12sp2/ext4-prealloc.patch
-sles12sp2/ext4-osd-iop-common.patch
-ubuntu14+16/ext4-misc.patch
-sles12sp2/ext4-mballoc-extra-checks.patch
-sles12sp2/ext4-hash-indexed-dir-dotdot-update.patch
-sles12sp2/ext4-kill-dx-root.patch
-rhel7.6/ext4-mballoc-pa-free-mismatch.patch
-ubuntu14+16/ext4-data-in-dirent-001.patch
-ubuntu14+16/ext4-large-eas.patch
-ubuntu14+16/ext4-disable-mb-cache.patch
-rhel7.6/ext4-nocmtime.patch
-ubuntu14+16/ext4-large-dir-001.patch
-ubuntu14+16/ext4-pdirop-001.patch
-sles12sp2/ext4-max-dir-size.patch
-sles12sp2/ext4-corrupted-inode-block-bitmaps-handling-patches.patch
-sles12sp2/ext4-give-warning-with-dir-htree-growing.patch
-sles12sp2/ext4-mmp-brelse.patch
-rhel7.6/ext4-jcb-optimization.patch
-sles12sp2/ext4-attach-jinode-in-writepages.patch
-rhel7.6/ext4-use-GFP_NOFS-in-ext4_inode_attach_jinode.patch
-rhel7.6/ext4-export-orphan-add.patch
-rhel7.6/ext4-mmp-dont-mark-bh-dirty.patch
-rhel7.6/ext4-include-terminating-u32-in-size-of-xattr-entries-when-expanding-inodes.patch
-rhel7.6/ext4-optimize-ext4_find_delalloc_range-in-nodelalloc.patch
diff --git a/ldiskfs/kernel_patches/series/ldiskfs-4.4.0-73-ubuntu14+16.series b/ldiskfs/kernel_patches/series/ldiskfs-4.4.0-73-ubuntu14+16.series
deleted file mode 100644
index 4994434..0000000
--- a/ldiskfs/kernel_patches/series/ldiskfs-4.4.0-73-ubuntu14+16.series
+++ /dev/null
@@ -1,28 +0,0 @@
-sles12sp2/ext4-inode-version.patch
-sles12sp2/ext4-lookup-dotdot.patch
-sles12sp2/ext4-print-inum-in-htree-warning.patch
-sles12sp2/ext4-prealloc.patch
-sles12sp2/ext4-osd-iop-common.patch
-ubuntu14+16/ext4-misc.patch
-sles12sp2/ext4-mballoc-extra-checks.patch
-sles12sp2/ext4-hash-indexed-dir-dotdot-update.patch
-sles12sp2/ext4-kill-dx-root.patch
-rhel7.6/ext4-mballoc-pa-free-mismatch.patch
-ubuntu14+16/ext4-data-in-dirent-001.patch
-ubuntu14+16/ext4-large-eas.patch
-ubuntu14+16/ext4-disable-mb-cache-001.patch
-rhel7.6/ext4-nocmtime.patch
-ubuntu14+16/ext4-large-dir-001.patch
-ubuntu14+16/ext4-pdirop-001.patch
-sles12sp2/ext4-max-dir-size.patch
-sles12sp2/ext4-corrupted-inode-block-bitmaps-handling-patches.patch
-sles12sp2/ext4-give-warning-with-dir-htree-growing.patch
-sles12sp2/ext4-mmp-brelse.patch
-rhel7.6/ext4-jcb-optimization.patch
-sles12sp2/ext4-attach-jinode-in-writepages.patch
-rhel7.6/ext4-use-GFP_NOFS-in-ext4_inode_attach_jinode.patch
-rhel7.6/ext4-export-orphan-add.patch
-rhel7.6/ext4-mmp-dont-mark-bh-dirty.patch
-rhel7.6/ext4-include-terminating-u32-in-size-of-xattr-entries-when-expanding-inodes.patch
-sles12sp2/ext4-export-mb-stream-allocator-variables.patch
-rhel7.6/ext4-optimize-ext4_find_delalloc_range-in-nodelalloc.patch