LU-11838 ldiskfs: add rhel8 server support 74/34674/6
author Li Dongyang <dongyangli@ddn.com>
Mon, 15 Apr 2019 08:05:58 +0000 (18:05 +1000)
committer Oleg Drokin <green@whamcloud.com>
Thu, 13 Jun 2019 04:17:37 +0000 (04:17 +0000)
This patch adds the ldiskfs patch series for the RHEL8
kernel 4.18.0-32.el8.

Fix lustre-build-ldiskfs.m4 so that
CONFIG_LDISKFS_FS_ENCRYPTION stays consistent with the
kernel's CONFIG_EXT4_FS_ENCRYPTION option.
Otherwise ldiskfs will not build on kernels with
CONFIG_EXT4_FS_ENCRYPTION disabled.

Note: this also includes a small cleanup of
ubuntu18/ext4-kill-dx-root.patch.

Test-Parameters: trivial
Signed-off-by: Li Dongyang <dongyangli@ddn.com>
Change-Id: Ib500bff2f6688405b912620c5217586c8420c6e1
Reviewed-on: https://review.whamcloud.com/34674
Reviewed-by: Shaun Tancheff <stancheff@cray.com>
Tested-by: Jenkins
Tested-by: Maloo <maloo@whamcloud.com>
Reviewed-by: Andreas Dilger <adilger@whamcloud.com>
Reviewed-by: Alex Zhuravlev <bzzz@whamcloud.com>
Reviewed-by: Oleg Drokin <green@whamcloud.com>
12 files changed:
config/lustre-build-ldiskfs.m4
ldiskfs/kernel_patches/patches/rhel8/ext4-corrupted-inode-block-bitmaps-handling-patches.patch [new file with mode: 0644]
ldiskfs/kernel_patches/patches/rhel8/ext4-dont-check-before-replay.patch [new file with mode: 0644]
ldiskfs/kernel_patches/patches/rhel8/ext4-export-mb-stream-allocator-variables.patch [new file with mode: 0644]
ldiskfs/kernel_patches/patches/rhel8/ext4-inode-version.patch [new file with mode: 0644]
ldiskfs/kernel_patches/patches/rhel8/ext4-mballoc-extra-checks.patch [new file with mode: 0644]
ldiskfs/kernel_patches/patches/rhel8/ext4-misc.patch [new file with mode: 0644]
ldiskfs/kernel_patches/patches/rhel8/ext4-nocmtime.patch [new file with mode: 0644]
ldiskfs/kernel_patches/patches/rhel8/ext4-pdirop.patch [new file with mode: 0644]
ldiskfs/kernel_patches/patches/rhel8/ext4-prealloc.patch [new file with mode: 0644]
ldiskfs/kernel_patches/patches/ubuntu18/ext4-kill-dx-root.patch
ldiskfs/kernel_patches/series/ldiskfs-4.18-rhel8.series [new file with mode: 0644]

diff --git a/config/lustre-build-ldiskfs.m4 b/config/lustre-build-ldiskfs.m4
index 444b492..32420b1 100644 (file)
@@ -13,6 +13,7 @@ esac
 AS_IF([test -z "$LDISKFS_SERIES"], [
 AS_IF([test x$RHEL_KERNEL = xyes], [
        case $RHEL_RELEASE_NO in
+       80)     LDISKFS_SERIES="4.18-rhel8.series"      ;;
        76)     LDISKFS_SERIES="3.10-rhel7.6.series"    ;;
        75)     LDISKFS_SERIES="3.10-rhel7.5.series"    ;;
        74)     LDISKFS_SERIES="3.10-rhel7.4.series"    ;;
@@ -204,6 +205,42 @@ ext4_info_dquot, [
 ]) # LB_EXT4_HAVE_INFO_DQUOT
 
 #
+# LB_EXT4_HAVE_I_CRYPT_INFO
+#
+# in linux 4.8 i_crypt_info moved from ext4_inode_info to struct inode
+#
+# Determine if we need to enable CONFIG_LDISKFS_FS_ENCRYPTION.
+# If we have i_crypt_info in ext4_inode_info, the config option
+# should be enabled to make the ldiskfs module compilation happy.
+# Otherwise i_crypt_info is in struct inode, and we need to check the
+# kernel config option to determine that.
+#
+AC_DEFUN([LB_EXT4_HAVE_I_CRYPT_INFO], [
+LB_CHECK_COMPILE([if i_crypt_info is in ext4_inode_info],
+ext4_i_crypt_info, [
+       #define CONFIG_EXT4_FS_ENCRYPTION 1
+       #include <linux/fs.h>
+       #include "$EXT4_SRC_DIR/ext4.h"
+],[
+       struct ext4_inode_info in;
+
+       in.i_crypt_info = NULL;
+],[
+       AC_DEFINE(
+               CONFIG_LDISKFS_FS_ENCRYPTION, 1,
+               [enable encryption for ldiskfs]
+       )
+],[
+       LB_CHECK_CONFIG([EXT4_FS_ENCRYPTION],[
+               AC_DEFINE(
+                       CONFIG_LDISKFS_FS_ENCRYPTION, 1,
+                       [enable encryption for ldiskfs]
+               )
+       ])
+])
+]) # LB_EXT4_HAVE_I_CRYPT_INFO
+
+#
 # LDISKFS_AC_PATCH_PROGRAM
 #
 # Determine which program should be used to apply the patches to
@@ -285,11 +322,11 @@ AS_IF([test x$enable_ldiskfs != xno],[
        LB_LDISKFS_MAP_BLOCKS
        LB_EXT4_BREAD_4ARGS
        LB_EXT4_HAVE_INFO_DQUOT
+       LB_EXT4_HAVE_I_CRYPT_INFO
        AC_DEFINE(CONFIG_LDISKFS_FS_POSIX_ACL, 1, [posix acls for ldiskfs])
        AC_DEFINE(CONFIG_LDISKFS_FS_SECURITY, 1, [fs security for ldiskfs])
        AC_DEFINE(CONFIG_LDISKFS_FS_XATTR, 1, [extened attributes for ldiskfs])
        AC_DEFINE(CONFIG_LDISKFS_FS_RW, 1, [enable rw access for ldiskfs])
-       AC_DEFINE(CONFIG_LDISKFS_FS_ENCRYPTION, 1, [enable encryption for ldiskfs])
        AC_SUBST(LDISKFS_SUBDIR, ldiskfs)
        AC_DEFINE(HAVE_LDISKFS_OSD, 1, Enable ldiskfs osd)
 ], [
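
As a minimal stand-alone illustration of what the LB_EXT4_HAVE_I_CRYPT_INFO check above guards against (struct and field names invented for the demo, not taken from this change): a member that only exists when CONFIG_EXT4_FS_ENCRYPTION is set cannot be referenced when that option is off, which is why CONFIG_LDISKFS_FS_ENCRYPTION has to follow the kernel configuration. Build it with and without -DCONFIG_EXT4_FS_ENCRYPTION to see both outcomes.

    #include <stdio.h>

    /* Stand-in for ext4_inode_info on older kernels: the encryption field
     * is only present when the kernel config enables ext4 encryption. */
    struct fake_ext4_inode_info {
            unsigned long i_flags;
    #ifdef CONFIG_EXT4_FS_ENCRYPTION
            void *i_crypt_info;
    #endif
    };

    int main(void)
    {
            struct fake_ext4_inode_info in = { 0 };

    #ifdef CONFIG_EXT4_FS_ENCRYPTION
            in.i_crypt_info = NULL;   /* compiles only when the option is on */
            printf("i_crypt_info present: also define CONFIG_LDISKFS_FS_ENCRYPTION\n");
    #else
            printf("no i_crypt_info: leave CONFIG_LDISKFS_FS_ENCRYPTION undefined\n");
    #endif
            (void)in;
            return 0;
    }
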
diff --git a/ldiskfs/kernel_patches/patches/rhel8/ext4-corrupted-inode-block-bitmaps-handling-patches.patch b/ldiskfs/kernel_patches/patches/rhel8/ext4-corrupted-inode-block-bitmaps-handling-patches.patch
new file mode 100644 (file)
index 0000000..38e1862
--- /dev/null
@@ -0,0 +1,261 @@
+Since we can skip corrupt block groups, this patch
+uses ext4_warning() instead of ext4_error() so the
+filesystem is not remounted read-only by default.
+
+Index: linux-4.18.0-80.1.2.el8_0/fs/ext4/balloc.c
+===================================================================
+--- linux-4.18.0-80.1.2.el8_0.orig/fs/ext4/balloc.c
++++ linux-4.18.0-80.1.2.el8_0/fs/ext4/balloc.c
+@@ -373,7 +373,7 @@ static int ext4_validate_block_bitmap(st
+       if (unlikely(!ext4_block_bitmap_csum_verify(sb, block_group,
+                       desc, bh))) {
+               ext4_unlock_group(sb, block_group);
+-              ext4_error(sb, "bg %u: bad block bitmap checksum", block_group);
++              ext4_warning(sb, "bg %u: bad block bitmap checksum", block_group);
+               ext4_mark_group_bitmap_corrupted(sb, block_group,
+                                       EXT4_GROUP_INFO_BBITMAP_CORRUPT);
+               return -EFSBADCRC;
+@@ -381,8 +381,8 @@ static int ext4_validate_block_bitmap(st
+       blk = ext4_valid_block_bitmap(sb, desc, block_group, bh);
+       if (unlikely(blk != 0)) {
+               ext4_unlock_group(sb, block_group);
+-              ext4_error(sb, "bg %u: block %llu: invalid block bitmap",
+-                         block_group, blk);
++              ext4_warning(sb, "bg %u: block %llu: invalid block bitmap",
++                           block_group, blk);
+               ext4_mark_group_bitmap_corrupted(sb, block_group,
+                                       EXT4_GROUP_INFO_BBITMAP_CORRUPT);
+               return -EFSCORRUPTED;
+@@ -458,8 +458,8 @@ ext4_read_block_bitmap_nowait(struct sup
+               ext4_unlock_group(sb, block_group);
+               unlock_buffer(bh);
+               if (err) {
+-                      ext4_error(sb, "Failed to init block bitmap for group "
+-                                 "%u: %d", block_group, err);
++                      ext4_warning(sb, "Failed to init block bitmap for group "
++                                   "%u: %d", block_group, err);
+                       goto out;
+               }
+               goto verify;
+Index: linux-4.18.0-80.1.2.el8_0/fs/ext4/ialloc.c
+===================================================================
+--- linux-4.18.0-80.1.2.el8_0.orig/fs/ext4/ialloc.c
++++ linux-4.18.0-80.1.2.el8_0/fs/ext4/ialloc.c
+@@ -96,8 +96,8 @@ static int ext4_validate_inode_bitmap(st
+       if (!ext4_inode_bitmap_csum_verify(sb, block_group, desc, bh,
+                                          EXT4_INODES_PER_GROUP(sb) / 8)) {
+               ext4_unlock_group(sb, block_group);
+-              ext4_error(sb, "Corrupt inode bitmap - block_group = %u, "
+-                         "inode_bitmap = %llu", block_group, blk);
++              ext4_warning(sb, "Corrupt inode bitmap - block_group = %u, "
++                           "inode_bitmap = %llu", block_group, blk);
+               ext4_mark_group_bitmap_corrupted(sb, block_group,
+                                       EXT4_GROUP_INFO_IBITMAP_CORRUPT);
+               return -EFSBADCRC;
+@@ -346,7 +346,7 @@ out:
+               if (!fatal)
+                       fatal = err;
+       } else {
+-              ext4_error(sb, "bit already cleared for inode %lu", ino);
++              ext4_warning(sb, "bit already cleared for inode %lu", ino);
+               ext4_mark_group_bitmap_corrupted(sb, block_group,
+                                       EXT4_GROUP_INFO_IBITMAP_CORRUPT);
+       }
+Index: linux-4.18.0-80.1.2.el8_0/fs/ext4/mballoc.c
+===================================================================
+--- linux-4.18.0-80.1.2.el8_0.orig/fs/ext4/mballoc.c
++++ linux-4.18.0-80.1.2.el8_0/fs/ext4/mballoc.c
+@@ -741,10 +741,14 @@ int ext4_mb_generate_buddy(struct super_
+       grp->bb_fragments = fragments;
+       if (free != grp->bb_free) {
+-              ext4_grp_locked_error(sb, group, 0, 0,
+-                                    "block bitmap and bg descriptor "
+-                                    "inconsistent: %u vs %u free clusters",
+-                                    free, grp->bb_free);
++              struct ext4_group_desc *gdp;
++              gdp = ext4_get_group_desc(sb, group, NULL);
++              ext4_warning(sb, "group %lu: block bitmap and bg descriptor "
++                           "inconsistent: %u vs %u free clusters "
++                           "%u in gd, %lu pa's",
++                           (long unsigned int)group, free, grp->bb_free,
++                           ext4_free_group_clusters(sb, gdp),
++                           grp->bb_prealloc_nr);
+               /*
+                * If we intend to continue, we consider group descriptor
+                * corrupt and update bb_free using bitmap value
+@@ -1107,7 +1111,7 @@ ext4_mb_load_buddy_gfp(struct super_bloc
+       int block;
+       int pnum;
+       int poff;
+-      struct page *page;
++      struct page *page = NULL;
+       int ret;
+       struct ext4_group_info *grp;
+       struct ext4_sb_info *sbi = EXT4_SB(sb);
+@@ -1133,7 +1137,7 @@ ext4_mb_load_buddy_gfp(struct super_bloc
+                */
+               ret = ext4_mb_init_group(sb, group, gfp);
+               if (ret)
+-                      return ret;
++                      goto err;
+       }
+       /*
+@@ -1236,6 +1240,7 @@ err:
+               put_page(e4b->bd_buddy_page);
+       e4b->bd_buddy = NULL;
+       e4b->bd_bitmap = NULL;
++      ext4_warning(sb, "Error loading buddy information for %u", group);
+       return ret;
+ }
+@@ -3654,9 +3659,11 @@ int ext4_mb_check_ondisk_bitmap(struct s
+       }
+       if (free != free_in_gdp) {
+-              ext4_error(sb, "on-disk bitmap for group %d"
++              ext4_warning(sb, "on-disk bitmap for group %d"
+                       "corrupted: %u blocks free in bitmap, %u - in gd\n",
+                       group, free, free_in_gdp);
++              ext4_mark_group_bitmap_corrupted(sb, group,
++                                      EXT4_GROUP_INFO_BBITMAP_CORRUPT);
+               return -EIO;
+       }
+       return 0;
+@@ -4016,16 +4023,8 @@ ext4_mb_release_inode_pa(struct ext4_bud
+       /* "free < pa->pa_free" means we maybe double alloc the same blocks,
+        * otherwise maybe leave some free blocks unavailable, no need to BUG.*/
+       if ((free > pa->pa_free && !pa->pa_error) || (free < pa->pa_free)) {
+-              ext4_error(sb, "pa free mismatch: [pa %p] "
+-                              "[phy %lu] [logic %lu] [len %u] [free %u] "
+-                              "[error %u] [inode %lu] [freed %u]", pa,
+-                              (unsigned long)pa->pa_pstart,
+-                              (unsigned long)pa->pa_lstart,
+-                              (unsigned)pa->pa_len, (unsigned)pa->pa_free,
+-                              (unsigned)pa->pa_error, pa->pa_inode->i_ino,
+-                              free);
+               ext4_grp_locked_error(sb, group, 0, 0, "free %u, pa_free %u",
+-                                      free, pa->pa_free);
++                                    free, pa->pa_free);
+               /*
+                * pa is already deleted so we use the value obtained
+                * from the bitmap and continue.
+@@ -4087,15 +4086,11 @@ ext4_mb_discard_group_preallocations(str
+       bitmap_bh = ext4_read_block_bitmap(sb, group);
+       if (IS_ERR(bitmap_bh)) {
+               err = PTR_ERR(bitmap_bh);
+-              ext4_error(sb, "Error %d reading block bitmap for %u",
+-                         err, group);
+               return 0;
+       }
+       err = ext4_mb_load_buddy(sb, group, &e4b);
+       if (err) {
+-              ext4_warning(sb, "Error %d loading buddy information for %u",
+-                           err, group);
+               put_bh(bitmap_bh);
+               return 0;
+       }
+@@ -4256,17 +4251,12 @@ repeat:
+               err = ext4_mb_load_buddy_gfp(sb, group, &e4b,
+                                            GFP_NOFS|__GFP_NOFAIL);
+-              if (err) {
+-                      ext4_error(sb, "Error %d loading buddy information for %u",
+-                                 err, group);
++              if (err)
+                       return;
+-              }
+               bitmap_bh = ext4_read_block_bitmap(sb, group);
+               if (IS_ERR(bitmap_bh)) {
+                       err = PTR_ERR(bitmap_bh);
+-                      ext4_error(sb, "Error %d reading block bitmap for %u",
+-                                      err, group);
+                       ext4_mb_unload_buddy(&e4b);
+                       continue;
+               }
+@@ -4529,11 +4519,8 @@ ext4_mb_discard_lg_preallocations(struct
+               group = ext4_get_group_number(sb, pa->pa_pstart);
+               err = ext4_mb_load_buddy_gfp(sb, group, &e4b,
+                                            GFP_NOFS|__GFP_NOFAIL);
+-              if (err) {
+-                      ext4_error(sb, "Error %d loading buddy information for %u",
+-                                 err, group);
++              if (err)
+                       continue;
+-              }
+               ext4_lock_group(sb, group);
+               list_del(&pa->pa_group_list);
+               ext4_get_group_info(sb, group)->bb_prealloc_nr--;
+@@ -4786,7 +4773,7 @@ errout:
+                        * been updated or not when fail case. So can
+                        * not revert pa_free back, just mark pa_error*/
+                       pa->pa_error++;
+-                      ext4_error(sb,
++                      ext4_warning(sb,
+                               "Updating bitmap error: [err %d] "
+                               "[pa %p] [phy %lu] [logic %lu] "
+                               "[len %u] [free %u] [error %u] "
+@@ -4797,6 +4784,7 @@ errout:
+                               (unsigned)pa->pa_free,
+                               (unsigned)pa->pa_error,
+                               pa->pa_inode ? pa->pa_inode->i_ino : 0);
++                      ext4_mark_group_bitmap_corrupted(sb, 0, 0);
+               }
+       }
+       ext4_mb_release_context(ac);
+@@ -5082,7 +5070,7 @@ do_more:
+       err = ext4_mb_load_buddy_gfp(sb, block_group, &e4b,
+                                    GFP_NOFS|__GFP_NOFAIL);
+       if (err)
+-              goto error_return;
++              goto error_brelse;
+       /*
+        * We need to make sure we don't reuse the freed block until after the
+@@ -5164,8 +5152,9 @@ do_more:
+               goto do_more;
+       }
+ error_return:
+-      brelse(bitmap_bh);
+       ext4_std_error(sb, err);
++error_brelse:
++      brelse(bitmap_bh);
+       return;
+ }
+@@ -5265,7 +5254,7 @@ int ext4_group_add_blocks(handle_t *hand
+       err = ext4_mb_load_buddy(sb, block_group, &e4b);
+       if (err)
+-              goto error_return;
++              goto error_brelse;
+       /*
+        * need to update group_info->bb_free and bitmap
+@@ -5303,8 +5292,9 @@ int ext4_group_add_blocks(handle_t *hand
+               err = ret;
+ error_return:
+-      brelse(bitmap_bh);
+       ext4_std_error(sb, err);
++error_brelse:
++      brelse(bitmap_bh);
+       return err;
+ }
+@@ -5379,11 +5369,8 @@ ext4_trim_all_free(struct super_block *s
+       trace_ext4_trim_all_free(sb, group, start, max);
+       ret = ext4_mb_load_buddy(sb, group, &e4b);
+-      if (ret) {
+-              ext4_warning(sb, "Error %d loading buddy information for %u",
+-                           ret, group);
++      if (ret)
+               return ret;
+-      }
+       bitmap = e4b.bd_bitmap;
+       ext4_lock_group(sb, group);
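
A toy user-space sketch of the behaviour this patch aims for (all names and values are invented for illustration, not part of the change): when one group's bitmap fails validation, flag just that group as corrupted and keep allocating from the others instead of failing the whole filesystem.

    #include <stdio.h>
    #include <stdbool.h>

    #define NGROUPS 4

    static bool group_corrupt[NGROUPS];

    static int validate_group(int g)
    {
            /* pretend group 2's bitmap checksum is bad */
            if (g == 2) {
                    fprintf(stderr, "bg %d: bad block bitmap checksum (warning only)\n", g);
                    group_corrupt[g] = true;  /* analogous to EXT4_GROUP_INFO_BBITMAP_CORRUPT */
                    return -1;
            }
            return 0;
    }

    int main(void)
    {
            for (int g = 0; g < NGROUPS; g++) {
                    if (group_corrupt[g] || validate_group(g) != 0)
                            continue;         /* skip corrupted groups, keep running */
                    printf("allocating from group %d\n", g);
            }
            return 0;
    }
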
diff --git a/ldiskfs/kernel_patches/patches/rhel8/ext4-dont-check-before-replay.patch b/ldiskfs/kernel_patches/patches/rhel8/ext4-dont-check-before-replay.patch
new file mode 100644 (file)
index 0000000..6413133
--- /dev/null
@@ -0,0 +1,36 @@
+When ldiskfs runs in failover mode with a read-only disk,
+part of the allocation updates may be lost and ldiskfs can
+fail to mount due to an inconsistent group descriptor
+state. Move the group descriptor check to after the
+journal replay.
+
+Index: linux-4.18.0-32.el8.x86_64/fs/ext4/super.c
+===================================================================
+--- linux-4.18.0-32.el8.x86_64.orig/fs/ext4/super.c
++++ linux-4.18.0-32.el8.x86_64/fs/ext4/super.c
+@@ -4092,11 +4092,6 @@ static int ext4_fill_super(struct super_
+               }
+       }
+       sbi->s_gdb_count = db_count;
+-      if (!ext4_check_descriptors(sb, logical_sb_block, &first_not_zeroed)) {
+-              ext4_msg(sb, KERN_ERR, "group descriptors corrupted!");
+-              ret = -EFSCORRUPTED;
+-              goto failed_mount2;
+-      }
+       timer_setup(&sbi->s_err_report, print_daily_error_info, 0);
+@@ -4238,6 +4233,13 @@ static int ext4_fill_super(struct super_
+       sbi->s_journal->j_commit_callback = ext4_journal_commit_callback;
+ no_journal:
++
++      if (!ext4_check_descriptors(sb, logical_sb_block, &first_not_zeroed)) {
++              ext4_msg(sb, KERN_ERR, "group descriptors corrupted!");
++              ret = -EFSCORRUPTED;
++              goto failed_mount_wq;
++      }
++
+       if (!test_opt(sb, NO_MBCACHE)) {
+               sbi->s_ea_block_cache = ext4_xattr_create_cache();
+               if (!sbi->s_ea_block_cache) {
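
A toy model of the reasoning behind moving the check (invented types and numbers, purely illustrative): the on-disk free count is stale until the journalled update is applied, so validating group descriptors before replay reports false corruption, while the same check passes once the journal has been replayed.

    #include <stdio.h>

    struct toy_group   { int free_on_disk; int free_expected; };
    struct toy_journal { int new_free; };

    static int check_descriptors(const struct toy_group *g)
    {
            if (g->free_on_disk != g->free_expected) {
                    fprintf(stderr, "group descriptors corrupted!\n");
                    return -1;
            }
            return 0;
    }

    static void replay_journal(struct toy_group *g, const struct toy_journal *j)
    {
            g->free_on_disk = j->new_free;    /* apply the logged update */
    }

    int main(void)
    {
            struct toy_group g = { .free_on_disk = 10, .free_expected = 7 };
            struct toy_journal j = { .new_free = 7 };

            printf("check before replay: %s\n", check_descriptors(&g) ? "fails" : "ok");
            replay_journal(&g, &j);
            printf("check after replay:  %s\n", check_descriptors(&g) ? "fails" : "ok");
            return 0;
    }
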
diff --git a/ldiskfs/kernel_patches/patches/rhel8/ext4-export-mb-stream-allocator-variables.patch b/ldiskfs/kernel_patches/patches/rhel8/ext4-export-mb-stream-allocator-variables.patch
new file mode 100644 (file)
index 0000000..c7bc92f
--- /dev/null
@@ -0,0 +1,98 @@
+Index: linux-4.18.0-80.1.2.el8_0/fs/ext4/ext4.h
+===================================================================
+--- linux-4.18.0-80.1.2.el8_0.orig/fs/ext4/ext4.h
++++ linux-4.18.0-80.1.2.el8_0/fs/ext4/ext4.h
+@@ -2555,6 +2555,8 @@ extern void ext4_end_bitmap_read(struct
+ /* mballoc.c */
+ extern const struct file_operations ext4_seq_prealloc_table_fops;
+ extern const struct seq_operations ext4_mb_seq_groups_ops;
++extern const struct file_operations ext4_seq_mb_last_group_fops;
++extern int ext4_mb_seq_last_start_seq_show(struct seq_file *m, void *v);
+ extern long ext4_mb_stats;
+ extern long ext4_mb_max_to_scan;
+ extern int ext4_mb_init(struct super_block *);
+Index: linux-4.18.0-80.1.2.el8_0/fs/ext4/mballoc.c
+===================================================================
+--- linux-4.18.0-80.1.2.el8_0.orig/fs/ext4/mballoc.c
++++ linux-4.18.0-80.1.2.el8_0/fs/ext4/mballoc.c
+@@ -2459,6 +2459,65 @@ static struct kmem_cache *get_groupinfo_
+       return cachep;
+ }
++#define EXT4_MB_MAX_INPUT_STRING_SIZE 32
++
++static ssize_t ext4_mb_last_group_write(struct file *file,
++                                      const char __user *buf,
++                                      size_t cnt, loff_t *pos)
++{
++      char dummy[EXT4_MB_MAX_INPUT_STRING_SIZE + 1];
++      struct super_block *sb = PDE_DATA(file_inode(file));
++      struct ext4_sb_info *sbi = EXT4_SB(sb);
++      unsigned long val;
++      char *end;
++
++      if (cnt > EXT4_MB_MAX_INPUT_STRING_SIZE)
++              return -EINVAL;
++      if (copy_from_user(dummy, buf, cnt))
++              return -EFAULT;
++      dummy[cnt] = '\0';
++      val = simple_strtoul(dummy, &end, 0);
++      if (dummy == end)
++              return -EINVAL;
++      if (val >= ext4_get_groups_count(sb))
++              return -ERANGE;
++      spin_lock(&sbi->s_md_lock);
++      sbi->s_mb_last_group = val;
++      sbi->s_mb_last_start = 0;
++      spin_unlock(&sbi->s_md_lock);
++      return cnt;
++}
++
++static int ext4_mb_seq_last_group_seq_show(struct seq_file *m, void *v)
++{
++      struct ext4_sb_info *sbi = EXT4_SB(m->private);
++
++      seq_printf(m , "%ld\n", sbi->s_mb_last_group);
++      return 0;
++}
++
++static int ext4_mb_seq_last_group_open(struct inode *inode, struct file *file)
++{
++      return single_open(file, ext4_mb_seq_last_group_seq_show, PDE_DATA(inode));
++}
++
++const struct file_operations ext4_seq_mb_last_group_fops = {
++      .owner         = THIS_MODULE,
++      .open          = ext4_mb_seq_last_group_open,
++      .read          = seq_read,
++      .llseek        = seq_lseek,
++      .release       = seq_release,
++      .write         = ext4_mb_last_group_write,
++};
++
++int ext4_mb_seq_last_start_seq_show(struct seq_file *m, void *v)
++{
++      struct ext4_sb_info *sbi = EXT4_SB(m->private);
++
++      seq_printf(m , "%ld\n", sbi->s_mb_last_start);
++      return 0;
++}
++
+ /*
+  * Allocate the top-level s_group_info array for the specified number
+  * of groups
+Index: linux-4.18.0-80.1.2.el8_0/fs/ext4/sysfs.c
+===================================================================
+--- linux-4.18.0-80.1.2.el8_0.orig/fs/ext4/sysfs.c
++++ linux-4.18.0-80.1.2.el8_0/fs/ext4/sysfs.c
+@@ -393,6 +393,10 @@ int ext4_register_sysfs(struct super_blo
+                               &ext4_mb_seq_groups_ops, sb);
+               proc_create_data("prealloc_table", S_IRUGO, sbi->s_proc,
+                               &ext4_seq_prealloc_table_fops, sb);
++              proc_create_data("mb_last_group", S_IRUGO, sbi->s_proc,
++                              &ext4_seq_mb_last_group_fops, sb);
++              proc_create_single_data("mb_last_start", S_IRUGO, sbi->s_proc,
++                              ext4_mb_seq_last_start_seq_show, sb);
+       }
+       return 0;
+ }
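
A minimal user-space sketch of how the new interface could be exercised (assumes the patched module is loaded, an ext4/ldiskfs filesystem is mounted from the named device, and that writing typically requires root): mb_last_group reports the group the stream allocator used last, and writing a group number to it seeds the allocator there while resetting mb_last_start to 0.

    #include <stdio.h>

    int main(int argc, char **argv)
    {
            const char *dev = argc > 1 ? argv[1] : "sda1";  /* placeholder device name */
            char path[128];
            char buf[64];
            FILE *f;

            snprintf(path, sizeof(path), "/proc/fs/ext4/%s/mb_last_group", dev);

            f = fopen(path, "r");
            if (!f) {
                    perror(path);
                    return 1;
            }
            if (fgets(buf, sizeof(buf), f))
                    printf("mb_last_group: %s", buf);
            fclose(f);

            /* Writing a group number seeds the stream allocator at that group;
             * ext4_mb_last_group_write() range-checks it against the group count
             * and resets s_mb_last_start to 0. */
            f = fopen(path, "w");
            if (f) {
                    fputs("0\n", f);
                    fclose(f);
            }
            return 0;
    }
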
diff --git a/ldiskfs/kernel_patches/patches/rhel8/ext4-inode-version.patch b/ldiskfs/kernel_patches/patches/rhel8/ext4-inode-version.patch
new file mode 100644 (file)
index 0000000..746b617
--- /dev/null
@@ -0,0 +1,46 @@
+Index: linux-4.18.0-32.el8.x86_64/fs/ext4/ialloc.c
+===================================================================
+--- linux-4.18.0-32.el8.x86_64.orig/fs/ext4/ialloc.c
++++ linux-4.18.0-32.el8.x86_64/fs/ext4/ialloc.c
+@@ -1100,6 +1100,7 @@ got:
+       ei->i_dtime = 0;
+       ei->i_block_group = group;
+       ei->i_last_alloc_group = ~0;
++      ei->i_fs_version = 0;
+       ext4_set_inode_flags(inode);
+       if (IS_DIRSYNC(inode))
+Index: linux-4.18.0-32.el8.x86_64/fs/ext4/ext4.h
+===================================================================
+--- linux-4.18.0-32.el8.x86_64.orig/fs/ext4/ext4.h
++++ linux-4.18.0-32.el8.x86_64/fs/ext4/ext4.h
+@@ -1058,6 +1058,8 @@ struct ext4_inode_info {
+       struct dquot *i_dquot[MAXQUOTAS];
+ #endif
++      __u64 i_fs_version;
++
+       /* Precomputed uuid+inum+igen checksum for seeding inode checksums */
+       __u32 i_csum_seed;
+Index: linux-4.18.0-32.el8.x86_64/fs/ext4/inode.c
+===================================================================
+--- linux-4.18.0-32.el8.x86_64.orig/fs/ext4/inode.c
++++ linux-4.18.0-32.el8.x86_64/fs/ext4/inode.c
+@@ -4775,14 +4775,14 @@ static inline void ext4_inode_set_iversi
+       if (unlikely(EXT4_I(inode)->i_flags & EXT4_EA_INODE_FL))
+               inode_set_iversion_raw(inode, val);
+       else
+-              inode_set_iversion_queried(inode, val);
++              EXT4_I(inode)->i_fs_version = val;
+ }
+ static inline u64 ext4_inode_peek_iversion(const struct inode *inode)
+ {
+       if (unlikely(EXT4_I(inode)->i_flags & EXT4_EA_INODE_FL))
+               return inode_peek_iversion_raw(inode);
+       else
+-              return inode_peek_iversion(inode);
++              return EXT4_I(inode)->i_fs_version;
+ }
+ struct inode *ext4_iget(struct super_block *sb, unsigned long ino)
diff --git a/ldiskfs/kernel_patches/patches/rhel8/ext4-mballoc-extra-checks.patch b/ldiskfs/kernel_patches/patches/rhel8/ext4-mballoc-extra-checks.patch
new file mode 100644 (file)
index 0000000..acf7718
--- /dev/null
@@ -0,0 +1,301 @@
+Index: linux-4.18.0-80.1.2.el8_0/fs/ext4/ext4.h
+===================================================================
+--- linux-4.18.0-80.1.2.el8_0.orig/fs/ext4/ext4.h
++++ linux-4.18.0-80.1.2.el8_0/fs/ext4/ext4.h
+@@ -2881,6 +2881,7 @@ struct ext4_group_info {
+       ext4_grpblk_t   bb_fragments;   /* nr of freespace fragments */
+       ext4_grpblk_t   bb_largest_free_order;/* order of largest frag in BG */
+       struct          list_head bb_prealloc_list;
++      unsigned long   bb_prealloc_nr;
+ #ifdef DOUBLE_CHECK
+       void            *bb_bitmap;
+ #endif
+Index: linux-4.18.0-80.1.2.el8_0/fs/ext4/mballoc.c
+===================================================================
+--- linux-4.18.0-80.1.2.el8_0.orig/fs/ext4/mballoc.c
++++ linux-4.18.0-80.1.2.el8_0/fs/ext4/mballoc.c
+@@ -352,7 +352,7 @@ static const char * const ext4_groupinfo
+       "ext4_groupinfo_64k", "ext4_groupinfo_128k"
+ };
+-static void ext4_mb_generate_from_pa(struct super_block *sb, void *bitmap,
++static int ext4_mb_generate_from_pa(struct super_block *sb, void *bitmap,
+                                       ext4_group_t group);
+ static void ext4_mb_generate_from_freelist(struct super_block *sb, void *bitmap,
+                                               ext4_group_t group);
+@@ -708,7 +708,7 @@ mb_set_largest_free_order(struct super_b
+ }
+ static noinline_for_stack
+-void ext4_mb_generate_buddy(struct super_block *sb,
++int ext4_mb_generate_buddy(struct super_block *sb,
+                               void *buddy, void *bitmap, ext4_group_t group)
+ {
+       struct ext4_group_info *grp = ext4_get_group_info(sb, group);
+@@ -752,6 +752,7 @@ void ext4_mb_generate_buddy(struct super
+               grp->bb_free = free;
+               ext4_mark_group_bitmap_corrupted(sb, group,
+                                       EXT4_GROUP_INFO_BBITMAP_CORRUPT);
++              return -EIO;
+       }
+       mb_set_largest_free_order(sb, grp);
+@@ -762,6 +763,8 @@ void ext4_mb_generate_buddy(struct super
+       sbi->s_mb_buddies_generated++;
+       sbi->s_mb_generation_time += period;
+       spin_unlock(&sbi->s_bal_lock);
++
++      return 0;
+ }
+ static void mb_regenerate_buddy(struct ext4_buddy *e4b)
+@@ -882,7 +885,7 @@ static int ext4_mb_init_cache(struct pag
+       }
+       first_block = page->index * blocks_per_page;
+-      for (i = 0; i < blocks_per_page; i++) {
++      for (i = 0; i < blocks_per_page && err == 0; i++) {
+               group = (first_block + i) >> 1;
+               if (group >= ngroups)
+                       break;
+@@ -926,7 +929,7 @@ static int ext4_mb_init_cache(struct pag
+                       ext4_lock_group(sb, group);
+                       /* init the buddy */
+                       memset(data, 0xff, blocksize);
+-                      ext4_mb_generate_buddy(sb, data, incore, group);
++                      err = ext4_mb_generate_buddy(sb, data, incore, group);
+                       ext4_unlock_group(sb, group);
+                       incore = NULL;
+               } else {
+@@ -941,7 +944,7 @@ static int ext4_mb_init_cache(struct pag
+                       memcpy(data, bitmap, blocksize);
+                       /* mark all preallocated blks used in in-core bitmap */
+-                      ext4_mb_generate_from_pa(sb, data, group);
++                      err = ext4_mb_generate_from_pa(sb, data, group);
+                       ext4_mb_generate_from_freelist(sb, data, group);
+                       ext4_unlock_group(sb, group);
+@@ -951,7 +954,8 @@ static int ext4_mb_init_cache(struct pag
+                       incore = data;
+               }
+       }
+-      SetPageUptodate(page);
++      if (likely(err == 0))
++              SetPageUptodate(page);
+ out:
+       if (bh) {
+@@ -2280,9 +2284,11 @@ static void *ext4_mb_seq_groups_next(str
+ static int ext4_mb_seq_groups_show(struct seq_file *seq, void *v)
+ {
+       struct super_block *sb = PDE_DATA(file_inode(seq->file));
++      struct ext4_group_desc *gdp;
+       ext4_group_t group = (ext4_group_t) ((unsigned long) v);
+       int i;
+       int err, buddy_loaded = 0;
++      int free = 0;
+       struct ext4_buddy e4b;
+       struct ext4_group_info *grinfo;
+       unsigned char blocksize_bits = min_t(unsigned char,
+@@ -2295,7 +2301,7 @@ static int ext4_mb_seq_groups_show(struc
+       group--;
+       if (group == 0)
+-              seq_puts(seq, "#group: free  frags first ["
++              seq_puts(seq, "#group: bfree gfree frags first pa    ["
+                             " 2^0   2^1   2^2   2^3   2^4   2^5   2^6  "
+                             " 2^7   2^8   2^9   2^10  2^11  2^12  2^13  ]\n");
+@@ -2313,13 +2319,19 @@ static int ext4_mb_seq_groups_show(struc
+               buddy_loaded = 1;
+       }
++      gdp = ext4_get_group_desc(sb, group, NULL);
++      if (gdp != NULL)
++              free = ext4_free_group_clusters(sb, gdp);
++
+       memcpy(&sg, ext4_get_group_info(sb, group), i);
+       if (buddy_loaded)
+               ext4_mb_unload_buddy(&e4b);
+-      seq_printf(seq, "#%-5u: %-5u %-5u %-5u [", group, sg.info.bb_free,
+-                      sg.info.bb_fragments, sg.info.bb_first_free);
++      seq_printf(seq, "#%-5lu: %-5u %-5u %-5u %-5u %-5lu [",
++                      (long unsigned int)group, sg.info.bb_free, free,
++                      sg.info.bb_fragments, sg.info.bb_first_free,
++                      sg.info.bb_prealloc_nr);
+       for (i = 0; i <= 13; i++)
+               seq_printf(seq, " %-5u", i <= blocksize_bits + 1 ?
+                               sg.info.bb_counters[i] : 0);
+@@ -3614,22 +3626,71 @@ static void ext4_mb_generate_from_freeli
+ }
+ /*
++ * check free blocks in bitmap match free block in group descriptor
++ * do this before taking preallocated blocks into account to be able
++ * to detect on-disk corruptions. The group lock should be hold by the
++ * caller.
++ */
++int ext4_mb_check_ondisk_bitmap(struct super_block *sb, void *bitmap,
++                              struct ext4_group_desc *gdp, int group)
++{
++      unsigned short max = EXT4_CLUSTERS_PER_GROUP(sb);
++      unsigned short i, first, free = 0;
++      unsigned short free_in_gdp = ext4_free_group_clusters(sb, gdp);
++
++      if (free_in_gdp == 0 && gdp->bg_flags & cpu_to_le16(EXT4_BG_BLOCK_UNINIT))
++              return 0;
++
++      i = mb_find_next_zero_bit(bitmap, max, 0);
++
++      while (i < max) {
++              first = i;
++              i = mb_find_next_bit(bitmap, max, i);
++              if (i > max)
++                      i = max;
++              free += i - first;
++              if (i < max)
++                      i = mb_find_next_zero_bit(bitmap, max, i);
++      }
++
++      if (free != free_in_gdp) {
++              ext4_error(sb, "on-disk bitmap for group %d"
++                      "corrupted: %u blocks free in bitmap, %u - in gd\n",
++                      group, free, free_in_gdp);
++              return -EIO;
++      }
++      return 0;
++}
++
++/*
+  * the function goes through all preallocation in this group and marks them
+  * used in in-core bitmap. buddy must be generated from this bitmap
+  * Need to be called with ext4 group lock held
+  */
+ static noinline_for_stack
+-void ext4_mb_generate_from_pa(struct super_block *sb, void *bitmap,
++int ext4_mb_generate_from_pa(struct super_block *sb, void *bitmap,
+                                       ext4_group_t group)
+ {
+       struct ext4_group_info *grp = ext4_get_group_info(sb, group);
+       struct ext4_prealloc_space *pa;
++      struct ext4_group_desc *gdp;
+       struct list_head *cur;
+       ext4_group_t groupnr;
+       ext4_grpblk_t start;
+       int preallocated = 0;
++      int skip = 0, count = 0;
++      int err;
+       int len;
++      gdp = ext4_get_group_desc(sb, group, NULL);
++      if (gdp == NULL)
++              return -EIO;
++
++      /* before applying preallocations, check bitmap consistency */
++      err = ext4_mb_check_ondisk_bitmap(sb, bitmap, gdp, group);
++      if (err)
++              return err;
++
+       /* all form of preallocation discards first load group,
+        * so the only competing code is preallocation use.
+        * we don't need any locking here
+@@ -3645,13 +3706,23 @@ void ext4_mb_generate_from_pa(struct sup
+                                            &groupnr, &start);
+               len = pa->pa_len;
+               spin_unlock(&pa->pa_lock);
+-              if (unlikely(len == 0))
++              if (unlikely(len == 0)) {
++                      skip++;
+                       continue;
++              }
+               BUG_ON(groupnr != group);
+               ext4_set_bits(bitmap, start, len);
+               preallocated += len;
++              count++;
++      }
++      if (count + skip != grp->bb_prealloc_nr) {
++              ext4_error(sb, "lost preallocations: "
++                         "count %d, bb_prealloc_nr %lu, skip %d\n",
++                         count, grp->bb_prealloc_nr, skip);
++              return -EIO;
+       }
+       mb_debug(1, "preallocated %u for group %u\n", preallocated, group);
++      return 0;
+ }
+ static void ext4_mb_pa_callback(struct rcu_head *head)
+@@ -3715,6 +3786,7 @@ static void ext4_mb_put_pa(struct ext4_a
+        */
+       ext4_lock_group(sb, grp);
+       list_del(&pa->pa_group_list);
++      ext4_get_group_info(sb, grp)->bb_prealloc_nr--;
+       ext4_unlock_group(sb, grp);
+       spin_lock(pa->pa_obj_lock);
+@@ -3809,6 +3881,7 @@ ext4_mb_new_inode_pa(struct ext4_allocat
+       ext4_lock_group(sb, ac->ac_b_ex.fe_group);
+       list_add(&pa->pa_group_list, &grp->bb_prealloc_list);
++      grp->bb_prealloc_nr++;
+       ext4_unlock_group(sb, ac->ac_b_ex.fe_group);
+       spin_lock(pa->pa_obj_lock);
+@@ -3870,6 +3943,7 @@ ext4_mb_new_group_pa(struct ext4_allocat
+       ext4_lock_group(sb, ac->ac_b_ex.fe_group);
+       list_add(&pa->pa_group_list, &grp->bb_prealloc_list);
++      grp->bb_prealloc_nr++;
+       ext4_unlock_group(sb, ac->ac_b_ex.fe_group);
+       /*
+@@ -4041,6 +4115,8 @@ repeat:
+               spin_unlock(&pa->pa_lock);
++              BUG_ON(grp->bb_prealloc_nr == 0);
++              grp->bb_prealloc_nr--;
+               list_del(&pa->pa_group_list);
+               list_add(&pa->u.pa_tmp_list, &list);
+       }
+@@ -4171,7 +4247,7 @@ repeat:
+               if (err) {
+                       ext4_error(sb, "Error %d loading buddy information for %u",
+                                  err, group);
+-                      continue;
++                      return;
+               }
+               bitmap_bh = ext4_read_block_bitmap(sb, group);
+@@ -4184,6 +4260,8 @@ repeat:
+               }
+               ext4_lock_group(sb, group);
++              BUG_ON(e4b.bd_info->bb_prealloc_nr == 0);
++              e4b.bd_info->bb_prealloc_nr--;
+               list_del(&pa->pa_group_list);
+               ext4_mb_release_inode_pa(&e4b, bitmap_bh, pa);
+               ext4_unlock_group(sb, group);
+@@ -4446,6 +4524,7 @@ ext4_mb_discard_lg_preallocations(struct
+               }
+               ext4_lock_group(sb, group);
+               list_del(&pa->pa_group_list);
++              ext4_get_group_info(sb, group)->bb_prealloc_nr--;
+               ext4_mb_release_group_pa(&e4b, pa);
+               ext4_unlock_group(sb, group);
+Index: linux-4.18.0-80.1.2.el8_0/fs/ext4/mballoc.h
+===================================================================
+--- linux-4.18.0-80.1.2.el8_0.orig/fs/ext4/mballoc.h
++++ linux-4.18.0-80.1.2.el8_0/fs/ext4/mballoc.h
+@@ -70,7 +70,7 @@ do {                                                                 \
+ /*
+  * for which requests use 2^N search using buddies
+  */
+-#define MB_DEFAULT_ORDER2_REQS                2
++#define MB_DEFAULT_ORDER2_REQS                8
+ /*
+  * default group prealloc size 512 blocks
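
The core of ext4_mb_check_ondisk_bitmap() above is a simple cross-check: count the free (clear) bits in the on-disk block bitmap and compare the result with the free-cluster count stored in the group descriptor. A stand-alone toy version of that check (sizes and values invented for the demo):

    #include <stdio.h>

    #define CLUSTERS_PER_GROUP 32            /* toy value, not ext4's */

    static unsigned count_free(unsigned long bitmap)
    {
            unsigned i, free = 0;

            for (i = 0; i < CLUSTERS_PER_GROUP; i++)
                    if (!(bitmap & (1UL << i)))   /* 0 bit == free cluster */
                            free++;
            return free;
    }

    int main(void)
    {
            unsigned long bitmap = 0x0000f00fUL; /* 8 bits set => 24 free */
            unsigned free_in_gd = 23;            /* descriptor claims 23 free */
            unsigned free = count_free(bitmap);

            if (free != free_in_gd)
                    fprintf(stderr, "group corrupted: %u free in bitmap, %u in gd\n",
                            free, free_in_gd);
            return free != free_in_gd;
    }
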
diff --git a/ldiskfs/kernel_patches/patches/rhel8/ext4-misc.patch b/ldiskfs/kernel_patches/patches/rhel8/ext4-misc.patch
new file mode 100644 (file)
index 0000000..d15042c
--- /dev/null
@@ -0,0 +1,185 @@
+Index: linux-4.18.0-80.1.2.el8_0/fs/ext4/ext4.h
+===================================================================
+--- linux-4.18.0-80.1.2.el8_0.orig/fs/ext4/ext4.h
++++ linux-4.18.0-80.1.2.el8_0/fs/ext4/ext4.h
+@@ -1591,6 +1591,8 @@ static inline void ext4_clear_state_flag
+  */
+ #define NEXT_ORPHAN(inode) EXT4_I(inode)->i_dtime
++#define JOURNAL_START_HAS_3ARGS       1
++
+ /*
+  * Codes for operating systems
+  */
+@@ -1805,7 +1807,21 @@ static inline bool ext4_has_unknown_ext#
+ EXTN_FEATURE_FUNCS(2)
+ EXTN_FEATURE_FUNCS(3)
+-EXTN_FEATURE_FUNCS(4)
++static inline bool ext4_has_unknown_ext4_compat_features(struct super_block *sb)
++{
++      return ((EXT4_SB(sb)->s_es->s_feature_compat &
++              cpu_to_le32(~EXT4_FEATURE_COMPAT_SUPP)) != 0);
++}
++static inline bool ext4_has_unknown_ext4_ro_compat_features(struct super_block *sb)
++{
++      return ((EXT4_SB(sb)->s_es->s_feature_ro_compat &
++              cpu_to_le32(~EXT4_FEATURE_RO_COMPAT_SUPP)) != 0);
++}
++static inline bool ext4_has_unknown_ext4_incompat_features(struct super_block *sb)
++{
++      return ((EXT4_SB(sb)->s_es->s_feature_incompat &
++              cpu_to_le32(~EXT4_FEATURE_INCOMPAT_SUPP)) != 0);
++}
+ static inline bool ext4_has_compat_features(struct super_block *sb)
+ {
+@@ -3111,6 +3127,11 @@ struct ext4_extent;
+ extern int ext4_ext_tree_init(handle_t *handle, struct inode *);
+ extern int ext4_ext_writepage_trans_blocks(struct inode *, int);
++extern struct buffer_head *ext4_read_inode_bitmap(struct super_block *sb,
++                                                ext4_group_t block_group);
++extern struct buffer_head *ext4_append(handle_t *handle,
++                                     struct inode *inode,
++                                     ext4_lblk_t *block);
+ extern int ext4_ext_index_trans_blocks(struct inode *inode, int extents);
+ extern int ext4_ext_map_blocks(handle_t *handle, struct inode *inode,
+                              struct ext4_map_blocks *map, int flags);
+Index: linux-4.18.0-80.1.2.el8_0/fs/ext4/ialloc.c
+===================================================================
+--- linux-4.18.0-80.1.2.el8_0.orig/fs/ext4/ialloc.c
++++ linux-4.18.0-80.1.2.el8_0/fs/ext4/ialloc.c
+@@ -114,7 +114,7 @@ verified:
+  *
+  * Return buffer_head of bitmap on success or NULL.
+  */
+-static struct buffer_head *
++struct buffer_head *
+ ext4_read_inode_bitmap(struct super_block *sb, ext4_group_t block_group)
+ {
+       struct ext4_group_desc *desc;
+@@ -211,6 +211,7 @@ out:
+       put_bh(bh);
+       return ERR_PTR(err);
+ }
++EXPORT_SYMBOL(ext4_read_inode_bitmap);
+ /*
+  * NOTE! When we get the inode, we're the only people
+Index: linux-4.18.0-80.1.2.el8_0/fs/ext4/inode.c
+===================================================================
+--- linux-4.18.0-80.1.2.el8_0.orig/fs/ext4/inode.c
++++ linux-4.18.0-80.1.2.el8_0/fs/ext4/inode.c
+@@ -6267,3 +6267,18 @@ int ext4_filemap_fault(struct vm_fault *
+       return err;
+ }
++EXPORT_SYMBOL(ext4_map_blocks);
++EXPORT_SYMBOL(ext4_truncate);
++EXPORT_SYMBOL(ext4_iget);
++EXPORT_SYMBOL(ext4_bread);
++EXPORT_SYMBOL(ext4_itable_unused_count);
++EXPORT_SYMBOL(ext4_force_commit);
++EXPORT_SYMBOL(ext4_mark_inode_dirty);
++EXPORT_SYMBOL(ext4_get_group_desc);
++EXPORT_SYMBOL(__ext4_journal_get_write_access);
++EXPORT_SYMBOL(__ext4_journal_start_sb);
++EXPORT_SYMBOL(__ext4_journal_stop);
++EXPORT_SYMBOL(__ext4_handle_dirty_metadata);
++EXPORT_SYMBOL(__ext4_std_error);
++EXPORT_SYMBOL(ext4fs_dirhash);
++EXPORT_SYMBOL(ext4_get_inode_loc);
+Index: linux-4.18.0-80.1.2.el8_0/fs/ext4/namei.c
+===================================================================
+--- linux-4.18.0-80.1.2.el8_0.orig/fs/ext4/namei.c
++++ linux-4.18.0-80.1.2.el8_0/fs/ext4/namei.c
+@@ -49,7 +49,7 @@
+ #define NAMEI_RA_BLOCKS  4
+ #define NAMEI_RA_SIZE      (NAMEI_RA_CHUNKS * NAMEI_RA_BLOCKS)
+-static struct buffer_head *ext4_append(handle_t *handle,
++struct buffer_head *ext4_append(handle_t *handle,
+                                       struct inode *inode,
+                                       ext4_lblk_t *block)
+ {
+@@ -160,6 +160,7 @@ static struct buffer_head *__ext4_read_d
+       }
+       return bh;
+ }
++EXPORT_SYMBOL(ext4_append);
+ #ifndef assert
+ #define assert(test) J_ASSERT(test)
+@@ -2415,23 +2416,25 @@ EXPORT_SYMBOL(ext4_delete_entry);
+  * for checking S_ISDIR(inode) (since the INODE_INDEX feature will not be set
+  * on regular files) and to avoid creating huge/slow non-HTREE directories.
+  */
+-static void ext4_inc_count(handle_t *handle, struct inode *inode)
++void ext4_inc_count(handle_t *handle, struct inode *inode)
+ {
+       inc_nlink(inode);
+       if (is_dx(inode) &&
+           (inode->i_nlink > EXT4_LINK_MAX || inode->i_nlink == 2))
+               set_nlink(inode, 1);
+ }
++EXPORT_SYMBOL(ext4_inc_count);
+ /*
+  * If a directory had nlink == 1, then we should let it be 1. This indicates
+  * directory has >EXT4_LINK_MAX subdirs.
+  */
+-static void ext4_dec_count(handle_t *handle, struct inode *inode)
++void ext4_dec_count(handle_t *handle, struct inode *inode)
+ {
+       if (!S_ISDIR(inode->i_mode) || inode->i_nlink > 2)
+               drop_nlink(inode);
+ }
++EXPORT_SYMBOL(ext4_dec_count);
+ static int ext4_add_nondir(handle_t *handle,
+Index: linux-4.18.0-80.1.2.el8_0/fs/ext4/super.c
+===================================================================
+--- linux-4.18.0-80.1.2.el8_0.orig/fs/ext4/super.c
++++ linux-4.18.0-80.1.2.el8_0/fs/ext4/super.c
+@@ -323,11 +323,11 @@ static void __save_error_info(struct sup
+               return;
+       es->s_state |= cpu_to_le16(EXT4_ERROR_FS);
+       es->s_last_error_time = cpu_to_le32(get_seconds());
+-      strncpy(es->s_last_error_func, func, sizeof(es->s_last_error_func));
++      strlcpy(es->s_last_error_func, func, sizeof(es->s_last_error_func));
+       es->s_last_error_line = cpu_to_le32(line);
+       if (!es->s_first_error_time) {
+               es->s_first_error_time = es->s_last_error_time;
+-              strncpy(es->s_first_error_func, func,
++              strlcpy(es->s_first_error_func, func,
+                       sizeof(es->s_first_error_func));
+               es->s_first_error_line = cpu_to_le32(line);
+               es->s_first_error_ino = es->s_last_error_ino;
+@@ -5957,16 +5957,12 @@ static int __init ext4_init_fs(void)
+       err = init_inodecache();
+       if (err)
+               goto out1;
+-      register_as_ext3();
+-      register_as_ext2();
+       err = register_filesystem(&ext4_fs_type);
+       if (err)
+               goto out;
+       return 0;
+ out:
+-      unregister_as_ext2();
+-      unregister_as_ext3();
+       destroy_inodecache();
+ out1:
+       ext4_exit_mballoc();
+@@ -5985,8 +5981,6 @@ out5:
+ static void __exit ext4_exit_fs(void)
+ {
+       ext4_destroy_lazyinit_thread();
+-      unregister_as_ext2();
+-      unregister_as_ext3();
+       unregister_filesystem(&ext4_fs_type);
+       destroy_inodecache();
+       ext4_exit_mballoc();
diff --git a/ldiskfs/kernel_patches/patches/rhel8/ext4-nocmtime.patch b/ldiskfs/kernel_patches/patches/rhel8/ext4-nocmtime.patch
new file mode 100644 (file)
index 0000000..dc4712c
--- /dev/null
@@ -0,0 +1,18 @@
+Index: linux-4.18.0-32.el8.x86_64/fs/ext4/ext4.h
+===================================================================
+--- linux-4.18.0-32.el8.x86_64.orig/fs/ext4/ext4.h
++++ linux-4.18.0-32.el8.x86_64/fs/ext4/ext4.h
+@@ -655,6 +655,13 @@ enum {
+ #define EXT4_GOING_FLAGS_LOGFLUSH             0x1     /* flush log but not data */
+ #define EXT4_GOING_FLAGS_NOLOGFLUSH           0x2     /* don't flush log nor data */
++static inline struct timespec64 ext4_current_time(struct inode *inode)
++{
++      if (IS_NOCMTIME(inode))
++              return inode->i_ctime;
++      return current_time(inode);
++}
++#define current_time(a) ext4_current_time(a)
+ #if defined(__KERNEL__) && defined(CONFIG_COMPAT)
+ /*
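
A toy stand-alone version of the rule the ext4_current_time() wrapper above encodes (types invented for the demo): an inode flagged NOCMTIME keeps the ctime it already carries, while a regular inode is stamped with the current time.

    #include <stdio.h>
    #include <stdbool.h>
    #include <time.h>

    struct toy_inode {
            bool   nocmtime;
            time_t ctime;
    };

    static time_t toy_current_time(const struct toy_inode *inode)
    {
            if (inode->nocmtime)
                    return inode->ctime;     /* preserve the caller-set timestamp */
            return time(NULL);               /* normal inodes get "now" */
    }

    int main(void)
    {
            struct toy_inode ino = { .nocmtime = true, .ctime = 1000000000 };

            printf("nocmtime inode keeps ctime: %ld\n", (long)toy_current_time(&ino));
            ino.nocmtime = false;
            printf("regular inode gets now:     %ld\n", (long)toy_current_time(&ino));
            return 0;
    }
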
diff --git a/ldiskfs/kernel_patches/patches/rhel8/ext4-pdirop.patch b/ldiskfs/kernel_patches/patches/rhel8/ext4-pdirop.patch
new file mode 100644 (file)
index 0000000..b47982f
--- /dev/null
@@ -0,0 +1,1942 @@
+Single directory performance is critical for HPC workloads. In a
+typical use case an application creates a separate output file for
+each node and task in a job. As nodes and tasks increase, hundreds
+of thousands of files may be created in a single directory within
+a short window of time.
+Today, both filename lookup and file system modifying operations
+(such as create and unlink) are protected with a single lock for
+an entire ldiskfs directory. PDO project will remove this
+bottleneck by introducing a parallel locking mechanism for entire
+ldiskfs directories. This work will enable multiple application
+threads to simultaneously lookup, create and unlink in parallel.
+
+This patch contains:
+ - pdirops support for ldiskfs
+ - integrate with osd-ldiskfs
+
+Index: linux-4.18.0-80.1.2.el8_0/fs/ext4/Makefile
+===================================================================
+--- linux-4.18.0-80.1.2.el8_0.orig/fs/ext4/Makefile
++++ linux-4.18.0-80.1.2.el8_0/fs/ext4/Makefile
+@@ -7,6 +7,7 @@ obj-$(CONFIG_EXT4_FS) += ext4.o
+ ext4-y        := balloc.o bitmap.o block_validity.o dir.o ext4_jbd2.o extents.o \
+               extents_status.o file.o fsmap.o fsync.o hash.o ialloc.o \
++              htree_lock.o \
+               indirect.o inline.o inode.o ioctl.o mballoc.o migrate.o \
+               mmp.o move_extent.o namei.o page-io.o readpage.o resize.o \
+               super.o symlink.o sysfs.o xattr.o xattr_trusted.o xattr_user.o
+Index: linux-4.18.0-80.1.2.el8_0/fs/ext4/ext4.h
+===================================================================
+--- linux-4.18.0-80.1.2.el8_0.orig/fs/ext4/ext4.h
++++ linux-4.18.0-80.1.2.el8_0/fs/ext4/ext4.h
+@@ -29,6 +29,7 @@
+ #include <linux/timer.h>
+ #include <linux/version.h>
+ #include <linux/wait.h>
++#include <linux/htree_lock.h>
+ #include <linux/sched/signal.h>
+ #include <linux/blockgroup_lock.h>
+ #include <linux/percpu_counter.h>
+@@ -934,6 +935,9 @@ struct ext4_inode_info {
+       __u32   i_dtime;
+       ext4_fsblk_t    i_file_acl;
++      /* following fields for parallel directory operations -bzzz */
++      struct semaphore i_append_sem;
++
+       /*
+        * i_block_group is the number of the block group which contains
+        * this file's inode.  Constant across the lifetime of the inode,
+@@ -2109,6 +2113,72 @@ struct dx_hash_info
+  */
+ #define HASH_NB_ALWAYS                1
++/* assume name-hash is protected by upper layer */
++#define EXT4_HTREE_LOCK_HASH  0
++
++enum ext4_pdo_lk_types {
++#if EXT4_HTREE_LOCK_HASH
++      EXT4_LK_HASH,
++#endif
++      EXT4_LK_DX,             /* index block */
++      EXT4_LK_DE,             /* directory entry block */
++      EXT4_LK_SPIN,           /* spinlock */
++      EXT4_LK_MAX,
++};
++
++/* read-only bit */
++#define EXT4_LB_RO(b)         (1 << (b))
++/* read + write, high bits for writer */
++#define EXT4_LB_RW(b)         ((1 << (b)) | (1 << (EXT4_LK_MAX + (b))))
++
++enum ext4_pdo_lock_bits {
++      /* DX lock bits */
++      EXT4_LB_DX_RO           = EXT4_LB_RO(EXT4_LK_DX),
++      EXT4_LB_DX              = EXT4_LB_RW(EXT4_LK_DX),
++      /* DE lock bits */
++      EXT4_LB_DE_RO           = EXT4_LB_RO(EXT4_LK_DE),
++      EXT4_LB_DE              = EXT4_LB_RW(EXT4_LK_DE),
++      /* DX spinlock bits */
++      EXT4_LB_SPIN_RO         = EXT4_LB_RO(EXT4_LK_SPIN),
++      EXT4_LB_SPIN            = EXT4_LB_RW(EXT4_LK_SPIN),
++      /* accurate searching */
++      EXT4_LB_EXACT           = EXT4_LB_RO(EXT4_LK_MAX << 1),
++};
++
++enum ext4_pdo_lock_opc {
++      /* external */
++      EXT4_HLOCK_READDIR      = (EXT4_LB_DE_RO | EXT4_LB_DX_RO),
++      EXT4_HLOCK_LOOKUP       = (EXT4_LB_DE_RO | EXT4_LB_SPIN_RO |
++                                 EXT4_LB_EXACT),
++      EXT4_HLOCK_DEL          = (EXT4_LB_DE | EXT4_LB_SPIN_RO |
++                                 EXT4_LB_EXACT),
++      EXT4_HLOCK_ADD          = (EXT4_LB_DE | EXT4_LB_SPIN_RO),
++
++      /* internal */
++      EXT4_HLOCK_LOOKUP_SAFE  = (EXT4_LB_DE_RO | EXT4_LB_DX_RO |
++                                 EXT4_LB_EXACT),
++      EXT4_HLOCK_DEL_SAFE     = (EXT4_LB_DE | EXT4_LB_DX_RO | EXT4_LB_EXACT),
++      EXT4_HLOCK_SPLIT        = (EXT4_LB_DE | EXT4_LB_DX | EXT4_LB_SPIN),
++};
++
++extern struct htree_lock_head *ext4_htree_lock_head_alloc(unsigned hbits);
++#define ext4_htree_lock_head_free(lhead)      htree_lock_head_free(lhead)
++
++extern struct htree_lock *ext4_htree_lock_alloc(void);
++#define ext4_htree_lock_free(lck)             htree_lock_free(lck)
++
++extern void ext4_htree_lock(struct htree_lock *lck,
++                          struct htree_lock_head *lhead,
++                          struct inode *dir, unsigned flags);
++#define ext4_htree_unlock(lck)                  htree_unlock(lck)
++
++extern struct buffer_head *__ext4_find_entry(struct inode *dir,
++                                      const struct qstr *d_name,
++                                      struct ext4_dir_entry_2 **res_dir,
++                                      int *inlined, struct htree_lock *lck);
++extern int __ext4_add_entry(handle_t *handle, struct dentry *dentry,
++                    struct inode *inode, struct htree_lock *lck);
++
+ struct ext4_filename {
+       const struct qstr *usr_fname;
+       struct fscrypt_str disk_name;
+@@ -2416,8 +2486,16 @@ void ext4_insert_dentry(struct inode *in
+                       struct ext4_filename *fname, void *data);
+ static inline void ext4_update_dx_flag(struct inode *inode)
+ {
++      /* Disable it for ldiskfs, because going from a DX directory to
++       * a non-DX directory while it is in use will completely break
++       * the htree-locking.
++       * If we really want to support this operation in the future,
++       * we need to exclusively lock the directory at here which will
++       * increase complexity of code */
++#if 0
+       if (!ext4_has_feature_dir_index(inode->i_sb))
+               ext4_clear_inode_flag(inode, EXT4_INODE_INDEX);
++#endif
+ }
+ static const unsigned char ext4_filetype_table[] = {
+       DT_UNKNOWN, DT_REG, DT_DIR, DT_CHR, DT_BLK, DT_FIFO, DT_SOCK, DT_LNK
+Index: linux-4.18.0-80.1.2.el8_0/fs/ext4/htree_lock.c
+===================================================================
+--- /dev/null
++++ linux-4.18.0-80.1.2.el8_0/fs/ext4/htree_lock.c
+@@ -0,0 +1,891 @@
++/*
++ * fs/ext4/htree_lock.c
++ *
++ * Copyright (c) 2011, 2012, Intel Corporation.
++ *
++ * Author: Liang Zhen <liang@whamcloud.com>
++ */
++#include <linux/jbd2.h>
++#include <linux/hash.h>
++#include <linux/module.h>
++#include <linux/htree_lock.h>
++
++enum {
++      HTREE_LOCK_BIT_EX       = (1 << HTREE_LOCK_EX),
++      HTREE_LOCK_BIT_PW       = (1 << HTREE_LOCK_PW),
++      HTREE_LOCK_BIT_PR       = (1 << HTREE_LOCK_PR),
++      HTREE_LOCK_BIT_CW       = (1 << HTREE_LOCK_CW),
++      HTREE_LOCK_BIT_CR       = (1 << HTREE_LOCK_CR),
++};
++
++enum {
++      HTREE_LOCK_COMPAT_EX    = 0,
++      HTREE_LOCK_COMPAT_PW    = HTREE_LOCK_COMPAT_EX | HTREE_LOCK_BIT_CR,
++      HTREE_LOCK_COMPAT_PR    = HTREE_LOCK_COMPAT_PW | HTREE_LOCK_BIT_PR,
++      HTREE_LOCK_COMPAT_CW    = HTREE_LOCK_COMPAT_PW | HTREE_LOCK_BIT_CW,
++      HTREE_LOCK_COMPAT_CR    = HTREE_LOCK_COMPAT_CW | HTREE_LOCK_BIT_PR |
++                                HTREE_LOCK_BIT_PW,
++};
++
++static int htree_lock_compat[] = {
++      [HTREE_LOCK_EX]         HTREE_LOCK_COMPAT_EX,
++      [HTREE_LOCK_PW]         HTREE_LOCK_COMPAT_PW,
++      [HTREE_LOCK_PR]         HTREE_LOCK_COMPAT_PR,
++      [HTREE_LOCK_CW]         HTREE_LOCK_COMPAT_CW,
++      [HTREE_LOCK_CR]         HTREE_LOCK_COMPAT_CR,
++};
++
++/* max allowed htree-lock depth.
++ * We only need depth=3 for ext4 although user can have higher value. */
++#define HTREE_LOCK_DEP_MAX    16
++
++#ifdef HTREE_LOCK_DEBUG
++
++static char *hl_name[] = {
++      [HTREE_LOCK_EX]         "EX",
++      [HTREE_LOCK_PW]         "PW",
++      [HTREE_LOCK_PR]         "PR",
++      [HTREE_LOCK_CW]         "CW",
++      [HTREE_LOCK_CR]         "CR",
++};
++
++/* lock stats */
++struct htree_lock_node_stats {
++      unsigned long long      blocked[HTREE_LOCK_MAX];
++      unsigned long long      granted[HTREE_LOCK_MAX];
++      unsigned long long      retried[HTREE_LOCK_MAX];
++      unsigned long long      events;
++};
++
++struct htree_lock_stats {
++      struct htree_lock_node_stats    nodes[HTREE_LOCK_DEP_MAX];
++      unsigned long long      granted[HTREE_LOCK_MAX];
++      unsigned long long      blocked[HTREE_LOCK_MAX];
++};
++
++static struct htree_lock_stats hl_stats;
++
++void htree_lock_stat_reset(void)
++{
++      memset(&hl_stats, 0, sizeof(hl_stats));
++}
++
++void htree_lock_stat_print(int depth)
++{
++      int     i;
++      int     j;
++
++      printk(KERN_DEBUG "HTREE LOCK STATS:\n");
++      for (i = 0; i < HTREE_LOCK_MAX; i++) {
++              printk(KERN_DEBUG "[%s]: G [%10llu], B [%10llu]\n",
++                     hl_name[i], hl_stats.granted[i], hl_stats.blocked[i]);
++      }
++      for (i = 0; i < depth; i++) {
++              printk(KERN_DEBUG "HTREE CHILD [%d] STATS:\n", i);
++              for (j = 0; j < HTREE_LOCK_MAX; j++) {
++                      printk(KERN_DEBUG
++                              "[%s]: G [%10llu], B [%10llu], R [%10llu]\n",
++                              hl_name[j], hl_stats.nodes[i].granted[j],
++                              hl_stats.nodes[i].blocked[j],
++                              hl_stats.nodes[i].retried[j]);
++              }
++      }
++}
++
++#define lk_grant_inc(m)       do { hl_stats.granted[m]++; } while (0)
++#define lk_block_inc(m)       do { hl_stats.blocked[m]++; } while (0)
++#define ln_grant_inc(d, m)    do { hl_stats.nodes[d].granted[m]++; } while (0)
++#define ln_block_inc(d, m)    do { hl_stats.nodes[d].blocked[m]++; } while (0)
++#define ln_retry_inc(d, m)    do { hl_stats.nodes[d].retried[m]++; } while (0)
++#define ln_event_inc(d)       do { hl_stats.nodes[d].events++; } while (0)
++
++#else /* !DEBUG */
++
++void htree_lock_stat_reset(void) {}
++void htree_lock_stat_print(int depth) {}
++
++#define lk_grant_inc(m)             do {} while (0)
++#define lk_block_inc(m)             do {} while (0)
++#define ln_grant_inc(d, m)    do {} while (0)
++#define ln_block_inc(d, m)    do {} while (0)
++#define ln_retry_inc(d, m)    do {} while (0)
++#define ln_event_inc(d)             do {} while (0)
++
++#endif /* DEBUG */
++
++EXPORT_SYMBOL(htree_lock_stat_reset);
++EXPORT_SYMBOL(htree_lock_stat_print);
++
++#define HTREE_DEP_ROOT                  (-1)
++
++#define htree_spin_lock(lhead, dep)                           \
++      bit_spin_lock((dep) + 1, &(lhead)->lh_lock)
++#define htree_spin_unlock(lhead, dep)                         \
++      bit_spin_unlock((dep) + 1, &(lhead)->lh_lock)
++
++#define htree_key_event_ignore(child, ln)                     \
++      (!((child)->lc_events & (1 << (ln)->ln_mode)))
++
++static int
++htree_key_list_empty(struct htree_lock_node *ln)
++{
++      return list_empty(&ln->ln_major_list) && list_empty(&ln->ln_minor_list);
++}
++
++static void
++htree_key_list_del_init(struct htree_lock_node *ln)
++{
++      struct htree_lock_node *tmp = NULL;
++
++      if (!list_empty(&ln->ln_minor_list)) {
++              tmp = list_entry(ln->ln_minor_list.next,
++                               struct htree_lock_node, ln_minor_list);
++              list_del_init(&ln->ln_minor_list);
++      }
++
++      if (list_empty(&ln->ln_major_list))
++              return;
++
++      if (tmp == NULL) { /* not on minor key list */
++              list_del_init(&ln->ln_major_list);
++      } else {
++              BUG_ON(!list_empty(&tmp->ln_major_list));
++              list_replace_init(&ln->ln_major_list, &tmp->ln_major_list);
++      }
++}
++
++static void
++htree_key_list_replace_init(struct htree_lock_node *old,
++                          struct htree_lock_node *new)
++{
++      if (!list_empty(&old->ln_major_list))
++              list_replace_init(&old->ln_major_list, &new->ln_major_list);
++
++      if (!list_empty(&old->ln_minor_list))
++              list_replace_init(&old->ln_minor_list, &new->ln_minor_list);
++}
++
++static void
++htree_key_event_enqueue(struct htree_lock_child *child,
++                      struct htree_lock_node *ln, int dep, void *event)
++{
++      struct htree_lock_node *tmp;
++
++      /* NB: ALWAYS called holding lhead::lh_lock(dep) */
++      BUG_ON(ln->ln_mode == HTREE_LOCK_NL);
++      if (event == NULL || htree_key_event_ignore(child, ln))
++              return;
++
++      /* shouldn't be a very long list */
++      list_for_each_entry(tmp, &ln->ln_alive_list, ln_alive_list) {
++              if (tmp->ln_mode == HTREE_LOCK_NL) {
++                      ln_event_inc(dep);
++                      if (child->lc_callback != NULL)
++                              child->lc_callback(tmp->ln_ev_target, event);
++              }
++      }
++}
++
++static int
++htree_node_lock_enqueue(struct htree_lock *newlk, struct htree_lock *curlk,
++                      unsigned dep, int wait, void *event)
++{
++      struct htree_lock_child *child = &newlk->lk_head->lh_children[dep];
++      struct htree_lock_node *newln = &newlk->lk_nodes[dep];
++      struct htree_lock_node *curln = &curlk->lk_nodes[dep];
++
++      /* NB: ALWAYS called holding lhead::lh_lock(dep) */
++      /* NB: we only expect PR/PW lock modes here; only these two modes are
++       * allowed for htree_node_lock (asserted in htree_node_lock_internal).
++       * NL is only used for listeners, users can't directly request NL mode */
++      if ((curln->ln_mode == HTREE_LOCK_NL) ||
++          (curln->ln_mode != HTREE_LOCK_PW &&
++           newln->ln_mode != HTREE_LOCK_PW)) {
++              /* no conflict, attach it on granted list of @curlk */
++              if (curln->ln_mode != HTREE_LOCK_NL) {
++                      list_add(&newln->ln_granted_list,
++                               &curln->ln_granted_list);
++              } else {
++                      /* replace key owner */
++                      htree_key_list_replace_init(curln, newln);
++              }
++
++              list_add(&newln->ln_alive_list, &curln->ln_alive_list);
++              htree_key_event_enqueue(child, newln, dep, event);
++              ln_grant_inc(dep, newln->ln_mode);
++              return 1; /* still hold lh_lock */
++      }
++
++      if (!wait) { /* can't grant and don't want to wait */
++              ln_retry_inc(dep, newln->ln_mode);
++              newln->ln_mode = HTREE_LOCK_INVAL;
++              return -1; /* don't wait and just return -1 */
++      }
++
++      newlk->lk_task = current;
++      set_current_state(TASK_UNINTERRUPTIBLE);
++      /* conflict, attach it on blocked list of curlk */
++      list_add_tail(&newln->ln_blocked_list, &curln->ln_blocked_list);
++      list_add(&newln->ln_alive_list, &curln->ln_alive_list);
++      ln_block_inc(dep, newln->ln_mode);
++
++      htree_spin_unlock(newlk->lk_head, dep);
++      /* wait to be given the lock */
++      if (newlk->lk_task != NULL)
++              schedule();
++      /* granted, no doubt, wake up will set me RUNNING */
++      if (event == NULL || htree_key_event_ignore(child, newln))
++              return 0; /* granted without lh_lock */
++
++      htree_spin_lock(newlk->lk_head, dep);
++      htree_key_event_enqueue(child, newln, dep, event);
++      return 1; /* still hold lh_lock */
++}
++
++/*
++ * get PR/PW access to a particular tree-node according to @dep and @key;
++ * it will return -1 if @wait is false and the lock can't be granted
++ * immediately.
++ * All listeners (HTREE_LOCK_NL) on @dep with the same @key will get
++ * @event if it's not NULL.
++ * NB: ALWAYS called holding lhead::lh_lock
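++ * Return: 1 when granted with lhead::lh_lock(dep) still held, 0 when
++ * granted after sleeping (lh_lock already released), -1 when not granted.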
++ */
++static int
++htree_node_lock_internal(struct htree_lock_head *lhead, struct htree_lock *lck,
++                       htree_lock_mode_t mode, u32 key, unsigned dep,
++                       int wait, void *event)
++{
++      LIST_HEAD(list);
++      struct htree_lock       *tmp;
++      struct htree_lock       *tmp2;
++      u16                     major;
++      u16                     minor;
++      u8                      reverse;
++      u8                      ma_bits;
++      u8                      mi_bits;
++
++      BUG_ON(mode != HTREE_LOCK_PW && mode != HTREE_LOCK_PR);
++      BUG_ON(htree_node_is_granted(lck, dep));
++
++      key = hash_long(key, lhead->lh_hbits);
++
++      mi_bits = lhead->lh_hbits >> 1;
++      ma_bits = lhead->lh_hbits - mi_bits;
++
++      lck->lk_nodes[dep].ln_major_key = major = key & ((1U << ma_bits) - 1);
++      lck->lk_nodes[dep].ln_minor_key = minor = key >> ma_bits;
++      lck->lk_nodes[dep].ln_mode = mode;
++
++      /*
++       * The major key list is an ordered list, so searches are started
++       * at the end of the list that is numerically closer to major_key,
++       * so at most half of the list will be walked (for well-distributed
++       * keys). The list traversal aborts early if the expected key
++       * location is passed.
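++       * For example, with ma_bits = 8 the major keys span 0..255, so a
++       * major key >= 128 starts the search from the tail of the list.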
++       */
++      reverse = (major >= (1 << (ma_bits - 1)));
++
++      if (reverse) {
++              list_for_each_entry_reverse(tmp,
++                                      &lhead->lh_children[dep].lc_list,
++                                      lk_nodes[dep].ln_major_list) {
++                      if (tmp->lk_nodes[dep].ln_major_key == major) {
++                              goto search_minor;
++
++                      } else if (tmp->lk_nodes[dep].ln_major_key < major) {
++                              /* attach _after_ @tmp */
++                              list_add(&lck->lk_nodes[dep].ln_major_list,
++                                       &tmp->lk_nodes[dep].ln_major_list);
++                              goto out_grant_major;
++                      }
++              }
++
++              list_add(&lck->lk_nodes[dep].ln_major_list,
++                       &lhead->lh_children[dep].lc_list);
++              goto out_grant_major;
++
++      } else {
++              list_for_each_entry(tmp, &lhead->lh_children[dep].lc_list,
++                                  lk_nodes[dep].ln_major_list) {
++                      if (tmp->lk_nodes[dep].ln_major_key == major) {
++                              goto search_minor;
++
++                      } else if (tmp->lk_nodes[dep].ln_major_key > major) {
++                              /* insert _before_ @tmp */
++                              list_add_tail(&lck->lk_nodes[dep].ln_major_list,
++                                      &tmp->lk_nodes[dep].ln_major_list);
++                              goto out_grant_major;
++                      }
++              }
++
++              list_add_tail(&lck->lk_nodes[dep].ln_major_list,
++                            &lhead->lh_children[dep].lc_list);
++              goto out_grant_major;
++      }
++
++ search_minor:
++      /*
++       * NB: the minor_key list doesn't have a "head", @list is just a
++       * temporary stub to help with the list search; make sure it is
++       * removed after searching.
++       * The minor_key list is an ordered list too.
++       */
++      list_add_tail(&list, &tmp->lk_nodes[dep].ln_minor_list);
++
++      reverse = (minor >= (1 << (mi_bits - 1)));
++
++      if (reverse) {
++              list_for_each_entry_reverse(tmp2, &list,
++                                          lk_nodes[dep].ln_minor_list) {
++                      if (tmp2->lk_nodes[dep].ln_minor_key == minor) {
++                              goto out_enqueue;
++
++                      } else if (tmp2->lk_nodes[dep].ln_minor_key < minor) {
++                              /* attach _after_ @tmp2 */
++                              list_add(&lck->lk_nodes[dep].ln_minor_list,
++                                       &tmp2->lk_nodes[dep].ln_minor_list);
++                              goto out_grant_minor;
++                      }
++              }
++
++              list_add(&lck->lk_nodes[dep].ln_minor_list, &list);
++
++      } else {
++              list_for_each_entry(tmp2, &list,
++                                  lk_nodes[dep].ln_minor_list) {
++                      if (tmp2->lk_nodes[dep].ln_minor_key == minor) {
++                              goto out_enqueue;
++
++                      } else if (tmp2->lk_nodes[dep].ln_minor_key > minor) {
++                              /* insert _before_ @tmp2 */
++                              list_add_tail(&lck->lk_nodes[dep].ln_minor_list,
++                                      &tmp2->lk_nodes[dep].ln_minor_list);
++                              goto out_grant_minor;
++                      }
++              }
++
++              list_add_tail(&lck->lk_nodes[dep].ln_minor_list, &list);
++      }
++
++ out_grant_minor:
++      if (list.next == &lck->lk_nodes[dep].ln_minor_list) {
++              /* new lock @lck is the first one on minor_key list, which
++               * means it has the smallest minor_key and it should
++               * replace @tmp as minor_key owner */
++              list_replace_init(&tmp->lk_nodes[dep].ln_major_list,
++                                &lck->lk_nodes[dep].ln_major_list);
++      }
++      /* remove the temporary head */
++      list_del(&list);
++
++ out_grant_major:
++      ln_grant_inc(dep, lck->lk_nodes[dep].ln_mode);
++      return 1; /* granted with holding lh_lock */
++
++ out_enqueue:
++      list_del(&list); /* remove temporary head */
++      return htree_node_lock_enqueue(lck, tmp2, dep, wait, event);
++}
++
++/*
++ * release the key of @lck at level @dep, and grant any blocked locks.
++ * the caller will still listen on @key if @event is not NULL, which means
++ * the caller can see an event (via event_cb) while granting any lock with
++ * the same key at level @dep.
++ * NB: ALWAYS called holding lhead::lh_lock
++ * NB: listener will not block anyone because listening mode is HTREE_LOCK_NL
++ */
++static void
++htree_node_unlock_internal(struct htree_lock_head *lhead,
++                         struct htree_lock *curlk, unsigned dep, void *event)
++{
++      struct htree_lock_node  *curln = &curlk->lk_nodes[dep];
++      struct htree_lock       *grtlk = NULL;
++      struct htree_lock_node  *grtln;
++      struct htree_lock       *poslk;
++      struct htree_lock       *tmplk;
++
++      if (!htree_node_is_granted(curlk, dep))
++              return;
++
++      if (!list_empty(&curln->ln_granted_list)) {
++              /* there is another granted lock */
++              grtlk = list_entry(curln->ln_granted_list.next,
++                                 struct htree_lock,
++                                 lk_nodes[dep].ln_granted_list);
++              list_del_init(&curln->ln_granted_list);
++      }
++
++      if (grtlk == NULL && !list_empty(&curln->ln_blocked_list)) {
++              /*
++               * @curlk is the only granted lock, so we confirmed:
++               * a) curln is key owner (attached on major/minor_list),
++               *    so if there is any blocked lock, it should be attached
++               *    on curln->ln_blocked_list
++               * b) we always can grant the first blocked lock
++               */
++              grtlk = list_entry(curln->ln_blocked_list.next,
++                                 struct htree_lock,
++                                 lk_nodes[dep].ln_blocked_list);
++              BUG_ON(grtlk->lk_task == NULL);
++              wake_up_process(grtlk->lk_task);
++      }
++
++      if (event != NULL &&
++          lhead->lh_children[dep].lc_events != HTREE_EVENT_DISABLE) {
++              curln->ln_ev_target = event;
++              curln->ln_mode = HTREE_LOCK_NL; /* listen! */
++      } else {
++              curln->ln_mode = HTREE_LOCK_INVAL;
++      }
++
++      if (grtlk == NULL) { /* I must be the only one locking this key */
++              struct htree_lock_node *tmpln;
++
++              BUG_ON(htree_key_list_empty(curln));
++
++              if (curln->ln_mode == HTREE_LOCK_NL) /* listening */
++                      return;
++
++              /* not listening */
++              if (list_empty(&curln->ln_alive_list)) { /* no more listener */
++                      htree_key_list_del_init(curln);
++                      return;
++              }
++
++              tmpln = list_entry(curln->ln_alive_list.next,
++                                 struct htree_lock_node, ln_alive_list);
++
++              BUG_ON(tmpln->ln_mode != HTREE_LOCK_NL);
++
++              htree_key_list_replace_init(curln, tmpln);
++              list_del_init(&curln->ln_alive_list);
++
++              return;
++      }
++
++      /* have a granted lock */
++      grtln = &grtlk->lk_nodes[dep];
++      if (!list_empty(&curln->ln_blocked_list)) {
++              /* only key owner can be on both lists */
++              BUG_ON(htree_key_list_empty(curln));
++
++              if (list_empty(&grtln->ln_blocked_list)) {
++                      list_add(&grtln->ln_blocked_list,
++                               &curln->ln_blocked_list);
++              }
++              list_del_init(&curln->ln_blocked_list);
++      }
++      /*
++       * NB: this is the tricky part:
++       * We have only two modes for child-locks (PR and PW); also, only the
++       * owner of the key (attached on the major/minor_list) can be on
++       * both blocked_list and granted_list, so @grtlk must match one
++       * of these two cases:
++       *
++       * a) @grtlk is taken from granted_list, which means we've granted
++       *    more than one lock, so @grtlk has to be PR; the first blocked
++       *    lock must be PW and we can't grant it at all.
++       *    So even if @grtlk is not the owner of the key (empty
++       *    blocked_list), we don't care because we can't grant any lock.
++       * b) we just granted a new lock taken from the head of the blocked
++       *    list; it should be the first granted lock, and it should
++       *    be the first one linked on blocked_list.
++       *
++       * Either way, we get the correct result by iterating the
++       * blocked_list of @grtlk, and don't have to bother finding the
++       * owner of the current key.
++       */
++      list_for_each_entry_safe(poslk, tmplk, &grtln->ln_blocked_list,
++                               lk_nodes[dep].ln_blocked_list) {
++              if (grtlk->lk_nodes[dep].ln_mode == HTREE_LOCK_PW ||
++                  poslk->lk_nodes[dep].ln_mode == HTREE_LOCK_PW)
++                      break;
++              /* grant all readers */
++              list_del_init(&poslk->lk_nodes[dep].ln_blocked_list);
++              list_add(&poslk->lk_nodes[dep].ln_granted_list,
++                       &grtln->ln_granted_list);
++
++              BUG_ON(poslk->lk_task == NULL);
++              wake_up_process(poslk->lk_task);
++      }
++
++      /* if @curln is the owner of this key, replace it with @grtln */
++      if (!htree_key_list_empty(curln))
++              htree_key_list_replace_init(curln, grtln);
++
++      if (curln->ln_mode == HTREE_LOCK_INVAL)
++              list_del_init(&curln->ln_alive_list);
++}
++
++/*
++ * it's just a wrapper of htree_node_lock_internal; it returns 1 when the
++ * lock is granted and 0 only if @wait is false and the lock can't be
++ * granted immediately
++ */
++int
++htree_node_lock_try(struct htree_lock *lck, htree_lock_mode_t mode,
++                  u32 key, unsigned dep, int wait, void *event)
++{
++      struct htree_lock_head *lhead = lck->lk_head;
++      int rc;
++
++      BUG_ON(dep >= lck->lk_depth);
++      BUG_ON(lck->lk_mode == HTREE_LOCK_INVAL);
++
++      htree_spin_lock(lhead, dep);
++      rc = htree_node_lock_internal(lhead, lck, mode, key, dep, wait, event);
++      if (rc != 0)
++              htree_spin_unlock(lhead, dep);
++      return rc >= 0;
++}
++EXPORT_SYMBOL(htree_node_lock_try);
++
++/* it's a wrapper of htree_node_unlock_internal */
++void
++htree_node_unlock(struct htree_lock *lck, unsigned dep, void *event)
++{
++      struct htree_lock_head *lhead = lck->lk_head;
++
++      BUG_ON(dep >= lck->lk_depth);
++      BUG_ON(lck->lk_mode == HTREE_LOCK_INVAL);
++
++      htree_spin_lock(lhead, dep);
++      htree_node_unlock_internal(lhead, lck, dep, event);
++      htree_spin_unlock(lhead, dep);
++}
++EXPORT_SYMBOL(htree_node_unlock);
++
++/* stop listening on child-lock level @dep */
++void
++htree_node_stop_listen(struct htree_lock *lck, unsigned dep)
++{
++      struct htree_lock_node *ln = &lck->lk_nodes[dep];
++      struct htree_lock_node *tmp;
++
++      BUG_ON(htree_node_is_granted(lck, dep));
++      BUG_ON(!list_empty(&ln->ln_blocked_list));
++      BUG_ON(!list_empty(&ln->ln_granted_list));
++
++      if (!htree_node_is_listening(lck, dep))
++              return;
++
++      htree_spin_lock(lck->lk_head, dep);
++      ln->ln_mode = HTREE_LOCK_INVAL;
++      ln->ln_ev_target = NULL;
++
++      if (htree_key_list_empty(ln)) { /* not owner */
++              list_del_init(&ln->ln_alive_list);
++              goto out;
++      }
++
++      /* I'm the owner... */
++      if (list_empty(&ln->ln_alive_list)) { /* no more listener */
++              htree_key_list_del_init(ln);
++              goto out;
++      }
++
++      tmp = list_entry(ln->ln_alive_list.next,
++                       struct htree_lock_node, ln_alive_list);
++
++      BUG_ON(tmp->ln_mode != HTREE_LOCK_NL);
++      htree_key_list_replace_init(ln, tmp);
++      list_del_init(&ln->ln_alive_list);
++ out:
++      htree_spin_unlock(lck->lk_head, dep);
++}
++EXPORT_SYMBOL(htree_node_stop_listen);
++
++/* release all child-locks if we have any */
++static void
++htree_node_release_all(struct htree_lock *lck)
++{
++      int     i;
++
++      for (i = 0; i < lck->lk_depth; i++) {
++              if (htree_node_is_granted(lck, i))
++                      htree_node_unlock(lck, i, NULL);
++              else if (htree_node_is_listening(lck, i))
++                      htree_node_stop_listen(lck, i);
++      }
++}
++
++/*
++ * obtain the htree lock; it may block inside if @wait is true and there is
++ * a conflict with any granted or blocked lock.
++ * NB: ALWAYS called holding lhead::lh_lock
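++ * Return: 1 if granted immediately (still holding lhead::lh_lock), 0 if
++ * granted after blocking (lh_lock already released), -1 if @wait is false
++ * and the lock conflicts with a granted or blocked lock.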
++ */
++static int
++htree_lock_internal(struct htree_lock *lck, int wait)
++{
++      struct htree_lock_head *lhead = lck->lk_head;
++      int     granted = 0;
++      int     blocked = 0;
++      int     i;
++
++      for (i = 0; i < HTREE_LOCK_MAX; i++) {
++              if (lhead->lh_ngranted[i] != 0)
++                      granted |= 1 << i;
++              if (lhead->lh_nblocked[i] != 0)
++                      blocked |= 1 << i;
++      }
++      if ((htree_lock_compat[lck->lk_mode] & granted) != granted ||
++          (htree_lock_compat[lck->lk_mode] & blocked) != blocked) {
++              /* block the current lock even if it only conflicts with
++               * another blocked lock, so locks like EX won't starve */
++              if (!wait)
++                      return -1;
++              lhead->lh_nblocked[lck->lk_mode]++;
++              lk_block_inc(lck->lk_mode);
++
++              lck->lk_task = current;
++              list_add_tail(&lck->lk_blocked_list, &lhead->lh_blocked_list);
++
++retry:
++              set_current_state(TASK_UNINTERRUPTIBLE);
++              htree_spin_unlock(lhead, HTREE_DEP_ROOT);
++              /* wait to be given the lock */
++              if (lck->lk_task != NULL)
++                      schedule();
++              /* the wake-up sets us RUNNING, but the thread may be woken
++               * up spuriously, so we need to check again whether the lock
++               * was really granted. */
++              if (!list_empty(&lck->lk_blocked_list)) {
++                      htree_spin_lock(lhead, HTREE_DEP_ROOT);
++                      if (list_empty(&lck->lk_blocked_list)) {
++                              htree_spin_unlock(lhead, HTREE_DEP_ROOT);
++                              return 0;
++                      }
++                      goto retry;
++              }
++              return 0; /* without lh_lock */
++      }
++      lhead->lh_ngranted[lck->lk_mode]++;
++      lk_grant_inc(lck->lk_mode);
++      return 1;
++}
++
++/* release htree lock. NB: ALWAYS called holding lhead::lh_lock */
++static void
++htree_unlock_internal(struct htree_lock *lck)
++{
++      struct htree_lock_head *lhead = lck->lk_head;
++      struct htree_lock *tmp;
++      struct htree_lock *tmp2;
++      int granted = 0;
++      int i;
++
++      BUG_ON(lhead->lh_ngranted[lck->lk_mode] == 0);
++
++      lhead->lh_ngranted[lck->lk_mode]--;
++      lck->lk_mode = HTREE_LOCK_INVAL;
++
++      for (i = 0; i < HTREE_LOCK_MAX; i++) {
++              if (lhead->lh_ngranted[i] != 0)
++                      granted |= 1 << i;
++      }
++      list_for_each_entry_safe(tmp, tmp2,
++                               &lhead->lh_blocked_list, lk_blocked_list) {
++              /* conflict with any granted lock? */
++              if ((htree_lock_compat[tmp->lk_mode] & granted) != granted)
++                      break;
++
++              list_del_init(&tmp->lk_blocked_list);
++
++              BUG_ON(lhead->lh_nblocked[tmp->lk_mode] == 0);
++
++              lhead->lh_nblocked[tmp->lk_mode]--;
++              lhead->lh_ngranted[tmp->lk_mode]++;
++              granted |= 1 << tmp->lk_mode;
++
++              BUG_ON(tmp->lk_task == NULL);
++              wake_up_process(tmp->lk_task);
++      }
++}
++
++/* it's a wrapper of htree_lock_internal and an exported interface.
++ * It always returns 1 with the lock granted if @wait is true; it can return
++ * 0 if @wait is false and the locking request can't be granted immediately */
++int
++htree_lock_try(struct htree_lock *lck, struct htree_lock_head *lhead,
++             htree_lock_mode_t mode, int wait)
++{
++      int     rc;
++
++      BUG_ON(lck->lk_depth > lhead->lh_depth);
++      BUG_ON(lck->lk_head != NULL);
++      BUG_ON(lck->lk_task != NULL);
++
++      lck->lk_head = lhead;
++      lck->lk_mode = mode;
++
++      htree_spin_lock(lhead, HTREE_DEP_ROOT);
++      rc = htree_lock_internal(lck, wait);
++      if (rc != 0)
++              htree_spin_unlock(lhead, HTREE_DEP_ROOT);
++      return rc >= 0;
++}
++EXPORT_SYMBOL(htree_lock_try);
++
++/* it's a wrapper of htree_unlock_internal and an exported interface.
++ * It will release all htree_node_locks and the htree_lock itself */
++void
++htree_unlock(struct htree_lock *lck)
++{
++      BUG_ON(lck->lk_head == NULL);
++      BUG_ON(lck->lk_mode == HTREE_LOCK_INVAL);
++
++      htree_node_release_all(lck);
++
++      htree_spin_lock(lck->lk_head, HTREE_DEP_ROOT);
++      htree_unlock_internal(lck);
++      htree_spin_unlock(lck->lk_head, HTREE_DEP_ROOT);
++      lck->lk_head = NULL;
++      lck->lk_task = NULL;
++}
++EXPORT_SYMBOL(htree_unlock);
++
++/* change lock mode */
++void
++htree_change_mode(struct htree_lock *lck, htree_lock_mode_t mode)
++{
++      BUG_ON(lck->lk_mode == HTREE_LOCK_INVAL);
++      lck->lk_mode = mode;
++}
++EXPORT_SYMBOL(htree_change_mode);
++
++/* release the htree lock, then lock it again with a new mode.
++ * This function first releases all htree_node_locks and the htree_lock,
++ * then tries to regain the htree_lock with the new @mode.
++ * It always returns 1 with the lock granted if @wait is true; it can return
++ * 0 if @wait is false and the locking request can't be granted immediately */
++int
++htree_change_lock_try(struct htree_lock *lck, htree_lock_mode_t mode, int wait)
++{
++      struct htree_lock_head *lhead = lck->lk_head;
++      int rc;
++
++      BUG_ON(lhead == NULL);
++      BUG_ON(lck->lk_mode == mode);
++      BUG_ON(lck->lk_mode == HTREE_LOCK_INVAL || mode == HTREE_LOCK_INVAL);
++
++      htree_node_release_all(lck);
++
++      htree_spin_lock(lhead, HTREE_DEP_ROOT);
++      htree_unlock_internal(lck);
++      lck->lk_mode = mode;
++      rc = htree_lock_internal(lck, wait);
++      if (rc != 0)
++              htree_spin_unlock(lhead, HTREE_DEP_ROOT);
++      return rc >= 0;
++}
++EXPORT_SYMBOL(htree_change_lock_try);
++
++/* create a htree_lock head with @depth levels (number of child-locks),
++ * it is a per-resource structure */
++struct htree_lock_head *
++htree_lock_head_alloc(unsigned depth, unsigned hbits, unsigned priv)
++{
++      struct htree_lock_head *lhead;
++      int  i;
++
++      if (depth > HTREE_LOCK_DEP_MAX) {
++              printk(KERN_ERR "%d is larger than max htree_lock depth %d\n",
++                      depth, HTREE_LOCK_DEP_MAX);
++              return NULL;
++      }
++
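++      /* one allocation covers the head, lh_children[depth] and @priv
++       * bytes of caller private data */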
++      lhead = kzalloc(offsetof(struct htree_lock_head,
++                               lh_children[depth]) + priv, GFP_NOFS);
++      if (lhead == NULL)
++              return NULL;
++
++      if (hbits < HTREE_HBITS_MIN)
++              lhead->lh_hbits = HTREE_HBITS_MIN;
++      else if (hbits > HTREE_HBITS_MAX)
++              lhead->lh_hbits = HTREE_HBITS_MAX;
++      else
++              lhead->lh_hbits = hbits;
++
++      lhead->lh_lock = 0;
++      lhead->lh_depth = depth;
++      INIT_LIST_HEAD(&lhead->lh_blocked_list);
++      if (priv > 0) {
++              lhead->lh_private = (void *)lhead +
++                      offsetof(struct htree_lock_head, lh_children[depth]);
++      }
++
++      for (i = 0; i < depth; i++) {
++              INIT_LIST_HEAD(&lhead->lh_children[i].lc_list);
++              lhead->lh_children[i].lc_events = HTREE_EVENT_DISABLE;
++      }
++      return lhead;
++}
++EXPORT_SYMBOL(htree_lock_head_alloc);
++
++/* free the htree_lock head */
++void
++htree_lock_head_free(struct htree_lock_head *lhead)
++{
++      int     i;
++
++      BUG_ON(!list_empty(&lhead->lh_blocked_list));
++      for (i = 0; i < lhead->lh_depth; i++)
++              BUG_ON(!list_empty(&lhead->lh_children[i].lc_list));
++      kfree(lhead);
++}
++EXPORT_SYMBOL(htree_lock_head_free);
++
++/* register event callback for @events of child-lock at level @dep */
++void
++htree_lock_event_attach(struct htree_lock_head *lhead, unsigned dep,
++                      unsigned events, htree_event_cb_t callback)
++{
++      BUG_ON(lhead->lh_depth <= dep);
++      lhead->lh_children[dep].lc_events = events;
++      lhead->lh_children[dep].lc_callback = callback;
++}
++EXPORT_SYMBOL(htree_lock_event_attach);
++
++/* allocate a htree_lock, which is a per-thread structure; @pbytes is the
++ * number of extra bytes reserved as private data for the caller */
++struct htree_lock *
++htree_lock_alloc(unsigned depth, unsigned pbytes)
++{
++      struct htree_lock *lck;
++      int i = offsetof(struct htree_lock, lk_nodes[depth]);
++
++      if (depth > HTREE_LOCK_DEP_MAX) {
++              printk(KERN_ERR "%d is larger than max htree_lock depth %d\n",
++                      depth, HTREE_LOCK_DEP_MAX);
++              return NULL;
++      }
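++      /* one allocation covers the lock, lk_nodes[depth] and @pbytes of
++       * caller private data */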
++      lck = kzalloc(i + pbytes, GFP_NOFS);
++      if (lck == NULL)
++              return NULL;
++
++      if (pbytes != 0)
++              lck->lk_private = (void *)lck + i;
++      lck->lk_mode = HTREE_LOCK_INVAL;
++      lck->lk_depth = depth;
++      INIT_LIST_HEAD(&lck->lk_blocked_list);
++
++      for (i = 0; i < depth; i++) {
++              struct htree_lock_node *node = &lck->lk_nodes[i];
++
++              node->ln_mode = HTREE_LOCK_INVAL;
++              INIT_LIST_HEAD(&node->ln_major_list);
++              INIT_LIST_HEAD(&node->ln_minor_list);
++              INIT_LIST_HEAD(&node->ln_alive_list);
++              INIT_LIST_HEAD(&node->ln_blocked_list);
++              INIT_LIST_HEAD(&node->ln_granted_list);
++      }
++
++      return lck;
++}
++EXPORT_SYMBOL(htree_lock_alloc);
++
++/* free htree_lock node */
++void
++htree_lock_free(struct htree_lock *lck)
++{
++      BUG_ON(lck->lk_mode != HTREE_LOCK_INVAL);
++      kfree(lck);
++}
++EXPORT_SYMBOL(htree_lock_free);
+Index: linux-4.18.0-80.1.2.el8_0/fs/ext4/namei.c
+===================================================================
+--- linux-4.18.0-80.1.2.el8_0.orig/fs/ext4/namei.c
++++ linux-4.18.0-80.1.2.el8_0/fs/ext4/namei.c
+@@ -54,6 +54,7 @@ struct buffer_head *ext4_append(handle_t
+                                       ext4_lblk_t *block)
+ {
+       struct buffer_head *bh;
++      struct ext4_inode_info *ei = EXT4_I(inode);
+       int err;
+       if (unlikely(EXT4_SB(inode->i_sb)->s_max_dir_size_kb &&
+@@ -61,15 +62,22 @@ struct buffer_head *ext4_append(handle_t
+                     EXT4_SB(inode->i_sb)->s_max_dir_size_kb)))
+               return ERR_PTR(-ENOSPC);
++      /* with parallel dir operations all appends
++       * have to be serialized -bzzz */
++      down(&ei->i_append_sem);
++
+       *block = inode->i_size >> inode->i_sb->s_blocksize_bits;
+       bh = ext4_bread(handle, inode, *block, EXT4_GET_BLOCKS_CREATE);
+-      if (IS_ERR(bh))
++      if (IS_ERR(bh)) {
++              up(&ei->i_append_sem);
+               return bh;
++      }
+       inode->i_size += inode->i_sb->s_blocksize;
+       EXT4_I(inode)->i_disksize = inode->i_size;
+       BUFFER_TRACE(bh, "get_write_access");
+       err = ext4_journal_get_write_access(handle, bh);
++      up(&ei->i_append_sem);
+       if (err) {
+               brelse(bh);
+               ext4_std_error(inode->i_sb, err);
+@@ -250,7 +258,8 @@ static unsigned dx_node_limit(struct ino
+ static struct dx_frame *dx_probe(struct ext4_filename *fname,
+                                struct inode *dir,
+                                struct dx_hash_info *hinfo,
+-                               struct dx_frame *frame);
++                               struct dx_frame *frame,
++                               struct htree_lock *lck);
+ static void dx_release(struct dx_frame *frames);
+ static int dx_make_map(struct inode *dir, struct ext4_dir_entry_2 *de,
+                      unsigned blocksize, struct dx_hash_info *hinfo,
+@@ -264,12 +273,13 @@ static void dx_insert_block(struct dx_fr
+ static int ext4_htree_next_block(struct inode *dir, __u32 hash,
+                                struct dx_frame *frame,
+                                struct dx_frame *frames,
+-                               __u32 *start_hash);
++                               __u32 *start_hash, struct htree_lock *lck);
+ static struct buffer_head * ext4_dx_find_entry(struct inode *dir,
+               struct ext4_filename *fname,
+-              struct ext4_dir_entry_2 **res_dir);
++              struct ext4_dir_entry_2 **res_dir, struct htree_lock *lck);
+ static int ext4_dx_add_entry(handle_t *handle, struct ext4_filename *fname,
+-                           struct inode *dir, struct inode *inode);
++                           struct inode *dir, struct inode *inode,
++                           struct htree_lock *lck);
+ /* checksumming functions */
+ void initialize_dirent_tail(struct ext4_dir_entry_tail *t,
+@@ -733,6 +743,227 @@ struct stats dx_show_entries(struct dx_h
+ }
+ #endif /* DX_DEBUG */
++/* private data for htree_lock */
++struct ext4_dir_lock_data {
++      unsigned                ld_flags;  /* bits-map for lock types */
++      unsigned                ld_count;  /* # entries of the last DX block */
++      struct dx_entry         ld_at_entry; /* copy of leaf dx_entry */
++      struct dx_entry         *ld_at;    /* position of leaf dx_entry */
++};
++
++#define ext4_htree_lock_data(l)       ((struct ext4_dir_lock_data *)(l)->lk_private)
++#define ext4_find_entry(dir, name, dirent, inline) \
++                      __ext4_find_entry(dir, name, dirent, inline, NULL)
++#define ext4_add_entry(handle, dentry, inode) \
++                      __ext4_add_entry(handle, dentry, inode, NULL)
++
++/* NB: ext4_lblk_t is 32 bits so we use high bits to identify invalid blk */
++#define EXT4_HTREE_NODE_CHANGED       (0xcafeULL << 32)
++
++static void ext4_htree_event_cb(void *target, void *event)
++{
++      u64 *block = (u64 *)target;
++
++      if (*block == dx_get_block((struct dx_entry *)event))
++              *block = EXT4_HTREE_NODE_CHANGED;
++}
++
++struct htree_lock_head *ext4_htree_lock_head_alloc(unsigned hbits)
++{
++      struct htree_lock_head *lhead;
++
++      lhead = htree_lock_head_alloc(EXT4_LK_MAX, hbits, 0);
++      if (lhead != NULL) {
++              htree_lock_event_attach(lhead, EXT4_LK_SPIN, HTREE_EVENT_WR,
++                                      ext4_htree_event_cb);
++      }
++      return lhead;
++}
++EXPORT_SYMBOL(ext4_htree_lock_head_alloc);
++
++struct htree_lock *ext4_htree_lock_alloc(void)
++{
++      return htree_lock_alloc(EXT4_LK_MAX,
++                              sizeof(struct ext4_dir_lock_data));
++}
++EXPORT_SYMBOL(ext4_htree_lock_alloc);
++
++static htree_lock_mode_t ext4_htree_mode(unsigned flags)
++{
++      switch (flags) {
++      default: /* 0 or unknown flags require EX lock */
++              return HTREE_LOCK_EX;
++      case EXT4_HLOCK_READDIR:
++              return HTREE_LOCK_PR;
++      case EXT4_HLOCK_LOOKUP:
++              return HTREE_LOCK_CR;
++      case EXT4_HLOCK_DEL:
++      case EXT4_HLOCK_ADD:
++              return HTREE_LOCK_CW;
++      }
++}
++
++/* return PR for read-only operations, otherwise return EX */
++static inline htree_lock_mode_t ext4_htree_safe_mode(unsigned flags)
++{
++      int writer = (flags & EXT4_LB_DE) == EXT4_LB_DE;
++
++      /* 0 requires EX lock */
++      return (flags == 0 || writer) ? HTREE_LOCK_EX : HTREE_LOCK_PR;
++}
++
++static int ext4_htree_safe_locked(struct htree_lock *lck)
++{
++      int writer;
++
++      if (lck == NULL || lck->lk_mode == HTREE_LOCK_EX)
++              return 1;
++
++      writer = (ext4_htree_lock_data(lck)->ld_flags & EXT4_LB_DE) ==
++               EXT4_LB_DE;
++      if (writer) /* all readers & writers are excluded? */
++              return lck->lk_mode == HTREE_LOCK_EX;
++
++      /* all writers are excluded? */
++      return lck->lk_mode == HTREE_LOCK_PR ||
++             lck->lk_mode == HTREE_LOCK_PW ||
++             lck->lk_mode == HTREE_LOCK_EX;
++}
++
++/* relock htree_lock with EX mode if it's a change operation, otherwise
++ * relock it with PR mode. It's a no-op if PDO is disabled. */
++static void ext4_htree_safe_relock(struct htree_lock *lck)
++{
++      if (!ext4_htree_safe_locked(lck)) {
++              unsigned flags = ext4_htree_lock_data(lck)->ld_flags;
++
++              htree_change_lock(lck, ext4_htree_safe_mode(flags));
++      }
++}
++
++void ext4_htree_lock(struct htree_lock *lck, struct htree_lock_head *lhead,
++                   struct inode *dir, unsigned flags)
++{
++      htree_lock_mode_t mode = is_dx(dir) ? ext4_htree_mode(flags) :
++                                            ext4_htree_safe_mode(flags);
++
++      ext4_htree_lock_data(lck)->ld_flags = flags;
++      htree_lock(lck, lhead, mode);
++      if (!is_dx(dir))
++              ext4_htree_safe_relock(lck); /* make sure it's safe locked */
++}
++EXPORT_SYMBOL(ext4_htree_lock);
++
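++/* ffz(~lmask) yields the index of the lowest set bit in @lmask; it is used
++ * below as the child-lock level for the EXT4_LB_* lock masks */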
++static int ext4_htree_node_lock(struct htree_lock *lck, struct dx_entry *at,
++                              unsigned lmask, int wait, void *ev)
++{
++      u32     key = (at == NULL) ? 0 : dx_get_block(at);
++      u32     mode;
++
++      /* NOOP if htree is well protected or caller doesn't require the lock */
++      if (ext4_htree_safe_locked(lck) ||
++         !(ext4_htree_lock_data(lck)->ld_flags & lmask))
++              return 1;
++
++      mode = (ext4_htree_lock_data(lck)->ld_flags & lmask) == lmask ?
++              HTREE_LOCK_PW : HTREE_LOCK_PR;
++      while (1) {
++              if (htree_node_lock_try(lck, mode, key, ffz(~lmask), wait, ev))
++                      return 1;
++              if (!(lmask & EXT4_LB_SPIN)) /* not a spinlock */
++                      return 0;
++              cpu_relax(); /* spin until granted */
++      }
++}
++
++static int ext4_htree_node_locked(struct htree_lock *lck, unsigned lmask)
++{
++      return ext4_htree_safe_locked(lck) ||
++             htree_node_is_granted(lck, ffz(~lmask));
++}
++
++static void ext4_htree_node_unlock(struct htree_lock *lck,
++                                 unsigned lmask, void *buf)
++{
++      /* NB: it's safe to call this multiple times, even if it's not locked */
++      if (!ext4_htree_safe_locked(lck) &&
++           htree_node_is_granted(lck, ffz(~lmask)))
++              htree_node_unlock(lck, ffz(~lmask), buf);
++}
++
++#define ext4_htree_dx_lock(lck, key)          \
++      ext4_htree_node_lock(lck, key, EXT4_LB_DX, 1, NULL)
++#define ext4_htree_dx_lock_try(lck, key)      \
++      ext4_htree_node_lock(lck, key, EXT4_LB_DX, 0, NULL)
++#define ext4_htree_dx_unlock(lck)             \
++      ext4_htree_node_unlock(lck, EXT4_LB_DX, NULL)
++#define ext4_htree_dx_locked(lck)             \
++      ext4_htree_node_locked(lck, EXT4_LB_DX)
++
++static void ext4_htree_dx_need_lock(struct htree_lock *lck)
++{
++      struct ext4_dir_lock_data *ld;
++
++      if (ext4_htree_safe_locked(lck))
++              return;
++
++      ld = ext4_htree_lock_data(lck);
++      switch (ld->ld_flags) {
++      default:
++              return;
++      case EXT4_HLOCK_LOOKUP:
++              ld->ld_flags = EXT4_HLOCK_LOOKUP_SAFE;
++              return;
++      case EXT4_HLOCK_DEL:
++              ld->ld_flags = EXT4_HLOCK_DEL_SAFE;
++              return;
++      case EXT4_HLOCK_ADD:
++              ld->ld_flags = EXT4_HLOCK_SPLIT;
++              return;
++      }
++}
++
++#define ext4_htree_de_lock(lck, key)          \
++      ext4_htree_node_lock(lck, key, EXT4_LB_DE, 1, NULL)
++#define ext4_htree_de_unlock(lck)             \
++      ext4_htree_node_unlock(lck, EXT4_LB_DE, NULL)
++
++#define ext4_htree_spin_lock(lck, key, event) \
++      ext4_htree_node_lock(lck, key, EXT4_LB_SPIN, 0, event)
++#define ext4_htree_spin_unlock(lck)           \
++      ext4_htree_node_unlock(lck, EXT4_LB_SPIN, NULL)
++#define ext4_htree_spin_unlock_listen(lck, p) \
++      ext4_htree_node_unlock(lck, EXT4_LB_SPIN, p)
++
++static void ext4_htree_spin_stop_listen(struct htree_lock *lck)
++{
++      if (!ext4_htree_safe_locked(lck) &&
++          htree_node_is_listening(lck, ffz(~EXT4_LB_SPIN)))
++              htree_node_stop_listen(lck, ffz(~EXT4_LB_SPIN));
++}
++
++enum {
++      DX_HASH_COL_IGNORE,     /* ignore collision while probing frames */
++      DX_HASH_COL_YES,        /* there is collision and it does matter */
++      DX_HASH_COL_NO,         /* there is no collision */
++};
++
++static int dx_probe_hash_collision(struct htree_lock *lck,
++                                 struct dx_entry *entries,
++                                 struct dx_entry *at, u32 hash)
++{
++      if (!(lck && ext4_htree_lock_data(lck)->ld_flags & EXT4_LB_EXACT)) {
++              return DX_HASH_COL_IGNORE; /* don't care about collision */
++
++      } else if (at == entries + dx_get_count(entries) - 1) {
++              return DX_HASH_COL_IGNORE; /* not in any leaf of this DX */
++
++      } else { /* hash collision? */
++              return ((dx_get_hash(at + 1) & ~1) == hash) ?
++                      DX_HASH_COL_YES : DX_HASH_COL_NO;
++      }
++}
++
+ /*
+  * Probe for a directory leaf block to search.
+  *
+@@ -744,10 +975,11 @@ struct stats dx_show_entries(struct dx_h
+  */
+ static struct dx_frame *
+ dx_probe(struct ext4_filename *fname, struct inode *dir,
+-       struct dx_hash_info *hinfo, struct dx_frame *frame_in)
++       struct dx_hash_info *hinfo, struct dx_frame *frame_in,
++       struct htree_lock *lck)
+ {
+       unsigned count, indirect;
+-      struct dx_entry *at, *entries, *p, *q, *m;
++      struct dx_entry *at, *entries, *p, *q, *m, *dx = NULL;
+       struct dx_root_info *info;
+       struct dx_frame *frame = frame_in;
+       struct dx_frame *ret_err = ERR_PTR(ERR_BAD_DX_DIR);
+@@ -809,8 +1041,15 @@ dx_probe(struct ext4_filename *fname, st
+       dxtrace(printk("Look up %x", hash));
+       while (1) {
++              if (indirect == 0) { /* the last index level */
++                      /* NB: ext4_htree_dx_lock() could be a no-op if the
++                       * DX-lock flag is not set for the current operation */
++                      ext4_htree_dx_lock(lck, dx);
++                      ext4_htree_spin_lock(lck, dx, NULL);
++              }
+               count = dx_get_count(entries);
+-              if (!count || count > dx_get_limit(entries)) {
++              if (count == 0 || count > dx_get_limit(entries)) {
++                      ext4_htree_spin_unlock(lck); /* release spin */
+                       ext4_warning_inode(dir,
+                                          "dx entry: count %u beyond limit %u",
+                                          count, dx_get_limit(entries));
+@@ -849,8 +1088,70 @@ dx_probe(struct ext4_filename *fname, st
+                              dx_get_block(at)));
+               frame->entries = entries;
+               frame->at = at;
+-              if (!indirect--)
++
++              if (indirect == 0) { /* the last index level */
++                      struct ext4_dir_lock_data *ld;
++                      u64 myblock;
++
++                      /* By default we only lock the DE-block, however, we
++                       * will also lock the last level DX-block if:
++                       * a) there is a hash collision
++                       *    we will set the DX-lock flag (a few lines below)
++                       *    and redo the probe to lock the DX-block,
++                       *    see details in dx_probe_hash_collision()
++                       * b) it's a retry from splitting
++                       *    we need to lock the last level DX-block so nobody
++                       *    else can split any leaf blocks under the same
++                       *    DX-block, see details in ext4_dx_add_entry()
++                       */
++                      if (ext4_htree_dx_locked(lck)) {
++                              /* DX-block is locked, just lock DE-block
++                               * and return */
++                              ext4_htree_spin_unlock(lck);
++                              if (!ext4_htree_safe_locked(lck))
++                                      ext4_htree_de_lock(lck, frame->at);
++                              return frame;
++                      }
++                      /* it's pdirop and no DX lock */
++                      if (dx_probe_hash_collision(lck, entries, at, hash) ==
++                          DX_HASH_COL_YES) {
++                              /* found hash collision, set DX-lock flag
++                               * and retry to obtain the DX-lock */
++                              ext4_htree_spin_unlock(lck);
++                              ext4_htree_dx_need_lock(lck);
++                              continue;
++                      }
++                      ld = ext4_htree_lock_data(lck);
++                      /* because I don't lock DX, @at can't be trusted
++                       * after I release the spinlock, so I have to save it */
++                      ld->ld_at = at;
++                      ld->ld_at_entry = *at;
++                      ld->ld_count = dx_get_count(entries);
++
++                      frame->at = &ld->ld_at_entry;
++                      myblock = dx_get_block(at);
++
++                      /* NB: ordering locking */
++                      ext4_htree_spin_unlock_listen(lck, &myblock);
++                      /* other threads can split this DE-block because:
++                       * a) I don't have the lock for the DE-block yet
++                       * b) I released the spinlock on the DX-block
++                       * if that happens I can detect it by listening for
++                       * the splitting event on this DE-block */
++                      ext4_htree_de_lock(lck, frame->at);
++                      ext4_htree_spin_stop_listen(lck);
++
++                      if (myblock == EXT4_HTREE_NODE_CHANGED) {
++                              /* someone split this DE-block before
++                               * I locked it; retry and lock a valid
++                               * DE-block */
++                              ext4_htree_de_unlock(lck);
++                              continue;
++                      }
+                       return frame;
++              }
++              dx = at;
++              indirect--;
+               frame++;
+               frame->bh = ext4_read_dirblock(dir, dx_get_block(at), INDEX);
+               if (IS_ERR(frame->bh)) {
+@@ -916,7 +1217,7 @@ static void dx_release(struct dx_frame *
+ static int ext4_htree_next_block(struct inode *dir, __u32 hash,
+                                struct dx_frame *frame,
+                                struct dx_frame *frames,
+-                               __u32 *start_hash)
++                               __u32 *start_hash, struct htree_lock *lck)
+ {
+       struct dx_frame *p;
+       struct buffer_head *bh;
+@@ -931,12 +1232,22 @@ static int ext4_htree_next_block(struct
+        * this loop, num_frames indicates the number of interior
+        * nodes need to be read.
+        */
++      ext4_htree_de_unlock(lck);
+       while (1) {
+-              if (++(p->at) < p->entries + dx_get_count(p->entries))
+-                      break;
++              if (num_frames > 0 || ext4_htree_dx_locked(lck)) {
++                      /* num_frames > 0 :
++                       *   DX block
++                       * ext4_htree_dx_locked:
++                       *   frame->at is a reliable pointer returned by dx_probe,
++                       *   otherwise dx_probe already knew there was no collision */
++                      if (++(p->at) < p->entries + dx_get_count(p->entries))
++                              break;
++              }
+               if (p == frames)
+                       return 0;
+               num_frames++;
++              if (num_frames == 1)
++                      ext4_htree_dx_unlock(lck);
+               p--;
+       }
+@@ -959,6 +1270,13 @@ static int ext4_htree_next_block(struct
+        * block so no check is necessary
+        */
+       while (num_frames--) {
++              if (num_frames == 0) {
++                      /* it's not always necessary, we just don't want to
++                       * detect hash collision again */
++                      ext4_htree_dx_need_lock(lck);
++                      ext4_htree_dx_lock(lck, p->at);
++              }
++
+               bh = ext4_read_dirblock(dir, dx_get_block(p->at), INDEX);
+               if (IS_ERR(bh))
+                       return PTR_ERR(bh);
+@@ -967,6 +1285,7 @@ static int ext4_htree_next_block(struct
+               p->bh = bh;
+               p->at = p->entries = ((struct dx_node *) bh->b_data)->entries;
+       }
++      ext4_htree_de_lock(lck, p->at);
+       return 1;
+ }
+@@ -1114,10 +1433,10 @@ int ext4_htree_fill_tree(struct file *di
+       }
+       hinfo.hash = start_hash;
+       hinfo.minor_hash = 0;
+-      frame = dx_probe(NULL, dir, &hinfo, frames);
++      /* assume it's PR locked */
++      frame = dx_probe(NULL, dir, &hinfo, frames, NULL);
+       if (IS_ERR(frame))
+               return PTR_ERR(frame);
+-
+       /* Add '.' and '..' from the htree header */
+       if (!start_hash && !start_minor_hash) {
+               de = (struct ext4_dir_entry_2 *) frames[0].bh->b_data;
+@@ -1157,7 +1476,7 @@ int ext4_htree_fill_tree(struct file *di
+               count += ret;
+               hashval = ~0;
+               ret = ext4_htree_next_block(dir, HASH_NB_ALWAYS,
+-                                          frame, frames, &hashval);
++                                          frame, frames, &hashval, NULL);
+               *next_hash = hashval;
+               if (ret < 0) {
+                       err = ret;
+@@ -1349,10 +1668,10 @@ static int is_dx_internal_node(struct in
+  * The returned buffer_head has ->b_count elevated.  The caller is expected
+  * to brelse() it when appropriate.
+  */
+-static struct buffer_head * ext4_find_entry (struct inode *dir,
++struct buffer_head *__ext4_find_entry(struct inode *dir,
+                                       const struct qstr *d_name,
+                                       struct ext4_dir_entry_2 **res_dir,
+-                                      int *inlined)
++                                      int *inlined, struct htree_lock *lck)
+ {
+       struct super_block *sb;
+       struct buffer_head *bh_use[NAMEI_RA_SIZE];
+@@ -1401,7 +1720,7 @@ static struct buffer_head * ext4_find_en
+               goto restart;
+       }
+       if (is_dx(dir)) {
+-              ret = ext4_dx_find_entry(dir, &fname, res_dir);
++              ret = ext4_dx_find_entry(dir, &fname, res_dir, lck);
+               /*
+                * On success, or if the error was file not found,
+                * return.  Otherwise, fall back to doing a search the
+@@ -1411,6 +1730,7 @@ static struct buffer_head * ext4_find_en
+                       goto cleanup_and_exit;
+               dxtrace(printk(KERN_DEBUG "ext4_find_entry: dx failed, "
+                              "falling back\n"));
++              ext4_htree_safe_relock(lck);
+               ret = NULL;
+       }
+       nblocks = dir->i_size >> EXT4_BLOCK_SIZE_BITS(sb);
+@@ -1499,10 +1819,12 @@ cleanup_and_exit:
+       ext4_fname_free_filename(&fname);
+       return ret;
+ }
++EXPORT_SYMBOL(__ext4_find_entry);
+ static struct buffer_head * ext4_dx_find_entry(struct inode *dir,
+                       struct ext4_filename *fname,
+-                      struct ext4_dir_entry_2 **res_dir)
++                      struct ext4_dir_entry_2 **res_dir,
++                      struct htree_lock *lck)
+ {
+       struct super_block * sb = dir->i_sb;
+       struct dx_frame frames[EXT4_HTREE_LEVEL], *frame;
+@@ -1513,7 +1835,7 @@ static struct buffer_head * ext4_dx_find
+ #ifdef CONFIG_EXT4_FS_ENCRYPTION
+       *res_dir = NULL;
+ #endif
+-      frame = dx_probe(fname, dir, NULL, frames);
++      frame = dx_probe(fname, dir, NULL, frames, lck);
+       if (IS_ERR(frame))
+               return (struct buffer_head *) frame;
+       do {
+@@ -1535,7 +1857,7 @@ static struct buffer_head * ext4_dx_find
+               /* Check to see if we should continue to search */
+               retval = ext4_htree_next_block(dir, fname->hinfo.hash, frame,
+-                                             frames, NULL);
++                                             frames, NULL, lck);
+               if (retval < 0) {
+                       ext4_warning_inode(dir,
+                               "error %d reading directory index block",
+@@ -1710,8 +2032,9 @@ static struct ext4_dir_entry_2* dx_pack_
+  * Returns pointer to de in block into which the new entry will be inserted.
+  */
+ static struct ext4_dir_entry_2 *do_split(handle_t *handle, struct inode *dir,
+-                      struct buffer_head **bh,struct dx_frame *frame,
+-                      struct dx_hash_info *hinfo)
++                      struct buffer_head **bh, struct dx_frame *frames,
++                      struct dx_frame *frame, struct dx_hash_info *hinfo,
++                      struct htree_lock *lck)
+ {
+       unsigned blocksize = dir->i_sb->s_blocksize;
+       unsigned count, continued;
+@@ -1773,8 +2096,14 @@ static struct ext4_dir_entry_2 *do_split
+                                       hash2, split, count-split));
+       /* Fancy dance to stay within two buffers */
+-      de2 = dx_move_dirents(data1, data2, map + split, count - split,
+-                            blocksize);
++      if (hinfo->hash < hash2) {
++              de2 = dx_move_dirents(data1, data2, map + split,
++                                    count - split, blocksize);
++      } else {
++              /* make sure we will add the entry to the same block
++               * that we have already locked */
++              de2 = dx_move_dirents(data1, data2, map, split, blocksize);
++      }
+       de = dx_pack_dirents(data1, blocksize);
+       de->rec_len = ext4_rec_len_to_disk(data1 + (blocksize - csum_size) -
+                                          (char *) de,
+@@ -1795,12 +2124,21 @@ static struct ext4_dir_entry_2 *do_split
+       dxtrace(dx_show_leaf(dir, hinfo, (struct ext4_dir_entry_2 *) data2,
+                       blocksize, 1));
+-      /* Which block gets the new entry? */
+-      if (hinfo->hash >= hash2) {
+-              swap(*bh, bh2);
+-              de = de2;
++      ext4_htree_spin_lock(lck, frame > frames ? (frame - 1)->at : NULL,
++                           frame->at); /* notify block is being split */
++      if (hinfo->hash < hash2) {
++              dx_insert_block(frame, hash2 + continued, newblock);
++
++      } else {
++              /* switch block number */
++              dx_insert_block(frame, hash2 + continued,
++                              dx_get_block(frame->at));
++              dx_set_block(frame->at, newblock);
++              (frame->at)++;
+       }
+-      dx_insert_block(frame, hash2 + continued, newblock);
++      ext4_htree_spin_unlock(lck);
++      ext4_htree_dx_unlock(lck);
++
+       err = ext4_handle_dirty_dirent_node(handle, dir, bh2);
+       if (err)
+               goto journal_error;
+@@ -2074,7 +2412,7 @@ static int make_indexed_dir(handle_t *ha
+       if (retval)
+               goto out_frames;        
+-      de = do_split(handle,dir, &bh2, frame, &fname->hinfo);
++      de = do_split(handle, dir, &bh2, frames, frame, &fname->hinfo, NULL);
+       if (IS_ERR(de)) {
+               retval = PTR_ERR(de);
+               goto out_frames;
+@@ -2184,8 +2522,8 @@ out:
+  * may not sleep between calling this and putting something into
+  * the entry, as someone else might have used it while you slept.
+  */
+-static int ext4_add_entry(handle_t *handle, struct dentry *dentry,
+-                        struct inode *inode)
++int __ext4_add_entry(handle_t *handle, struct dentry *dentry,
++                    struct inode *inode, struct htree_lock *lck)
+ {
+       struct inode *dir = d_inode(dentry->d_parent);
+       struct buffer_head *bh = NULL;
+@@ -2226,9 +2564,10 @@ static int ext4_add_entry(handle_t *hand
+               if (dentry->d_name.len == 2 &&
+                    memcmp(dentry->d_name.name, "..", 2) == 0)
+                        return ext4_update_dotdot(handle, dentry, inode);
+-              retval = ext4_dx_add_entry(handle, &fname, dir, inode);
++              retval = ext4_dx_add_entry(handle, &fname, dir, inode, lck);
+               if (!retval || (retval != ERR_BAD_DX_DIR))
+                       goto out;
++              ext4_htree_safe_relock(lck);
+               ext4_clear_inode_flag(dir, EXT4_INODE_INDEX);
+               dx_fallback++;
+               ext4_mark_inode_dirty(handle, dir);
+@@ -2278,12 +2617,14 @@ out:
+               ext4_set_inode_state(inode, EXT4_STATE_NEWENTRY);
+       return retval;
+ }
++EXPORT_SYMBOL(__ext4_add_entry);
+ /*
+  * Returns 0 for success, or a negative error value
+  */
+ static int ext4_dx_add_entry(handle_t *handle, struct ext4_filename *fname,
+-                           struct inode *dir, struct inode *inode)
++                           struct inode *dir, struct inode *inode,
++                           struct htree_lock *lck)
+ {
+       struct dx_frame frames[EXT4_HTREE_LEVEL], *frame;
+       struct dx_entry *entries, *at;
+@@ -2295,7 +2636,7 @@ static int ext4_dx_add_entry(handle_t *h
+ again:
+       restart = 0;
+-      frame = dx_probe(fname, dir, NULL, frames);
++      frame = dx_probe(fname, dir, NULL, frames, lck);
+       if (IS_ERR(frame))
+               return PTR_ERR(frame);
+       entries = frame->entries;
+@@ -2330,6 +2671,11 @@ again:
+               struct dx_node *node2;
+               struct buffer_head *bh2;
++              if (!ext4_htree_safe_locked(lck)) { /* retry with EX lock */
++                      ext4_htree_safe_relock(lck);
++                      restart = 1;
++                      goto cleanup;
++              }
+               while (frame > frames) {
+                       if (dx_get_count((frame - 1)->entries) <
+                           dx_get_limit((frame - 1)->entries)) {
+@@ -2432,8 +2778,32 @@ again:
+                       restart = 1;
+                       goto journal_error;
+               }
++      } else if (!ext4_htree_dx_locked(lck)) {
++              struct ext4_dir_lock_data *ld = ext4_htree_lock_data(lck);
++
++              /* not well protected, require DX lock */
++              ext4_htree_dx_need_lock(lck);
++              at = frame > frames ? (frame - 1)->at : NULL;
++
++              /* NB: no risk of deadlock because it's just a try.
++               *
++               * NB: we check ld_count twice, the first time before
++               * taking the DX lock, the second time after holding it.
++               *
++               * NB: we never free directory blocks so far, which
++               * means the value returned by dx_get_count() should equal
++               * ld->ld_count if nobody split any DE-block under @at,
++               * and ld->ld_at still points to a valid dx_entry. */
++              if ((ld->ld_count != dx_get_count(entries)) ||
++                  !ext4_htree_dx_lock_try(lck, at) ||
++                  (ld->ld_count != dx_get_count(entries))) {
++                      restart = 1;
++                      goto cleanup;
++              }
++              /* OK, I've got DX lock and nothing changed */
++              frame->at = ld->ld_at;
+       }
+-      de = do_split(handle, dir, &bh, frame, &fname->hinfo);
++      de = do_split(handle, dir, &bh, frames, frame, &fname->hinfo, lck);
+       if (IS_ERR(de)) {
+               err = PTR_ERR(de);
+               goto cleanup;
+@@ -2444,6 +2814,8 @@ again:
+ journal_error:
+       ext4_std_error(dir->i_sb, err); /* this is a no-op if err == 0 */
+ cleanup:
++      ext4_htree_dx_unlock(lck);
++      ext4_htree_de_unlock(lck);
+       brelse(bh);
+       dx_release(frames);
+       /* @restart is true means htree-path has been changed, we need to
+Index: linux-4.18.0-80.1.2.el8_0/fs/ext4/super.c
+===================================================================
+--- linux-4.18.0-80.1.2.el8_0.orig/fs/ext4/super.c
++++ linux-4.18.0-80.1.2.el8_0/fs/ext4/super.c
+@@ -1009,6 +1009,7 @@ static struct inode *ext4_alloc_inode(st
+       inode_set_iversion(&ei->vfs_inode, 1);
+       spin_lock_init(&ei->i_raw_lock);
++      sema_init(&ei->i_append_sem, 1);
+       INIT_LIST_HEAD(&ei->i_prealloc_list);
+       spin_lock_init(&ei->i_prealloc_lock);
+       ext4_es_init_tree(&ei->i_es_tree);
+Index: linux-4.18.0-80.1.2.el8_0/include/linux/htree_lock.h
+===================================================================
+--- /dev/null
++++ linux-4.18.0-80.1.2.el8_0/include/linux/htree_lock.h
+@@ -0,0 +1,187 @@
++/*
++ * include/linux/htree_lock.h
++ *
++ * Copyright (c) 2011, 2012, Intel Corporation.
++ *
++ * Author: Liang Zhen <liang@whamcloud.com>
++ */
++
++/*
++ * htree lock
++ *
++ * htree_lock is an advanced lock: it supports five lock modes (the concept
++ * is taken from DLM) and it is a sleeping lock.
++ *
++ * most common use case is:
++ * - create a htree_lock_head for data
++ * - each thread (contender) creates its own htree_lock
++ * - contender needs to call htree_lock(lock_node, mode) to protect data and
++ *   call htree_unlock to release lock
++ *
++ * There is also a more complex, advanced use-case: a user can take a PW/PR
++ * lock on a particular key, which is mostly done while holding a shared
++ * lock on the htree (CW, CR):
++ *
++ * htree_lock(lock_node, HTREE_LOCK_CR); lock the htree with CR
++ * htree_node_lock(lock_node, HTREE_LOCK_PR, key...); lock @key with PR
++ * ...
++ * htree_node_unlock(lock_node); unlock the key
++ *
++ * Another tip: we can have N levels of this kind of key; all we need to
++ * do is specify N levels while creating the htree_lock_head, then we can
++ * lock/unlock a specific level with:
++ * htree_node_lock(lock_node, mode1, key1, level1...);
++ * do something;
++ * htree_node_lock(lock_node, mode1, key2, level2...);
++ * do something;
++ * htree_node_unlock(lock_node, level2);
++ * htree_node_unlock(lock_node, level1);
++ *
++ * NB: for multi-level, should be careful about locking order to avoid deadlock
++ */
++
++#ifndef _LINUX_HTREE_LOCK_H
++#define _LINUX_HTREE_LOCK_H
++
++#include <linux/list.h>
++#include <linux/spinlock.h>
++#include <linux/sched.h>
++
++/*
++ * Lock Modes
++ * more details can be found here:
++ * http://en.wikipedia.org/wiki/Distributed_lock_manager
++ */
++typedef enum {
++      HTREE_LOCK_EX   = 0, /* exclusive lock: incompatible with all others */
++      HTREE_LOCK_PW,       /* protected write: allows only CR users */
++      HTREE_LOCK_PR,       /* protected read: allow PR, CR users */
++      HTREE_LOCK_CW,       /* concurrent write: allow CR, CW users */
++      HTREE_LOCK_CR,       /* concurrent read: allow all but EX users */
++      HTREE_LOCK_MAX,      /* number of lock modes */
++} htree_lock_mode_t;
++
++#define HTREE_LOCK_NL         HTREE_LOCK_MAX
++#define HTREE_LOCK_INVAL      0xdead10c
++
++enum {
++      HTREE_HBITS_MIN         = 2,
++      HTREE_HBITS_DEF         = 14,
++      HTREE_HBITS_MAX         = 32,
++};
++
++enum {
++      HTREE_EVENT_DISABLE     = (0),
++      HTREE_EVENT_RD          = (1 << HTREE_LOCK_PR),
++      HTREE_EVENT_WR          = (1 << HTREE_LOCK_PW),
++      HTREE_EVENT_RDWR        = (HTREE_EVENT_RD | HTREE_EVENT_WR),
++};
++
++struct htree_lock;
++
++typedef void (*htree_event_cb_t)(void *target, void *event);
++
++struct htree_lock_child {
++      struct list_head        lc_list;        /* granted list */
++      htree_event_cb_t        lc_callback;    /* event callback */
++      unsigned                lc_events;      /* event types */
++};
++
++struct htree_lock_head {
++      unsigned long           lh_lock;        /* bits lock */
++      /* blocked lock list (htree_lock) */
++      struct list_head        lh_blocked_list;
++      /* # key levels */
++      u16                     lh_depth;
++      /* hash bits for key and limit number of locks */
++      u16                     lh_hbits;
++      /* counters for blocked locks */
++      u16                     lh_nblocked[HTREE_LOCK_MAX];
++      /* counters for granted locks */
++      u16                     lh_ngranted[HTREE_LOCK_MAX];
++      /* private data */
++      void                    *lh_private;
++      /* array of children locks */
++      struct htree_lock_child lh_children[0];
++};
++
++/* htree_lock_node_t is child-lock for a specific key (ln_value) */
++struct htree_lock_node {
++      htree_lock_mode_t       ln_mode;
++      /* major hash key */
++      u16                     ln_major_key;
++      /* minor hash key */
++      u16                     ln_minor_key;
++      struct list_head        ln_major_list;
++      struct list_head        ln_minor_list;
++      /* alive list, all locks (granted, blocked, listening) are on it */
++      struct list_head        ln_alive_list;
++      /* blocked list */
++      struct list_head        ln_blocked_list;
++      /* granted list */
++      struct list_head        ln_granted_list;
++      void                    *ln_ev_target;
++};
++
++struct htree_lock {
++      struct task_struct      *lk_task;
++      struct htree_lock_head  *lk_head;
++      void                    *lk_private;
++      unsigned                lk_depth;
++      htree_lock_mode_t       lk_mode;
++      struct list_head        lk_blocked_list;
++      struct htree_lock_node  lk_nodes[0];
++};
++
++/* create a lock head, which stands for a resource */
++struct htree_lock_head *htree_lock_head_alloc(unsigned depth,
++                                            unsigned hbits, unsigned priv);
++/* free a lock head */
++void htree_lock_head_free(struct htree_lock_head *lhead);
++/* register event callback for child lock at level @depth */
++void htree_lock_event_attach(struct htree_lock_head *lhead, unsigned depth,
++                           unsigned events, htree_event_cb_t callback);
++/* create a lock handle, which stands for a thread */
++struct htree_lock *htree_lock_alloc(unsigned depth, unsigned pbytes);
++/* free a lock handle */
++void htree_lock_free(struct htree_lock *lck);
++/* lock htree; if @wait is false, 0 is returned when the lock can't
++ * be granted immediately */
++int htree_lock_try(struct htree_lock *lck, struct htree_lock_head *lhead,
++                 htree_lock_mode_t mode, int wait);
++/* unlock htree */
++void htree_unlock(struct htree_lock *lck);
++/* unlock and relock htree with @new_mode */
++int htree_change_lock_try(struct htree_lock *lck,
++                        htree_lock_mode_t new_mode, int wait);
++void htree_change_mode(struct htree_lock *lck, htree_lock_mode_t mode);
++/* acquire child lock (key) of htree at level @dep, @event will be sent to all
++ * listeners on this @key while the lock is being granted */
++int htree_node_lock_try(struct htree_lock *lck, htree_lock_mode_t mode,
++                      u32 key, unsigned dep, int wait, void *event);
++/* release child lock at level @dep, this lock will listen on its key;
++ * if @event isn't NULL, event_cb will be called against @lck while granting
++ * any other lock at level @dep with the same key */
++void htree_node_unlock(struct htree_lock *lck, unsigned dep, void *event);
++/* stop listening on child lock at level @dep */
++void htree_node_stop_listen(struct htree_lock *lck, unsigned dep);
++/* for debug */
++void htree_lock_stat_print(int depth);
++void htree_lock_stat_reset(void);
++
++#define htree_lock(lck, lh, mode)     htree_lock_try(lck, lh, mode, 1)
++#define htree_change_lock(lck, mode)  htree_change_lock_try(lck, mode, 1)
++
++#define htree_lock_mode(lck)          ((lck)->lk_mode)
++
++#define htree_node_lock(lck, mode, key, dep)  \
++      htree_node_lock_try(lck, mode, key, dep, 1, NULL)
++/* this is only safe in thread context of lock owner */
++#define htree_node_is_granted(lck, dep)               \
++      ((lck)->lk_nodes[dep].ln_mode != HTREE_LOCK_INVAL && \
++       (lck)->lk_nodes[dep].ln_mode != HTREE_LOCK_NL)
++/* this is only safe in thread context of lock owner */
++#define htree_node_is_listening(lck, dep)     \
++      ((lck)->lk_nodes[dep].ln_mode == HTREE_LOCK_NL)
++
++#endif
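
The usage pattern documented in the header above boils down to: allocate one htree_lock_head per shared resource, give each thread (contender) its own htree_lock handle, take a shared mode (CR/CW) on the whole tree, then a PR/PW child lock on the key being touched. The minimal sketch below is not part of the patch; the my_resource_init/my_insert/my_resource_fini names and the lock modes chosen are illustrative assumptions, not Lustre code.

/*
 * Minimal usage sketch for the htree_lock API declared above -- not part
 * of the patch; names and modes are illustrative only.
 */
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/htree_lock.h>

static struct htree_lock_head *my_lhead;	/* one head per shared resource */

static int my_resource_init(void)
{
	/* one key level, default hash bits, no private head data */
	my_lhead = htree_lock_head_alloc(1, HTREE_HBITS_DEF, 0);
	return my_lhead ? 0 : -ENOMEM;
}

static int my_insert(u32 key)
{
	/* each thread (contender) uses its own lock handle */
	struct htree_lock *lck = htree_lock_alloc(1, 0);

	if (!lck)
		return -ENOMEM;

	htree_lock(lck, my_lhead, HTREE_LOCK_CW);	/* shared lock on the tree */
	htree_node_lock(lck, HTREE_LOCK_PW, key, 0);	/* exclusive on this key */

	/* ... modify the data guarded by @key ... */

	htree_node_unlock(lck, 0, NULL);
	htree_unlock(lck);
	htree_lock_free(lck);
	return 0;
}

static void my_resource_fini(void)
{
	htree_lock_head_free(my_lhead);
}
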
diff --git a/ldiskfs/kernel_patches/patches/rhel8/ext4-prealloc.patch b/ldiskfs/kernel_patches/patches/rhel8/ext4-prealloc.patch
new file mode 100644 (file)
index 0000000..1c9a00a
--- /dev/null
@@ -0,0 +1,387 @@
+Index: linux-4.18.0-80.1.2.el8_0/fs/ext4/ext4.h
+===================================================================
+--- linux-4.18.0-80.1.2.el8_0.orig/fs/ext4/ext4.h
++++ linux-4.18.0-80.1.2.el8_0/fs/ext4/ext4.h
+@@ -1185,6 +1185,8 @@ extern void ext4_set_bits(void *bm, int
+ /* Metadata checksum algorithm codes */
+ #define EXT4_CRC32C_CHKSUM            1
++#define EXT4_MAX_PREALLOC_TABLE       64
++
+ /*
+  * Structure of the super block
+  */
+@@ -1418,11 +1420,13 @@ struct ext4_sb_info {
+       /* tunables */
+       unsigned long s_stripe;
+-      unsigned int s_mb_stream_request;
++      unsigned long s_mb_small_req;
++      unsigned long s_mb_large_req;
+       unsigned int s_mb_max_to_scan;
+       unsigned int s_mb_min_to_scan;
+       unsigned int s_mb_stats;
+       unsigned int s_mb_order2_reqs;
++      unsigned long *s_mb_prealloc_table;
+       unsigned int s_mb_group_prealloc;
+       unsigned int s_max_dir_size_kb;
+       /* where last allocation was done - for stream allocation */
+@@ -2397,6 +2401,7 @@ extern int ext4_init_inode_table(struct
+ extern void ext4_end_bitmap_read(struct buffer_head *bh, int uptodate);
+ /* mballoc.c */
++extern const struct file_operations ext4_seq_prealloc_table_fops;
+ extern const struct seq_operations ext4_mb_seq_groups_ops;
+ extern long ext4_mb_stats;
+ extern long ext4_mb_max_to_scan;
+Index: linux-4.18.0-80.1.2.el8_0/fs/ext4/inode.c
+===================================================================
+--- linux-4.18.0-80.1.2.el8_0.orig/fs/ext4/inode.c
++++ linux-4.18.0-80.1.2.el8_0/fs/ext4/inode.c
+@@ -2769,6 +2769,9 @@ static int ext4_writepages(struct addres
+               ext4_journal_stop(handle);
+       }
++      if (wbc->nr_to_write < sbi->s_mb_small_req)
++              wbc->nr_to_write = sbi->s_mb_small_req;
++
+       if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX)
+               range_whole = 1;
+Index: linux-4.18.0-80.1.2.el8_0/fs/ext4/mballoc.c
+===================================================================
+--- linux-4.18.0-80.1.2.el8_0.orig/fs/ext4/mballoc.c
++++ linux-4.18.0-80.1.2.el8_0/fs/ext4/mballoc.c
+@@ -2339,6 +2339,100 @@ const struct seq_operations ext4_mb_seq_
+       .show   = ext4_mb_seq_groups_show,
+ };
++static int ext4_mb_check_and_update_prealloc(struct ext4_sb_info *sbi,
++                                               char *str, size_t cnt,
++                                               int update)
++{
++      unsigned long value;
++      unsigned long prev = 0;
++      char *cur;
++      char *next;
++      char *end;
++      int num = 0;
++
++      cur = str;
++      end = str + cnt;
++      while (cur < end) {
++              while ((cur < end) && (*cur == ' ')) cur++;
++              value = simple_strtol(cur, &next, 0);
++              if (value == 0)
++                      break;
++              if (cur == next)
++                      return -EINVAL;
++
++              cur = next;
++
++              if (value > (sbi->s_blocks_per_group - 1 - 1 - sbi->s_itb_per_group))
++                      return -EINVAL;
++
++              /* they should add values in order */
++              if (value <= prev)
++                      return -EINVAL;
++
++              if (update)
++                      sbi->s_mb_prealloc_table[num] = value;
++
++              prev = value;
++              num++;
++      }
++
++      if (num > EXT4_MAX_PREALLOC_TABLE - 1)
++              return -EOVERFLOW;
++
++      if (update)
++              sbi->s_mb_prealloc_table[num] = 0;
++
++      return 0;
++}
++
++static ssize_t ext4_mb_prealloc_table_proc_write(struct file *file,
++                                           const char __user *buf,
++                                           size_t cnt, loff_t *pos)
++{
++      struct ext4_sb_info *sbi = EXT4_SB(PDE_DATA(file_inode(file)));
++      char str[128];
++      int rc;
++
++      if (cnt >= sizeof(str))
++              return -EINVAL;
++      if (copy_from_user(str, buf, cnt))
++              return -EFAULT;
++
++      rc = ext4_mb_check_and_update_prealloc(sbi, str, cnt, 0);
++      if (rc)
++              return rc;
++
++      rc = ext4_mb_check_and_update_prealloc(sbi, str, cnt, 1);
++      return rc ? rc : cnt;
++}
++
++static int mb_prealloc_table_seq_show(struct seq_file *m, void *v)
++{
++      struct ext4_sb_info *sbi = EXT4_SB(m->private);
++      int i;
++
++      for (i = 0; i < EXT4_MAX_PREALLOC_TABLE &&
++                      sbi->s_mb_prealloc_table[i] != 0; i++)
++              seq_printf(m, "%ld ", sbi->s_mb_prealloc_table[i]);
++      seq_printf(m, "\n");
++
++      return 0;
++}
++
++static int mb_prealloc_table_seq_open(struct inode *inode, struct file *file)
++{
++      return single_open(file, mb_prealloc_table_seq_show, PDE_DATA(inode));
++}
++
++const struct file_operations ext4_seq_prealloc_table_fops = {
++      .owner   = THIS_MODULE,
++      .open    = mb_prealloc_table_seq_open,
++      .read    = seq_read,
++      .llseek  = seq_lseek,
++      .release = single_release,
++      .write   = ext4_mb_prealloc_table_proc_write,
++};
++
+ static struct kmem_cache *get_groupinfo_cache(int blocksize_bits)
+ {
+       int cache_index = blocksize_bits - EXT4_MIN_BLOCK_LOG_SIZE;
+@@ -2566,7 +2660,7 @@ static int ext4_groupinfo_create_slab(si
+ int ext4_mb_init(struct super_block *sb)
+ {
+       struct ext4_sb_info *sbi = EXT4_SB(sb);
+-      unsigned i, j;
++      unsigned i, j, k, l;
+       unsigned offset, offset_incr;
+       unsigned max;
+       int ret;
+@@ -2615,7 +2709,6 @@ int ext4_mb_init(struct super_block *sb)
+       sbi->s_mb_max_to_scan = MB_DEFAULT_MAX_TO_SCAN;
+       sbi->s_mb_min_to_scan = MB_DEFAULT_MIN_TO_SCAN;
+       sbi->s_mb_stats = MB_DEFAULT_STATS;
+-      sbi->s_mb_stream_request = MB_DEFAULT_STREAM_THRESHOLD;
+       sbi->s_mb_order2_reqs = MB_DEFAULT_ORDER2_REQS;
+       /*
+        * The default group preallocation is 512, which for 4k block
+@@ -2639,9 +2732,29 @@ int ext4_mb_init(struct super_block *sb)
+        * RAID stripe size so that preallocations don't fragment
+        * the stripes.
+        */
+-      if (sbi->s_stripe > 1) {
+-              sbi->s_mb_group_prealloc = roundup(
+-                      sbi->s_mb_group_prealloc, sbi->s_stripe);
++
++      /* Allocate table once */
++      sbi->s_mb_prealloc_table = kzalloc(
++              EXT4_MAX_PREALLOC_TABLE * sizeof(unsigned long), GFP_NOFS);
++      if (sbi->s_mb_prealloc_table == NULL) {
++              ret = -ENOMEM;
++              goto out;
++      }
++
++      if (sbi->s_stripe == 0) {
++              for (k = 0, l = 4; k <= 9; ++k, l *= 2)
++                      sbi->s_mb_prealloc_table[k] = l;
++
++              sbi->s_mb_small_req = 256;
++              sbi->s_mb_large_req = 1024;
++              sbi->s_mb_group_prealloc = 512;
++      } else {
++              for (k = 0, l = sbi->s_stripe; k <= 2; ++k, l *= 2)
++                      sbi->s_mb_prealloc_table[k] = l;
++
++              sbi->s_mb_small_req = sbi->s_stripe;
++              sbi->s_mb_large_req = sbi->s_stripe * 8;
++              sbi->s_mb_group_prealloc = sbi->s_stripe * 4;
+       }
+       sbi->s_locality_groups = alloc_percpu(struct ext4_locality_group);
+@@ -2669,6 +2782,7 @@ out_free_locality_groups:
+       free_percpu(sbi->s_locality_groups);
+       sbi->s_locality_groups = NULL;
+ out:
++      kfree(sbi->s_mb_prealloc_table);
+       kfree(sbi->s_mb_offsets);
+       sbi->s_mb_offsets = NULL;
+       kfree(sbi->s_mb_maxs);
+@@ -2930,7 +3044,6 @@ ext4_mb_mark_diskspace_used(struct ext4_
+       int err, len;
+       BUG_ON(ac->ac_status != AC_STATUS_FOUND);
+-      BUG_ON(ac->ac_b_ex.fe_len <= 0);
+       sb = ac->ac_sb;
+       sbi = EXT4_SB(sb);
+@@ -3060,13 +3173,14 @@ ext4_mb_normalize_request(struct ext4_al
+                               struct ext4_allocation_request *ar)
+ {
+       struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb);
+-      int bsbits, max;
++      int bsbits, i, wind;
+       ext4_lblk_t end;
+-      loff_t size, start_off;
++      loff_t size;
+       loff_t orig_size __maybe_unused;
+       ext4_lblk_t start;
+       struct ext4_inode_info *ei = EXT4_I(ac->ac_inode);
+       struct ext4_prealloc_space *pa;
++      unsigned long value, last_non_zero;
+       /* do normalize only data requests, metadata requests
+          do not need preallocation */
+@@ -3095,51 +3209,46 @@ ext4_mb_normalize_request(struct ext4_al
+       size = size << bsbits;
+       if (size < i_size_read(ac->ac_inode))
+               size = i_size_read(ac->ac_inode);
+-      orig_size = size;
++      size = (size + ac->ac_sb->s_blocksize - 1) >> bsbits;
++
++      start = wind = 0;
++      value = last_non_zero = 0;
+-      /* max size of free chunks */
+-      max = 2 << bsbits;
++      /* let's choose preallocation window depending on file size */
++      for (i = 0; i < EXT4_MAX_PREALLOC_TABLE; i++) {
++              value = sbi->s_mb_prealloc_table[i];
++              if (value == 0)
++                      break;
++              else
++                      last_non_zero = value;
+-#define NRL_CHECK_SIZE(req, size, max, chunk_size)    \
+-              (req <= (size) || max <= (chunk_size))
++              if (size <= value) {
++                      wind = value;
++                      break;
++              }
++      }
+-      /* first, try to predict filesize */
+-      /* XXX: should this table be tunable? */
+-      start_off = 0;
+-      if (size <= 16 * 1024) {
+-              size = 16 * 1024;
+-      } else if (size <= 32 * 1024) {
+-              size = 32 * 1024;
+-      } else if (size <= 64 * 1024) {
+-              size = 64 * 1024;
+-      } else if (size <= 128 * 1024) {
+-              size = 128 * 1024;
+-      } else if (size <= 256 * 1024) {
+-              size = 256 * 1024;
+-      } else if (size <= 512 * 1024) {
+-              size = 512 * 1024;
+-      } else if (size <= 1024 * 1024) {
+-              size = 1024 * 1024;
+-      } else if (NRL_CHECK_SIZE(size, 4 * 1024 * 1024, max, 2 * 1024)) {
+-              start_off = ((loff_t)ac->ac_o_ex.fe_logical >>
+-                                              (21 - bsbits)) << 21;
+-              size = 2 * 1024 * 1024;
+-      } else if (NRL_CHECK_SIZE(size, 8 * 1024 * 1024, max, 4 * 1024)) {
+-              start_off = ((loff_t)ac->ac_o_ex.fe_logical >>
+-                                                      (22 - bsbits)) << 22;
+-              size = 4 * 1024 * 1024;
+-      } else if (NRL_CHECK_SIZE(ac->ac_o_ex.fe_len,
+-                                      (8<<20)>>bsbits, max, 8 * 1024)) {
+-              start_off = ((loff_t)ac->ac_o_ex.fe_logical >>
+-                                                      (23 - bsbits)) << 23;
+-              size = 8 * 1024 * 1024;
++      if (wind == 0) {
++              if (last_non_zero != 0) {
++                      __u64 tstart, tend;
++                      /* file is quite large, we now preallocate with
++                       * the biggest configured window with regard to
++                       * the logical offset */
++                      wind = last_non_zero;
++                      tstart = ac->ac_o_ex.fe_logical;
++                      do_div(tstart, wind);
++                      start = tstart * wind;
++                      tend = ac->ac_o_ex.fe_logical + ac->ac_o_ex.fe_len - 1;
++                      do_div(tend, wind);
++                      tend = tend * wind + wind;
++                      size = tend - start;
++              }
+       } else {
+-              start_off = (loff_t) ac->ac_o_ex.fe_logical << bsbits;
+-              size      = (loff_t) EXT4_C2B(EXT4_SB(ac->ac_sb),
+-                                            ac->ac_o_ex.fe_len) << bsbits;
++              size = wind;
+       }
+-      size = size >> bsbits;
+-      start = start_off >> bsbits;
++
++
++      orig_size = size;
+       /* don't cover already allocated blocks in selected range */
+       if (ar->pleft && start <= ar->lleft) {
+@@ -3221,7 +3330,6 @@ ext4_mb_normalize_request(struct ext4_al
+                        (unsigned long) ac->ac_o_ex.fe_logical);
+               BUG();
+       }
+-      BUG_ON(size <= 0 || size > EXT4_BLOCKS_PER_GROUP(ac->ac_sb));
+       /* now prepare goal request */
+@@ -4190,11 +4298,19 @@ static void ext4_mb_group_or_file(struct
+       /* don't use group allocation for large files */
+       size = max(size, isize);
+-      if (size > sbi->s_mb_stream_request) {
++      if ((ac->ac_o_ex.fe_len >= sbi->s_mb_small_req) ||
++          (size >= sbi->s_mb_large_req)) {
+               ac->ac_flags |= EXT4_MB_STREAM_ALLOC;
+               return;
+       }
++      /*
++       * the request is so large that we don't care about
++       * streaming - it outweighs any possible seek
++       */
++      if (ac->ac_o_ex.fe_len >= sbi->s_mb_large_req)
++              return;
++
+       BUG_ON(ac->ac_lg != NULL);
+       /*
+        * locality group prealloc space are per cpu. The reason for having
+Index: linux-4.18.0-80.1.2.el8_0/fs/ext4/sysfs.c
+===================================================================
+--- linux-4.18.0-80.1.2.el8_0.orig/fs/ext4/sysfs.c
++++ linux-4.18.0-80.1.2.el8_0/fs/ext4/sysfs.c
+@@ -173,7 +173,8 @@ EXT4_RW_ATTR_SBI_UI(mb_stats, s_mb_stats
+ EXT4_RW_ATTR_SBI_UI(mb_max_to_scan, s_mb_max_to_scan);
+ EXT4_RW_ATTR_SBI_UI(mb_min_to_scan, s_mb_min_to_scan);
+ EXT4_RW_ATTR_SBI_UI(mb_order2_req, s_mb_order2_reqs);
+-EXT4_RW_ATTR_SBI_UI(mb_stream_req, s_mb_stream_request);
++EXT4_RW_ATTR_SBI_UI(mb_small_req, s_mb_small_req);
++EXT4_RW_ATTR_SBI_UI(mb_large_req, s_mb_large_req);
+ EXT4_RW_ATTR_SBI_UI(mb_group_prealloc, s_mb_group_prealloc);
+ EXT4_RW_ATTR_SBI_UI(extent_max_zeroout_kb, s_extent_max_zeroout_kb);
+ EXT4_ATTR(trigger_fs_error, 0200, trigger_test_error);
+@@ -201,7 +202,8 @@ static struct attribute *ext4_attrs[] =
+       ATTR_LIST(mb_max_to_scan),
+       ATTR_LIST(mb_min_to_scan),
+       ATTR_LIST(mb_order2_req),
+-      ATTR_LIST(mb_stream_req),
++      ATTR_LIST(mb_small_req),
++      ATTR_LIST(mb_large_req),
+       ATTR_LIST(mb_group_prealloc),
+       ATTR_LIST(max_writeback_mb_bump),
+       ATTR_LIST(extent_max_zeroout_kb),
+@@ -379,6 +381,8 @@ int ext4_register_sysfs(struct super_blo
+                               sb);
+               proc_create_seq_data("mb_groups", S_IRUGO, sbi->s_proc,
+                               &ext4_mb_seq_groups_ops, sb);
++              proc_create_data("prealloc_table", S_IRUGO, sbi->s_proc,
++                              &ext4_seq_prealloc_table_fops, sb);
+       }
+       return 0;
+ }
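
For reference, the window selection that this patch adds to ext4_mb_normalize_request() can be read as a small table lookup: walk the zero-terminated, strictly increasing prealloc table and take the first window that covers the request size; a file larger than every configured window falls back to the biggest entry (which the real code additionally aligns to the logical offset). The standalone sketch below is not part of the patch; the pick_window name and the sample sizes are illustrative, and the table shown is the default built when s_stripe == 0.

/* Standalone sketch of the prealloc-table window lookup (illustrative only). */
#include <stdio.h>

#define MAX_TABLE 64

static unsigned long pick_window(const unsigned long *table,
                                 unsigned long size_blocks)
{
        unsigned long last = 0;
        int i;

        for (i = 0; i < MAX_TABLE && table[i] != 0; i++) {
                last = table[i];
                if (size_blocks <= table[i])
                        return table[i];
        }
        /* larger than every configured window: reuse the biggest one
         * (the kernel code also aligns the start to the logical offset) */
        return last;
}

int main(void)
{
        /* default table for s_stripe == 0: 4 8 16 ... 2048 blocks */
        unsigned long table[MAX_TABLE] = { 4, 8, 16, 32, 64, 128, 256,
                                           512, 1024, 2048, 0 };

        printf("100-block write  -> %lu-block window\n",
               pick_window(table, 100));        /* prints 128 */
        printf("5000-block write -> %lu-block window\n",
               pick_window(table, 5000));       /* prints 2048 */
        return 0;
}
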
index 826b2d7..62c507f 100644 (file)
@@ -72,7 +72,7 @@ Index: linux-4.15.0/fs/ext4/namei.c
 +        if (info->hash_version != DX_HASH_TEA &&
 +            info->hash_version != DX_HASH_HALF_MD4 &&
 +            info->hash_version != DX_HASH_LEGACY) {
-+              ldiskfs_warning(dir->i_sb, "Unrecognised inode hash code %d for directory "
++              ext4_warning(dir->i_sb, "Unrecognised inode hash code %d for directory "
 +                             "#%lu", info->hash_version, dir->i_ino);
                goto fail;
        }
diff --git a/ldiskfs/kernel_patches/series/ldiskfs-4.18-rhel8.series b/ldiskfs/kernel_patches/series/ldiskfs-4.18-rhel8.series
new file mode 100644 (file)
index 0000000..843a6d7
--- /dev/null
@@ -0,0 +1,24 @@
+rhel8/ext4-inode-version.patch
+sles12sp2/ext4-lookup-dotdot.patch
+sles12sp2/ext4-print-inum-in-htree-warning.patch
+rhel8/ext4-prealloc.patch
+ubuntu18/ext4-osd-iop-common.patch
+rhel8/ext4-misc.patch
+rhel8/ext4-mballoc-extra-checks.patch
+ubuntu18/ext4-hash-indexed-dir-dotdot-update.patch
+ubuntu18/ext4-kill-dx-root.patch
+rhel7/ext4-mballoc-pa-free-mismatch.patch
+ubuntu18/ext4-data-in-dirent.patch
+rhel8/ext4-nocmtime.patch
+rhel8/ext4-pdirop.patch
+sles12sp3/ext4-max-dir-size.patch
+ubuntu18/ext4-remove-truncate-warning.patch
+rhel8/ext4-corrupted-inode-block-bitmaps-handling-patches.patch
+ubuntu18/ext4-give-warning-with-dir-htree-growing.patch
+ubuntu18/ext4-jcb-optimization.patch
+ubuntu18/ext4-attach-jinode-in-writepages.patch
+rhel8/ext4-dont-check-before-replay.patch
+rhel7/ext4-use-GFP_NOFS-in-ext4_inode_attach_jinode.patch
+rhel7/ext4-export-orphan-add.patch
+ubuntu18/ext4-include-terminating-u32-in-size-of-xattr-entries-when-expanding-inodes.patch
+rhel8/ext4-export-mb-stream-allocator-variables.patch