LU-12904 ldiskfs: Add ldiskfs support for linux 5.4 83/36583/6
author    Shaun Tancheff <stancheff@cray.com>
          Wed, 15 Jan 2020 13:47:00 +0000 (07:47 -0600)
committer Oleg Drokin <green@whamcloud.com>
          Thu, 23 Jan 2020 05:31:20 +0000 (05:31 +0000)
Linux 5.4 ext4 has some changes from 5.0; this fixes up the
ldiskfs patches so they apply against 5.4.

Test-Parameters: trivial
Cray-bug-id: LUS-8042
Signed-off-by: Shaun Tancheff <stancheff@cray.com>
Change-Id: I116226ec9297eead4dfd3403be748f732e67f54f
Reviewed-on: https://review.whamcloud.com/36583
Reviewed-by: Petros Koutoupis <pkoutoupis@cray.com>
Tested-by: jenkins <devops@whamcloud.com>
Tested-by: Maloo <maloo@whamcloud.com>
Reviewed-by: Jian Yu <yujian@whamcloud.com>
Reviewed-by: Oleg Drokin <green@whamcloud.com>
14 files changed:
config/lustre-build-ldiskfs.m4
config/lustre-build.m4
ldiskfs/kernel_patches/patches/linux-5.4/export-ext4fs-dirhash-helper.patch [new file with mode: 0644]
ldiskfs/kernel_patches/patches/linux-5.4/ext4-attach-jinode-in-writepages.patch [new file with mode: 0644]
ldiskfs/kernel_patches/patches/linux-5.4/ext4-data-in-dirent.patch [new file with mode: 0644]
ldiskfs/kernel_patches/patches/linux-5.4/ext4-give-warning-with-dir-htree-growing.patch [new file with mode: 0644]
ldiskfs/kernel_patches/patches/linux-5.4/ext4-hash-indexed-dir-dotdot-update.patch [new file with mode: 0644]
ldiskfs/kernel_patches/patches/linux-5.4/ext4-kill-dx-root.patch [new file with mode: 0644]
ldiskfs/kernel_patches/patches/linux-5.4/ext4-lookup-dotdot.patch [new file with mode: 0644]
ldiskfs/kernel_patches/patches/linux-5.4/ext4-misc.patch [new file with mode: 0644]
ldiskfs/kernel_patches/patches/linux-5.4/ext4-pdirop.patch [new file with mode: 0644]
ldiskfs/kernel_patches/series/ldiskfs-5.4.0-ml.series [new file with mode: 0644]
lustre/osd-ldiskfs/osd_iam_lvar.c
lustre/osd-ldiskfs/osd_internal.h

diff --git a/config/lustre-build-ldiskfs.m4 b/config/lustre-build-ldiskfs.m4
index dd5b4f7..98dfae7 100644
@@ -92,6 +92,18 @@ AS_IF([test x$RHEL_KERNEL = xyes], [
        [LDISKFS_SERIES="5.0.0-13-ubuntu19.series"])
 ])
 ])
+# Not RHEL/SLES or Ubuntu, so probably a mainline kernel
+AS_IF([test -z "$LDISKFS_SERIES"],
+       [
+       AS_VERSION_COMPARE([$LINUXRELEASE],[5.4.0],[],
+       [LDISKFS_SERIES="5.4.0-ml.series"],[
+       AS_VERSION_COMPARE([$LINUXRELEASE],[5.4.0],
+               [LDISKFS_SERIES="5.4.0-ml.series"], # lt
+               [LDISKFS_SERIES="5.4.0-ml.series"], # eq
+               [LDISKFS_SERIES="5.4.0-ml.series"]  # gt
+               )])
+       ],
+[])
 AS_IF([test -z "$LDISKFS_SERIES"],
        [AC_MSG_RESULT([failed to identify series])],
        [AC_MSG_RESULT([$LDISKFS_SERIES])])
@@ -138,12 +150,24 @@ ext4_journal_start, [
 # LB_EXT4_BREAD_4ARGS
 #
 # 3.18 ext4_bread has 4 arguments
+# NOTE: It may not be exported for modules; use a positive compiler test here.
 #
 AC_DEFUN([LB_EXT4_BREAD_4ARGS], [
 LB_CHECK_COMPILE([if ext4_bread takes 4 arguments],
 ext4_bread, [
        #include <linux/fs.h>
        #include "$EXT4_SRC_DIR/ext4.h"
+
+       struct buffer_head *ext4_bread(handle_t *handle, struct inode *inode,
+                                      ext4_lblk_t block, int map_flags)
+       {
+               struct buffer_head *bh = NULL;
+               (void)handle;
+               (void)inode;
+               (void)block;
+               (void)map_flags;
+               return bh;
+       }
 ],[
        ext4_bread(NULL, NULL, 0, 0);
 ],[
@@ -287,6 +311,72 @@ EXTRA_KCFLAGS="$tmp_flags"
 ]) # LB_HAVE_BVEC_ITER_ALL
 
 #
+# LB_LDISKFS_FIND_ENTRY_LOCKED_EXISTS
+#
+# kernel 5.2 commit 8a363970d1dc38c4ec4ad575c862f776f468d057
+# ext4: avoid declaring fs inconsistent due to invalid file handles
+# __ext4_find_entry became a helper function for ext4_find_entry
+# conflicting with previous ldiskfs patches.
+# ldiskfs patches map ext4_find_entry to ldiskfs_find_entry_locked to
+# avoid conflicting with __ext4_find_entry
+#
+# When the following check succeeds __ext4_find_entry helper is not
+# used.
+#
+AC_DEFUN([LB_LDISKFS_FIND_ENTRY_LOCKED_EXISTS], [
+tmp_flags="$EXTRA_KCFLAGS"
+EXTRA_KCFLAGS="-Werror"
+LB_CHECK_COMPILE([if __ldiskfs_find_entry is available],
+ldiskfs_find_entry_locked, [
+       #include <linux/fs.h>
+       #include "$EXT4_SRC_DIR/ext4.h"
+       #include "$EXT4_SRC_DIR/namei.c"
+
+       static int __ext4_find_entry(void) { return 0; }
+],[
+       int x = __ext4_find_entry();
+       (void)x;
+],[
+       AC_DEFINE(HAVE___LDISKFS_FIND_ENTRY, 1,
+               [if __ldiskfs_find_entry is available])
+])
+EXTRA_KCFLAGS="$tmp_flags"
+]) # LB_LDISKFS_FIND_ENTRY_LOCKED_EXISTS
+
+#
+# LB_LDISKFSFS_DIRHASH_WANTS_DIR
+#
+# kernel 5.2 commit 8a363970d1dc38c4ec4ad575c862f776f468d057
+# ext4fs_dirhash UNICODE support
+#
+AC_DEFUN([LB_LDISKFSFS_DIRHASH_WANTS_DIR], [
+tmp_flags="$EXTRA_KCFLAGS"
+EXTRA_KCFLAGS="-Werror"
+LB_CHECK_COMPILE([if ldiskfsfs_dirhash takes an inode argument],
+ext4fs_dirhash, [
+       #include <linux/fs.h>
+       #include "$EXT4_SRC_DIR/ext4.h"
+
+       int ext4fs_dirhash(const struct inode *dir, const char *name, int len,
+                         struct dx_hash_info *hinfo)
+       {
+               (void)dir;
+               (void)name;
+               (void)len;
+               (void)hinfo;
+               return 0;
+       }
+],[
+       int f = ext4fs_dirhash(NULL, NULL, 0, NULL);
+       (void)f;
+],[
+       AC_DEFINE(HAVE_LDISKFSFS_GETHASH_INODE_ARG, 1,
+               [if ldiskfsfs_dirhash takes an inode argument])
+])
+EXTRA_KCFLAGS="$tmp_flags"
+]) # LB_LDISKFSFS_DIRHASH_WANTS_DIR
+
+#
 # LB_CONFIG_LDISKFS
 #
 AC_DEFUN([LB_CONFIG_LDISKFS], [
 # LB_CONFIG_LDISKFS
 #
 AC_DEFUN([LB_CONFIG_LDISKFS], [
@@ -336,6 +426,8 @@ AS_IF([test x$enable_ldiskfs != xno],[
        LB_EXT4_HAVE_I_CRYPT_INFO
        LB_LDISKFS_IGET_HAS_FLAGS_ARG
        LB_HAVE_BVEC_ITER_ALL
+       LB_LDISKFS_FIND_ENTRY_LOCKED_EXISTS
+       LB_LDISKFSFS_DIRHASH_WANTS_DIR
        AC_DEFINE(CONFIG_LDISKFS_FS_POSIX_ACL, 1, [posix acls for ldiskfs])
        AC_DEFINE(CONFIG_LDISKFS_FS_SECURITY, 1, [fs security for ldiskfs])
        AC_DEFINE(CONFIG_LDISKFS_FS_XATTR, 1, [extended attributes for ldiskfs])
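For orientation, here is a minimal sketch of how a consumer could branch on the
two new configure results. The osd_dirhash wrapper name is hypothetical and not
part of this commit; the real consumers live in lustre/osd-ldiskfs:

	/* Hypothetical consumer of the new probes (sketch only). */
	#ifdef HAVE_LDISKFSFS_GETHASH_INODE_ARG
	/* 5.2+ kernels: the dirhash helper also takes the directory inode. */
	#define osd_dirhash(dir, name, len, hinfo) \
		ldiskfsfs_dirhash(dir, name, len, hinfo)
	#else
	/* Older kernels: no inode argument. */
	#define osd_dirhash(dir, name, len, hinfo) \
		ldiskfsfs_dirhash(name, len, hinfo)
	#endif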
diff --git a/config/lustre-build.m4 b/config/lustre-build.m4
index f52c037..f196ef0 100644
@@ -420,8 +420,8 @@ AM_CONDITIONAL([DOC], [test x$ENABLE_DOC = x1])
 AM_CONDITIONAL([MANPAGES], [test x$enable_manpages = xyes])
 AM_CONDITIONAL([LINUX], [test x$lb_target_os = xlinux])
 AM_CONDITIONAL([USE_QUILT], [test x$use_quilt = xyes])
-AM_CONDITIONAL([RHEL], [test x$RHEL_KERNEL = xyes])
-AM_CONDITIONAL([SUSE], [test x$SUSE_KERNEL = xyes])
+AM_CONDITIONAL([RHEL], [test -f /etc/redhat-release])
+AM_CONDITIONAL([SUSE], [test -f /etc/SUSE-brand -o -f /etc/SuSE-release])
 AM_CONDITIONAL([UBUNTU], [test x$UBUNTU_KERNEL = xyes])
 
 LN_CONDITIONALS
diff --git a/ldiskfs/kernel_patches/patches/linux-5.4/export-ext4fs-dirhash-helper.patch b/ldiskfs/kernel_patches/patches/linux-5.4/export-ext4fs-dirhash-helper.patch
new file mode 100644
index 0000000..8911618
--- /dev/null
@@ -0,0 +1,37 @@
+diff --git a/fs/ext4/ext4.h b/fs/ext4/ext4.h
+index bb6b6be..35ef40b 100644
+--- a/fs/ext4/ext4.h
++++ b/fs/ext4/ext4.h
+@@ -2585,6 +2585,9 @@ extern int ext4_sync_file(struct file *, loff_t, loff_t, int);
+ extern int ext4fs_dirhash(const struct inode *dir, const char *name, int len,
+                         struct dx_hash_info *hinfo);
++extern int __ext4fs_dirhash(const char *name, int len,
++                          struct dx_hash_info *hinfo);
++
+ /* ialloc.c */
+ extern struct inode *__ext4_new_inode(handle_t *, struct inode *, umode_t,
+                                     const struct qstr *qstr, __u32 goal,
+diff --git a/fs/ext4/hash.c b/fs/ext4/hash.c
+index d358bfc..f7d575a 100644
+--- a/fs/ext4/hash.c
++++ b/fs/ext4/hash.c
+@@ -197,8 +197,8 @@ static void str2hashbuf_unsigned(const char *msg, int len, __u32 *buf, int num)
+  * represented, and whether or not the returned hash is 32 bits or 64
+  * bits.  32 bit hashes will return 0 for the minor hash.
+  */
+-static int __ext4fs_dirhash(const char *name, int len,
+-                          struct dx_hash_info *hinfo)
++int __ext4fs_dirhash(const char *name, int len,
++                   struct dx_hash_info *hinfo)
+ {
+       __u32   hash;
+       __u32   minor_hash = 0;
+@@ -270,6 +270,7 @@ static int __ext4fs_dirhash(const char *name, int len,
+       hinfo->minor_hash = minor_hash;
+       return 0;
+ }
++EXPORT_SYMBOL(__ext4fs_dirhash);
+ int ext4fs_dirhash(const struct inode *dir, const char *name, int len,
+                  struct dx_hash_info *hinfo)
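The export lets ldiskfs, which is built as a module, hash directory entry names
directly. A rough sketch of a call follows; the hash_version choice, the name
and namelen variables, and the error handling are invented for illustration:

	struct dx_hash_info hinfo = {
		.hash_version = DX_HASH_HALF_MD4,	/* example choice */
		.seed = NULL,				/* or a per-superblock seed */
	};
	int err = __ext4fs_dirhash(name, namelen, &hinfo);
	if (err == 0) {
		/* hinfo.hash and hinfo.minor_hash now hold the name hash */
	}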
diff --git a/ldiskfs/kernel_patches/patches/linux-5.4/ext4-attach-jinode-in-writepages.patch b/ldiskfs/kernel_patches/patches/linux-5.4/ext4-attach-jinode-in-writepages.patch
new file mode 100644
index 0000000..b7f5e37
--- /dev/null
@@ -0,0 +1,58 @@
+From 01da8ce642e08594db95d940b3352ad7ee153b09 Mon Sep 17 00:00:00 2001
+From: Shaun Tancheff <stancheff@cray.com>
+Date: Tue, 6 Aug 2019 17:11:57 -0500
+Subject: [PATCH] + linux-5.3/ext4-attach-jinode-in-writepages
+
+---
+ fs/ext4/ext4.h  | 1 +
+ fs/ext4/inode.c | 8 ++++++++
+ 2 files changed, 9 insertions(+)
+
+diff --git a/fs/ext4/ext4.h b/fs/ext4/ext4.h
+index 65c02d6..f28104a 100644
+--- a/fs/ext4/ext4.h
++++ b/fs/ext4/ext4.h
+@@ -2672,6 +2672,7 @@ extern int ext4_trim_fs(struct super_block *, struct fstrim_range *);
+ extern void ext4_process_freed_data(struct super_block *sb, tid_t commit_tid);
+ /* inode.c */
++#define HAVE_LDISKFS_INFO_JINODE
+ int ext4_inode_is_fast_symlink(struct inode *inode);
+ struct buffer_head *ext4_getblk(handle_t *, struct inode *, ext4_lblk_t, int);
+ struct buffer_head *ext4_bread(handle_t *, struct inode *, ext4_lblk_t, int);
+diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c
+index 434b256..0fd5f4e 100644
+--- a/fs/ext4/inode.c
++++ b/fs/ext4/inode.c
+@@ -735,6 +735,10 @@ out_sem:
+                               (loff_t)map->m_lblk << inode->i_blkbits;
+                       loff_t length = (loff_t)map->m_len << inode->i_blkbits;
++                      ret = ext4_inode_attach_jinode(inode);
++                      if (ret)
++                              return ret;
++
+                       if (flags & EXT4_GET_BLOCKS_IO_SUBMIT)
+                               ret = ext4_jbd2_inode_add_wait(handle, inode,
+                                               start_byte, length);
+@@ -2829,6 +2833,9 @@ static int ext4_writepages(struct address_space *mapping,
+               mpd.last_page = wbc->range_end >> PAGE_SHIFT;
+       }
++      ret = ext4_inode_attach_jinode(inode);
++      if (ret)
++              goto out_writepages;
+       mpd.inode = inode;
+       mpd.wbc = wbc;
+       ext4_io_submit_init(&mpd.io_submit, wbc);
+@@ -4446,6 +4453,7 @@ int ext4_inode_attach_jinode(struct inode *inode)
+               jbd2_free_inode(jinode);
+       return 0;
+ }
++EXPORT_SYMBOL(ext4_inode_attach_jinode);
+ /*
+  * ext4_truncate()
+-- 
+2.20.1
+
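The exported ext4_inode_attach_jinode() makes the pattern added above available
to callers outside ext4: attach the jbd2 journal inode before queueing the
inode's data on the running transaction. In sketch form, mirroring the hunks
above:

	ret = ext4_inode_attach_jinode(inode);	/* returns 0 if already attached */
	if (ret)
		return ret;
	ret = ext4_jbd2_inode_add_wait(handle, inode, start_byte, length);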
diff --git a/ldiskfs/kernel_patches/patches/linux-5.4/ext4-data-in-dirent.patch b/ldiskfs/kernel_patches/patches/linux-5.4/ext4-data-in-dirent.patch
new file mode 100644
index 0000000..3c3fffc
--- /dev/null
@@ -0,0 +1,784 @@
+diff --git a/fs/ext4/dir.c b/fs/ext4/dir.c
+index c7843b1..191304f 100644
+--- a/fs/ext4/dir.c
++++ b/fs/ext4/dir.c
+@@ -70,11 +70,11 @@ int __ext4_check_dir_entry(const char *function, unsigned int line,
+       const int rlen = ext4_rec_len_from_disk(de->rec_len,
+                                               dir->i_sb->s_blocksize);
+-      if (unlikely(rlen < EXT4_DIR_REC_LEN(1)))
++      if (unlikely(rlen < __EXT4_DIR_REC_LEN(1)))
+               error_msg = "rec_len is smaller than minimal";
+       else if (unlikely(rlen % 4 != 0))
+               error_msg = "rec_len % 4 != 0";
+-      else if (unlikely(rlen < EXT4_DIR_REC_LEN(de->name_len)))
++      else if (unlikely(rlen < EXT4_DIR_REC_LEN(de)))
+               error_msg = "rec_len is too small for name_len";
+       else if (unlikely(((char *) de - buf) + rlen > size))
+               error_msg = "directory entry overrun";
+@@ -219,7 +219,7 @@ static int ext4_readdir(struct file *file, struct dir_context *ctx)
+                                * failure will be detected in the
+                                * dirent test below. */
+                               if (ext4_rec_len_from_disk(de->rec_len,
+-                                      sb->s_blocksize) < EXT4_DIR_REC_LEN(1))
++                                  sb->s_blocksize) < __EXT4_DIR_REC_LEN(1))
+                                       break;
+                               i += ext4_rec_len_from_disk(de->rec_len,
+                                                           sb->s_blocksize);
+@@ -442,12 +442,17 @@ int ext4_htree_store_dirent(struct file *dir_file, __u32 hash,
+       struct fname *fname, *new_fn;
+       struct dir_private_info *info;
+       int len;
++      int extra_data = 0;
+       info = dir_file->private_data;
+       p = &info->root.rb_node;
+       /* Create and allocate the fname structure */
+-      len = sizeof(struct fname) + ent_name->len + 1;
++      if (dirent->file_type & EXT4_DIRENT_LUFID)
++              extra_data = ext4_get_dirent_data_len(dirent);
++
++      len = sizeof(struct fname) + ent_name->len + extra_data + 1;
++
+       new_fn = kzalloc(len, GFP_KERNEL);
+       if (!new_fn)
+               return -ENOMEM;
+@@ -456,7 +461,7 @@ int ext4_htree_store_dirent(struct file *dir_file, __u32 hash,
+       new_fn->inode = le32_to_cpu(dirent->inode);
+       new_fn->name_len = ent_name->len;
+       new_fn->file_type = dirent->file_type;
+-      memcpy(new_fn->name, ent_name->name, ent_name->len);
++      memcpy(new_fn->name, ent_name->name, ent_name->len + extra_data);
+       new_fn->name[ent_name->len] = 0;
+       while (*p) {
+diff --git a/fs/ext4/ext4.h b/fs/ext4/ext4.h
+index ce12383..e89e6ce 100644
+--- a/fs/ext4/ext4.h
++++ b/fs/ext4/ext4.h
+@@ -1069,6 +1069,7 @@ struct ext4_inode_info {
+       __u32 i_csum_seed;
+       kprojid_t i_projid;
++      void *i_dirdata;
+ };
+ /*
+@@ -1112,6 +1113,7 @@ struct ext4_inode_info {
+ #define EXT4_MOUNT_POSIX_ACL          0x08000 /* POSIX Access Control Lists */
+ #define EXT4_MOUNT_NO_AUTO_DA_ALLOC   0x10000 /* No auto delalloc mapping */
+ #define EXT4_MOUNT_BARRIER            0x20000 /* Use block barriers */
++#define EXT4_MOUNT_DIRDATA            0x40000 /* Data in directory entries*/
+ #define EXT4_MOUNT_QUOTA              0x40000 /* Some quota option set */
+ #define EXT4_MOUNT_USRQUOTA           0x80000 /* "old" user quota,
+                                                * enable enforcement for hidden
+@@ -1804,6 +1806,7 @@ EXT4_FEATURE_INCOMPAT_FUNCS(casefold,            CASEFOLD)
+                                        EXT4_FEATURE_INCOMPAT_FLEX_BG| \
+                                        EXT4_FEATURE_INCOMPAT_EA_INODE| \
+                                        EXT4_FEATURE_INCOMPAT_MMP | \
++                                       EXT4_FEATURE_INCOMPAT_DIRDATA| \
+                                        EXT4_FEATURE_INCOMPAT_INLINE_DATA | \
+                                        EXT4_FEATURE_INCOMPAT_ENCRYPT | \
+                                        EXT4_FEATURE_INCOMPAT_CASEFOLD | \
+@@ -1980,6 +1983,43 @@ struct ext4_dir_entry_tail {
+ #define EXT4_FT_SYMLINK               7
+ #define EXT4_FT_MAX           8
++#define EXT4_FT_MASK          0xf
++
++#if EXT4_FT_MAX > EXT4_FT_MASK
++#error "conflicting EXT4_FT_MAX and EXT4_FT_MASK"
++#endif
++
++/*
++ * d_type has 4 unused bits, so it can hold four types of data. These
++ * different types of data (e.g. Lustre data, the high 32 bits of a
++ * 64-bit inode number) can be stored, in flag order, after the file
++ * name in the ext4 dirent.
++ */
++/*
++ * This flag is added to d_type if the ext4 dirent has extra data after
++ * the filename. The data length is variable and is stored in the first
++ * byte of the data, which starts after the filename NUL byte.
++ * This is used by Lustre FS.
++ */
++#define EXT4_DIRENT_LUFID             0x10
++
++#define EXT4_LUFID_MAGIC    0xAD200907UL
++struct ext4_dentry_param {
++      __u32  edp_magic;       /* EXT4_LUFID_MAGIC */
++      char   edp_len;         /* size of edp_data in bytes */
++      char   edp_data[0];     /* packed array of data */
++} __packed;
++
++static inline unsigned char *ext4_dentry_get_data(struct super_block *sb,
++                                                struct ext4_dentry_param *p)
++
++{
++      if (!ext4_has_feature_dirdata(sb))
++              return NULL;
++      if (p && p->edp_magic == EXT4_LUFID_MAGIC)
++              return &p->edp_len;
++      else
++              return NULL;
++}
+ #define EXT4_FT_DIR_CSUM      0xDE
+@@ -1990,8 +2030,11 @@ struct ext4_dir_entry_tail {
+  */
+ #define EXT4_DIR_PAD                  4
+ #define EXT4_DIR_ROUND                        (EXT4_DIR_PAD - 1)
+-#define EXT4_DIR_REC_LEN(name_len)    (((name_len) + 8 + EXT4_DIR_ROUND) & \
++#define __EXT4_DIR_REC_LEN(name_len)  (((name_len) + 8 + EXT4_DIR_ROUND) & \
+                                        ~EXT4_DIR_ROUND)
++#define EXT4_DIR_REC_LEN(de)          (__EXT4_DIR_REC_LEN((de)->name_len +\
++                                      ext4_get_dirent_data_len(de)))
++
+ #define EXT4_MAX_REC_LEN              ((1<<16)-1)
+ /*
+@@ -2418,11 +2461,11 @@ extern int ext4_find_dest_de(struct inode *dir, struct inode *inode,
+                            struct buffer_head *bh,
+                            void *buf, int buf_size,
+                            struct ext4_filename *fname,
+-                           struct ext4_dir_entry_2 **dest_de);
++                           struct ext4_dir_entry_2 **dest_de, int *dlen);
+ void ext4_insert_dentry(struct inode *inode,
+                       struct ext4_dir_entry_2 *de,
+                       int buf_size,
+-                      struct ext4_filename *fname);
++                      struct ext4_filename *fname, void *data);
+ static inline void ext4_update_dx_flag(struct inode *inode)
+ {
+       if (!ext4_has_feature_dir_index(inode->i_sb))
+@@ -2434,10 +2477,17 @@ static const unsigned char ext4_filetype_table[] = {
+ static inline  unsigned char get_dtype(struct super_block *sb, int filetype)
+ {
+-      if (!ext4_has_feature_filetype(sb) || filetype >= EXT4_FT_MAX)
++      int fl_index = filetype & EXT4_FT_MASK;
++
++      if (!ext4_has_feature_filetype(sb) || fl_index >= EXT4_FT_MAX)
+               return DT_UNKNOWN;
+-      return ext4_filetype_table[filetype];
++      if (!test_opt(sb, DIRDATA))
++              return ext4_filetype_table[fl_index];
++
++      return (ext4_filetype_table[fl_index]) |
++              (filetype & EXT4_DIRENT_LUFID);
++
+ }
+ extern int ext4_check_all_de(struct inode *dir, struct buffer_head *bh,
+                            void *buf, int buf_size);
+@@ -2600,6 +2650,8 @@ extern struct inode *ext4_create_inode(handle_t *handle,
+ extern int ext4_delete_entry(handle_t *handle, struct inode * dir,
+                            struct ext4_dir_entry_2 *de_del,
+                            struct buffer_head *bh);
++extern int ext4_add_dot_dotdot(handle_t *handle, struct inode *dir,
++                             struct inode *inode, const void *, const void *);
+ extern int ext4_htree_fill_tree(struct file *dir_file, __u32 start_hash,
+                               __u32 start_minor_hash, __u32 *next_hash);
+ extern int ext4_search_dir(struct buffer_head *bh,
+@@ -3335,6 +3387,36 @@ static inline void ext4_clear_io_unwritten_flag(ext4_io_end_t *io_end)
+ extern const struct iomap_ops ext4_iomap_ops;
++/*
++ * Compute the total directory entry data length.
++ * This includes the filename and an implicit NUL terminator (always present),
++ * and optional extensions.  Each extension has a bit set in the high 4 bits of
++ * de->file_type, and the extension length is the first byte in each entry.
++ */
++static inline int ext4_get_dirent_data_len(struct ext4_dir_entry_2 *de)
++{
++      char *len = de->name + de->name_len + 1 /* NUL terminator */;
++      int dlen = 0;
++      __u8 extra_data_flags = (de->file_type & ~EXT4_FT_MASK) >> 4;
++      struct ext4_dir_entry_tail *t = (struct ext4_dir_entry_tail *)de;
++
++      if (!t->det_reserved_zero1 &&
++          le16_to_cpu(t->det_rec_len) ==
++              sizeof(struct ext4_dir_entry_tail) &&
++          !t->det_reserved_zero2 &&
++          t->det_reserved_ft == EXT4_FT_DIR_CSUM)
++              return 0;
++
++      while (extra_data_flags) {
++              if (extra_data_flags & 1) {
++                      dlen += *len + (dlen == 0);
++                      len += *len;
++              }
++              extra_data_flags >>= 1;
++      }
++      return dlen;
++}
++
+ #endif        /* __KERNEL__ */
+ #define EFSBADCRC     EBADMSG         /* Bad CRC detected */
+diff --git a/fs/ext4/inline.c b/fs/ext4/inline.c
+index f73bc39..7610cfe 100644
+--- a/fs/ext4/inline.c
++++ b/fs/ext4/inline.c
+@@ -1023,7 +1023,7 @@ static int ext4_add_dirent_to_inline(handle_t *handle,
+       struct ext4_dir_entry_2 *de;
+       err = ext4_find_dest_de(dir, inode, iloc->bh, inline_start,
+-                              inline_size, fname, &de);
++                              inline_size, fname, &de, NULL);
+       if (err)
+               return err;
+@@ -1031,7 +1031,7 @@ static int ext4_add_dirent_to_inline(handle_t *handle,
+       err = ext4_journal_get_write_access(handle, iloc->bh);
+       if (err)
+               return err;
+-      ext4_insert_dentry(inode, de, inline_size, fname);
++      ext4_insert_dentry(inode, de, inline_size, fname, NULL);
+       ext4_show_inline_dir(dir, iloc->bh, inline_start, inline_size);
+@@ -1100,7 +1100,7 @@ static int ext4_update_inline_dir(handle_t *handle, struct inode *dir,
+       int old_size = EXT4_I(dir)->i_inline_size - EXT4_MIN_INLINE_DATA_SIZE;
+       int new_size = get_max_inline_xattr_value_size(dir, iloc);
+-      if (new_size - old_size <= EXT4_DIR_REC_LEN(1))
++      if (new_size - old_size <= __EXT4_DIR_REC_LEN(1))
+               return -ENOSPC;
+       ret = ext4_update_inline_data(handle, dir,
+@@ -1381,7 +1381,7 @@ int htree_inlinedir_to_tree(struct file *dir_file,
+                       fake.name_len = 1;
+                       strcpy(fake.name, ".");
+                       fake.rec_len = ext4_rec_len_to_disk(
+-                                              EXT4_DIR_REC_LEN(fake.name_len),
++                                              EXT4_DIR_REC_LEN(&fake),
+                                               inline_size);
+                       ext4_set_de_type(inode->i_sb, &fake, S_IFDIR);
+                       de = &fake;
+@@ -1391,7 +1391,7 @@ int htree_inlinedir_to_tree(struct file *dir_file,
+                       fake.name_len = 2;
+                       strcpy(fake.name, "..");
+                       fake.rec_len = ext4_rec_len_to_disk(
+-                                              EXT4_DIR_REC_LEN(fake.name_len),
++                                              EXT4_DIR_REC_LEN(&fake),
+                                               inline_size);
+                       ext4_set_de_type(inode->i_sb, &fake, S_IFDIR);
+                       de = &fake;
+@@ -1489,8 +1489,8 @@ int ext4_read_inline_dir(struct file *file,
+        * So we will use extra_offset and extra_size to indicate them
+        * during the inline dir iteration.
+        */
+-      dotdot_offset = EXT4_DIR_REC_LEN(1);
+-      dotdot_size = dotdot_offset + EXT4_DIR_REC_LEN(2);
++      dotdot_offset = __EXT4_DIR_REC_LEN(1);
++      dotdot_size = dotdot_offset + __EXT4_DIR_REC_LEN(2);
+       extra_offset = dotdot_size - EXT4_INLINE_DOTDOT_SIZE;
+       extra_size = extra_offset + inline_size;
+@@ -1525,7 +1525,7 @@ int ext4_read_inline_dir(struct file *file,
+                        * failure will be detected in the
+                        * dirent test below. */
+                       if (ext4_rec_len_from_disk(de->rec_len, extra_size)
+-                              < EXT4_DIR_REC_LEN(1))
++                              < __EXT4_DIR_REC_LEN(1))
+                               break;
+                       i += ext4_rec_len_from_disk(de->rec_len,
+                                                   extra_size);
+diff --git a/fs/ext4/namei.c b/fs/ext4/namei.c
+index be8c35d..7c09936 100644
+--- a/fs/ext4/namei.c
++++ b/fs/ext4/namei.c
+@@ -245,7 +245,8 @@ static unsigned dx_get_count(struct dx_entry *entries);
+ static unsigned dx_get_limit(struct dx_entry *entries);
+ static void dx_set_count(struct dx_entry *entries, unsigned value);
+ static void dx_set_limit(struct dx_entry *entries, unsigned value);
+-static unsigned dx_root_limit(struct inode *dir, unsigned infosize);
++static inline unsigned dx_root_limit(struct inode *dir,
++              struct ext4_dir_entry_2 *dot_de, unsigned infosize);
+ static unsigned dx_node_limit(struct inode *dir);
+ static struct dx_frame *dx_probe(struct ext4_filename *fname,
+                                struct inode *dir,
+@@ -388,22 +389,23 @@ static struct dx_countlimit *get_dx_countlimit(struct inode *inode,
+ {
+       struct ext4_dir_entry *dp;
+       struct dx_root_info *root;
+-      int count_offset;
++      int count_offset, dot_rec_len, dotdot_rec_len;
+       if (le16_to_cpu(dirent->rec_len) == EXT4_BLOCK_SIZE(inode->i_sb))
+               count_offset = 8;
+-      else if (le16_to_cpu(dirent->rec_len) == 12) {
+-              dp = (struct ext4_dir_entry *)(((void *)dirent) + 12);
++      else {
++              dot_rec_len = le16_to_cpu(dirent->rec_len);
++              dp = (struct ext4_dir_entry *)(((void *)dirent) + dot_rec_len);
+               if (le16_to_cpu(dp->rec_len) !=
+-                  EXT4_BLOCK_SIZE(inode->i_sb) - 12)
++                  EXT4_BLOCK_SIZE(inode->i_sb) - dot_rec_len)
+                       return NULL;
+-              root = (struct dx_root_info *)(((void *)dp + 12));
++              dotdot_rec_len = EXT4_DIR_REC_LEN((struct ext4_dir_entry_2 *)dp);
++              root = (struct dx_root_info *)(((void *)dp + dotdot_rec_len));
+               if (root->reserved_zero ||
+                   root->info_length != sizeof(struct dx_root_info))
+                       return NULL;
+-              count_offset = 32;
+-      } else
+-              return NULL;
++              count_offset = 8 + dot_rec_len + dotdot_rec_len;
++      }
+       if (offset)
+               *offset = count_offset;
+@@ -508,11 +510,12 @@ ext4_next_entry(struct ext4_dir_entry_2 *p, unsigned long blocksize)
+  */
+ struct dx_root_info *dx_get_dx_info(struct ext4_dir_entry_2 *de)
+ {
++      BUG_ON(de->name_len != 1);
+       /* get dotdot first */
+-      de = (struct ext4_dir_entry_2 *)((char *)de + EXT4_DIR_REC_LEN(1));
++      de = (struct ext4_dir_entry_2 *)((char *)de + EXT4_DIR_REC_LEN(de));
+       /* dx root info is after dotdot entry */
+-      de = (struct ext4_dir_entry_2 *)((char *)de + EXT4_DIR_REC_LEN(2));
++      de = (struct ext4_dir_entry_2 *)((char *)de + EXT4_DIR_REC_LEN(de));
+       return (struct dx_root_info *)de;
+ }
+@@ -557,10 +560,16 @@ static inline void dx_set_limit(struct dx_entry *entries, unsigned value)
+       ((struct dx_countlimit *) entries)->limit = cpu_to_le16(value);
+ }
+-static inline unsigned dx_root_limit(struct inode *dir, unsigned infosize)
++static inline unsigned dx_root_limit(struct inode *dir,
++              struct ext4_dir_entry_2 *dot_de, unsigned infosize)
+ {
+-      unsigned entry_space = dir->i_sb->s_blocksize - EXT4_DIR_REC_LEN(1) -
+-              EXT4_DIR_REC_LEN(2) - infosize;
++      struct ext4_dir_entry_2 *dotdot_de;
++      unsigned entry_space;
++
++      BUG_ON(dot_de->name_len != 1);
++      dotdot_de = ext4_next_entry(dot_de, dir->i_sb->s_blocksize);
++      entry_space = dir->i_sb->s_blocksize - EXT4_DIR_REC_LEN(dot_de) -
++                       EXT4_DIR_REC_LEN(dotdot_de) - infosize;
+       if (ext4_has_metadata_csum(dir->i_sb))
+               entry_space -= sizeof(struct dx_tail);
+@@ -569,7 +578,7 @@ static inline unsigned dx_root_limit(struct inode *dir, unsigned infosize)
+ static inline unsigned dx_node_limit(struct inode *dir)
+ {
+-      unsigned entry_space = dir->i_sb->s_blocksize - EXT4_DIR_REC_LEN(0);
++      unsigned entry_space = dir->i_sb->s_blocksize - __EXT4_DIR_REC_LEN(0);
+       if (ext4_has_metadata_csum(dir->i_sb))
+               entry_space -= sizeof(struct dx_tail);
+@@ -681,7 +690,7 @@ static struct stats dx_show_leaf(struct inode *dir,
+                                      (unsigned) ((char *) de - base));
+ #endif
+                       }
+-                      space += EXT4_DIR_REC_LEN(de->name_len);
++                      space += EXT4_DIR_REC_LEN(de);
+                       names++;
+               }
+               de = ext4_next_entry(de, size);
+@@ -788,11 +797,14 @@ dx_probe(struct ext4_filename *fname, struct inode *dir,
+       entries = (struct dx_entry *)(((char *)info) + info->info_length);
+-      if (dx_get_limit(entries) != dx_root_limit(dir,
+-                                                 info->info_length)) {
++      if (dx_get_limit(entries) !=
++          dx_root_limit(dir, (struct ext4_dir_entry_2 *)frame->bh->b_data,
++                        info->info_length)) {
+               ext4_warning_inode(dir, "dx entry: limit %u != root limit %u",
+                                  dx_get_limit(entries),
+-                                 dx_root_limit(dir, info->info_length));
++                                 dx_root_limit(dir,
++                                        (struct ext4_dir_entry_2 *)frame->bh->b_data,
++                                        info->info_length));
+               goto fail;
+       }
+@@ -987,7 +999,7 @@ static int htree_dirblock_to_tree(struct file *dir_file,
+       de = (struct ext4_dir_entry_2 *) bh->b_data;
+       top = (struct ext4_dir_entry_2 *) ((char *) de +
+                                          dir->i_sb->s_blocksize -
+-                                         EXT4_DIR_REC_LEN(0));
++                                         __EXT4_DIR_REC_LEN(0));
+ #ifdef CONFIG_FS_ENCRYPTION
+       /* Check if the directory is encrypted */
+       if (IS_ENCRYPTED(dir)) {
+@@ -1741,7 +1753,7 @@ dx_move_dirents(char *from, char *to, struct dx_map_entry *map, int count,
+       while (count--) {
+               struct ext4_dir_entry_2 *de = (struct ext4_dir_entry_2 *)
+                                               (from + (map->offs<<2));
+-              rec_len = EXT4_DIR_REC_LEN(de->name_len);
++              rec_len = EXT4_DIR_REC_LEN(de);
+               memcpy (to, de, rec_len);
+               ((struct ext4_dir_entry_2 *) to)->rec_len =
+                               ext4_rec_len_to_disk(rec_len, blocksize);
+@@ -1765,7 +1777,7 @@ static struct ext4_dir_entry_2* dx_pack_dirents(char *base, unsigned blocksize)
+       while ((char*)de < base + blocksize) {
+               next = ext4_next_entry(de, blocksize);
+               if (de->inode && de->name_len) {
+-                      rec_len = EXT4_DIR_REC_LEN(de->name_len);
++                      rec_len = EXT4_DIR_REC_LEN(de);
+                       if (de > to)
+                               memmove(to, de, rec_len);
+                       to->rec_len = ext4_rec_len_to_disk(rec_len, blocksize);
+@@ -1896,14 +1908,16 @@ int ext4_find_dest_de(struct inode *dir, struct inode *inode,
+                     struct buffer_head *bh,
+                     void *buf, int buf_size,
+                     struct ext4_filename *fname,
+-                    struct ext4_dir_entry_2 **dest_de)
++                    struct ext4_dir_entry_2 **dest_de, int *dlen)
+ {
+       struct ext4_dir_entry_2 *de;
+-      unsigned short reclen = EXT4_DIR_REC_LEN(fname_len(fname));
++      unsigned short reclen = __EXT4_DIR_REC_LEN(fname_len(fname)) +
++                                                (dlen ? *dlen : 0);
+       int nlen, rlen;
+       unsigned int offset = 0;
+       char *top;
++      dlen ? *dlen = 0 : 0; /* default set to 0 */
+       de = (struct ext4_dir_entry_2 *)buf;
+       top = buf + buf_size - reclen;
+       while ((char *) de <= top) {
+@@ -1912,10 +1926,26 @@ int ext4_find_dest_de(struct inode *dir, struct inode *inode,
+                       return -EFSCORRUPTED;
+               if (ext4_match(dir, fname, de))
+                       return -EEXIST;
+-              nlen = EXT4_DIR_REC_LEN(de->name_len);
++              nlen = EXT4_DIR_REC_LEN(de);
+               rlen = ext4_rec_len_from_disk(de->rec_len, buf_size);
+               if ((de->inode ? rlen - nlen : rlen) >= reclen)
+                       break;
++              /* Then for dotdot entries, check for the smaller space
++               * required for just the entry, no FID */
++              if (fname_len(fname) == 2 && memcmp(fname_name(fname), "..", 2) == 0) {
++                      if ((de->inode ? rlen - nlen : rlen) >=
++                          __EXT4_DIR_REC_LEN(fname_len(fname))) {
++                              /* set dlen=1 to indicate there is not
++                               * enough space to store the fid */
++                              dlen ? *dlen = 1 : 0;
++                              break;
++                      }
++                      /* The new ".." entry must be written over the
++                       * previous ".." entry, which is the first
++                       * entry traversed by this scan. If it doesn't
++                       * fit, something is badly wrong, so -EIO. */
++                      return -EIO;
++              }
+               de = (struct ext4_dir_entry_2 *)((char *)de + rlen);
+               offset += rlen;
+       }
+@@ -1929,12 +1959,12 @@ int ext4_find_dest_de(struct inode *dir, struct inode *inode,
+ void ext4_insert_dentry(struct inode *inode,
+                       struct ext4_dir_entry_2 *de,
+                       int buf_size,
+-                      struct ext4_filename *fname)
++                      struct ext4_filename *fname, void *data)
+ {
+       int nlen, rlen;
+-      nlen = EXT4_DIR_REC_LEN(de->name_len);
++      nlen = EXT4_DIR_REC_LEN(de);
+       rlen = ext4_rec_len_from_disk(de->rec_len, buf_size);
+       if (de->inode) {
+               struct ext4_dir_entry_2 *de1 =
+@@ -1948,6 +1978,11 @@ void ext4_insert_dentry(struct inode *inode,
+       ext4_set_de_type(inode->i_sb, de, inode->i_mode);
+       de->name_len = fname_len(fname);
+       memcpy(de->name, fname_name(fname), fname_len(fname));
++      if (data) {
++              de->name[fname_len(fname)] = 0;
++              memcpy(&de->name[fname_len(fname) + 1], data, *(char *)data);
++              de->file_type |= EXT4_DIRENT_LUFID;
++      }
+ }
+ /*
+@@ -1965,14 +2000,19 @@ static int add_dirent_to_buf(handle_t *handle, struct ext4_filename *fname,
+ {
+       unsigned int    blocksize = dir->i_sb->s_blocksize;
+       int             csum_size = 0;
+-      int             err;
++      int             err, dlen = 0;
++      unsigned char   *data;
++      data = ext4_dentry_get_data(inode->i_sb, (struct ext4_dentry_param *)
++                                              EXT4_I(inode)->i_dirdata);
+       if (ext4_has_metadata_csum(inode->i_sb))
+               csum_size = sizeof(struct ext4_dir_entry_tail);
+       if (!de) {
++              if (data)
++                      dlen = (*data) + 1;
+               err = ext4_find_dest_de(dir, inode, bh, bh->b_data,
+-                                      blocksize - csum_size, fname, &de);
++                                      blocksize - csum_size, fname, &de, &dlen);
+               if (err)
+                       return err;
+       }
+@@ -1984,7 +2024,10 @@ static int add_dirent_to_buf(handle_t *handle, struct ext4_filename *fname,
+       }
+       /* By now the buffer is marked for journaling */
+-      ext4_insert_dentry(inode, de, blocksize, fname);
++      /* If writing the short form of "dotdot", don't add the data section */
++      if (dlen == 1)
++              data = NULL;
++      ext4_insert_dentry(inode, de, blocksize, fname, data);
+       /*
+        * XXX shouldn't update any times until successful
+@@ -2093,7 +2136,8 @@ static int make_indexed_dir(handle_t *handle, struct ext4_filename *fname,
+       dx_set_block(entries, 1);
+       dx_set_count(entries, 1);
+-      dx_set_limit(entries, dx_root_limit(dir, sizeof(*dx_info)));
++      dx_set_limit(entries, dx_root_limit(dir,
++                                       dot_de, sizeof(*dx_info)));
+       /* Initialize as for dx_probe */
+       fname->hinfo.hash_version = dx_info->hash_version;
+@@ -2143,6 +2187,8 @@ static int ext4_update_dotdot(handle_t *handle, struct dentry *dentry,
+       struct buffer_head *dir_block;
+       struct ext4_dir_entry_2 *de;
+       int len, journal = 0, err = 0;
++      int dlen = 0;
++      char *data;
+       if (IS_ERR(handle))
+               return PTR_ERR(handle);
+@@ -2160,19 +2206,24 @@ static int ext4_update_dotdot(handle_t *handle, struct dentry *dentry,
+       /* the first item must be "." */
+       assert(de->name_len == 1 && de->name[0] == '.');
+       len = le16_to_cpu(de->rec_len);
+-      assert(len >= EXT4_DIR_REC_LEN(1));
+-      if (len > EXT4_DIR_REC_LEN(1)) {
++      assert(len >= __EXT4_DIR_REC_LEN(1));
++      if (len > __EXT4_DIR_REC_LEN(1)) {
+               BUFFER_TRACE(dir_block, "get_write_access");
+               err = ext4_journal_get_write_access(handle, dir_block);
+               if (err)
+                       goto out_journal;
+               journal = 1;
+-              de->rec_len = cpu_to_le16(EXT4_DIR_REC_LEN(1));
++              de->rec_len = cpu_to_le16(EXT4_DIR_REC_LEN(de));
+       }
+-      len -= EXT4_DIR_REC_LEN(1);
+-      assert(len == 0 || len >= EXT4_DIR_REC_LEN(2));
++      len -= EXT4_DIR_REC_LEN(de);
++      data = ext4_dentry_get_data(dir->i_sb,
++                      (struct ext4_dentry_param *)dentry->d_fsdata);
++      if (data)
++              dlen = *data + 1;
++      assert(len == 0 || len >= __EXT4_DIR_REC_LEN(2 + dlen));
++
+       de = (struct ext4_dir_entry_2 *)
+                       ((char *) de + le16_to_cpu(de->rec_len));
+       if (!journal) {
+@@ -2186,10 +2237,15 @@ static int ext4_update_dotdot(handle_t *handle, struct dentry *dentry,
+       if (len > 0)
+               de->rec_len = cpu_to_le16(len);
+       else
+-              assert(le16_to_cpu(de->rec_len) >= EXT4_DIR_REC_LEN(2));
++              assert(le16_to_cpu(de->rec_len) >= __EXT4_DIR_REC_LEN(2));
+       de->name_len = 2;
+       strcpy(de->name, "..");
+-      ext4_set_de_type(dir->i_sb, de, S_IFDIR);
++      if (data != NULL && ext4_get_dirent_data_len(de) >= dlen) {
++              de->name[2] = 0;
++              memcpy(&de->name[2 + 1], data, *data);
++              ext4_set_de_type(dir->i_sb, de, S_IFDIR);
++              de->file_type |= EXT4_DIRENT_LUFID;
++      }
+ out_journal:
+       if (journal) {
+@@ -2229,6 +2285,7 @@ static int ext4_add_entry(handle_t *handle, struct dentry *dentry,
+       ext4_lblk_t block, blocks;
+       int     csum_size = 0;
++      EXT4_I(inode)->i_dirdata = dentry->d_fsdata;
+       if (ext4_has_metadata_csum(inode->i_sb))
+               csum_size = sizeof(struct ext4_dir_entry_tail);
+@@ -2757,37 +2814,70 @@ err_unlock_inode:
+       return err;
+ }
++struct tp_block {
++      struct inode *inode;
++      void *data1;
++      void *data2;
++};
++
+ struct ext4_dir_entry_2 *ext4_init_dot_dotdot(struct inode *inode,
+                         struct ext4_dir_entry_2 *de,
+                         int blocksize, int csum_size,
+                         unsigned int parent_ino, int dotdot_real_len)
+ {
++      void *data1 = NULL, *data2 = NULL;
++      int dot_reclen = 0;
++
++      if (dotdot_real_len == 10) {
++              struct tp_block *tpb = (struct tp_block *)inode;
++              data1 = tpb->data1;
++              data2 = tpb->data2;
++              inode = tpb->inode;
++              dotdot_real_len = 0;
++      }
+       de->inode = cpu_to_le32(inode->i_ino);
+       de->name_len = 1;
+-      de->rec_len = ext4_rec_len_to_disk(EXT4_DIR_REC_LEN(de->name_len),
+-                                         blocksize);
+       strcpy(de->name, ".");
+       ext4_set_de_type(inode->i_sb, de, S_IFDIR);
++      /* get packed fid data */
++      data1 = ext4_dentry_get_data(inode->i_sb,
++                              (struct ext4_dentry_param *) data1);
++      if (data1) {
++              de->name[1] = 0;
++              memcpy(&de->name[2], data1, *(char *) data1);
++              de->file_type |= EXT4_DIRENT_LUFID;
++      }
++      de->rec_len = cpu_to_le16(EXT4_DIR_REC_LEN(de));
++      dot_reclen = cpu_to_le16(de->rec_len);
+       de = ext4_next_entry(de, blocksize);
+       de->inode = cpu_to_le32(parent_ino);
+       de->name_len = 2;
++      strcpy(de->name, "..");
++      ext4_set_de_type(inode->i_sb, de, S_IFDIR);
++      data2 = ext4_dentry_get_data(inode->i_sb,
++                      (struct ext4_dentry_param *) data2);
++      if (data2) {
++              de->name[2] = 0;
++              memcpy(&de->name[3], data2, *(char *) data2);
++              de->file_type |= EXT4_DIRENT_LUFID;
++      }
+       if (!dotdot_real_len)
+               de->rec_len = ext4_rec_len_to_disk(blocksize -
+-                                      (csum_size + EXT4_DIR_REC_LEN(1)),
++                                      (csum_size + dot_reclen),
+                                       blocksize);
+       else
+               de->rec_len = ext4_rec_len_to_disk(
+-                              EXT4_DIR_REC_LEN(de->name_len), blocksize);
+-      strcpy(de->name, "..");
+-      ext4_set_de_type(inode->i_sb, de, S_IFDIR);
++                              EXT4_DIR_REC_LEN(de), blocksize);
+       return ext4_next_entry(de, blocksize);
+ }
+ static int ext4_init_new_dir(handle_t *handle, struct inode *dir,
+-                           struct inode *inode)
++                           struct inode *inode,
++                           const void *data1, const void *data2)
+ {
++      struct tp_block param;
+       struct buffer_head *dir_block = NULL;
+       struct ext4_dir_entry_2 *de;
+       struct ext4_dir_entry_tail *t;
+@@ -2812,7 +2902,11 @@ static int ext4_init_new_dir(handle_t *handle, struct inode *dir,
+       if (IS_ERR(dir_block))
+               return PTR_ERR(dir_block);
+       de = (struct ext4_dir_entry_2 *)dir_block->b_data;
+-      ext4_init_dot_dotdot(inode, de, blocksize, csum_size, dir->i_ino, 0);
++      param.inode = inode;
++      param.data1 = (void *)data1;
++      param.data2 = (void *)data2;
++      ext4_init_dot_dotdot((struct inode *)(&param), de, blocksize,
++                           csum_size, dir->i_ino, 10);
+       set_nlink(inode, 2);
+       if (csum_size) {
+               t = EXT4_DIRENT_TAIL(dir_block->b_data, blocksize);
+@@ -2829,6 +2923,29 @@ out:
+       return err;
+ }
++/* Initialize @inode as a subdirectory of @dir, and add the
++ * "." and ".." entries into the first directory block. */
++int ext4_add_dot_dotdot(handle_t *handle, struct inode *dir,
++                      struct inode *inode,
++                      const void *data1, const void *data2)
++{
++      int rc;
++
++      if (IS_ERR(handle))
++              return PTR_ERR(handle);
++
++      if (IS_DIRSYNC(dir))
++              ext4_handle_sync(handle);
++
++      inode->i_op = &ext4_dir_inode_operations;
++      inode->i_fop = &ext4_dir_operations;
++      rc = ext4_init_new_dir(handle, dir, inode, data1, data2);
++      if (!rc)
++              rc = ext4_mark_inode_dirty(handle, inode);
++      return rc;
++}
++EXPORT_SYMBOL(ext4_add_dot_dotdot);
++
+ static int ext4_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode)
+ {
+       handle_t *handle;
+@@ -2855,7 +2972,7 @@ retry:
+       inode->i_op = &ext4_dir_inode_operations;
+       inode->i_fop = &ext4_dir_operations;
+-      err = ext4_init_new_dir(handle, dir, inode);
++      err = ext4_init_new_dir(handle, dir, inode, NULL, NULL);
+       if (err)
+               goto out_clear_inode;
+       err = ext4_mark_inode_dirty(handle, inode);
+@@ -2906,7 +3023,7 @@ bool ext4_empty_dir(struct inode *inode)
+       }
+       sb = inode->i_sb;
+-      if (inode->i_size < EXT4_DIR_REC_LEN(1) + EXT4_DIR_REC_LEN(2)) {
++      if (inode->i_size < __EXT4_DIR_REC_LEN(1) + __EXT4_DIR_REC_LEN(2)) {
+               EXT4_ERROR_INODE(inode, "invalid size");
+               return true;
+       }
+diff --git a/fs/ext4/super.c b/fs/ext4/super.c
+index 1f1342f..33f7e88 100644
+--- a/fs/ext4/super.c
++++ b/fs/ext4/super.c
+@@ -1445,7 +1445,7 @@ enum {
+       Opt_data_err_abort, Opt_data_err_ignore, Opt_test_dummy_encryption,
+       Opt_usrjquota, Opt_grpjquota, Opt_offusrjquota, Opt_offgrpjquota,
+       Opt_jqfmt_vfsold, Opt_jqfmt_vfsv0, Opt_jqfmt_vfsv1, Opt_quota,
+-      Opt_noquota, Opt_barrier, Opt_nobarrier, Opt_err,
++      Opt_noquota, Opt_barrier, Opt_nobarrier, Opt_err, Opt_dirdata,
+       Opt_usrquota, Opt_grpquota, Opt_prjquota, Opt_i_version, Opt_dax,
+       Opt_stripe, Opt_delalloc, Opt_nodelalloc, Opt_warn_on_error,
+       Opt_nowarn_on_error, Opt_mblk_io_submit,
+@@ -1521,6 +1521,7 @@ static const match_table_t tokens = {
+       {Opt_nolazytime, "nolazytime"},
+       {Opt_debug_want_extra_isize, "debug_want_extra_isize=%u"},
+       {Opt_nodelalloc, "nodelalloc"},
++      {Opt_dirdata, "dirdata"},
+       {Opt_removed, "mblk_io_submit"},
+       {Opt_removed, "nomblk_io_submit"},
+       {Opt_block_validity, "block_validity"},
+@@ -1744,6 +1745,7 @@ static const struct mount_opts {
+       {Opt_usrjquota, 0, MOPT_Q},
+       {Opt_grpjquota, 0, MOPT_Q},
+       {Opt_offusrjquota, 0, MOPT_Q},
++      {Opt_dirdata, EXT4_MOUNT_DIRDATA, MOPT_SET},
+       {Opt_offgrpjquota, 0, MOPT_Q},
+       {Opt_jqfmt_vfsold, QFMT_VFS_OLD, MOPT_QFMT},
+       {Opt_jqfmt_vfsv0, QFMT_VFS_V0, MOPT_QFMT},
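To make the dirdata format concrete, here is an illustrative layout of one
directory entry carrying a single LUFID extension. The "foo" name and the
offsets are examples only; the normative definitions are the struct fields and
macros above:

	/*
	 *   offset  size  field
	 *        0     4  inode
	 *        4     2  rec_len   (covers name, NUL and blob, rounded to 4)
	 *        6     1  name_len = 3
	 *        7     1  file_type = EXT4_FT_REG_FILE | EXT4_DIRENT_LUFID
	 *        8     3  "foo"
	 *       11     1  NUL terminator
	 *       12     1  blob length byte (first byte of the extension data)
	 *       13   ...  packed Lustre FID bytes
	 */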
diff --git a/ldiskfs/kernel_patches/patches/linux-5.4/ext4-give-warning-with-dir-htree-growing.patch b/ldiskfs/kernel_patches/patches/linux-5.4/ext4-give-warning-with-dir-htree-growing.patch
new file mode 100644
index 0000000..3aadbf4
--- /dev/null
@@ -0,0 +1,156 @@
+diff --git a/fs/ext4/ext4.h b/fs/ext4/ext4.h
+index 8203441..3ef90af 100644
+--- a/fs/ext4/ext4.h
++++ b/fs/ext4/ext4.h
+@@ -1470,6 +1470,7 @@ struct ext4_sb_info {
+       unsigned long s_mb_prealloc_table_size;
+       unsigned int s_mb_group_prealloc;
+       unsigned int s_max_dir_size_kb;
++      unsigned long s_warning_dir_size;
+       /* where last allocation was done - for stream allocation */
+       unsigned long s_mb_last_group;
+       unsigned long s_mb_last_start;
+diff --git a/fs/ext4/namei.c b/fs/ext4/namei.c
+index d654387..d98e90b 100644
+--- a/fs/ext4/namei.c
++++ b/fs/ext4/namei.c
+@@ -755,12 +755,20 @@ struct ext4_dir_lock_data {
+ #define ext4_htree_lock_data(l)       ((struct ext4_dir_lock_data *)(l)->lk_private)
+ #define ext4_find_entry(dir, name, dirent, inline) \
+                       ext4_find_entry_locked(dir, name, dirent, inline, NULL)
+-#define ext4_add_entry(handle, dentry, inode) \
+-                      ext4_add_entry_locked(handle, dentry, inode, NULL)
+ /* NB: ext4_lblk_t is 32 bits so we use high bits to identify invalid blk */
+ #define EXT4_HTREE_NODE_CHANGED       (0xcafeULL << 32)
++inline int ext4_add_entry(handle_t *handle, struct dentry *dentry,
++                        struct inode *inode)
++{
++      int ret = ext4_add_entry_locked(handle, dentry, inode, NULL);
++
++      if (ret == -ENOBUFS)
++              ret = 0;
++      return ret;
++}
++
+ static void ext4_htree_event_cb(void *target, void *event)
+ {
+       u64 *block = (u64 *)target;
+@@ -2598,6 +2606,54 @@ out:
+       return err;
+ }
++static unsigned long __ext4_max_dir_size(struct dx_frame *frames,
++                             struct dx_frame *frame, struct inode *dir)
++{
++      unsigned long max_dir_size;
++
++      if (EXT4_SB(dir->i_sb)->s_max_dir_size_kb) {
++              max_dir_size = EXT4_SB(dir->i_sb)->s_max_dir_size_kb << 10;
++      } else {
++              max_dir_size = EXT4_BLOCK_SIZE(dir->i_sb);
++              while (frame >= frames) {
++                      max_dir_size *= dx_get_limit(frame->entries);
++                      if (frame == frames)
++                              break;
++                      frame--;
++              }
++              /* use 75% of max dir size in average */
++              max_dir_size = max_dir_size / 4 * 3;
++      }
++      return max_dir_size;
++}
++
++/*
++ * With hash tree growing, it is easy to hit ENOSPC, but it is hard
++ * to predict when it will happen. Let's warn administrators when the
++ * directory reaches 3/5 and 2/3 of the limit.
++ */
++static inline bool dir_size_in_warning_range(struct dx_frame *frames,
++                                           struct dx_frame *frame,
++                                           struct inode *dir)
++{
++      unsigned long size1, size2;
++      struct super_block *sb = dir->i_sb;
++
++      if (unlikely(!EXT4_SB(sb)->s_warning_dir_size))
++              EXT4_SB(sb)->s_warning_dir_size =
++                      __ext4_max_dir_size(frames, frame, dir);
++
++      size1 = EXT4_SB(sb)->s_warning_dir_size / 16 * 10;
++      size1 = size1 & ~(EXT4_BLOCK_SIZE(sb) - 1);
++      size2 = EXT4_SB(sb)->s_warning_dir_size / 16 * 11;
++      size2 = size2 & ~(EXT4_BLOCK_SIZE(sb) - 1);
++      if (in_range(dir->i_size, size1, EXT4_BLOCK_SIZE(sb)) ||
++          in_range(dir->i_size, size2, EXT4_BLOCK_SIZE(sb)))
++              return true;
++
++      return false;
++}
++
+ /*
+  *    ext4_add_entry()
+  *
+@@ -2727,6 +2783,7 @@ static int ext4_dx_add_entry(handle_t *handle, struct ext4_filename *fname,
+       struct ext4_dir_entry_2 *de;
+       int restart;
+       int err;
++      bool ret_warn = false;
+ again:
+       restart = 0;
+@@ -2755,6 +2812,11 @@ again:
+       /* Block full, should compress but for now just split */
+       dxtrace(printk(KERN_DEBUG "using %u of %u node entries\n",
+                      dx_get_count(entries), dx_get_limit(entries)));
++
++      if (frame - frames + 1 >= ext4_dir_htree_level(sb) ||
++          EXT4_SB(sb)->s_warning_dir_size)
++              ret_warn = dir_size_in_warning_range(frames, frame, dir);
++
+       /* Need to split index? */
+       if (dx_get_count(entries) == dx_get_limit(entries)) {
+               ext4_lblk_t newblock;
+@@ -2918,6 +2980,8 @@ cleanup:
+        */
+       if (restart && err == 0)
+               goto again;
++      if (err == 0 && ret_warn)
++              err = -ENOBUFS;
+       return err;
+ }
+diff --git a/fs/ext4/super.c b/fs/ext4/super.c
+index 70cd294..132dd23 100644
+--- a/fs/ext4/super.c
++++ b/fs/ext4/super.c
+@@ -1899,6 +1899,8 @@ static int handle_mount_opt(struct super_block *sb, char *opt, int token,
+               sbi->s_li_wait_mult = arg;
+       } else if (token == Opt_max_dir_size_kb) {
+               sbi->s_max_dir_size_kb = arg;
++              /* reset s_warning_dir_size and make it re-calculated */
++              sbi->s_warning_dir_size = 0;
+       } else if (token == Opt_stripe) {
+               sbi->s_stripe = arg;
+       } else if (token == Opt_resuid) {
+diff --git a/fs/ext4/sysfs.c b/fs/ext4/sysfs.c
+index d24d6d9..13217a6 100644
+--- a/fs/ext4/sysfs.c
++++ b/fs/ext4/sysfs.c
+@@ -182,6 +182,7 @@ EXT4_ATTR_OFFSET(inode_readahead_blks, 0644, inode_readahead,
+ EXT4_RW_ATTR_SBI_UI(inode_goal, s_inode_goal);
+ EXT4_RW_ATTR_SBI_UI(max_dir_size, s_max_dir_size_kb);
+ EXT4_RW_ATTR_SBI_UI(max_dir_size_kb, s_max_dir_size_kb);
++EXT4_RW_ATTR_SBI_UI(warning_dir_size, s_warning_dir_size);
+ EXT4_RW_ATTR_SBI_UI(mb_stats, s_mb_stats);
+ EXT4_RW_ATTR_SBI_UI(mb_max_to_scan, s_mb_max_to_scan);
+ EXT4_RW_ATTR_SBI_UI(mb_min_to_scan, s_mb_min_to_scan);
+@@ -214,6 +215,7 @@ static struct attribute *ext4_attrs[] = {
+       ATTR_LIST(inode_goal),
+       ATTR_LIST(max_dir_size),
+       ATTR_LIST(max_dir_size_kb),
++      ATTR_LIST(warning_dir_size),
+       ATTR_LIST(mb_stats),
+       ATTR_LIST(mb_max_to_scan),
+       ATTR_LIST(mb_min_to_scan),
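A rough worked example of the warning thresholds, assuming 4 KiB blocks, a
two-level htree and no max_dir_size_kb setting (all numbers approximate):

	/*
	 * max_dir_size ~= blocksize * root_limit * node_limit * 3/4
	 *             ~= 4096 * 508 * 511 * 3/4 ~= 760 MiB
	 * Warnings fire when i_size crosses 10/16 (~62%) and 11/16 (~69%)
	 * of that value, roughly the 3/5 and 2/3 marks named above.
	 */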
diff --git a/ldiskfs/kernel_patches/patches/linux-5.4/ext4-hash-indexed-dir-dotdot-update.patch b/ldiskfs/kernel_patches/patches/linux-5.4/ext4-hash-indexed-dir-dotdot-update.patch
new file mode 100644
index 0000000..ef35db0
--- /dev/null
@@ -0,0 +1,101 @@
+From eb2cc3a5d4cfb8feee399507a8f2e2fe17f96cf4 Mon Sep 17 00:00:00 2001
+From: Shaun Tancheff <stancheff@cray.com>
+Date: Tue, 6 Aug 2019 18:12:53 -0500
+Subject: [PATCH] + linux-5.3/ext4-hash-indexed-dir-dotdot-update.patch
+
+---
+ fs/ext4/namei.c | 71 +++++++++++++++++++++++++++++++++++++++++++++++++
+ 1 file changed, 71 insertions(+)
+
+diff --git a/fs/ext4/namei.c b/fs/ext4/namei.c
+index f54e868..14ff68e 100644
+--- a/fs/ext4/namei.c
++++ b/fs/ext4/namei.c
+@@ -2174,6 +2174,74 @@ out_frames:
+       return retval;
+ }
++/* update ".." for hash-indexed directory, split the item "." if necessary */
++static int ext4_update_dotdot(handle_t *handle, struct dentry *dentry,
++                            struct inode *inode)
++{
++      struct inode *dir = dentry->d_parent->d_inode;
++      struct buffer_head *dir_block;
++      struct ext4_dir_entry_2 *de;
++      int len, journal = 0, err = 0;
++
++      if (IS_ERR(handle))
++              return PTR_ERR(handle);
++
++      if (IS_DIRSYNC(dir))
++              handle->h_sync = 1;
++
++      dir_block = ext4_bread(handle, dir, 0, 0);
++      if (IS_ERR(dir_block)) {
++              err = PTR_ERR(dir_block);
++              goto out;
++      }
++
++      de = (struct ext4_dir_entry_2 *)dir_block->b_data;
++      /* the first item must be "." */
++      assert(de->name_len == 1 && de->name[0] == '.');
++      len = le16_to_cpu(de->rec_len);
++      assert(len >= EXT4_DIR_REC_LEN(1));
++      if (len > EXT4_DIR_REC_LEN(1)) {
++              BUFFER_TRACE(dir_block, "get_write_access");
++              err = ext4_journal_get_write_access(handle, dir_block);
++              if (err)
++                      goto out_journal;
++
++              journal = 1;
++              de->rec_len = cpu_to_le16(EXT4_DIR_REC_LEN(1));
++      }
++
++      len -= EXT4_DIR_REC_LEN(1);
++      assert(len == 0 || len >= EXT4_DIR_REC_LEN(2));
++      de = (struct ext4_dir_entry_2 *)
++                      ((char *) de + le16_to_cpu(de->rec_len));
++      if (!journal) {
++              BUFFER_TRACE(dir_block, "get_write_access");
++              err = ext4_journal_get_write_access(handle, dir_block);
++              if (err)
++                      goto out_journal;
++      }
++
++      de->inode = cpu_to_le32(inode->i_ino);
++      if (len > 0)
++              de->rec_len = cpu_to_le16(len);
++      else
++              assert(le16_to_cpu(de->rec_len) >= EXT4_DIR_REC_LEN(2));
++      de->name_len = 2;
++      strcpy(de->name, "..");
++      ext4_set_de_type(dir->i_sb, de, S_IFDIR);
++
++out_journal:
++      if (journal) {
++              BUFFER_TRACE(dir_block, "call ext4_handle_dirty_metadata");
++              err = ext4_handle_dirty_dirblock(handle, dir, dir_block);
++              ext4_mark_inode_dirty(handle, dir);
++      }
++      brelse(dir_block);
++
++out:
++      return err;
++}
++
+ /*
+  *    ext4_add_entry()
+  *
+@@ -2229,6 +2297,9 @@ static int ext4_add_entry(handle_t *handle, struct dentry *dentry,
+       }
+       if (is_dx(dir)) {
++              if (dentry->d_name.len == 2 &&
++                   memcmp(dentry->d_name.name, "..", 2) == 0)
++                       return ext4_update_dotdot(handle, dentry, inode);
+               retval = ext4_dx_add_entry(handle, &fname, dir, inode);
+               if (!retval || (retval != ERR_BAD_DX_DIR))
+                       goto out;
+-- 
+2.20.1
+
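The rec_len arithmetic in ext4_update_dotdot() above can be checked outside the
kernel. A minimal userspace sketch, assuming the standard EXT4_DIR_REC_LEN
rounding; the starting rec_len is an invented example, not on-disk data:

	#include <assert.h>
	#include <stdio.h>

	/* same rounding as the kernel macro: 8-byte header + name, 4-byte aligned */
	#define EXT4_DIR_REC_LEN(name_len)	(((name_len) + 8 + 3) & ~3)

	int main(void)
	{
		/* invented example: "." currently owns more than its minimal record */
		unsigned dot_rec_len = EXT4_DIR_REC_LEN(1) + EXT4_DIR_REC_LEN(2);
		unsigned len = dot_rec_len;

		assert(len >= EXT4_DIR_REC_LEN(1));
		if (len > EXT4_DIR_REC_LEN(1))
			dot_rec_len = EXT4_DIR_REC_LEN(1);	/* shrink "." */

		len -= EXT4_DIR_REC_LEN(1);
		/* the leftover is either zero or large enough to hold ".." */
		assert(len == 0 || len >= EXT4_DIR_REC_LEN(2));
		printf("\".\" rec_len=%u, %u bytes left for \"..\"\n",
		       dot_rec_len, len);
		return 0;
	}
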
diff --git a/ldiskfs/kernel_patches/patches/linux-5.4/ext4-kill-dx-root.patch b/ldiskfs/kernel_patches/patches/linux-5.4/ext4-kill-dx-root.patch
new file mode 100644 (file)
index 0000000..0b071b9
--- /dev/null
@@ -0,0 +1,241 @@
+From aa282f628e4ad75eea7f8ee1b26dea920c238241 Mon Sep 17 00:00:00 2001
+From: Shaun Tancheff <stancheff@cray.com>
+Date: Tue, 6 Aug 2019 17:00:55 -0500
+Subject: [PATCH] + linux-5.3/ext4-kill-dx-root
+
+---
+ fs/ext4/namei.c | 111 +++++++++++++++++++++++++-----------------------
+ 1 file changed, 58 insertions(+), 53 deletions(-)
+
+diff --git a/fs/ext4/namei.c b/fs/ext4/namei.c
+index a1db7ce..b0291ed 100644
+--- a/fs/ext4/namei.c
++++ b/fs/ext4/namei.c
+@@ -212,22 +212,13 @@ struct dx_entry
+  * hash version mod 4 should never be 0.  Sincerely, the paranoia department.
+  */
+-struct dx_root
++struct dx_root_info
+ {
+-      struct fake_dirent dot;
+-      char dot_name[4];
+-      struct fake_dirent dotdot;
+-      char dotdot_name[4];
+-      struct dx_root_info
+-      {
+-              __le32 reserved_zero;
+-              u8 hash_version;
+-              u8 info_length; /* 8 */
+-              u8 indirect_levels;
+-              u8 unused_flags;
+-      }
+-      info;
+-      struct dx_entry entries[0];
++      __le32 reserved_zero;
++      u8 hash_version;
++      u8 info_length; /* 8 */
++      u8 indirect_levels;
++      u8 unused_flags;
+ };
+ struct dx_node
+@@ -529,6 +520,16 @@ ext4_next_entry(struct ext4_dir_entry_2 *p, unsigned long blocksize)
+  * Future: use high four bits of block for coalesce-on-delete flags
+  * Mask them off for now.
+  */
++struct dx_root_info *dx_get_dx_info(struct ext4_dir_entry_2 *de)
++{
++      /* get dotdot first */
++      de = (struct ext4_dir_entry_2 *)((char *)de + EXT4_DIR_REC_LEN(1));
++
++      /* dx root info is after dotdot entry */
++      de = (struct ext4_dir_entry_2 *)((char *)de + EXT4_DIR_REC_LEN(2));
++
++      return (struct dx_root_info *)de;
++}
+ static inline ext4_lblk_t dx_get_block(struct dx_entry *entry)
+ {
+@@ -753,7 +754,7 @@ dx_probe(struct ext4_filename *fname, struct inode *dir,
+ {
+       unsigned count, indirect;
+       struct dx_entry *at, *entries, *p, *q, *m;
+-      struct dx_root *root;
++      struct dx_root_info *info;
+       struct dx_frame *frame = frame_in;
+       struct dx_frame *ret_err = ERR_PTR(ERR_BAD_DX_DIR);
+       u32 hash;
+@@ -762,18 +763,17 @@ dx_probe(struct ext4_filename *fname, struct inode *dir,
+       frame->bh = ext4_read_dirblock(dir, 0, INDEX);
+       if (IS_ERR(frame->bh))
+               return (struct dx_frame *) frame->bh;
+-
+-      root = (struct dx_root *) frame->bh->b_data;
+-      if (root->info.hash_version != DX_HASH_TEA &&
+-          root->info.hash_version != DX_HASH_HALF_MD4 &&
+-          root->info.hash_version != DX_HASH_LEGACY) {
+-              ext4_warning_inode(dir, "Unrecognised inode hash code %u for directory "
+-                                 "%lu", root->info.hash_version, dir->i_ino);
++      info = dx_get_dx_info((struct ext4_dir_entry_2 *)frame->bh->b_data);
++        if (info->hash_version != DX_HASH_TEA &&
++            info->hash_version != DX_HASH_HALF_MD4 &&
++            info->hash_version != DX_HASH_LEGACY) {
++              ext4_warning(dir->i_sb, "Unrecognised inode hash code %d for directory "
++                             "#%lu", info->hash_version, dir->i_ino);
+               goto fail;
+       }
+       if (fname)
+               hinfo = &fname->hinfo;
+-      hinfo->hash_version = root->info.hash_version;
++      hinfo->hash_version = info->hash_version;
+       if (hinfo->hash_version <= DX_HASH_TEA)
+               hinfo->hash_version += EXT4_SB(dir->i_sb)->s_hash_unsigned;
+       hinfo->seed = EXT4_SB(dir->i_sb)->s_hash_seed;
+@@ -781,13 +781,13 @@ dx_probe(struct ext4_filename *fname, struct inode *dir,
+               ext4fs_dirhash(dir, fname_name(fname), fname_len(fname), hinfo);
+       hash = hinfo->hash;
+-      if (root->info.unused_flags & 1) {
++      if (info->unused_flags & 1) {
+               ext4_warning_inode(dir, "Unimplemented hash flags: %#06x",
+-                                 root->info.unused_flags);
++                                 info->unused_flags);
+               goto fail;
+       }
+-      indirect = root->info.indirect_levels;
++      indirect = info->indirect_levels;
+       if (indirect >= ext4_dir_htree_level(dir->i_sb)) {
+               ext4_warning(dir->i_sb,
+                            "Directory (ino: %lu) htree depth %#06x exceed"
+@@ -800,14 +800,13 @@ dx_probe(struct ext4_filename *fname, struct inode *dir,
+               goto fail;
+       }
+-      entries = (struct dx_entry *)(((char *)&root->info) +
+-                                    root->info.info_length);
++      entries = (struct dx_entry *)(((char *)info) + info->info_length);
+       if (dx_get_limit(entries) != dx_root_limit(dir,
+-                                                 root->info.info_length)) {
++                                                 info->info_length)) {
+               ext4_warning_inode(dir, "dx entry: limit %u != root limit %u",
+                                  dx_get_limit(entries),
+-                                 dx_root_limit(dir, root->info.info_length));
++                                 dx_root_limit(dir, info->info_length));
+               goto fail;
+       }
+@@ -892,7 +891,7 @@ static void dx_release(struct dx_frame *frames)
+       if (frames[0].bh == NULL)
+               return;
+-      info = &((struct dx_root *)frames[0].bh->b_data)->info;
++      info = dx_get_dx_info((struct ext4_dir_entry_2 *)frames[0].bh->b_data);
+       /* save local copy, "info" may be freed after brelse() */
+       indirect_levels = info->indirect_levels;
+       for (i = 0; i <= indirect_levels; i++) {
+@@ -2065,16 +2064,15 @@ static int make_indexed_dir(handle_t *handle, struct ext4_filename *fname,
+                           struct inode *inode, struct buffer_head *bh)
+ {
+       struct buffer_head *bh2;
+-      struct dx_root  *root;
+       struct dx_frame frames[EXT4_HTREE_LEVEL], *frame;
+       struct dx_entry *entries;
+-      struct ext4_dir_entry_2 *de, *de2;
++      struct ext4_dir_entry_2 *de, *de2, *dot_de, *dotdot_de;
+       char            *data2, *top;
+       unsigned        len;
+       int             retval;
+       unsigned        blocksize;
+       ext4_lblk_t  block;
+-      struct fake_dirent *fde;
++      struct dx_root_info *dx_info;
+       int csum_size = 0;
+       if (ext4_has_metadata_csum(inode->i_sb))
+@@ -2089,18 +2087,19 @@ static int make_indexed_dir(handle_t *handle, struct ext4_filename *fname,
+               brelse(bh);
+               return retval;
+       }
+-      root = (struct dx_root *) bh->b_data;
++
++      dot_de = (struct ext4_dir_entry_2 *)bh->b_data;
++      dotdot_de = ext4_next_entry(dot_de, blocksize);
+       /* The 0th block becomes the root, move the dirents out */
+-      fde = &root->dotdot;
+-      de = (struct ext4_dir_entry_2 *)((char *)fde +
+-              ext4_rec_len_from_disk(fde->rec_len, blocksize));
+-      if ((char *) de >= (((char *) root) + blocksize)) {
++      de = (struct ext4_dir_entry_2 *)((char *)dotdot_de +
++              ext4_rec_len_from_disk(dotdot_de->rec_len, blocksize));
++      if ((char *)de >= (((char *)dot_de) + blocksize)) {
+               EXT4_ERROR_INODE(dir, "invalid rec_len for '..'");
+               brelse(bh);
+               return -EFSCORRUPTED;
+       }
+-      len = ((char *) root) + (blocksize - csum_size) - (char *) de;
++      len = ((char *)dot_de) + (blocksize - csum_size) - (char *)de;
+       /* Allocate new block for the 0th block's dirents */
+       bh2 = ext4_append(handle, dir, &block);
+@@ -2123,19 +2122,24 @@ static int make_indexed_dir(handle_t *handle, struct ext4_filename *fname,
+               ext4_initialize_dirent_tail(bh2, blocksize);
+       /* Initialize the root; the dot dirents already exist */
+-      de = (struct ext4_dir_entry_2 *) (&root->dotdot);
+-      de->rec_len = ext4_rec_len_to_disk(blocksize - EXT4_DIR_REC_LEN(2),
+-                                         blocksize);
+-      memset (&root->info, 0, sizeof(root->info));
+-      root->info.info_length = sizeof(root->info);
+-      root->info.hash_version = EXT4_SB(dir->i_sb)->s_def_hash_version;
+-      entries = root->entries;
++      dotdot_de->rec_len =
++              ext4_rec_len_to_disk(blocksize - le16_to_cpu(dot_de->rec_len),
++                                   blocksize);
++
++      /* initialize hashing info */
++      dx_info = dx_get_dx_info(dot_de);
++      memset(dx_info, 0, sizeof(*dx_info));
++      dx_info->info_length = sizeof(*dx_info);
++      dx_info->hash_version = EXT4_SB(dir->i_sb)->s_def_hash_version;
++
++      entries = (void *)dx_info + sizeof(*dx_info);
++
+       dx_set_block(entries, 1);
+       dx_set_count(entries, 1);
+-      dx_set_limit(entries, dx_root_limit(dir, sizeof(root->info)));
++      dx_set_limit(entries, dx_root_limit(dir, sizeof(*dx_info)));
+       /* Initialize as for dx_probe */
+-      fname->hinfo.hash_version = root->info.hash_version;
++      fname->hinfo.hash_version = dx_info->hash_version;
+       if (fname->hinfo.hash_version <= DX_HASH_TEA)
+               fname->hinfo.hash_version += EXT4_SB(dir->i_sb)->s_hash_unsigned;
+       fname->hinfo.seed = EXT4_SB(dir->i_sb)->s_hash_seed;
+@@ -2488,7 +2492,7 @@ again:
+                               goto journal_error;
+                       }
+               } else {
+-                      struct dx_root *dxroot;
++                      struct dx_root_info *info;
+                       memcpy((char *) entries2, (char *) entries,
+                              icount * sizeof(struct dx_entry));
+                       dx_set_limit(entries2, dx_node_limit(dir));
+@@ -2496,8 +2500,9 @@ again:
+                       /* Set up root */
+                       dx_set_count(entries, 1);
+                       dx_set_block(entries + 0, newblock);
+-                      dxroot = (struct dx_root *)frames[0].bh->b_data;
+-                      dxroot->info.indirect_levels += 1;
++                      info = dx_get_dx_info((struct ext4_dir_entry_2 *)
++                                            frames[0].bh->b_data);
++                      info->indirect_levels = 1;
+                       dxtrace(printk(KERN_DEBUG
+                                      "Creating %d level index...\n",
+                                      dxroot->info.indirect_levels));
+-- 
+2.20.1
+
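To see why dx_get_dx_info() above addresses the same bytes as the removed
root->info field: the "." and ".." records at the start of the root block
occupy EXT4_DIR_REC_LEN(1) and EXT4_DIR_REC_LEN(2) bytes (12 each), putting
dx_root_info at offset 24 either way. A small sketch, with the layouts
transcribed from the patch (natural struct packing is an assumption, but it
holds for these field sizes):

	#include <assert.h>
	#include <stddef.h>
	#include <stdint.h>
	#include <stdio.h>

	#define EXT4_DIR_REC_LEN(name_len)	(((name_len) + 8 + 3) & ~3)

	struct fake_dirent {
		uint32_t inode;
		uint16_t rec_len;
		uint8_t  name_len, file_type;
	};

	struct dx_root_info {
		uint32_t reserved_zero;
		uint8_t  hash_version, info_length, indirect_levels, unused_flags;
	};

	struct old_dx_root {	/* the layout the patch removes */
		struct fake_dirent dot;     char dot_name[4];
		struct fake_dirent dotdot;  char dotdot_name[4];
		struct dx_root_info info;
	};

	int main(void)
	{
		size_t off = EXT4_DIR_REC_LEN(1) + EXT4_DIR_REC_LEN(2);

		/* dx_get_dx_info() and the old field land on the same byte */
		assert(off == offsetof(struct old_dx_root, info));	/* both 24 */
		printf("dx_root_info at offset %zu in the root block\n", off);
		return 0;
	}
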
diff --git a/ldiskfs/kernel_patches/patches/linux-5.4/ext4-lookup-dotdot.patch b/ldiskfs/kernel_patches/patches/linux-5.4/ext4-lookup-dotdot.patch
new file mode 100644 (file)
index 0000000..f825eaa
--- /dev/null
@@ -0,0 +1,37 @@
+diff --git a/fs/ext4/namei.c b/fs/ext4/namei.c
+index cd01c4a..0aefa8e 100644
+--- a/fs/ext4/namei.c
++++ b/fs/ext4/namei.c
+@@ -1663,6 +1663,32 @@ static struct dentry *ext4_lookup(struct inode *dir, struct dentry *dentry, unsi
+                       return ERR_PTR(-EPERM);
+               }
+       }
++      /* ".." shouldn't go into dcache to preserve dcache hierarchy
++       * otherwise we'll get parent being a child of actual child.
++       * see bug 10458 for details -bzzz */
++      if (inode && (dentry->d_name.name[0] == '.' &&
++                    (dentry->d_name.len == 1 || (dentry->d_name.len == 2 &&
++                                           dentry->d_name.name[1] == '.')))) {
++              struct dentry *goal = NULL;
++
++              /* first, look for an existing dentry - any one is good */
++              goal = d_find_any_alias(inode);
++              if (goal == NULL) {
++                      spin_lock(&dentry->d_lock);
++                      /* there is no alias, we need to make current dentry:
++                       *  a) inaccessible for __d_lookup()
++                       *  b) inaccessible for iopen */
++                      J_ASSERT(hlist_unhashed(&dentry->d_u.d_alias));
++                      dentry->d_flags |= DCACHE_NFSFS_RENAMED;
++                      /* this is d_instantiate() ... */
++                      hlist_add_head(&dentry->d_u.d_alias, &inode->i_dentry);
++                      dentry->d_inode = inode;
++                      spin_unlock(&dentry->d_lock);
++              }
++              if (goal)
++                      iput(inode);
++              return goal;
++      }
+ #ifdef CONFIG_UNICODE
+       if (!inode && IS_CASEFOLDED(dir)) {
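The hook above fires only for the names "." and ".."; a tiny standalone model
of that test (the dentry qstr is simplified to a name pointer plus length):

	#include <assert.h>

	static int is_dot_or_dotdot(const char *name, unsigned len)
	{
		return name[0] == '.' &&
		       (len == 1 || (len == 2 && name[1] == '.'));
	}

	int main(void)
	{
		assert(is_dot_or_dotdot(".", 1));
		assert(is_dot_or_dotdot("..", 2));
		assert(!is_dot_or_dotdot(".x", 2));	/* hidden file, not ".." */
		assert(!is_dot_or_dotdot("a", 1));
		return 0;
	}
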
diff --git a/ldiskfs/kernel_patches/patches/linux-5.4/ext4-misc.patch b/ldiskfs/kernel_patches/patches/linux-5.4/ext4-misc.patch
new file mode 100644 (file)
index 0000000..bbcd8ca
--- /dev/null
@@ -0,0 +1,24 @@
+diff --git a/fs/ext4/ext4.h b/fs/ext4/ext4.h
+index 1342fe9..22250a4 100644
+--- a/fs/ext4/ext4.h
++++ b/fs/ext4/ext4.h
+@@ -2686,6 +2686,8 @@ extern void ext4_mark_bitmap_end(int start_bit, int end_bit, char *bitmap);
+ extern int ext4_init_inode_table(struct super_block *sb,
+                                ext4_group_t group, int barrier);
+ extern void ext4_end_bitmap_read(struct buffer_head *bh, int uptodate);
++extern struct buffer_head *
++ext4_read_inode_bitmap(struct super_block *sb, ext4_group_t block_group);
+ /* mballoc.c */
+ extern const struct file_operations ext4_seq_prealloc_table_fops;
+@@ -2805,6 +2807,10 @@ extern int ext4_ext_migrate(struct inode *);
+ extern int ext4_ind_migrate(struct inode *inode);
+ /* namei.c */
++extern struct buffer_head *ext4_append(handle_t *handle,
++                                     struct inode *inode,
++                                     ext4_lblk_t *block);
++
+ extern int ext4_dirblock_csum_verify(struct inode *inode,
+                                    struct buffer_head *bh);
+ extern int ext4_orphan_add(handle_t *, struct inode *);
diff --git a/ldiskfs/kernel_patches/patches/linux-5.4/ext4-pdirop.patch b/ldiskfs/kernel_patches/patches/linux-5.4/ext4-pdirop.patch
new file mode 100644 (file)
index 0000000..8ddf181
--- /dev/null
@@ -0,0 +1,1990 @@
+From 1a0f7f0b9c13ef0aa86e125f350b6733bff8db3c Mon Sep 17 00:00:00 2001
+From: Shaun Tancheff <stancheff@cray.com>
+Date: Wed, 15 Jan 2020 07:35:13 -0600
+Subject: [PATCH] Single-directory performance is critical for HPC workloads.
+ In a typical use case an application creates a separate output file for each
+ node and task in a job. As nodes and tasks increase, hundreds of thousands of
+ files may be created in a single directory within a short window of time.
+ Today, both filename lookups and file-system-modifying operations (such as
+ create and unlink) are protected by a single lock for an entire ldiskfs
+ directory. The PDO project removes this bottleneck by introducing a parallel
+ locking mechanism for entire ldiskfs directories. This work enables
+ multiple application threads to simultaneously look up, create and unlink in
+ parallel.
+
+This patch contains:
+ - pdirops support for ldiskfs
+ - integrate with osd-ldiskfs
+---
+ fs/ext4/Makefile           |   1 +
+ fs/ext4/ext4.h             |  78 ++++
+ fs/ext4/htree_lock.c       | 891 +++++++++++++++++++++++++++++++++++++
+ fs/ext4/namei.c            | 454 +++++++++++++++++--
+ fs/ext4/super.c            |   1 +
+ include/linux/htree_lock.h | 187 ++++++++
+ 6 files changed, 1572 insertions(+), 40 deletions(-)
+ create mode 100644 fs/ext4/htree_lock.c
+ create mode 100644 include/linux/htree_lock.h
+
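The heart of the patch is the htree_lock added below, with five modes (EX, PW,
PR, CW, CR). As a reading aid, here is a userspace model of the top-level
compatibility table defined further down in htree_lock.c; the table values are
transcribed from the patch, and the driver loop is illustrative only:

	#include <stdio.h>

	enum { EX, PW, PR, CW, CR, MODES };

	/* transcribed from HTREE_LOCK_COMPAT_* in htree_lock.c below */
	static const int compat[MODES] = {
		[EX] = 0,
		[PW] = (1 << CR),
		[PR] = (1 << CR) | (1 << PR),
		[CW] = (1 << CR) | (1 << CW),
		[CR] = (1 << CR) | (1 << CW) | (1 << PR) | (1 << PW),
	};

	/* a new lock is granted only if every already-granted mode is
	 * in its compatibility set, mirroring htree_lock_internal() */
	static int can_grant(int mode, int granted_mask)
	{
		return (compat[mode] & granted_mask) == granted_mask;
	}

	int main(void)
	{
		static const char *name[MODES] = { "EX", "PW", "PR", "CW", "CR" };

		for (int held = 0; held < MODES; held++)
			for (int want = 0; want < MODES; want++)
				printf("%s vs held %s: %s\n",
				       name[want], name[held],
				       can_grant(want, 1 << held) ?
				       "grant" : "block");
		return 0;
	}
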
+diff --git a/fs/ext4/Makefile b/fs/ext4/Makefile
+index b17ddc2..45a68cb 100644
+--- a/fs/ext4/Makefile
++++ b/fs/ext4/Makefile
+@@ -7,6 +7,7 @@ obj-$(CONFIG_EXT4_FS) += ext4.o
+ ext4-y        := balloc.o bitmap.o block_validity.o dir.o ext4_jbd2.o extents.o \
+               extents_status.o file.o fsmap.o fsync.o hash.o ialloc.o \
++              htree_lock.o \
+               indirect.o inline.o inode.o ioctl.o mballoc.o migrate.o \
+               mmp.o move_extent.o namei.o page-io.o readpage.o resize.o \
+               super.o symlink.o sysfs.o xattr.o xattr_trusted.o xattr_user.o
+diff --git a/fs/ext4/ext4.h b/fs/ext4/ext4.h
+index 78893a6..72c355d 100644
+--- a/fs/ext4/ext4.h
++++ b/fs/ext4/ext4.h
+@@ -29,6 +29,7 @@
+ #include <linux/timer.h>
+ #include <linux/version.h>
+ #include <linux/wait.h>
++#include <linux/htree_lock.h>
+ #include <linux/sched/signal.h>
+ #include <linux/blockgroup_lock.h>
+ #include <linux/percpu_counter.h>
+@@ -961,6 +962,9 @@ struct ext4_inode_info {
+       __u32   i_dtime;
+       ext4_fsblk_t    i_file_acl;
++      /* following fields for parallel directory operations -bzzz */
++      struct semaphore i_append_sem;
++
+       /*
+        * i_block_group is the number of the block group which contains
+        * this file's inode.  Constant across the lifetime of the inode,
+@@ -2181,6 +2185,72 @@ struct dx_hash_info
+  */
+ #define HASH_NB_ALWAYS                1
++/* assume name-hash is protected by upper layer */
++#define EXT4_HTREE_LOCK_HASH  0
++
++enum ext4_pdo_lk_types {
++#if EXT4_HTREE_LOCK_HASH
++      EXT4_LK_HASH,
++#endif
++      EXT4_LK_DX,             /* index block */
++      EXT4_LK_DE,             /* directory entry block */
++      EXT4_LK_SPIN,           /* spinlock */
++      EXT4_LK_MAX,
++};
++
++/* read-only bit */
++#define EXT4_LB_RO(b)         (1 << (b))
++/* read + write, high bits for writer */
++#define EXT4_LB_RW(b)         ((1 << (b)) | (1 << (EXT4_LK_MAX + (b))))
++
++enum ext4_pdo_lock_bits {
++      /* DX lock bits */
++      EXT4_LB_DX_RO           = EXT4_LB_RO(EXT4_LK_DX),
++      EXT4_LB_DX              = EXT4_LB_RW(EXT4_LK_DX),
++      /* DE lock bits */
++      EXT4_LB_DE_RO           = EXT4_LB_RO(EXT4_LK_DE),
++      EXT4_LB_DE              = EXT4_LB_RW(EXT4_LK_DE),
++      /* DX spinlock bits */
++      EXT4_LB_SPIN_RO         = EXT4_LB_RO(EXT4_LK_SPIN),
++      EXT4_LB_SPIN            = EXT4_LB_RW(EXT4_LK_SPIN),
++      /* accurate searching */
++      EXT4_LB_EXACT           = EXT4_LB_RO(EXT4_LK_MAX << 1),
++};
++
++enum ext4_pdo_lock_opc {
++      /* external */
++      EXT4_HLOCK_READDIR      = (EXT4_LB_DE_RO | EXT4_LB_DX_RO),
++      EXT4_HLOCK_LOOKUP       = (EXT4_LB_DE_RO | EXT4_LB_SPIN_RO |
++                                 EXT4_LB_EXACT),
++      EXT4_HLOCK_DEL          = (EXT4_LB_DE | EXT4_LB_SPIN_RO |
++                                 EXT4_LB_EXACT),
++      EXT4_HLOCK_ADD          = (EXT4_LB_DE | EXT4_LB_SPIN_RO),
++
++      /* internal */
++      EXT4_HLOCK_LOOKUP_SAFE  = (EXT4_LB_DE_RO | EXT4_LB_DX_RO |
++                                 EXT4_LB_EXACT),
++      EXT4_HLOCK_DEL_SAFE     = (EXT4_LB_DE | EXT4_LB_DX_RO | EXT4_LB_EXACT),
++      EXT4_HLOCK_SPLIT        = (EXT4_LB_DE | EXT4_LB_DX | EXT4_LB_SPIN),
++};
++
++extern struct htree_lock_head *ext4_htree_lock_head_alloc(unsigned hbits);
++#define ext4_htree_lock_head_free(lhead)      htree_lock_head_free(lhead)
++
++extern struct htree_lock *ext4_htree_lock_alloc(void);
++#define ext4_htree_lock_free(lck)             htree_lock_free(lck)
++
++extern void ext4_htree_lock(struct htree_lock *lck,
++                          struct htree_lock_head *lhead,
++                          struct inode *dir, unsigned flags);
++#define ext4_htree_unlock(lck)                  htree_unlock(lck)
++
++extern struct buffer_head *ext4_find_entry_locked(struct inode *dir,
++                                      const struct qstr *d_name,
++                                      struct ext4_dir_entry_2 **res_dir,
++                                      int *inlined, struct htree_lock *lck);
++extern int ext4_add_entry_locked(handle_t *handle, struct dentry *dentry,
++                    struct inode *inode, struct htree_lock *lck);
++
+ struct ext4_filename {
+       const struct qstr *usr_fname;
+       struct fscrypt_str disk_name;
+@@ -2548,8 +2618,16 @@ void ext4_insert_dentry(struct inode *inode,
+                       struct ext4_filename *fname, void *data);
+ static inline void ext4_update_dx_flag(struct inode *inode)
+ {
++      /* Disable it for ldiskfs, because going from a DX directory to
++       * a non-DX directory while it is in use will completely break
++       * the htree-locking.
++       * If we really want to support this operation in the future,
++       * we would need to take an exclusive lock on the directory here,
++       * which would increase the complexity of the code */
++#if 0
+       if (!ext4_has_feature_dir_index(inode->i_sb))
+               ext4_clear_inode_flag(inode, EXT4_INODE_INDEX);
++#endif
+ }
+ static const unsigned char ext4_filetype_table[] = {
+       DT_UNKNOWN, DT_REG, DT_DIR, DT_CHR, DT_BLK, DT_FIFO, DT_SOCK, DT_LNK
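The EXT4_LB_RO/EXT4_LB_RW encoding above packs reader bits into the low
EXT4_LK_MAX positions and writer bits into the next EXT4_LK_MAX. A short
sketch of the resulting bit patterns, assuming EXT4_HTREE_LOCK_HASH stays 0
(so EXT4_LK_MAX is 3, as in this hunk):

	#include <stdio.h>

	enum { LK_DX, LK_DE, LK_SPIN, LK_MAX };	/* mirrors enum ext4_pdo_lk_types */

	#define LB_RO(b)	(1 << (b))
	#define LB_RW(b)	((1 << (b)) | (1 << (LK_MAX + (b))))
	#define LB_EXACT	LB_RO(LK_MAX << 1)	/* "accurate searching" bit */

	int main(void)
	{
		/* EXT4_HLOCK_LOOKUP = DE read-only | SPIN read-only | EXACT */
		unsigned lookup = LB_RO(LK_DE) | LB_RO(LK_SPIN) | LB_EXACT;
		/* EXT4_HLOCK_ADD = DE read+write | SPIN read-only */
		unsigned add = LB_RW(LK_DE) | LB_RO(LK_SPIN);

		printf("LOOKUP bits: 0x%02x\n", lookup);	/* 0x46 */
		printf("ADD bits:    0x%02x\n", add);		/* 0x16 */
		return 0;
	}
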
+diff --git a/fs/ext4/htree_lock.c b/fs/ext4/htree_lock.c
+new file mode 100644
+index 0000000..ee407ed
+--- /dev/null
++++ b/fs/ext4/htree_lock.c
+@@ -0,0 +1,891 @@
++/*
++ * fs/ext4/htree_lock.c
++ *
++ * Copyright (c) 2011, 2012, Intel Corporation.
++ *
++ * Author: Liang Zhen <liang@whamcloud.com>
++ */
++#include <linux/jbd2.h>
++#include <linux/hash.h>
++#include <linux/module.h>
++#include <linux/htree_lock.h>
++
++enum {
++      HTREE_LOCK_BIT_EX       = (1 << HTREE_LOCK_EX),
++      HTREE_LOCK_BIT_PW       = (1 << HTREE_LOCK_PW),
++      HTREE_LOCK_BIT_PR       = (1 << HTREE_LOCK_PR),
++      HTREE_LOCK_BIT_CW       = (1 << HTREE_LOCK_CW),
++      HTREE_LOCK_BIT_CR       = (1 << HTREE_LOCK_CR),
++};
++
++enum {
++      HTREE_LOCK_COMPAT_EX    = 0,
++      HTREE_LOCK_COMPAT_PW    = HTREE_LOCK_COMPAT_EX | HTREE_LOCK_BIT_CR,
++      HTREE_LOCK_COMPAT_PR    = HTREE_LOCK_COMPAT_PW | HTREE_LOCK_BIT_PR,
++      HTREE_LOCK_COMPAT_CW    = HTREE_LOCK_COMPAT_PW | HTREE_LOCK_BIT_CW,
++      HTREE_LOCK_COMPAT_CR    = HTREE_LOCK_COMPAT_CW | HTREE_LOCK_BIT_PR |
++                                HTREE_LOCK_BIT_PW,
++};
++
++static int htree_lock_compat[] = {
++      [HTREE_LOCK_EX]         HTREE_LOCK_COMPAT_EX,
++      [HTREE_LOCK_PW]         HTREE_LOCK_COMPAT_PW,
++      [HTREE_LOCK_PR]         HTREE_LOCK_COMPAT_PR,
++      [HTREE_LOCK_CW]         HTREE_LOCK_COMPAT_CW,
++      [HTREE_LOCK_CR]         HTREE_LOCK_COMPAT_CR,
++};
++
++/* max allowed htree-lock depth.
++ * We only need depth=3 for ext4, although users can request a higher value. */
++#define HTREE_LOCK_DEP_MAX    16
++
++#ifdef HTREE_LOCK_DEBUG
++
++static char *hl_name[] = {
++      [HTREE_LOCK_EX]         "EX",
++      [HTREE_LOCK_PW]         "PW",
++      [HTREE_LOCK_PR]         "PR",
++      [HTREE_LOCK_CW]         "CW",
++      [HTREE_LOCK_CR]         "CR",
++};
++
++/* lock stats */
++struct htree_lock_node_stats {
++      unsigned long long      blocked[HTREE_LOCK_MAX];
++      unsigned long long      granted[HTREE_LOCK_MAX];
++      unsigned long long      retried[HTREE_LOCK_MAX];
++      unsigned long long      events;
++};
++
++struct htree_lock_stats {
++      struct htree_lock_node_stats    nodes[HTREE_LOCK_DEP_MAX];
++      unsigned long long      granted[HTREE_LOCK_MAX];
++      unsigned long long      blocked[HTREE_LOCK_MAX];
++};
++
++static struct htree_lock_stats hl_stats;
++
++void htree_lock_stat_reset(void)
++{
++      memset(&hl_stats, 0, sizeof(hl_stats));
++}
++
++void htree_lock_stat_print(int depth)
++{
++      int     i;
++      int     j;
++
++      printk(KERN_DEBUG "HTREE LOCK STATS:\n");
++      for (i = 0; i < HTREE_LOCK_MAX; i++) {
++              printk(KERN_DEBUG "[%s]: G [%10llu], B [%10llu]\n",
++                     hl_name[i], hl_stats.granted[i], hl_stats.blocked[i]);
++      }
++      for (i = 0; i < depth; i++) {
++              printk(KERN_DEBUG "HTREE CHILD [%d] STATS:\n", i);
++              for (j = 0; j < HTREE_LOCK_MAX; j++) {
++                      printk(KERN_DEBUG
++                              "[%s]: G [%10llu], B [%10llu], R [%10llu]\n",
++                              hl_name[j], hl_stats.nodes[i].granted[j],
++                              hl_stats.nodes[i].blocked[j],
++                              hl_stats.nodes[i].retried[j]);
++              }
++      }
++}
++
++#define lk_grant_inc(m)       do { hl_stats.granted[m]++; } while (0)
++#define lk_block_inc(m)       do { hl_stats.blocked[m]++; } while (0)
++#define ln_grant_inc(d, m)    do { hl_stats.nodes[d].granted[m]++; } while (0)
++#define ln_block_inc(d, m)    do { hl_stats.nodes[d].blocked[m]++; } while (0)
++#define ln_retry_inc(d, m)    do { hl_stats.nodes[d].retried[m]++; } while (0)
++#define ln_event_inc(d)       do { hl_stats.nodes[d].events++; } while (0)
++
++#else /* !DEBUG */
++
++void htree_lock_stat_reset(void) {}
++void htree_lock_stat_print(int depth) {}
++
++#define lk_grant_inc(m)             do {} while (0)
++#define lk_block_inc(m)             do {} while (0)
++#define ln_grant_inc(d, m)    do {} while (0)
++#define ln_block_inc(d, m)    do {} while (0)
++#define ln_retry_inc(d, m)    do {} while (0)
++#define ln_event_inc(d)             do {} while (0)
++
++#endif /* DEBUG */
++
++EXPORT_SYMBOL(htree_lock_stat_reset);
++EXPORT_SYMBOL(htree_lock_stat_print);
++
++#define HTREE_DEP_ROOT                  (-1)
++
++#define htree_spin_lock(lhead, dep)                           \
++      bit_spin_lock((dep) + 1, &(lhead)->lh_lock)
++#define htree_spin_unlock(lhead, dep)                         \
++      bit_spin_unlock((dep) + 1, &(lhead)->lh_lock)
++
++#define htree_key_event_ignore(child, ln)                     \
++      (!((child)->lc_events & (1 << (ln)->ln_mode)))
++
++static int
++htree_key_list_empty(struct htree_lock_node *ln)
++{
++      return list_empty(&ln->ln_major_list) && list_empty(&ln->ln_minor_list);
++}
++
++static void
++htree_key_list_del_init(struct htree_lock_node *ln)
++{
++      struct htree_lock_node *tmp = NULL;
++
++      if (!list_empty(&ln->ln_minor_list)) {
++              tmp = list_entry(ln->ln_minor_list.next,
++                               struct htree_lock_node, ln_minor_list);
++              list_del_init(&ln->ln_minor_list);
++      }
++
++      if (list_empty(&ln->ln_major_list))
++              return;
++
++      if (tmp == NULL) { /* not on minor key list */
++              list_del_init(&ln->ln_major_list);
++      } else {
++              BUG_ON(!list_empty(&tmp->ln_major_list));
++              list_replace_init(&ln->ln_major_list, &tmp->ln_major_list);
++      }
++}
++
++static void
++htree_key_list_replace_init(struct htree_lock_node *old,
++                          struct htree_lock_node *new)
++{
++      if (!list_empty(&old->ln_major_list))
++              list_replace_init(&old->ln_major_list, &new->ln_major_list);
++
++      if (!list_empty(&old->ln_minor_list))
++              list_replace_init(&old->ln_minor_list, &new->ln_minor_list);
++}
++
++static void
++htree_key_event_enqueue(struct htree_lock_child *child,
++                      struct htree_lock_node *ln, int dep, void *event)
++{
++      struct htree_lock_node *tmp;
++
++      /* NB: ALWAYS called holding lhead::lh_lock(dep) */
++      BUG_ON(ln->ln_mode == HTREE_LOCK_NL);
++      if (event == NULL || htree_key_event_ignore(child, ln))
++              return;
++
++      /* shouldn't be a very long list */
++      list_for_each_entry(tmp, &ln->ln_alive_list, ln_alive_list) {
++              if (tmp->ln_mode == HTREE_LOCK_NL) {
++                      ln_event_inc(dep);
++                      if (child->lc_callback != NULL)
++                              child->lc_callback(tmp->ln_ev_target, event);
++              }
++      }
++}
++
++static int
++htree_node_lock_enqueue(struct htree_lock *newlk, struct htree_lock *curlk,
++                      unsigned dep, int wait, void *event)
++{
++      struct htree_lock_child *child = &newlk->lk_head->lh_children[dep];
++      struct htree_lock_node *newln = &newlk->lk_nodes[dep];
++      struct htree_lock_node *curln = &curlk->lk_nodes[dep];
++
++      /* NB: ALWAYS called holding lhead::lh_lock(dep) */
++      /* NB: we only expect PR/PW lock modes here; only these two modes are
++       * allowed for htree_node_lock (asserted in htree_node_lock_internal).
++       * NL is only used for listeners; users can't directly request NL mode */
++      if ((curln->ln_mode == HTREE_LOCK_NL) ||
++          (curln->ln_mode != HTREE_LOCK_PW &&
++           newln->ln_mode != HTREE_LOCK_PW)) {
++              /* no conflict, attach it on granted list of @curlk */
++              if (curln->ln_mode != HTREE_LOCK_NL) {
++                      list_add(&newln->ln_granted_list,
++                               &curln->ln_granted_list);
++              } else {
++                      /* replace key owner */
++                      htree_key_list_replace_init(curln, newln);
++              }
++
++              list_add(&newln->ln_alive_list, &curln->ln_alive_list);
++              htree_key_event_enqueue(child, newln, dep, event);
++              ln_grant_inc(dep, newln->ln_mode);
++              return 1; /* still hold lh_lock */
++      }
++
++      if (!wait) { /* can't grant and don't want to wait */
++              ln_retry_inc(dep, newln->ln_mode);
++              newln->ln_mode = HTREE_LOCK_INVAL;
++              return -1; /* don't wait and just return -1 */
++      }
++
++      newlk->lk_task = current;
++      set_current_state(TASK_UNINTERRUPTIBLE);
++      /* conflict, attach it on blocked list of curlk */
++      list_add_tail(&newln->ln_blocked_list, &curln->ln_blocked_list);
++      list_add(&newln->ln_alive_list, &curln->ln_alive_list);
++      ln_block_inc(dep, newln->ln_mode);
++
++      htree_spin_unlock(newlk->lk_head, dep);
++      /* wait to be given the lock */
++      if (newlk->lk_task != NULL)
++              schedule();
++      /* granted, no doubt, wake up will set me RUNNING */
++      if (event == NULL || htree_key_event_ignore(child, newln))
++              return 0; /* granted without lh_lock */
++
++      htree_spin_lock(newlk->lk_head, dep);
++      htree_key_event_enqueue(child, newln, dep, event);
++      return 1; /* still hold lh_lock */
++}
++
++/*
++ * get PR/PW access to particular tree-node according to @dep and @key,
++ * it will return -1 if @wait is false and can't immediately grant this lock.
++ * All listeners(HTREE_LOCK_NL) on @dep and with the same @key will get
++ * @event if it's not NULL.
++ * NB: ALWAYS called holding lhead::lh_lock
++ */
++static int
++htree_node_lock_internal(struct htree_lock_head *lhead, struct htree_lock *lck,
++                       htree_lock_mode_t mode, u32 key, unsigned dep,
++                       int wait, void *event)
++{
++      LIST_HEAD(list);
++      struct htree_lock       *tmp;
++      struct htree_lock       *tmp2;
++      u16                     major;
++      u16                     minor;
++      u8                      reverse;
++      u8                      ma_bits;
++      u8                      mi_bits;
++
++      BUG_ON(mode != HTREE_LOCK_PW && mode != HTREE_LOCK_PR);
++      BUG_ON(htree_node_is_granted(lck, dep));
++
++      key = hash_long(key, lhead->lh_hbits);
++
++      mi_bits = lhead->lh_hbits >> 1;
++      ma_bits = lhead->lh_hbits - mi_bits;
++
++      lck->lk_nodes[dep].ln_major_key = major = key & ((1U << ma_bits) - 1);
++      lck->lk_nodes[dep].ln_minor_key = minor = key >> ma_bits;
++      lck->lk_nodes[dep].ln_mode = mode;
++
++      /*
++       * The major key list is an ordered list, so searches are started
++       * at the end of the list that is numerically closer to major_key,
++       * so at most half of the list will be walked (for well-distributed
++       * keys). The list traversal aborts early if the expected key
++       * location is passed.
++       */
++      reverse = (major >= (1 << (ma_bits - 1)));
++
++      if (reverse) {
++              list_for_each_entry_reverse(tmp,
++                                      &lhead->lh_children[dep].lc_list,
++                                      lk_nodes[dep].ln_major_list) {
++                      if (tmp->lk_nodes[dep].ln_major_key == major) {
++                              goto search_minor;
++
++                      } else if (tmp->lk_nodes[dep].ln_major_key < major) {
++                              /* attach _after_ @tmp */
++                              list_add(&lck->lk_nodes[dep].ln_major_list,
++                                       &tmp->lk_nodes[dep].ln_major_list);
++                              goto out_grant_major;
++                      }
++              }
++
++              list_add(&lck->lk_nodes[dep].ln_major_list,
++                       &lhead->lh_children[dep].lc_list);
++              goto out_grant_major;
++
++      } else {
++              list_for_each_entry(tmp, &lhead->lh_children[dep].lc_list,
++                                  lk_nodes[dep].ln_major_list) {
++                      if (tmp->lk_nodes[dep].ln_major_key == major) {
++                              goto search_minor;
++
++                      } else if (tmp->lk_nodes[dep].ln_major_key > major) {
++                              /* insert _before_ @tmp */
++                              list_add_tail(&lck->lk_nodes[dep].ln_major_list,
++                                      &tmp->lk_nodes[dep].ln_major_list);
++                              goto out_grant_major;
++                      }
++              }
++
++              list_add_tail(&lck->lk_nodes[dep].ln_major_list,
++                            &lhead->lh_children[dep].lc_list);
++              goto out_grant_major;
++      }
++
++ search_minor:
++      /*
++       * NB: the minor_key list doesn't have a "head"; @list is just a
++       * temporary stub to help the list search, and it must be removed
++       * once the search is done.
++       * The minor_key list is an ordered list too.
++       */
++      list_add_tail(&list, &tmp->lk_nodes[dep].ln_minor_list);
++
++      reverse = (minor >= (1 << (mi_bits - 1)));
++
++      if (reverse) {
++              list_for_each_entry_reverse(tmp2, &list,
++                                          lk_nodes[dep].ln_minor_list) {
++                      if (tmp2->lk_nodes[dep].ln_minor_key == minor) {
++                              goto out_enqueue;
++
++                      } else if (tmp2->lk_nodes[dep].ln_minor_key < minor) {
++                              /* attach _after_ @tmp2 */
++                              list_add(&lck->lk_nodes[dep].ln_minor_list,
++                                       &tmp2->lk_nodes[dep].ln_minor_list);
++                              goto out_grant_minor;
++                      }
++              }
++
++              list_add(&lck->lk_nodes[dep].ln_minor_list, &list);
++
++      } else {
++              list_for_each_entry(tmp2, &list,
++                                  lk_nodes[dep].ln_minor_list) {
++                      if (tmp2->lk_nodes[dep].ln_minor_key == minor) {
++                              goto out_enqueue;
++
++                      } else if (tmp2->lk_nodes[dep].ln_minor_key > minor) {
++                              /* insert _before_ @tmp2 */
++                              list_add_tail(&lck->lk_nodes[dep].ln_minor_list,
++                                      &tmp2->lk_nodes[dep].ln_minor_list);
++                              goto out_grant_minor;
++                      }
++              }
++
++              list_add_tail(&lck->lk_nodes[dep].ln_minor_list, &list);
++      }
++
++ out_grant_minor:
++      if (list.next == &lck->lk_nodes[dep].ln_minor_list) {
++              /* new lock @lck is the first one on minor_key list, which
++               * means it has the smallest minor_key and it should
++               * replace @tmp as minor_key owner */
++              list_replace_init(&tmp->lk_nodes[dep].ln_major_list,
++                                &lck->lk_nodes[dep].ln_major_list);
++      }
++      /* remove the temporary head */
++      list_del(&list);
++
++ out_grant_major:
++      ln_grant_inc(dep, lck->lk_nodes[dep].ln_mode);
++      return 1; /* granted with holding lh_lock */
++
++ out_enqueue:
++      list_del(&list); /* remove temprary head */
++      return htree_node_lock_enqueue(lck, tmp2, dep, wait, event);
++}
++
++/*
++ * release the key of @lck at level @dep, and grant any blocked locks.
++ * The caller will still listen on @key if @event is not NULL, which means
++ * the caller can see an event (via event_cb) while any lock with
++ * the same key at level @dep is being granted.
++ * NB: ALWAYS called holding lhead::lh_lock
++ * NB: listener will not block anyone because listening mode is HTREE_LOCK_NL
++ */
++static void
++htree_node_unlock_internal(struct htree_lock_head *lhead,
++                         struct htree_lock *curlk, unsigned dep, void *event)
++{
++      struct htree_lock_node  *curln = &curlk->lk_nodes[dep];
++      struct htree_lock       *grtlk = NULL;
++      struct htree_lock_node  *grtln;
++      struct htree_lock       *poslk;
++      struct htree_lock       *tmplk;
++
++      if (!htree_node_is_granted(curlk, dep))
++              return;
++
++      if (!list_empty(&curln->ln_granted_list)) {
++              /* there is another granted lock */
++              grtlk = list_entry(curln->ln_granted_list.next,
++                                 struct htree_lock,
++                                 lk_nodes[dep].ln_granted_list);
++              list_del_init(&curln->ln_granted_list);
++      }
++
++      if (grtlk == NULL && !list_empty(&curln->ln_blocked_list)) {
++              /*
++               * @curlk is the only granted lock, so we confirmed:
++               * a) curln is key owner (attached on major/minor_list),
++               *    so if there is any blocked lock, it should be attached
++               *    on curln->ln_blocked_list
++               * b) we always can grant the first blocked lock
++               */
++              grtlk = list_entry(curln->ln_blocked_list.next,
++                                 struct htree_lock,
++                                 lk_nodes[dep].ln_blocked_list);
++              BUG_ON(grtlk->lk_task == NULL);
++              wake_up_process(grtlk->lk_task);
++      }
++
++      if (event != NULL &&
++          lhead->lh_children[dep].lc_events != HTREE_EVENT_DISABLE) {
++              curln->ln_ev_target = event;
++              curln->ln_mode = HTREE_LOCK_NL; /* listen! */
++      } else {
++              curln->ln_mode = HTREE_LOCK_INVAL;
++      }
++
++      if (grtlk == NULL) { /* I must be the only one locking this key */
++              struct htree_lock_node *tmpln;
++
++              BUG_ON(htree_key_list_empty(curln));
++
++              if (curln->ln_mode == HTREE_LOCK_NL) /* listening */
++                      return;
++
++              /* not listening */
++              if (list_empty(&curln->ln_alive_list)) { /* no more listener */
++                      htree_key_list_del_init(curln);
++                      return;
++              }
++
++              tmpln = list_entry(curln->ln_alive_list.next,
++                                 struct htree_lock_node, ln_alive_list);
++
++              BUG_ON(tmpln->ln_mode != HTREE_LOCK_NL);
++
++              htree_key_list_replace_init(curln, tmpln);
++              list_del_init(&curln->ln_alive_list);
++
++              return;
++      }
++
++      /* have a granted lock */
++      grtln = &grtlk->lk_nodes[dep];
++      if (!list_empty(&curln->ln_blocked_list)) {
++              /* only key owner can be on both lists */
++              BUG_ON(htree_key_list_empty(curln));
++
++              if (list_empty(&grtln->ln_blocked_list)) {
++                      list_add(&grtln->ln_blocked_list,
++                               &curln->ln_blocked_list);
++              }
++              list_del_init(&curln->ln_blocked_list);
++      }
++      /*
++       * NB: this is the tricky part:
++       * We have only two modes for child-lock (PR and PW), also,
++       * only owner of the key (attached on major/minor_list) can be on
++       * both blocked_list and granted_list, so @grtlk must be one
++       * of these two cases:
++       *
++       * a) @grtlk is taken from granted_list, which means we've granted
++       *    more than one lock so @grtlk has to be PR, the first blocked
++       *    lock must be PW and we can't grant it at all.
++       *    So even if @grtlk is not the owner of the key (empty
++       *    blocked_list), we don't care, because we can't grant any lock.
++       * b) we just grant a new lock which is taken from head of blocked
++       *    list, and it should be the first granted lock, and it should
++       *    be the first one linked on blocked_list.
++       *
++       * Either way, we can get correct result by iterating blocked_list
++       * of @grtlk, and don't have to bother on how to find out
++       * owner of current key.
++       */
++      list_for_each_entry_safe(poslk, tmplk, &grtln->ln_blocked_list,
++                               lk_nodes[dep].ln_blocked_list) {
++              if (grtlk->lk_nodes[dep].ln_mode == HTREE_LOCK_PW ||
++                  poslk->lk_nodes[dep].ln_mode == HTREE_LOCK_PW)
++                      break;
++              /* grant all readers */
++              list_del_init(&poslk->lk_nodes[dep].ln_blocked_list);
++              list_add(&poslk->lk_nodes[dep].ln_granted_list,
++                       &grtln->ln_granted_list);
++
++              BUG_ON(poslk->lk_task == NULL);
++              wake_up_process(poslk->lk_task);
++      }
++
++      /* if @curln is the owner of this key, replace it with @grtln */
++      if (!htree_key_list_empty(curln))
++              htree_key_list_replace_init(curln, grtln);
++
++      if (curln->ln_mode == HTREE_LOCK_INVAL)
++              list_del_init(&curln->ln_alive_list);
++}
++
++/*
++ * it's just a wrapper of htree_node_lock_internal; it returns 1 when granted
++ * and 0 only if @wait is false and the lock can't be granted immediately
++ */
++int
++htree_node_lock_try(struct htree_lock *lck, htree_lock_mode_t mode,
++                  u32 key, unsigned dep, int wait, void *event)
++{
++      struct htree_lock_head *lhead = lck->lk_head;
++      int rc;
++
++      BUG_ON(dep >= lck->lk_depth);
++      BUG_ON(lck->lk_mode == HTREE_LOCK_INVAL);
++
++      htree_spin_lock(lhead, dep);
++      rc = htree_node_lock_internal(lhead, lck, mode, key, dep, wait, event);
++      if (rc != 0)
++              htree_spin_unlock(lhead, dep);
++      return rc >= 0;
++}
++EXPORT_SYMBOL(htree_node_lock_try);
++
++/* it's a wrapper of htree_node_unlock_internal */
++void
++htree_node_unlock(struct htree_lock *lck, unsigned dep, void *event)
++{
++      struct htree_lock_head *lhead = lck->lk_head;
++
++      BUG_ON(dep >= lck->lk_depth);
++      BUG_ON(lck->lk_mode == HTREE_LOCK_INVAL);
++
++      htree_spin_lock(lhead, dep);
++      htree_node_unlock_internal(lhead, lck, dep, event);
++      htree_spin_unlock(lhead, dep);
++}
++EXPORT_SYMBOL(htree_node_unlock);
++
++/* stop listening on child-lock level @dep */
++void
++htree_node_stop_listen(struct htree_lock *lck, unsigned dep)
++{
++      struct htree_lock_node *ln = &lck->lk_nodes[dep];
++      struct htree_lock_node *tmp;
++
++      BUG_ON(htree_node_is_granted(lck, dep));
++      BUG_ON(!list_empty(&ln->ln_blocked_list));
++      BUG_ON(!list_empty(&ln->ln_granted_list));
++
++      if (!htree_node_is_listening(lck, dep))
++              return;
++
++      htree_spin_lock(lck->lk_head, dep);
++      ln->ln_mode = HTREE_LOCK_INVAL;
++      ln->ln_ev_target = NULL;
++
++      if (htree_key_list_empty(ln)) { /* not owner */
++              list_del_init(&ln->ln_alive_list);
++              goto out;
++      }
++
++      /* I'm the owner... */
++      if (list_empty(&ln->ln_alive_list)) { /* no more listener */
++              htree_key_list_del_init(ln);
++              goto out;
++      }
++
++      tmp = list_entry(ln->ln_alive_list.next,
++                       struct htree_lock_node, ln_alive_list);
++
++      BUG_ON(tmp->ln_mode != HTREE_LOCK_NL);
++      htree_key_list_replace_init(ln, tmp);
++      list_del_init(&ln->ln_alive_list);
++ out:
++      htree_spin_unlock(lck->lk_head, dep);
++}
++EXPORT_SYMBOL(htree_node_stop_listen);
++
++/* release all child-locks if we have any */
++static void
++htree_node_release_all(struct htree_lock *lck)
++{
++      int     i;
++
++      for (i = 0; i < lck->lk_depth; i++) {
++              if (htree_node_is_granted(lck, i))
++                      htree_node_unlock(lck, i, NULL);
++              else if (htree_node_is_listening(lck, i))
++                      htree_node_stop_listen(lck, i);
++      }
++}
++
++/*
++ * obtain htree lock, it could be blocked inside if there's conflict
++ * with any granted or blocked lock and @wait is true.
++ * NB: ALWAYS called holding lhead::lh_lock
++ */
++static int
++htree_lock_internal(struct htree_lock *lck, int wait)
++{
++      struct htree_lock_head *lhead = lck->lk_head;
++      int     granted = 0;
++      int     blocked = 0;
++      int     i;
++
++      for (i = 0; i < HTREE_LOCK_MAX; i++) {
++              if (lhead->lh_ngranted[i] != 0)
++                      granted |= 1 << i;
++              if (lhead->lh_nblocked[i] != 0)
++                      blocked |= 1 << i;
++      }
++      if ((htree_lock_compat[lck->lk_mode] & granted) != granted ||
++          (htree_lock_compat[lck->lk_mode] & blocked) != blocked) {
++              /* will block the current lock even if it just conflicts with
++               * any other blocked lock, so a lock like EX won't starve */
++              if (!wait)
++                      return -1;
++              lhead->lh_nblocked[lck->lk_mode]++;
++              lk_block_inc(lck->lk_mode);
++
++              lck->lk_task = current;
++              list_add_tail(&lck->lk_blocked_list, &lhead->lh_blocked_list);
++
++retry:
++              set_current_state(TASK_UNINTERRUPTIBLE);
++              htree_spin_unlock(lhead, HTREE_DEP_ROOT);
++              /* wait to be given the lock */
++              if (lck->lk_task != NULL)
++                      schedule();
++              /* granted, no doubt. wake up will set me RUNNING.
++               * Since the thread could be woken up spuriously, we need to
++               * check again whether the lock was actually granted. */
++              if (!list_empty(&lck->lk_blocked_list)) {
++                      htree_spin_lock(lhead, HTREE_DEP_ROOT);
++                      if (list_empty(&lck->lk_blocked_list)) {
++                              htree_spin_unlock(lhead, HTREE_DEP_ROOT);
++                              return 0;
++                      }
++                      goto retry;
++              }
++              return 0; /* without lh_lock */
++      }
++      lhead->lh_ngranted[lck->lk_mode]++;
++      lk_grant_inc(lck->lk_mode);
++      return 1;
++}
++
++/* release htree lock. NB: ALWAYS called holding lhead::lh_lock */
++static void
++htree_unlock_internal(struct htree_lock *lck)
++{
++      struct htree_lock_head *lhead = lck->lk_head;
++      struct htree_lock *tmp;
++      struct htree_lock *tmp2;
++      int granted = 0;
++      int i;
++
++      BUG_ON(lhead->lh_ngranted[lck->lk_mode] == 0);
++
++      lhead->lh_ngranted[lck->lk_mode]--;
++      lck->lk_mode = HTREE_LOCK_INVAL;
++
++      for (i = 0; i < HTREE_LOCK_MAX; i++) {
++              if (lhead->lh_ngranted[i] != 0)
++                      granted |= 1 << i;
++      }
++      list_for_each_entry_safe(tmp, tmp2,
++                               &lhead->lh_blocked_list, lk_blocked_list) {
++              /* conflict with any granted lock? */
++              if ((htree_lock_compat[tmp->lk_mode] & granted) != granted)
++                      break;
++
++              list_del_init(&tmp->lk_blocked_list);
++
++              BUG_ON(lhead->lh_nblocked[tmp->lk_mode] == 0);
++
++              lhead->lh_nblocked[tmp->lk_mode]--;
++              lhead->lh_ngranted[tmp->lk_mode]++;
++              granted |= 1 << tmp->lk_mode;
++
++              BUG_ON(tmp->lk_task == NULL);
++              wake_up_process(tmp->lk_task);
++      }
++}
++
++/* it's a wrapper of htree_lock_internal and an exported interface.
++ * It always returns 1 with the lock granted if @wait is true; it can return 0
++ * if @wait is false and the locking request can't be granted immediately */
++int
++htree_lock_try(struct htree_lock *lck, struct htree_lock_head *lhead,
++             htree_lock_mode_t mode, int wait)
++{
++      int     rc;
++
++      BUG_ON(lck->lk_depth > lhead->lh_depth);
++      BUG_ON(lck->lk_head != NULL);
++      BUG_ON(lck->lk_task != NULL);
++
++      lck->lk_head = lhead;
++      lck->lk_mode = mode;
++
++      htree_spin_lock(lhead, HTREE_DEP_ROOT);
++      rc = htree_lock_internal(lck, wait);
++      if (rc != 0)
++              htree_spin_unlock(lhead, HTREE_DEP_ROOT);
++      return rc >= 0;
++}
++EXPORT_SYMBOL(htree_lock_try);
++
++/* it's a wrapper of htree_unlock_internal and an exported interface.
++ * It releases all htree_node_locks and the htree_lock itself */
++void
++htree_unlock(struct htree_lock *lck)
++{
++      BUG_ON(lck->lk_head == NULL);
++      BUG_ON(lck->lk_mode == HTREE_LOCK_INVAL);
++
++      htree_node_release_all(lck);
++
++      htree_spin_lock(lck->lk_head, HTREE_DEP_ROOT);
++      htree_unlock_internal(lck);
++      htree_spin_unlock(lck->lk_head, HTREE_DEP_ROOT);
++      lck->lk_head = NULL;
++      lck->lk_task = NULL;
++}
++EXPORT_SYMBOL(htree_unlock);
++
++/* change lock mode */
++void
++htree_change_mode(struct htree_lock *lck, htree_lock_mode_t mode)
++{
++      BUG_ON(lck->lk_mode == HTREE_LOCK_INVAL);
++      lck->lk_mode = mode;
++}
++EXPORT_SYMBOL(htree_change_mode);
++
++/* release the htree lock, and lock it again with a new mode.
++ * This function first releases all htree_node_locks and the htree_lock,
++ * then tries to regain the htree_lock with the new @mode.
++ * It always returns 1 with the lock granted if @wait is true; it can return 0
++ * if @wait is false and the locking request can't be granted immediately */
++int
++htree_change_lock_try(struct htree_lock *lck, htree_lock_mode_t mode, int wait)
++{
++      struct htree_lock_head *lhead = lck->lk_head;
++      int rc;
++
++      BUG_ON(lhead == NULL);
++      BUG_ON(lck->lk_mode == mode);
++      BUG_ON(lck->lk_mode == HTREE_LOCK_INVAL || mode == HTREE_LOCK_INVAL);
++
++      htree_node_release_all(lck);
++
++      htree_spin_lock(lhead, HTREE_DEP_ROOT);
++      htree_unlock_internal(lck);
++      lck->lk_mode = mode;
++      rc = htree_lock_internal(lck, wait);
++      if (rc != 0)
++              htree_spin_unlock(lhead, HTREE_DEP_ROOT);
++      return rc >= 0;
++}
++EXPORT_SYMBOL(htree_change_lock_try);
++
++/* create a htree_lock head with @depth levels (number of child-locks);
++ * it is a per-resource structure */
++struct htree_lock_head *
++htree_lock_head_alloc(unsigned depth, unsigned hbits, unsigned priv)
++{
++      struct htree_lock_head *lhead;
++      int  i;
++
++      if (depth > HTREE_LOCK_DEP_MAX) {
++              printk(KERN_ERR "%d is larger than max htree_lock depth %d\n",
++                      depth, HTREE_LOCK_DEP_MAX);
++              return NULL;
++      }
++
++      lhead = kzalloc(offsetof(struct htree_lock_head,
++                               lh_children[depth]) + priv, GFP_NOFS);
++      if (lhead == NULL)
++              return NULL;
++
++      if (hbits < HTREE_HBITS_MIN)
++              lhead->lh_hbits = HTREE_HBITS_MIN;
++      else if (hbits > HTREE_HBITS_MAX)
++              lhead->lh_hbits = HTREE_HBITS_MAX;
++
++      lhead->lh_lock = 0;
++      lhead->lh_depth = depth;
++      INIT_LIST_HEAD(&lhead->lh_blocked_list);
++      if (priv > 0) {
++              lhead->lh_private = (void *)lhead +
++                      offsetof(struct htree_lock_head, lh_children[depth]);
++      }
++
++      for (i = 0; i < depth; i++) {
++              INIT_LIST_HEAD(&lhead->lh_children[i].lc_list);
++              lhead->lh_children[i].lc_events = HTREE_EVENT_DISABLE;
++      }
++      return lhead;
++}
++EXPORT_SYMBOL(htree_lock_head_alloc);
++
++/* free the htree_lock head */
++void
++htree_lock_head_free(struct htree_lock_head *lhead)
++{
++      int     i;
++
++      BUG_ON(!list_empty(&lhead->lh_blocked_list));
++      for (i = 0; i < lhead->lh_depth; i++)
++              BUG_ON(!list_empty(&lhead->lh_children[i].lc_list));
++      kfree(lhead);
++}
++EXPORT_SYMBOL(htree_lock_head_free);
++
++/* register event callback for @events of child-lock at level @dep */
++void
++htree_lock_event_attach(struct htree_lock_head *lhead, unsigned dep,
++                      unsigned events, htree_event_cb_t callback)
++{
++      BUG_ON(lhead->lh_depth <= dep);
++      lhead->lh_children[dep].lc_events = events;
++      lhead->lh_children[dep].lc_callback = callback;
++}
++EXPORT_SYMBOL(htree_lock_event_attach);
++
++/* allocate a htree_lock, which is a per-thread structure; @pbytes is the
++ * number of extra bytes reserved as private data for the caller */
++struct htree_lock *
++htree_lock_alloc(unsigned depth, unsigned pbytes)
++{
++      struct htree_lock *lck;
++      int i = offsetof(struct htree_lock, lk_nodes[depth]);
++
++      if (depth > HTREE_LOCK_DEP_MAX) {
++              printk(KERN_ERR "%d is larger than max htree_lock depth %d\n",
++                      depth, HTREE_LOCK_DEP_MAX);
++              return NULL;
++      }
++      lck = kzalloc(i + pbytes, GFP_NOFS);
++      if (lck == NULL)
++              return NULL;
++
++      if (pbytes != 0)
++              lck->lk_private = (void *)lck + i;
++      lck->lk_mode = HTREE_LOCK_INVAL;
++      lck->lk_depth = depth;
++      INIT_LIST_HEAD(&lck->lk_blocked_list);
++
++      for (i = 0; i < depth; i++) {
++              struct htree_lock_node *node = &lck->lk_nodes[i];
++
++              node->ln_mode = HTREE_LOCK_INVAL;
++              INIT_LIST_HEAD(&node->ln_major_list);
++              INIT_LIST_HEAD(&node->ln_minor_list);
++              INIT_LIST_HEAD(&node->ln_alive_list);
++              INIT_LIST_HEAD(&node->ln_blocked_list);
++              INIT_LIST_HEAD(&node->ln_granted_list);
++      }
++
++      return lck;
++}
++EXPORT_SYMBOL(htree_lock_alloc);
++
++/* free htree_lock node */
++void
++htree_lock_free(struct htree_lock *lck)
++{
++      BUG_ON(lck->lk_mode != HTREE_LOCK_INVAL);
++      kfree(lck);
++}
++EXPORT_SYMBOL(htree_lock_free);
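One detail of htree_node_lock_internal() worth modelling is the key split: the
hashed key is divided into major/minor halves so that each ordered list stays
short and searches can start from the nearer end. A userspace sketch (hbits=10
and the key value are invented examples; hash_long() is replaced by a fixed
stand-in):

	#include <stdint.h>
	#include <stdio.h>

	int main(void)
	{
		unsigned hbits = 10;			/* invented example */
		unsigned mi_bits = hbits >> 1;		/* 5 */
		unsigned ma_bits = hbits - mi_bits;	/* 5 */
		/* pretend hash_long() output, already reduced to hbits bits */
		uint32_t key = 0x2af & ((1U << hbits) - 1);

		uint16_t major = key & ((1U << ma_bits) - 1);
		uint16_t minor = key >> ma_bits;

		printf("key=0x%03x -> major=%u minor=%u\n", key, major, minor);
		/* search from the end nearer the key, as the comment in the
		 * patch explains */
		printf("major search direction: %s\n",
		       major >= (1 << (ma_bits - 1)) ? "reverse" : "forward");
		return 0;
	}
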
+diff --git a/fs/ext4/namei.c b/fs/ext4/namei.c
+index 91525f7..9c57749 100644
+--- a/fs/ext4/namei.c
++++ b/fs/ext4/namei.c
+@@ -55,6 +55,7 @@ struct buffer_head *ext4_append(handle_t *handle,
+                                       ext4_lblk_t *block)
+ {
+       struct buffer_head *bh;
++      struct ext4_inode_info *ei = EXT4_I(inode);
+       int err;
+       if (unlikely(EXT4_SB(inode->i_sb)->s_max_dir_size_kb &&
+@@ -62,15 +63,22 @@ struct buffer_head *ext4_append(handle_t *handle,
+                     EXT4_SB(inode->i_sb)->s_max_dir_size_kb)))
+               return ERR_PTR(-ENOSPC);
++      /* with parallel dir operations all appends
++       * have to be serialized -bzzz */
++      down(&ei->i_append_sem);
++
+       *block = inode->i_size >> inode->i_sb->s_blocksize_bits;
+       bh = ext4_bread(handle, inode, *block, EXT4_GET_BLOCKS_CREATE);
+-      if (IS_ERR(bh))
++      if (IS_ERR(bh)) {
++              up(&ei->i_append_sem);
+               return bh;
++      }
+       inode->i_size += inode->i_sb->s_blocksize;
+       EXT4_I(inode)->i_disksize = inode->i_size;
+       BUFFER_TRACE(bh, "get_write_access");
+       err = ext4_journal_get_write_access(handle, bh);
++      up(&ei->i_append_sem);
+       if (err) {
+               brelse(bh);
+               ext4_std_error(inode->i_sb, err);
+@@ -264,7 +272,8 @@ static unsigned dx_node_limit(struct inode *dir);
+ static struct dx_frame *dx_probe(struct ext4_filename *fname,
+                                struct inode *dir,
+                                struct dx_hash_info *hinfo,
+-                               struct dx_frame *frame);
++                               struct dx_frame *frame,
++                               struct htree_lock *lck);
+ static void dx_release(struct dx_frame *frames);
+ static int dx_make_map(struct inode *dir, struct ext4_dir_entry_2 *de,
+                      unsigned blocksize, struct dx_hash_info *hinfo,
+@@ -278,12 +287,13 @@ static void dx_insert_block(struct dx_frame *frame,
+ static int ext4_htree_next_block(struct inode *dir, __u32 hash,
+                                struct dx_frame *frame,
+                                struct dx_frame *frames,
+-                               __u32 *start_hash);
++                               __u32 *start_hash, struct htree_lock *lck);
+ static struct buffer_head * ext4_dx_find_entry(struct inode *dir,
+               struct ext4_filename *fname,
+-              struct ext4_dir_entry_2 **res_dir);
++              struct ext4_dir_entry_2 **res_dir, struct htree_lock *lck);
+ static int ext4_dx_add_entry(handle_t *handle, struct ext4_filename *fname,
+-                           struct inode *dir, struct inode *inode);
++                           struct inode *dir, struct inode *inode,
++                           struct htree_lock *lck);
+ /* checksumming functions */
+ void ext4_initialize_dirent_tail(struct buffer_head *bh,
+@@ -748,6 +758,227 @@ struct stats dx_show_entries(struct dx_hash_info *hinfo, struct inode *dir,
+ }
+ #endif /* DX_DEBUG */
++/* private data for htree_lock */
++struct ext4_dir_lock_data {
++      unsigned                ld_flags;  /* bits-map for lock types */
++      unsigned                ld_count;  /* # entries of the last DX block */
++      struct dx_entry         ld_at_entry; /* copy of leaf dx_entry */
++      struct dx_entry         *ld_at;    /* position of leaf dx_entry */
++};
++
++#define ext4_htree_lock_data(l)       ((struct ext4_dir_lock_data *)(l)->lk_private)
++#define ext4_find_entry(dir, name, dirent, inline) \
++                      ext4_find_entry_locked(dir, name, dirent, inline, NULL)
++#define ext4_add_entry(handle, dentry, inode) \
++                      ext4_add_entry_locked(handle, dentry, inode, NULL)
++
++/* NB: ext4_lblk_t is 32 bits so we use high bits to identify invalid blk */
++#define EXT4_HTREE_NODE_CHANGED       (0xcafeULL << 32)
++
++static void ext4_htree_event_cb(void *target, void *event)
++{
++      u64 *block = (u64 *)target;
++
++      if (*block == dx_get_block((struct dx_entry *)event))
++              *block = EXT4_HTREE_NODE_CHANGED;
++}
++
++struct htree_lock_head *ext4_htree_lock_head_alloc(unsigned hbits)
++{
++      struct htree_lock_head *lhead;
++
++      lhead = htree_lock_head_alloc(EXT4_LK_MAX, hbits, 0);
++      if (lhead != NULL) {
++              htree_lock_event_attach(lhead, EXT4_LK_SPIN, HTREE_EVENT_WR,
++                                      ext4_htree_event_cb);
++      }
++      return lhead;
++}
++EXPORT_SYMBOL(ext4_htree_lock_head_alloc);
++
++struct htree_lock *ext4_htree_lock_alloc(void)
++{
++      return htree_lock_alloc(EXT4_LK_MAX,
++                              sizeof(struct ext4_dir_lock_data));
++}
++EXPORT_SYMBOL(ext4_htree_lock_alloc);
++
++static htree_lock_mode_t ext4_htree_mode(unsigned flags)
++{
++      switch (flags) {
++      default: /* 0 or unknown flags require EX lock */
++              return HTREE_LOCK_EX;
++      case EXT4_HLOCK_READDIR:
++              return HTREE_LOCK_PR;
++      case EXT4_HLOCK_LOOKUP:
++              return HTREE_LOCK_CR;
++      case EXT4_HLOCK_DEL:
++      case EXT4_HLOCK_ADD:
++              return HTREE_LOCK_CW;
++      }
++}
++
++/* return PR for read-only operations, otherwise return EX */
++static inline htree_lock_mode_t ext4_htree_safe_mode(unsigned flags)
++{
++      int writer = (flags & EXT4_LB_DE) == EXT4_LB_DE;
++
++      /* 0 requires EX lock */
++      return (flags == 0 || writer) ? HTREE_LOCK_EX : HTREE_LOCK_PR;
++}
++
++static int ext4_htree_safe_locked(struct htree_lock *lck)
++{
++      int writer;
++
++      if (lck == NULL || lck->lk_mode == HTREE_LOCK_EX)
++              return 1;
++
++      writer = (ext4_htree_lock_data(lck)->ld_flags & EXT4_LB_DE) ==
++               EXT4_LB_DE;
++      if (writer) /* all readers & writers are excluded? */
++              return lck->lk_mode == HTREE_LOCK_EX;
++
++      /* all writers are excluded? */
++      return lck->lk_mode == HTREE_LOCK_PR ||
++             lck->lk_mode == HTREE_LOCK_PW ||
++             lck->lk_mode == HTREE_LOCK_EX;
++}
++
++/* relock htree_lock with EX mode if it's a change operation, otherwise
++ * relock it with PR mode. It's a noop if PDO is disabled. */
++static void ext4_htree_safe_relock(struct htree_lock *lck)
++{
++      if (!ext4_htree_safe_locked(lck)) {
++              unsigned flags = ext4_htree_lock_data(lck)->ld_flags;
++
++              htree_change_lock(lck, ext4_htree_safe_mode(flags));
++      }
++}
++
++void ext4_htree_lock(struct htree_lock *lck, struct htree_lock_head *lhead,
++                   struct inode *dir, unsigned flags)
++{
++      htree_lock_mode_t mode = is_dx(dir) ? ext4_htree_mode(flags) :
++                                            ext4_htree_safe_mode(flags);
++
++      ext4_htree_lock_data(lck)->ld_flags = flags;
++      htree_lock(lck, lhead, mode);
++      if (!is_dx(dir))
++              ext4_htree_safe_relock(lck); /* make sure it's safe locked */
++}
++EXPORT_SYMBOL(ext4_htree_lock);
++
++static int ext4_htree_node_lock(struct htree_lock *lck, struct dx_entry *at,
++                              unsigned lmask, int wait, void *ev)
++{
++      u32     key = (at == NULL) ? 0 : dx_get_block(at);
++      u32     mode;
++
++      /* NOOP if htree is well protected or caller doesn't require the lock */
++      if (ext4_htree_safe_locked(lck) ||
++         !(ext4_htree_lock_data(lck)->ld_flags & lmask))
++              return 1;
++
++      mode = (ext4_htree_lock_data(lck)->ld_flags & lmask) == lmask ?
++              HTREE_LOCK_PW : HTREE_LOCK_PR;
++      while (1) {
++              if (htree_node_lock_try(lck, mode, key, ffz(~lmask), wait, ev))
++                      return 1;
++              if (!(lmask & EXT4_LB_SPIN)) /* not a spinlock */
++                      return 0;
++              cpu_relax(); /* spin until granted */
++      }
++}
++
++static int ext4_htree_node_locked(struct htree_lock *lck, unsigned lmask)
++{
++      return ext4_htree_safe_locked(lck) ||
++             htree_node_is_granted(lck, ffz(~lmask));
++}
++
++static void ext4_htree_node_unlock(struct htree_lock *lck,
++                                 unsigned lmask, void *buf)
++{
++      /* NB: it's safe to call multiple times, even if it's not locked */
++      if (!ext4_htree_safe_locked(lck) &&
++           htree_node_is_granted(lck, ffz(~lmask)))
++              htree_node_unlock(lck, ffz(~lmask), buf);
++}
++
++#define ext4_htree_dx_lock(lck, key)          \
++      ext4_htree_node_lock(lck, key, EXT4_LB_DX, 1, NULL)
++#define ext4_htree_dx_lock_try(lck, key)      \
++      ext4_htree_node_lock(lck, key, EXT4_LB_DX, 0, NULL)
++#define ext4_htree_dx_unlock(lck)             \
++      ext4_htree_node_unlock(lck, EXT4_LB_DX, NULL)
++#define ext4_htree_dx_locked(lck)             \
++      ext4_htree_node_locked(lck, EXT4_LB_DX)
++
++static void ext4_htree_dx_need_lock(struct htree_lock *lck)
++{
++      struct ext4_dir_lock_data *ld;
++
++      if (ext4_htree_safe_locked(lck))
++              return;
++
++      ld = ext4_htree_lock_data(lck);
++      switch (ld->ld_flags) {
++      default:
++              return;
++      case EXT4_HLOCK_LOOKUP:
++              ld->ld_flags = EXT4_HLOCK_LOOKUP_SAFE;
++              return;
++      case EXT4_HLOCK_DEL:
++              ld->ld_flags = EXT4_HLOCK_DEL_SAFE;
++              return;
++      case EXT4_HLOCK_ADD:
++              ld->ld_flags = EXT4_HLOCK_SPLIT;
++              return;
++      }
++}
++
++#define ext4_htree_de_lock(lck, key)          \
++      ext4_htree_node_lock(lck, key, EXT4_LB_DE, 1, NULL)
++#define ext4_htree_de_unlock(lck)             \
++      ext4_htree_node_unlock(lck, EXT4_LB_DE, NULL)
++
++#define ext4_htree_spin_lock(lck, key, event) \
++      ext4_htree_node_lock(lck, key, EXT4_LB_SPIN, 0, event)
++#define ext4_htree_spin_unlock(lck)           \
++      ext4_htree_node_unlock(lck, EXT4_LB_SPIN, NULL)
++#define ext4_htree_spin_unlock_listen(lck, p) \
++      ext4_htree_node_unlock(lck, EXT4_LB_SPIN, p)
++
++static void ext4_htree_spin_stop_listen(struct htree_lock *lck)
++{
++      if (!ext4_htree_safe_locked(lck) &&
++          htree_node_is_listening(lck, ffz(~EXT4_LB_SPIN)))
++              htree_node_stop_listen(lck, ffz(~EXT4_LB_SPIN));
++}
++
++enum {
++      DX_HASH_COL_IGNORE,     /* ignore collision while probing frames */
++      DX_HASH_COL_YES,        /* there is collision and it does matter */
++      DX_HASH_COL_NO,         /* there is no collision */
++};
++
++static int dx_probe_hash_collision(struct htree_lock *lck,
++                                 struct dx_entry *entries,
++                                 struct dx_entry *at, u32 hash)
++{
++      if (!(lck && ext4_htree_lock_data(lck)->ld_flags & EXT4_LB_EXACT)) {
++              return DX_HASH_COL_IGNORE; /* don't care about collision */
++
++      } else if (at == entries + dx_get_count(entries) - 1) {
++              return DX_HASH_COL_IGNORE; /* not in any leaf of this DX */
++
++      } else { /* hash collision? */
++              return ((dx_get_hash(at + 1) & ~1) == hash) ?
++                      DX_HASH_COL_YES : DX_HASH_COL_NO;
++      }
++}
++
+ /*
+  * Probe for a directory leaf block to search.
+  *
+@@ -759,10 +990,11 @@ struct stats dx_show_entries(struct dx_hash_info *hinfo, struct inode *dir,
+  */
+ static struct dx_frame *
+ dx_probe(struct ext4_filename *fname, struct inode *dir,
+-       struct dx_hash_info *hinfo, struct dx_frame *frame_in)
++       struct dx_hash_info *hinfo, struct dx_frame *frame_in,
++       struct htree_lock *lck)
+ {
+       unsigned count, indirect;
+-      struct dx_entry *at, *entries, *p, *q, *m;
++      struct dx_entry *at, *entries, *p, *q, *m, *dx = NULL;
+       struct dx_root_info *info;
+       struct dx_frame *frame = frame_in;
+       struct dx_frame *ret_err = ERR_PTR(ERR_BAD_DX_DIR);
+@@ -824,8 +1056,15 @@ dx_probe(struct ext4_filename *fname, struct inode *dir,
+       dxtrace(printk("Look up %x", hash));
+       while (1) {
++              if (indirect == 0) { /* the last index level */
++                      /* NB: ext4_htree_dx_lock() could be a noop if the
++                       * DX-lock flag is not set for the current operation */
++                      ext4_htree_dx_lock(lck, dx);
++                      ext4_htree_spin_lock(lck, dx, NULL);
++              }
+               count = dx_get_count(entries);
+-              if (!count || count > dx_get_limit(entries)) {
++              if (count == 0 || count > dx_get_limit(entries)) {
++                      ext4_htree_spin_unlock(lck); /* release spin */
+                       ext4_warning_inode(dir,
+                                          "dx entry: count %u beyond limit %u",
+                                          count, dx_get_limit(entries));
+@@ -864,8 +1103,70 @@ dx_probe(struct ext4_filename *fname, struct inode *dir,
+                              dx_get_block(at)));
+               frame->entries = entries;
+               frame->at = at;
+-              if (!indirect--)
++
++              if (indirect == 0) { /* the last index level */
++                      struct ext4_dir_lock_data *ld;
++                      u64 myblock;
++
++                      /* By default we only lock DE-block, however, we will
++                       * also lock the last level DX-block if:
++                       * a) there is hash collision
++                       *    we will set DX-lock flag (a few lines below)
++                       *    and redo to lock DX-block
++                       *    see detail in dx_probe_hash_collision()
++                       * b) it's a retry from splitting
++                       *    we need to lock the last level DX-block so nobody
++                       *    else can split any leaf blocks under the same
++                       *    DX-block, see detail in ext4_dx_add_entry()
++                       */
++                      if (ext4_htree_dx_locked(lck)) {
++                              /* DX-block is locked, just lock DE-block
++                               * and return */
++                              ext4_htree_spin_unlock(lck);
++                              if (!ext4_htree_safe_locked(lck))
++                                      ext4_htree_de_lock(lck, frame->at);
++                              return frame;
++                      }
++                      /* it's pdirop and no DX lock */
++                      if (dx_probe_hash_collision(lck, entries, at, hash) ==
++                          DX_HASH_COL_YES) {
++                              /* found hash collision, set DX-lock flag
++                               * and retry to obtain DX-lock */
++                              ext4_htree_spin_unlock(lck);
++                              ext4_htree_dx_need_lock(lck);
++                              continue;
++                      }
++                      ld = ext4_htree_lock_data(lck);
++                      /* because I don't lock DX, @at can't be trusted
++                       * after I release the spinlock, so I have to save it */
++                      ld->ld_at = at;
++                      ld->ld_at_entry = *at;
++                      ld->ld_count = dx_get_count(entries);
++
++                      frame->at = &ld->ld_at_entry;
++                      myblock = dx_get_block(at);
++
++                      /* NB: lock ordering */
++                      ext4_htree_spin_unlock_listen(lck, &myblock);
++                      /* another thread can split this DE-block because:
++                       * a) I don't have the lock for the DE-block yet
++                       * b) I released the spinlock on the DX-block
++                       * if that happens I can detect it by listening
++                       * for the split event on this DE-block */
++                      ext4_htree_de_lock(lck, frame->at);
++                      ext4_htree_spin_stop_listen(lck);
++
++                      if (myblock == EXT4_HTREE_NODE_CHANGED) {
++                              /* someone split this DE-block before
++                               * I locked it; I need to retry and lock
++                               * the valid DE-block */
++                              ext4_htree_de_unlock(lck);
++                              continue;
++                      }
+                       return frame;
++              }
++              dx = at;
++              indirect--;
+               frame++;
+               frame->bh = ext4_read_dirblock(dir, dx_get_block(at), INDEX);
+               if (IS_ERR(frame->bh)) {
+@@ -934,7 +1235,7 @@ static void dx_release(struct dx_frame *frames)
+ static int ext4_htree_next_block(struct inode *dir, __u32 hash,
+                                struct dx_frame *frame,
+                                struct dx_frame *frames,
+-                               __u32 *start_hash)
++                               __u32 *start_hash, struct htree_lock *lck)
+ {
+       struct dx_frame *p;
+       struct buffer_head *bh;
+@@ -949,12 +1250,22 @@ static int ext4_htree_next_block(struct inode *dir, __u32 hash,
+        * this loop, num_frames indicates the number of interior
+        * nodes need to be read.
+        */
++      ext4_htree_de_unlock(lck);
+       while (1) {
+-              if (++(p->at) < p->entries + dx_get_count(p->entries))
+-                      break;
++              if (num_frames > 0 || ext4_htree_dx_locked(lck)) {
++                      /* num_frames > 0 :
++                       *   DX block
++                       * ext4_htree_dx_locked:
++                       *   frame->at is a reliable pointer returned by dx_probe,
++                       *   otherwise dx_probe already knew there was no collision */
++                      if (++(p->at) < p->entries + dx_get_count(p->entries))
++                              break;
++              }
+               if (p == frames)
+                       return 0;
+               num_frames++;
++              if (num_frames == 1)
++                      ext4_htree_dx_unlock(lck);
+               p--;
+       }
+@@ -977,6 +1288,13 @@ static int ext4_htree_next_block(struct inode *dir, __u32 hash,
+        * block so no check is necessary
+        */
+       while (num_frames--) {
++              if (num_frames == 0) {
++                      /* it's not always necessary, we just don't want to
++                       * detect hash collision again */
++                      ext4_htree_dx_need_lock(lck);
++                      ext4_htree_dx_lock(lck, p->at);
++              }
++
+               bh = ext4_read_dirblock(dir, dx_get_block(p->at), INDEX);
+               if (IS_ERR(bh))
+                       return PTR_ERR(bh);
+@@ -985,6 +1303,7 @@ static int ext4_htree_next_block(struct inode *dir, __u32 hash,
+               p->bh = bh;
+               p->at = p->entries = ((struct dx_node *) bh->b_data)->entries;
+       }
++      ext4_htree_de_lock(lck, p->at);
+       return 1;
+ }
+@@ -1132,10 +1451,10 @@ int ext4_htree_fill_tree(struct file *dir_file, __u32 start_hash,
+       }
+       hinfo.hash = start_hash;
+       hinfo.minor_hash = 0;
+-      frame = dx_probe(NULL, dir, &hinfo, frames);
++      /* assume it's PR locked */
++      frame = dx_probe(NULL, dir, &hinfo, frames, NULL);
+       if (IS_ERR(frame))
+               return PTR_ERR(frame);
+-
+       /* Add '.' and '..' from the htree header */
+       if (!start_hash && !start_minor_hash) {
+               de = (struct ext4_dir_entry_2 *) frames[0].bh->b_data;
+@@ -1175,7 +1494,7 @@ int ext4_htree_fill_tree(struct file *dir_file, __u32 start_hash,
+               count += ret;
+               hashval = ~0;
+               ret = ext4_htree_next_block(dir, HASH_NB_ALWAYS,
+-                                          frame, frames, &hashval);
++                                          frame, frames, &hashval, NULL);
+               *next_hash = hashval;
+               if (ret < 0) {
+                       err = ret;
+@@ -1451,7 +1770,7 @@ static int is_dx_internal_node(struct inode *dir, ext4_lblk_t block,
+ static struct buffer_head *__ext4_find_entry(struct inode *dir,
+                                            struct ext4_filename *fname,
+                                            struct ext4_dir_entry_2 **res_dir,
+-                                           int *inlined)
++                                           int *inlined, struct htree_lock *lck)
+ {
+       struct super_block *sb;
+       struct buffer_head *bh_use[NAMEI_RA_SIZE];
+@@ -1493,7 +1812,7 @@ static struct buffer_head *__ext4_find_entry(struct inode *dir,
+               goto restart;
+       }
+       if (is_dx(dir)) {
+-              ret = ext4_dx_find_entry(dir, fname, res_dir);
++              ret = ext4_dx_find_entry(dir, fname, res_dir, lck);
+               /*
+                * On success, or if the error was file not found,
+                * return.  Otherwise, fall back to doing a search the
+@@ -1503,6 +1822,7 @@ static struct buffer_head *__ext4_find_entry(struct inode *dir,
+                       goto cleanup_and_exit;
+               dxtrace(printk(KERN_DEBUG "ext4_find_entry: dx failed, "
+                              "falling back\n"));
++              ext4_htree_safe_relock(lck);
+               ret = NULL;
+       }
+       nblocks = dir->i_size >> EXT4_BLOCK_SIZE_BITS(sb);
+@@ -1590,10 +1910,10 @@ cleanup_and_exit:
+       return ret;
+ }
+-static struct buffer_head *ext4_find_entry(struct inode *dir,
++struct buffer_head *ext4_find_entry_locked(struct inode *dir,
+                                          const struct qstr *d_name,
+                                          struct ext4_dir_entry_2 **res_dir,
+-                                         int *inlined)
++                                         int *inlined, struct htree_lock *lck)
+ {
+       int err;
+       struct ext4_filename fname;
+@@ -1605,12 +1925,14 @@ static struct buffer_head *ext4_find_entry(struct inode *dir,
+       if (err)
+               return ERR_PTR(err);
+-      bh = __ext4_find_entry(dir, &fname, res_dir, inlined);
++      bh = __ext4_find_entry(dir, &fname, res_dir, inlined, lck);
+       ext4_fname_free_filename(&fname);
+       return bh;
+ }
++EXPORT_SYMBOL(ext4_find_entry_locked);
++
+ static struct buffer_head *ext4_lookup_entry(struct inode *dir,
+                                            struct dentry *dentry,
+                                            struct ext4_dir_entry_2 **res_dir)
+@@ -1625,7 +1947,7 @@ static struct buffer_head *ext4_lookup_entry(struct inode *dir,
+       if (err)
+               return ERR_PTR(err);
+-      bh = __ext4_find_entry(dir, &fname, res_dir, NULL);
++      bh = __ext4_find_entry(dir, &fname, res_dir, NULL, NULL);
+       ext4_fname_free_filename(&fname);
+       return bh;
+@@ -1633,7 +1955,8 @@ static struct buffer_head *ext4_lookup_entry(struct inode *dir,
+ static struct buffer_head * ext4_dx_find_entry(struct inode *dir,
+                       struct ext4_filename *fname,
+-                      struct ext4_dir_entry_2 **res_dir)
++                      struct ext4_dir_entry_2 **res_dir,
++                      struct htree_lock *lck)
+ {
+       struct super_block * sb = dir->i_sb;
+       struct dx_frame frames[EXT4_HTREE_LEVEL], *frame;
+@@ -1644,7 +1967,7 @@ static struct buffer_head * ext4_dx_find_entry(struct inode *dir,
+ #ifdef CONFIG_FS_ENCRYPTION
+       *res_dir = NULL;
+ #endif
+-      frame = dx_probe(fname, dir, NULL, frames);
++      frame = dx_probe(fname, dir, NULL, frames, lck);
+       if (IS_ERR(frame))
+               return (struct buffer_head *) frame;
+       do {
+@@ -1666,7 +1989,7 @@ static struct buffer_head * ext4_dx_find_entry(struct inode *dir,
+               /* Check to see if we should continue to search */
+               retval = ext4_htree_next_block(dir, fname->hinfo.hash, frame,
+-                                             frames, NULL);
++                                             frames, NULL, lck);
+               if (retval < 0) {
+                       ext4_warning_inode(dir,
+                               "error %d reading directory index block",
+@@ -1846,8 +2169,9 @@ static struct ext4_dir_entry_2* dx_pack_dirents(char *base, unsigned blocksize)
+  * Returns pointer to de in block into which the new entry will be inserted.
+  */
+ static struct ext4_dir_entry_2 *do_split(handle_t *handle, struct inode *dir,
+-                      struct buffer_head **bh,struct dx_frame *frame,
+-                      struct dx_hash_info *hinfo)
++                      struct buffer_head **bh, struct dx_frame *frames,
++                      struct dx_frame *frame, struct dx_hash_info *hinfo,
++                      struct htree_lock *lck)
+ {
+       unsigned blocksize = dir->i_sb->s_blocksize;
+       unsigned count, continued;
+@@ -1908,8 +2232,14 @@ static struct ext4_dir_entry_2 *do_split(handle_t *handle, struct inode *dir,
+                                       hash2, split, count-split));
+       /* Fancy dance to stay within two buffers */
+-      de2 = dx_move_dirents(data1, data2, map + split, count - split,
+-                            blocksize);
++      if (hinfo->hash < hash2) {
++              de2 = dx_move_dirents(data1, data2, map + split,
++                                    count - split, blocksize);
++      } else {
++              /* make sure we will add the entry to the same block
++               * which we have already locked */
++              de2 = dx_move_dirents(data1, data2, map, split, blocksize);
++      }
+       de = dx_pack_dirents(data1, blocksize);
+       de->rec_len = ext4_rec_len_to_disk(data1 + (blocksize - csum_size) -
+                                          (char *) de,
+@@ -1927,12 +2257,21 @@ static struct ext4_dir_entry_2 *do_split(handle_t *handle, struct inode *dir,
+       dxtrace(dx_show_leaf(dir, hinfo, (struct ext4_dir_entry_2 *) data2,
+                       blocksize, 1));
+-      /* Which block gets the new entry? */
+-      if (hinfo->hash >= hash2) {
+-              swap(*bh, bh2);
+-              de = de2;
++      ext4_htree_spin_lock(lck, frame > frames ? (frame - 1)->at : NULL,
++                           frame->at); /* notify block is being split */
++      if (hinfo->hash < hash2) {
++              dx_insert_block(frame, hash2 + continued, newblock);
++
++      } else {
++              /* switch block number */
++              dx_insert_block(frame, hash2 + continued,
++                              dx_get_block(frame->at));
++              dx_set_block(frame->at, newblock);
++              (frame->at)++;
+       }
+-      dx_insert_block(frame, hash2 + continued, newblock);
++      ext4_htree_spin_unlock(lck);
++      ext4_htree_dx_unlock(lck);
++
+       err = ext4_handle_dirty_dirblock(handle, dir, bh2);
+       if (err)
+               goto journal_error;
+@@ -2202,7 +2541,7 @@ static int make_indexed_dir(handle_t *handle, struct ext4_filename *fname,
+       if (retval)
+               goto out_frames;        
+-      de = do_split(handle,dir, &bh2, frame, &fname->hinfo);
++      de = do_split(handle, dir, &bh2, frames, frame, &fname->hinfo, NULL);
+       if (IS_ERR(de)) {
+               retval = PTR_ERR(de);
+               goto out_frames;
+@@ -2312,8 +2651,8 @@ out:
+  * may not sleep between calling this and putting something into
+  * the entry, as someone else might have used it while you slept.
+  */
+-static int ext4_add_entry(handle_t *handle, struct dentry *dentry,
+-                        struct inode *inode)
++int ext4_add_entry_locked(handle_t *handle, struct dentry *dentry,
++                        struct inode *inode, struct htree_lock *lck)
+ {
+       struct inode *dir = d_inode(dentry->d_parent);
+       struct buffer_head *bh = NULL;
+@@ -2361,9 +2700,10 @@ static int ext4_add_entry(handle_t *handle, struct dentry *dentry,
+               if (dentry->d_name.len == 2 &&
+                    memcmp(dentry->d_name.name, "..", 2) == 0)
+                        return ext4_update_dotdot(handle, dentry, inode);
+-              retval = ext4_dx_add_entry(handle, &fname, dir, inode);
++              retval = ext4_dx_add_entry(handle, &fname, dir, inode, lck);
+               if (!retval || (retval != ERR_BAD_DX_DIR))
+                       goto out;
++              ext4_htree_safe_relock(lck);
+               ext4_clear_inode_flag(dir, EXT4_INODE_INDEX);
+               dx_fallback++;
+               ext4_mark_inode_dirty(handle, dir);
+@@ -2417,12 +2757,14 @@ out:
+               ext4_set_inode_state(inode, EXT4_STATE_NEWENTRY);
+       return retval;
+ }
++EXPORT_SYMBOL(ext4_add_entry_locked);
+ /*
+  * Returns 0 for success, or a negative error value
+  */
+ static int ext4_dx_add_entry(handle_t *handle, struct ext4_filename *fname,
+-                           struct inode *dir, struct inode *inode)
++                           struct inode *dir, struct inode *inode,
++                           struct htree_lock *lck)
+ {
+       struct dx_frame frames[EXT4_HTREE_LEVEL], *frame;
+       struct dx_entry *entries, *at;
+@@ -2434,7 +2776,7 @@ static int ext4_dx_add_entry(handle_t *handle, struct ext4_filename *fname,
+ again:
+       restart = 0;
+-      frame = dx_probe(fname, dir, NULL, frames);
++      frame = dx_probe(fname, dir, NULL, frames, lck);
+       if (IS_ERR(frame))
+               return PTR_ERR(frame);
+       entries = frame->entries;
+@@ -2469,6 +2811,12 @@ again:
+               struct dx_node *node2;
+               struct buffer_head *bh2;
++              if (!ext4_htree_safe_locked(lck)) { /* retry with EX lock */
++                      ext4_htree_safe_relock(lck);
++                      restart = 1;
++                      goto cleanup;
++              }
++
+               while (frame > frames) {
+                       if (dx_get_count((frame - 1)->entries) <
+                           dx_get_limit((frame - 1)->entries)) {
+@@ -2571,8 +2919,32 @@ again:
+                       restart = 1;
+                       goto journal_error;
+               }
++      } else if (!ext4_htree_dx_locked(lck)) {
++              struct ext4_dir_lock_data *ld = ext4_htree_lock_data(lck);
++
++              /* not well protected, require DX lock */
++              ext4_htree_dx_need_lock(lck);
++              at = frame > frames ? (frame - 1)->at : NULL;
++
++              /* NB: no risk of deadlock because it's just a try.
++               *
++               * NB: we check ld_count for twice, the first time before
++               * NB: we check ld_count twice, the first time before
++               * taking the DX lock and the second time after holding it.
++               * NB: We never free blocks for directory so far, which
++               * NB: we never free directory blocks so far, which means
++               * the value returned by dx_get_count() should equal
++               * ld->ld_count if nobody split any DE-block under @at,
++              if ((ld->ld_count != dx_get_count(entries)) ||
++                  !ext4_htree_dx_lock_try(lck, at) ||
++                  (ld->ld_count != dx_get_count(entries))) {
++                      restart = 1;
++                      goto cleanup;
++              }
++              /* OK, I've got DX lock and nothing changed */
++              frame->at = ld->ld_at;
+       }
+-      de = do_split(handle, dir, &bh, frame, &fname->hinfo);
++      de = do_split(handle, dir, &bh, frames, frame, &fname->hinfo, lck);
+       if (IS_ERR(de)) {
+               err = PTR_ERR(de);
+               goto cleanup;
+@@ -2583,6 +2955,8 @@ again:
+ journal_error:
+       ext4_std_error(dir->i_sb, err); /* this is a no-op if err == 0 */
+ cleanup:
++      ext4_htree_dx_unlock(lck);
++      ext4_htree_de_unlock(lck);
+       brelse(bh);
+       dx_release(frames);
+       /* @restart is true means htree-path has been changed, we need to
+diff --git a/fs/ext4/super.c b/fs/ext4/super.c
+index 0fcc33b..3cc0306 100644
+--- a/fs/ext4/super.c
++++ b/fs/ext4/super.c
+@@ -1076,6 +1076,7 @@ static struct inode *ext4_alloc_inode(struct super_block *sb)
+       inode_set_iversion(&ei->vfs_inode, 1);
+       spin_lock_init(&ei->i_raw_lock);
++      sema_init(&ei->i_append_sem, 1);
+       INIT_LIST_HEAD(&ei->i_prealloc_list);
+       spin_lock_init(&ei->i_prealloc_lock);
+       ext4_es_init_tree(&ei->i_es_tree);
+diff --git a/include/linux/htree_lock.h b/include/linux/htree_lock.h
+new file mode 100644
+index 0000000..9dc7788
+--- /dev/null
++++ b/include/linux/htree_lock.h
+@@ -0,0 +1,187 @@
++/*
++ * include/linux/htree_lock.h
++ *
++ * Copyright (c) 2011, 2012, Intel Corporation.
++ *
++ * Author: Liang Zhen <liang@whamcloud.com>
++ */
++
++/*
++ * htree lock
++ *
++ * htree_lock is an advanced lock, it can support five lock modes (concept is
++ * taken from DLM) and it's a sleeping lock.
++ *
++ * most common use case is:
++ * - create a htree_lock_head for data
++ * - each thread (contender) creates it's own htree_lock
++ * - contender needs to call htree_lock(lock_node, mode) to protect data and
++ *   call htree_unlock to release lock
++ *
++ * There is also a more complex, advanced use-case: the user can take a
++ * PW/PR lock on a particular key, mostly while holding a shared lock
++ * on the htree (CW, CR)
++ *
++ * htree_lock(lock_node, HTREE_LOCK_CR); lock the htree with CR
++ * htree_node_lock(lock_node, HTREE_LOCK_PR, key...); lock @key with PR
++ * ...
++ * htree_node_unlock(lock_node); unlock the key
++ *
++ * Another tip: we can have N levels of this kind of key; all we need to
++ * do is specify N levels while creating the htree_lock_head, then we can
++ * lock/unlock a specific level with:
++ * htree_node_lock(lock_node, mode1, key1, level1...);
++ * do something;
++ * htree_node_lock(lock_node, mode1, key2, level2...);
++ * do something;
++ * htree_node_unlock(lock_node, level2);
++ * htree_node_unlock(lock_node, level1);
++ *
++ * NB: for multi-level locking, be careful about lock ordering to avoid deadlocks
++ */
++
++#ifndef _LINUX_HTREE_LOCK_H
++#define _LINUX_HTREE_LOCK_H
++
++#include <linux/list.h>
++#include <linux/spinlock.h>
++#include <linux/sched.h>
++
++/*
++ * Lock Modes
++ * more details can be found here:
++ * http://en.wikipedia.org/wiki/Distributed_lock_manager
++ */
++typedef enum {
++      HTREE_LOCK_EX   = 0, /* exclusive lock: incompatible with all others */
++      HTREE_LOCK_PW,       /* protected write: allows only CR users */
++      HTREE_LOCK_PR,       /* protected read: allow PR, CR users */
++      HTREE_LOCK_CW,       /* concurrent write: allow CR, CW users */
++      HTREE_LOCK_CR,       /* concurrent read: allow all but EX users */
++      HTREE_LOCK_MAX,      /* number of lock modes */
++} htree_lock_mode_t;
++
++#define HTREE_LOCK_NL         HTREE_LOCK_MAX
++#define HTREE_LOCK_INVAL      0xdead10c
++
++enum {
++      HTREE_HBITS_MIN         = 2,
++      HTREE_HBITS_DEF         = 14,
++      HTREE_HBITS_MAX         = 32,
++};
++
++enum {
++      HTREE_EVENT_DISABLE     = (0),
++      HTREE_EVENT_RD          = (1 << HTREE_LOCK_PR),
++      HTREE_EVENT_WR          = (1 << HTREE_LOCK_PW),
++      HTREE_EVENT_RDWR        = (HTREE_EVENT_RD | HTREE_EVENT_WR),
++};
++
++struct htree_lock;
++
++typedef void (*htree_event_cb_t)(void *target, void *event);
++
++struct htree_lock_child {
++      struct list_head        lc_list;        /* granted list */
++      htree_event_cb_t        lc_callback;    /* event callback */
++      unsigned                lc_events;      /* event types */
++};
++
++struct htree_lock_head {
++      unsigned long           lh_lock;        /* bits lock */
++      /* blocked lock list (htree_lock) */
++      struct list_head        lh_blocked_list;
++      /* # key levels */
++      u16                     lh_depth;
++      /* hash bits for key and limit number of locks */
++      u16                     lh_hbits;
++      /* counters for blocked locks */
++      u16                     lh_nblocked[HTREE_LOCK_MAX];
++      /* counters for granted locks */
++      u16                     lh_ngranted[HTREE_LOCK_MAX];
++      /* private data */
++      void                    *lh_private;
++      /* array of children locks */
++      struct htree_lock_child lh_children[0];
++};
++
++/* htree_lock_node_t is child-lock for a specific key (ln_value) */
++struct htree_lock_node {
++      htree_lock_mode_t       ln_mode;
++      /* major hash key */
++      u16                     ln_major_key;
++      /* minor hash key */
++      u16                     ln_minor_key;
++      struct list_head        ln_major_list;
++      struct list_head        ln_minor_list;
++      /* alive list, all locks (granted, blocked, listening) are on it */
++      struct list_head        ln_alive_list;
++      /* blocked list */
++      struct list_head        ln_blocked_list;
++      /* granted list */
++      struct list_head        ln_granted_list;
++      void                    *ln_ev_target;
++};
++
++struct htree_lock {
++      struct task_struct      *lk_task;
++      struct htree_lock_head  *lk_head;
++      void                    *lk_private;
++      unsigned                lk_depth;
++      htree_lock_mode_t       lk_mode;
++      struct list_head        lk_blocked_list;
++      struct htree_lock_node  lk_nodes[0];
++};
++
++/* create a lock head, which stands for a resource */
++struct htree_lock_head *htree_lock_head_alloc(unsigned depth,
++                                            unsigned hbits, unsigned priv);
++/* free a lock head */
++void htree_lock_head_free(struct htree_lock_head *lhead);
++/* register event callback for child lock at level @depth */
++void htree_lock_event_attach(struct htree_lock_head *lhead, unsigned depth,
++                           unsigned events, htree_event_cb_t callback);
++/* create a lock handle, which stands for a thread */
++struct htree_lock *htree_lock_alloc(unsigned depth, unsigned pbytes);
++/* free a lock handle */
++void htree_lock_free(struct htree_lock *lck);
++/* lock htree; when @wait is false, 0 is returned if the lock can't
++ * be granted immediately */
++int htree_lock_try(struct htree_lock *lck, struct htree_lock_head *lhead,
++                 htree_lock_mode_t mode, int wait);
++/* unlock htree */
++void htree_unlock(struct htree_lock *lck);
++/* unlock and relock htree with @new_mode */
++int htree_change_lock_try(struct htree_lock *lck,
++                        htree_lock_mode_t new_mode, int wait);
++void htree_change_mode(struct htree_lock *lck, htree_lock_mode_t mode);
++/* acquire the child lock (key) of the htree at level @dep; @event will be
++ * sent to all listeners on this @key while the lock is being granted */
++int htree_node_lock_try(struct htree_lock *lck, htree_lock_mode_t mode,
++                      u32 key, unsigned dep, int wait, void *event);
++/* release the child lock at level @dep; this lock will then listen on its
++ * key: if @event isn't NULL, event_cb will be called against @lck while
++ * granting any other lock at level @dep with the same key */
++void htree_node_unlock(struct htree_lock *lck, unsigned dep, void *event);
++/* stop listening on child lock at level @dep */
++void htree_node_stop_listen(struct htree_lock *lck, unsigned dep);
++/* for debug */
++void htree_lock_stat_print(int depth);
++void htree_lock_stat_reset(void);
++
++#define htree_lock(lck, lh, mode)     htree_lock_try(lck, lh, mode, 1)
++#define htree_change_lock(lck, mode)  htree_change_lock_try(lck, mode, 1)
++
++#define htree_lock_mode(lck)          ((lck)->lk_mode)
++
++#define htree_node_lock(lck, mode, key, dep)  \
++      htree_node_lock_try(lck, mode, key, dep, 1, NULL)
++/* this is only safe in thread context of lock owner */
++#define htree_node_is_granted(lck, dep)               \
++      ((lck)->lk_nodes[dep].ln_mode != HTREE_LOCK_INVAL && \
++       (lck)->lk_nodes[dep].ln_mode != HTREE_LOCK_NL)
++/* this is only safe in thread context of lock owner */
++#define htree_node_is_listening(lck, dep)     \
++      ((lck)->lk_nodes[dep].ln_mode == HTREE_LOCK_NL)
++
++#endif
+-- 
+2.20.1
+
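The pdirop patch above exports a small API (head and handle allocation, ext4_htree_lock(), and the *_locked entry points) for out-of-tree callers such as Lustre's osd-ldiskfs. The following is a minimal sketch, not part of the patch: it assumes the ext4.h declarations (EXT4_HLOCK_ADD, the ext4_htree_* wrappers) are in scope, allocates the lock head locally only for brevity (a real caller would cache one per directory), and pdirop_add_example() is an illustrative name.

/*
 * Sketch only: drive the exported pdirop API end to end.
 * Error paths are trimmed to the allocations.
 */
#include <linux/htree_lock.h>

static int pdirop_add_example(handle_t *handle, struct dentry *dentry,
			      struct inode *dir, struct inode *inode)
{
	struct htree_lock_head *lhead;
	struct htree_lock *lck;
	int rc;

	lhead = ext4_htree_lock_head_alloc(HTREE_HBITS_DEF);
	if (lhead == NULL)
		return -ENOMEM;

	lck = ext4_htree_lock_alloc();
	if (lck == NULL) {
		htree_lock_head_free(lhead);
		return -ENOMEM;
	}

	/* CW on the whole tree; the PW lock on the one DE-block being
	 * modified is taken internally, so parallel adds can proceed */
	ext4_htree_lock(lck, lhead, dir, EXT4_HLOCK_ADD);
	rc = ext4_add_entry_locked(handle, dentry, inode, lck);
	htree_unlock(lck);

	htree_lock_free(lck);
	htree_lock_head_free(lhead);
	return rc;
}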
diff --git a/ldiskfs/kernel_patches/series/ldiskfs-5.4.0-ml.series b/ldiskfs/kernel_patches/series/ldiskfs-5.4.0-ml.series
new file mode 100644 (file)
index 0000000..262078a
--- /dev/null
@@ -0,0 +1,27 @@
+rhel8/ext4-inode-version.patch
+linux-5.4/ext4-lookup-dotdot.patch
+sles12sp2/ext4-print-inum-in-htree-warning.patch
+rhel8/ext4-prealloc.patch
+ubuntu18/ext4-osd-iop-common.patch
+ubuntu19/ext4-misc.patch
+rhel8/ext4-mballoc-extra-checks.patch
+linux-5.4/ext4-hash-indexed-dir-dotdot-update.patch
+linux-5.4/ext4-kill-dx-root.patch
+rhel7/ext4-mballoc-pa-free-mismatch.patch
+linux-5.4/ext4-data-in-dirent.patch
+rhel8/ext4-nocmtime.patch
+linux-5.4/ext4-pdirop.patch
+sles12sp3/ext4-max-dir-size.patch
+ubuntu18/ext4-remove-truncate-warning.patch
+rhel8/ext4-corrupted-inode-block-bitmaps-handling-patches.patch
+linux-5.4/ext4-give-warning-with-dir-htree-growing.patch
+ubuntu18/ext4-jcb-optimization.patch
+linux-5.4/ext4-attach-jinode-in-writepages.patch
+rhel8/ext4-dont-check-before-replay.patch
+rhel7/ext4-use-GFP_NOFS-in-ext4_inode_attach_jinode.patch
+rhel7/ext4-export-orphan-add.patch
+rhel8/ext4-export-mb-stream-allocator-variables.patch
+ubuntu19/ext4-iget-with-flags.patch
+linux-5.4/export-ext4fs-dirhash-helper.patch
+linux-5.4/ext4-misc.patch
+rhel8/ext4-simple-blockalloc.patch
index 031cd33..1612f20 100644 (file)
@@ -166,6 +166,25 @@ static inline struct lvar_leaf_entry *e_next(const struct iam_leaf *leaf,
 #define LVAR_HASH_R5        (0)
 #define LVAR_HASH_PREFIX    (0)
 
+#ifdef HAVE_LDISKFSFS_GETHASH_INODE_ARG
+/*
+ * NOTE: doing this breaks on file systems configured with
+ *       case-insensitive file name lookups
+ *
+ * kernel 5.2 commit b886ee3e778ec2ad43e276fd378ab492cf6819b7
+ * ext4: Support case-insensitive file name lookups
+ *
+ * FUTURE:
+ *  We need to pass the struct inode *dir down to hash_build0
+ *  to enable case-insensitive file name support in ext4/ldiskfs
+ */
+#define e_ldiskfsfs_dirhash(name, len, info) \
+               __ldiskfsfs_dirhash(name, len, info)
+#else
+#define e_ldiskfsfs_dirhash(name, len, info) \
+               ldiskfsfs_dirhash(name, len, info)
+#endif
+
 static u32 hash_build0(const char *name, int namelen)
 {
        u32 result;
@@ -186,14 +205,14 @@ static u32 hash_build0(const char *name, int namelen)
 
                hinfo.hash_version = LDISKFS_DX_HASH_TEA;
                hinfo.seed = NULL;
-               ldiskfsfs_dirhash(name, namelen, &hinfo);
+               e_ldiskfsfs_dirhash(name, namelen, &hinfo);
                result = hinfo.hash;
                if (LVAR_HASH_SANDWICH) {
                        u32 result2;
 
                        hinfo.hash_version = LDISKFS_DX_HASH_TEA;
                        hinfo.seed = NULL;
-                       ldiskfsfs_dirhash(name, namelen, &hinfo);
+                       e_ldiskfsfs_dirhash(name, namelen, &hinfo);
                        result2 = hinfo.hash;
                        result = (0xfc000000 & result2) | (0x03ffffff & result);
                }
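The FUTURE note above sketches where case-insensitive support would have to go: thread the directory inode down into hash_build0() so the dirhash helper can consult the casefold state. A hypothetical sketch follows; the hash_build0_ci() name, the dir parameter, the hinfo type name, and the inode-taking ldiskfsfs_dirhash() form (mirroring the 4-argument ext4fs_dirhash() from the kernel 5.2 commit cited above) are assumptions, not part of this change.

/* Hypothetical future form: hash with knowledge of the directory,
 * so casefolded directories produce correct hash values. */
static u32 hash_build0_ci(const struct inode *dir, const char *name,
			  int namelen)
{
	/* type name assumed from the unmangled ext4 side */
	struct dx_hash_info hinfo;

	hinfo.hash_version = LDISKFS_DX_HASH_TEA;
	hinfo.seed = NULL;
	/* assumed inode-taking variant of the dirhash helper */
	ldiskfsfs_dirhash(dir, name, namelen, &hinfo);
	return hinfo.hash;
}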
index f7c1fcb..9ed1748 100644 (file)
@@ -908,9 +908,17 @@ static inline struct buffer_head *osd_ldiskfs_append(handle_t *handle,
                return ERR_PTR(rc);
        return ldiskfs_append(handle, inode, nblock);
 }
-# define osd_ldiskfs_find_entry(dir, name, de, inlined, lock) \
+
+# ifdef HAVE___LDISKFS_FIND_ENTRY
+#  define osd_ldiskfs_find_entry(dir, name, de, inlined, lock) \
                (__ldiskfs_find_entry(dir, name, de, inlined, lock) ?: \
                 ERR_PTR(-ENOENT))
+# else
+#  define osd_ldiskfs_find_entry(dir, name, de, inlined, lock) \
+               (ldiskfs_find_entry_locked(dir, name, de, inlined, lock) ?: \
+                ERR_PTR(-ENOENT))
+# endif
+
 # define osd_journal_start(inode, type, nblocks) \
                ldiskfs_journal_start(inode, type, nblocks)
 # define osd_transaction_size(dev) \
@@ -934,13 +942,24 @@ static inline struct buffer_head *osd_ldiskfs_append(handle_t *handle,
        return bh;
 }
 
-# define osd_ldiskfs_find_entry(dir, name, de, inlined, lock) \
+# ifdef HAVE___LDISKFS_FIND_ENTRY
+#  define osd_ldiskfs_find_entry(dir, name, de, inlined, lock) \
                (__ldiskfs_find_entry(dir, name, de, lock) ?: \
                 ERR_PTR(-ENOENT))
+# else
+#  define osd_ldiskfs_find_entry(dir, name, de, inlined, lock) \
+               (ldiskfs_find_entry_locked(dir, name, de, lock) ?: \
+                ERR_PTR(-ENOENT))
+# endif
 # define osd_journal_start(inode, type, nblocks) \
                ldiskfs_journal_start(inode, nblocks)
 # define osd_transaction_size(dev) \
                (osd_journal(dev)->j_max_transaction_buffers)
+#endif /* LDISKFS_HT_MISC */
+
+#ifndef HAVE___LDISKFS_FIND_ENTRY
+# define __ldiskfs_add_entry(handle, child, inode, hlock) \
+               ldiskfs_add_entry_locked(handle, child, inode, hlock)
 #endif
 
 /*
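Whichever branch is selected above, the ?: ERR_PTR(-ENOENT) fallback guarantees that osd_ldiskfs_find_entry() never returns NULL, so callers only need an IS_ERR() check. A usage sketch follows; osd_lookup_example() and its arguments are illustrative stand-ins, not actual Lustre code.

/* Sketch only: consuming the osd_ldiskfs_find_entry() macro above. */
static int osd_lookup_example(struct inode *dir, const struct qstr *name)
{
	struct ldiskfs_dir_entry_2 *de;
	struct buffer_head *bh;

	/* NULL hlock: plain lookup without a pdirop htree lock */
	bh = osd_ldiskfs_find_entry(dir, name, &de, NULL, NULL);
	if (IS_ERR(bh))
		return PTR_ERR(bh);	/* -ENOENT when absent */

	brelse(bh);
	return 0;
}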