This patch adds support for the RHEL 7.1 [3.10.0-229.el7] kernel.
Signed-off-by: Bob Glossman <bob.glossman@intel.com>
Signed-off-by: Yang Sheng <yang.sheng@intel.com>
Change-Id: Ifbc294a53bd21eb35d373637d3326fc3c611c9f0
Reviewed-on: http://review.whamcloud.com/10249
Tested-by: Jenkins
Tested-by: Maloo <hpdd-maloo@intel.com>
Reviewed-by: Andreas Dilger <andreas.dilger@intel.com>
Reviewed-by: Oleg Drokin <oleg.drokin@intel.com>
AC_MSG_CHECKING([which ldiskfs series to use])
AS_IF([test x$RHEL_KERNEL = xyes], [
case $RHEL_RELEASE_NO in
- 70) LDISKFS_SERIES="3.10-rhel7.series" ;;
+ 71) LDISKFS_SERIES="3.10-rhel7.series" ;;
66) LDISKFS_SERIES="2.6-rhel6.6.series" ;;
65) LDISKFS_SERIES="2.6-rhel6.5.series" ;;
64) LDISKFS_SERIES="2.6-rhel6.4.series" ;;
--- /dev/null
+This patch implements a feature which allows ext4 filesystem users (e.g.
+Lustre) to store data in the ext4 dirent.
+The data is stored in the ext4 dirent after the file name, and this space is
+accounted for in de->rec_len. The flag EXT4_DIRENT_LUFID is added to d_type
+when extra data is present.
+
+It makes use of dentry->d_fsdata to pass the FID to ext4, so no
+changes to the ext4_add_entry() interface are required.
+
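+For illustration, a minimal sketch of how a caller (e.g. Lustre's osd layer,
+which is not part of this patch) could pass a packed FID to ext4 through
+dentry->d_fsdata; only the ext4_dentry_param layout and EXT4_LUFID_MAGIC come
+from this patch, fid_len and fid_buf are hypothetical placeholders:
+
+	struct ext4_dentry_param *p;
+
+	/* header plus the opaque FID bytes (fid_len/fid_buf are placeholders) */
+	p = kzalloc(sizeof(*p) + fid_len, GFP_NOFS);
+	if (p != NULL) {
+		p->edp_magic = EXT4_LUFID_MAGIC; /* checked by ext4_dentry_get_data() */
+		p->edp_len = fid_len;            /* size of edp_data in bytes */
+		memcpy(p->edp_data, fid_buf, fid_len);
+		/* add_dirent_to_buf() retrieves this via ext4_dentry_get_data() */
+		dentry->d_fsdata = p;
+	}
+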
+Index: linux-3.10.0-123.13.2.el7.x86_64/fs/ext4/dir.c
+===================================================================
+--- linux-3.10.0-123.13.2.el7.x86_64.orig/fs/ext4/dir.c
++++ linux-3.10.0-123.13.2.el7.x86_64/fs/ext4/dir.c
+@@ -71,11 +71,11 @@ int __ext4_check_dir_entry(const char *f
+ const int rlen = ext4_rec_len_from_disk(de->rec_len,
+ dir->i_sb->s_blocksize);
+
+- if (unlikely(rlen < EXT4_DIR_REC_LEN(1)))
++ if (unlikely(rlen < __EXT4_DIR_REC_LEN(1)))
+ error_msg = "rec_len is smaller than minimal";
+ else if (unlikely(rlen % 4 != 0))
+ error_msg = "rec_len % 4 != 0";
+- else if (unlikely(rlen < EXT4_DIR_REC_LEN(de->name_len)))
++ else if (unlikely(rlen < EXT4_DIR_REC_LEN(de)))
+ error_msg = "rec_len is too small for name_len";
+ else if (unlikely(((char *) de - buf) + rlen > size))
+ error_msg = "directory entry across range";
+@@ -208,7 +208,7 @@ revalidate:
+ * failure will be detected in the
+ * dirent test below. */
+ if (ext4_rec_len_from_disk(de->rec_len,
+- sb->s_blocksize) < EXT4_DIR_REC_LEN(1))
++ sb->s_blocksize) < __EXT4_DIR_REC_LEN(1))
+ break;
+ i += ext4_rec_len_from_disk(de->rec_len,
+ sb->s_blocksize);
+@@ -438,12 +438,17 @@ int ext4_htree_store_dirent(struct file
+ struct fname *fname, *new_fn;
+ struct dir_private_info *info;
+ int len;
++ int extra_data = 0;
+
+ info = dir_file->private_data;
+ p = &info->root.rb_node;
+
+ /* Create and allocate the fname structure */
+- len = sizeof(struct fname) + dirent->name_len + 1;
++ if (dirent->file_type & EXT4_DIRENT_LUFID)
++ extra_data = ext4_get_dirent_data_len(dirent);
++
++ len = sizeof(struct fname) + dirent->name_len + extra_data + 1;
++
+ new_fn = kzalloc(len, GFP_KERNEL);
+ if (!new_fn)
+ return -ENOMEM;
+@@ -452,7 +457,7 @@ int ext4_htree_store_dirent(struct file
+ new_fn->inode = le32_to_cpu(dirent->inode);
+ new_fn->name_len = dirent->name_len;
+ new_fn->file_type = dirent->file_type;
+- memcpy(new_fn->name, dirent->name, dirent->name_len);
++ memcpy(new_fn->name, dirent->name, dirent->name_len + extra_data);
+ new_fn->name[dirent->name_len] = 0;
+
+ while (*p) {
+@@ -452,7 +457,7 @@ int ext4_htree_store_dirent(struct file
+ if (ext4_check_dir_entry(dir, NULL, de, bh,
+ buf, buf_size, offset))
+ return -EIO;
+- nlen = EXT4_DIR_REC_LEN(de->name_len);
++ nlen = EXT4_DIR_REC_LEN(de);
+ rlen = ext4_rec_len_from_disk(de->rec_len, buf_size);
+ de = (struct ext4_dir_entry_2 *)((char *)de + rlen);
+ offset += rlen;
+Index: linux-3.10.0-123.13.2.el7.x86_64/fs/ext4/ext4.h
+===================================================================
+--- linux-3.10.0-123.13.2.el7.x86_64.orig/fs/ext4/ext4.h
++++ linux-3.10.0-123.13.2.el7.x86_64/fs/ext4/ext4.h
+@@ -952,6 +952,7 @@ struct ext4_inode_info {
+ #define EXT4_MOUNT_ERRORS_MASK 0x00070
+ #define EXT4_MOUNT_MINIX_DF 0x00080 /* Mimics the Minix statfs */
+ #define EXT4_MOUNT_NOLOAD 0x00100 /* Don't use existing journal*/
++#define EXT4_MOUNT_DIRDATA 0x00200 /* Data in directory entries*/
+ #define EXT4_MOUNT_DATA_FLAGS 0x00C00 /* Mode for data writes: */
+ #define EXT4_MOUNT_JOURNAL_DATA 0x00400 /* Write data to journal */
+ #define EXT4_MOUNT_ORDERED_DATA 0x00800 /* Flush data before commit */
+@@ -1534,6 +1535,7 @@ static inline void ext4_clear_state_flag
+ EXT4_FEATURE_INCOMPAT_64BIT| \
+ EXT4_FEATURE_INCOMPAT_FLEX_BG| \
+ EXT4_FEATURE_INCOMPAT_MMP | \
++ EXT4_FEATURE_INCOMPAT_DIRDATA| \
+ EXT4_FEATURE_INCOMPAT_INLINE_DATA)
+ #define EXT4_FEATURE_RO_COMPAT_SUPP (EXT4_FEATURE_RO_COMPAT_SPARSE_SUPER| \
+ EXT4_FEATURE_RO_COMPAT_LARGE_FILE| \
+@@ -1640,6 +1642,43 @@ struct ext4_dir_entry_tail {
+ #define EXT4_FT_SYMLINK 7
+
+ #define EXT4_FT_MAX 8
++#define EXT4_FT_MASK 0xf
++
++#if EXT4_FT_MAX > EXT4_FT_MASK
++#error "conflicting EXT4_FT_MAX and EXT4_FT_MASK"
++#endif
++
++/*
++ * d_type has 4 unused bits, so it can hold four types data. these different
++ * type of data (e.g. lustre data, high 32 bits of 64-bit inode number) can be
++ * stored, in flag order, after file-name in ext4 dirent.
++*/
++/*
++ * this flag is added to d_type if ext4 dirent has extra data after
++ * filename. this data length is variable and length is stored in first byte
++ * of data. data start after filename NUL byte.
++ * This is used by Lustre FS.
++ */
++#define EXT4_DIRENT_LUFID 0x10
++
++#define EXT4_LUFID_MAGIC 0xAD200907UL
++struct ext4_dentry_param {
++ __u32 edp_magic; /* EXT4_LUFID_MAGIC */
++ char edp_len; /* size of edp_data in bytes */
++ char edp_data[0]; /* packed array of data */
++} __attribute__((packed));
++
++static inline unsigned char *ext4_dentry_get_data(struct super_block *sb,
++ struct ext4_dentry_param* p)
++
++{
++ if (!EXT4_HAS_INCOMPAT_FEATURE(sb, EXT4_FEATURE_INCOMPAT_DIRDATA))
++ return NULL;
++ if (p && p->edp_magic == EXT4_LUFID_MAGIC)
++ return &p->edp_len;
++ else
++ return NULL;
++}
+
+ #define EXT4_FT_DIR_CSUM 0xDE
+
+@@ -1650,8 +1689,11 @@ struct ext4_dir_entry_tail {
+ */
+ #define EXT4_DIR_PAD 4
+ #define EXT4_DIR_ROUND (EXT4_DIR_PAD - 1)
+-#define EXT4_DIR_REC_LEN(name_len) (((name_len) + 8 + EXT4_DIR_ROUND) & \
++#define __EXT4_DIR_REC_LEN(name_len) (((name_len) + 8 + EXT4_DIR_ROUND) & \
+ ~EXT4_DIR_ROUND)
++#define EXT4_DIR_REC_LEN(de) (__EXT4_DIR_REC_LEN((de)->name_len +\
++ ext4_get_dirent_data_len(de)))
++
+ #define EXT4_MAX_REC_LEN ((1<<16)-1)
+
+ /*
+@@ -1987,11 +2029,11 @@ extern int ext4_find_dest_de(struct inod
+ struct buffer_head *bh,
+ void *buf, int buf_size,
+ const char *name, int namelen,
+- struct ext4_dir_entry_2 **dest_de);
++ struct ext4_dir_entry_2 **dest_de, int *dlen);
+ void ext4_insert_dentry(struct inode *inode,
+ struct ext4_dir_entry_2 *de,
+ int buf_size,
+- const char *name, int namelen);
++ const char *name, int namelen, void *data);
+ static inline void ext4_update_dx_flag(struct inode *inode)
+ {
+ if (!EXT4_HAS_COMPAT_FEATURE(inode->i_sb,
+@@ -2004,11 +2046,18 @@ static unsigned char ext4_filetype_table
+
+ static inline unsigned char get_dtype(struct super_block *sb, int filetype)
+ {
++ int fl_index = filetype & EXT4_FT_MASK;
++
+ if (!EXT4_HAS_INCOMPAT_FEATURE(sb, EXT4_FEATURE_INCOMPAT_FILETYPE) ||
+- (filetype >= EXT4_FT_MAX))
++ (fl_index >= EXT4_FT_MAX))
+ return DT_UNKNOWN;
+
+- return ext4_filetype_table[filetype];
++ if (!test_opt(sb, DIRDATA))
++ return (ext4_filetype_table[fl_index]);
++
++ return (ext4_filetype_table[fl_index]) |
++ (filetype & EXT4_DIRENT_LUFID);
++
+ }
+
+ /* fsync.c */
+@@ -2157,7 +2206,7 @@ extern struct buffer_head * ext4_find_en
+ struct ext4_dir_entry_2 ** res_dir,
+ int *inlined);
+ extern int ext4_add_dot_dotdot(handle_t *handle, struct inode *dir,
+- struct inode *inode);
++ struct inode *inode, const void *, const void *);
+ extern int search_dir(struct buffer_head *bh,
+ char *search_buf,
+ int buf_size,
+@@ -2761,6 +2810,28 @@ extern struct mutex ext4__aio_mutex[EXT4
+ extern int ext4_resize_begin(struct super_block *sb);
+ extern void ext4_resize_end(struct super_block *sb);
+
++/*
++ * Compute the total directory entry data length.
++ * This includes the filename and an implicit NUL terminator (always present),
++ * and optional extensions. Each extension has a bit set in the high 4 bits of
++ * de->file_type, and the extension length is the first byte in each entry.
++ */
++static inline int ext4_get_dirent_data_len(struct ext4_dir_entry_2 *de)
++{
++ char *len = de->name + de->name_len + 1 /* NUL terminator */;
++ int dlen = 0;
++ __u8 extra_data_flags = (de->file_type & ~EXT4_FT_MASK) >> 4;
++
++ while (extra_data_flags) {
++ if (extra_data_flags & 1) {
++ dlen += *len + (dlen == 0);
++ len += *len;
++ }
++ extra_data_flags >>= 1;
++ }
++ return dlen;
++}
++
+ #endif /* __KERNEL__ */
+
+ #endif /* _EXT4_H */
+Index: linux-3.10.0-123.13.2.el7.x86_64/fs/ext4/namei.c
+===================================================================
+--- linux-3.10.0-123.13.2.el7.x86_64.orig/fs/ext4/namei.c
++++ linux-3.10.0-123.13.2.el7.x86_64/fs/ext4/namei.c
+@@ -239,7 +239,8 @@ static unsigned dx_get_count(struct dx_e
+ static unsigned dx_get_limit(struct dx_entry *entries);
+ static void dx_set_count(struct dx_entry *entries, unsigned value);
+ static void dx_set_limit(struct dx_entry *entries, unsigned value);
+-static unsigned dx_root_limit(struct inode *dir, unsigned infosize);
++static inline unsigned dx_root_limit(struct inode *dir,
++ struct ext4_dir_entry_2 *dot_de, unsigned infosize);
+ static unsigned dx_node_limit(struct inode *dir);
+ static struct dx_frame *dx_probe(const struct qstr *d_name,
+ struct inode *dir,
+@@ -504,11 +505,12 @@ ext4_next_entry(struct ext4_dir_entry_2
+ */
+ struct dx_root_info * dx_get_dx_info(struct ext4_dir_entry_2 *de)
+ {
+- /* get dotdot first */
+- de = (struct ext4_dir_entry_2 *)((char *)de + EXT4_DIR_REC_LEN(1));
++ BUG_ON(de->name_len != 1);
++ /* get dotdot first */
++ de = (struct ext4_dir_entry_2 *)((char *)de + EXT4_DIR_REC_LEN(de));
+
+- /* dx root info is after dotdot entry */
+- de = (struct ext4_dir_entry_2 *)((char *)de + EXT4_DIR_REC_LEN(2));
++ /* dx root info is after dotdot entry */
++ de = (struct ext4_dir_entry_2 *)((char *)de + EXT4_DIR_REC_LEN(de));
+
+ return (struct dx_root_info *) de;
+ }
+@@ -553,10 +555,16 @@ static inline void dx_set_limit(struct d
+ ((struct dx_countlimit *) entries)->limit = cpu_to_le16(value);
+ }
+
+-static inline unsigned dx_root_limit(struct inode *dir, unsigned infosize)
++static inline unsigned dx_root_limit(struct inode *dir,
++ struct ext4_dir_entry_2 *dot_de, unsigned infosize)
+ {
+- unsigned entry_space = dir->i_sb->s_blocksize - EXT4_DIR_REC_LEN(1) -
+- EXT4_DIR_REC_LEN(2) - infosize;
++ struct ext4_dir_entry_2 *dotdot_de;
++ unsigned entry_space;
++
++ BUG_ON(dot_de->name_len != 1);
++ dotdot_de = ext4_next_entry(dot_de, dir->i_sb->s_blocksize);
++ entry_space = dir->i_sb->s_blocksize - EXT4_DIR_REC_LEN(dot_de) -
++ EXT4_DIR_REC_LEN(dotdot_de) - infosize;
+
+ if (EXT4_HAS_RO_COMPAT_FEATURE(dir->i_sb,
+ EXT4_FEATURE_RO_COMPAT_METADATA_CSUM))
+@@ -566,7 +574,7 @@ static inline unsigned dx_root_limit(str
+
+ static inline unsigned dx_node_limit(struct inode *dir)
+ {
+- unsigned entry_space = dir->i_sb->s_blocksize - EXT4_DIR_REC_LEN(0);
++ unsigned entry_space = dir->i_sb->s_blocksize - __EXT4_DIR_REC_LEN(0);
+
+ if (EXT4_HAS_RO_COMPAT_FEATURE(dir->i_sb,
+ EXT4_FEATURE_RO_COMPAT_METADATA_CSUM))
+@@ -617,7 +625,7 @@ static struct stats dx_show_leaf(struct
+ printk(":%x.%u ", h.hash,
+ (unsigned) ((char *) de - base));
+ }
+- space += EXT4_DIR_REC_LEN(de->name_len);
++ space += EXT4_DIR_REC_LEN(de);
+ names++;
+ }
+ de = ext4_next_entry(de, size);
+@@ -723,6 +731,7 @@ dx_probe(const struct qstr *d_name, stru
+ entries = (struct dx_entry *) (((char *)info) + info->info_length);
+
+ if (dx_get_limit(entries) != dx_root_limit(dir,
++ (struct ext4_dir_entry_2*)bh->b_data,
+ info->info_length)) {
+ ext4_warning(dir->i_sb, "dx entry: limit != root limit");
+ brelse(bh);
+@@ -916,7 +925,7 @@ static int htree_dirblock_to_tree(struct
+ de = (struct ext4_dir_entry_2 *) bh->b_data;
+ top = (struct ext4_dir_entry_2 *) ((char *) de +
+ dir->i_sb->s_blocksize -
+- EXT4_DIR_REC_LEN(0));
++ __EXT4_DIR_REC_LEN(0));
+ for (; de < top; de = ext4_next_entry(de, dir->i_sb->s_blocksize)) {
+ if (ext4_check_dir_entry(dir, NULL, de, bh,
+ bh->b_data, bh->b_size,
+@@ -1508,7 +1517,7 @@ dx_move_dirents(char *from, char *to, st
+ while (count--) {
+ struct ext4_dir_entry_2 *de = (struct ext4_dir_entry_2 *)
+ (from + (map->offs<<2));
+- rec_len = EXT4_DIR_REC_LEN(de->name_len);
++ rec_len = EXT4_DIR_REC_LEN(de);
+ memcpy (to, de, rec_len);
+ ((struct ext4_dir_entry_2 *) to)->rec_len =
+ ext4_rec_len_to_disk(rec_len, blocksize);
+@@ -1532,7 +1541,7 @@ static struct ext4_dir_entry_2* dx_pack_
+ while ((char*)de < base + blocksize) {
+ next = ext4_next_entry(de, blocksize);
+ if (de->inode && de->name_len) {
+- rec_len = EXT4_DIR_REC_LEN(de->name_len);
++ rec_len = EXT4_DIR_REC_LEN(de);
+ if (de > to)
+ memmove(to, de, rec_len);
+ to->rec_len = ext4_rec_len_to_disk(rec_len, blocksize);
+@@ -1664,14 +1673,15 @@ int ext4_find_dest_de(struct inode *dir,
+ struct buffer_head *bh,
+ void *buf, int buf_size,
+ const char *name, int namelen,
+- struct ext4_dir_entry_2 **dest_de)
++ struct ext4_dir_entry_2 **dest_de, int *dlen)
+ {
+ struct ext4_dir_entry_2 *de;
+- unsigned short reclen = EXT4_DIR_REC_LEN(namelen);
++ unsigned short reclen = __EXT4_DIR_REC_LEN(namelen) + (dlen ? *dlen:0);
+ int nlen, rlen;
+ unsigned int offset = 0;
+ char *top;
+
++ dlen ? *dlen = 0: 0; /* default set to 0 */
+ de = (struct ext4_dir_entry_2 *)buf;
+ top = buf + buf_size - reclen;
+ while ((char *) de <= top) {
+@@ -1680,10 +1690,26 @@ int ext4_find_dest_de(struct inode *dir,
+ return -EIO;
+ if (ext4_match(namelen, name, de))
+ return -EEXIST;
+- nlen = EXT4_DIR_REC_LEN(de->name_len);
++ nlen = EXT4_DIR_REC_LEN(de);
+ rlen = ext4_rec_len_from_disk(de->rec_len, buf_size);
+ if ((de->inode ? rlen - nlen : rlen) >= reclen)
+ break;
++ /* Then for dotdot entries, check for the smaller space
++ * required for just the entry, no FID */
++ if (namelen == 2 && memcmp(name, "..", 2) == 0) {
++ if ((de->inode ? rlen - nlen : rlen) >=
++ __EXT4_DIR_REC_LEN(namelen)) {
++ /* set dlen=1 to indicate not
++ * enough space store fid */
++ dlen ? *dlen = 1 : 0;
++ break;
++ }
++ /* The new ".." entry must be written over the
++ * previous ".." entry, which is the first
++ * entry traversed by this scan. If it doesn't
++ * fit, something is badly wrong, so -EIO. */
++ return -EIO;
++ }
+ de = (struct ext4_dir_entry_2 *)((char *)de + rlen);
+ offset += rlen;
+ }
+@@ -1697,12 +1723,12 @@ int ext4_find_dest_de(struct inode *dir,
+ void ext4_insert_dentry(struct inode *inode,
+ struct ext4_dir_entry_2 *de,
+ int buf_size,
+- const char *name, int namelen)
++ const char *name, int namelen, void *data)
+ {
+
+ int nlen, rlen;
+
+- nlen = EXT4_DIR_REC_LEN(de->name_len);
++ nlen = EXT4_DIR_REC_LEN(de);
+ rlen = ext4_rec_len_from_disk(de->rec_len, buf_size);
+ if (de->inode) {
+ struct ext4_dir_entry_2 *de1 =
+@@ -1716,6 +1742,11 @@ void ext4_insert_dentry(struct inode *in
+ ext4_set_de_type(inode->i_sb, de, inode->i_mode);
+ de->name_len = namelen;
+ memcpy(de->name, name, namelen);
++ if (data) {
++ de->name[namelen] = 0;
++ memcpy(&de->name[namelen + 1], data, *(char *)data);
++ de->file_type |= EXT4_DIRENT_LUFID;
++ }
+ }
+ /*
+ * Add a new entry into a directory (leaf) block. If de is non-NULL,
+@@ -1734,15 +1765,20 @@ static int add_dirent_to_buf(handle_t *h
+ int namelen = dentry->d_name.len;
+ unsigned int blocksize = dir->i_sb->s_blocksize;
+ int csum_size = 0;
+- int err;
++ int err, dlen = 0;
++ unsigned char *data;
+
++ data = ext4_dentry_get_data(inode->i_sb, (struct ext4_dentry_param *)
++ dentry->d_fsdata);
+ if (ext4_has_metadata_csum(inode->i_sb))
+ csum_size = sizeof(struct ext4_dir_entry_tail);
+
+ if (!de) {
++ if (data)
++ dlen = (*data) + 1;
+ err = ext4_find_dest_de(dir, inode,
+ bh, bh->b_data, blocksize - csum_size,
+- name, namelen, &de);
++ name, namelen, &de, &dlen);
+ if (err)
+ return err;
+ }
+@@ -1755,7 +1791,10 @@ static int add_dirent_to_buf(handle_t *h
+ }
+
+ /* By now the buffer is marked for journaling */
+- ext4_insert_dentry(inode, de, blocksize, name, namelen);
++ /* If we're writing the short form of "dotdot", don't add the data section */
++ if (dlen == 1)
++ data = NULL;
++ ext4_insert_dentry(inode, de, blocksize, name, namelen, data);
+
+ /*
+ * XXX shouldn't update any times until successful
+@@ -1866,7 +1905,8 @@ static int make_indexed_dir(handle_t *ha
+
+ dx_set_block(entries, 1);
+ dx_set_count(entries, 1);
+- dx_set_limit(entries, dx_root_limit(dir, sizeof(*dx_info)));
++ dx_set_limit(entries, dx_root_limit(dir,
++ dot_de, sizeof(*dx_info)));
+
+ /* Initialize as for dx_probe */
+ hinfo.hash_version = dx_info->hash_version;
+@@ -1909,6 +1949,8 @@ static int ext4_update_dotdot(handle_t *
+ struct buffer_head * dir_block;
+ struct ext4_dir_entry_2 * de;
+ int len, journal = 0, err = 0;
++ int dlen = 0;
++ char *data;
+
+ if (IS_ERR(handle))
+ return PTR_ERR(handle);
+@@ -1924,19 +1966,24 @@ static int ext4_update_dotdot(handle_t *
+ /* the first item must be "." */
+ assert(de->name_len == 1 && de->name[0] == '.');
+ len = le16_to_cpu(de->rec_len);
+- assert(len >= EXT4_DIR_REC_LEN(1));
+- if (len > EXT4_DIR_REC_LEN(1)) {
++ assert(len >= __EXT4_DIR_REC_LEN(1));
++ if (len > __EXT4_DIR_REC_LEN(1)) {
+ BUFFER_TRACE(dir_block, "get_write_access");
+ err = ext4_journal_get_write_access(handle, dir_block);
+ if (err)
+ goto out_journal;
+
+ journal = 1;
+- de->rec_len = cpu_to_le16(EXT4_DIR_REC_LEN(1));
++ de->rec_len = cpu_to_le16(EXT4_DIR_REC_LEN(de));
+ }
+
+- len -= EXT4_DIR_REC_LEN(1);
+- assert(len == 0 || len >= EXT4_DIR_REC_LEN(2));
++ len -= EXT4_DIR_REC_LEN(de);
++ data = ext4_dentry_get_data(dir->i_sb,
++ (struct ext4_dentry_param *)dentry->d_fsdata);
++ if (data)
++ dlen = *data + 1;
++ assert(len == 0 || len >= __EXT4_DIR_REC_LEN(2 + dlen));
++
+ de = (struct ext4_dir_entry_2 *)
+ ((char *) de + le16_to_cpu(de->rec_len));
+ if (!journal) {
+@@ -1950,10 +1997,15 @@ static int ext4_update_dotdot(handle_t *
+ if (len > 0)
+ de->rec_len = cpu_to_le16(len);
+ else
+- assert(le16_to_cpu(de->rec_len) >= EXT4_DIR_REC_LEN(2));
++ assert(le16_to_cpu(de->rec_len) >= __EXT4_DIR_REC_LEN(2));
+ de->name_len = 2;
+ strcpy (de->name, "..");
+- ext4_set_de_type(dir->i_sb, de, S_IFDIR);
++ if (data != NULL && ext4_get_dirent_data_len(de) >= dlen) {
++ de->name[2] = 0;
++ memcpy(&de->name[2 + 1], data, *data);
++ ext4_set_de_type(dir->i_sb, de, S_IFDIR);
++ de->file_type |= EXT4_DIRENT_LUFID;
++ }
+
+ out_journal:
+ if (journal) {
+@@ -2428,30 +2480,61 @@ retry:
+ return err;
+ }
+
++struct tp_block {
++ struct inode *inode;
++ void *data1;
++ void *data2;
++};
++
+ struct ext4_dir_entry_2 *ext4_init_dot_dotdot(struct inode *inode,
+ struct ext4_dir_entry_2 *de,
+ int blocksize, int csum_size,
+ unsigned int parent_ino, int dotdot_real_len)
+ {
++ void *data1 = NULL, *data2 = NULL;
++ int dot_reclen = 0;
++
++ if (dotdot_real_len == 10) {
++ struct tp_block *tpb = (struct tp_block*)inode;
++ data1 = tpb->data1;
++ data2 = tpb->data2;
++ inode = tpb->inode;
++ dotdot_real_len = 0;
++ }
+ de->inode = cpu_to_le32(inode->i_ino);
+ de->name_len = 1;
+- de->rec_len = ext4_rec_len_to_disk(EXT4_DIR_REC_LEN(de->name_len),
+- blocksize);
+ strcpy(de->name, ".");
+ ext4_set_de_type(inode->i_sb, de, S_IFDIR);
+
++ /* get packed fid data*/
++ data1 = ext4_dentry_get_data(inode->i_sb,
++ (struct ext4_dentry_param *) data1);
++ if (data1) {
++ de->name[1] = 0;
++ memcpy(&de->name[2], data1, *(char *) data1);
++ de->file_type |= EXT4_DIRENT_LUFID;
++ }
++ de->rec_len = cpu_to_le16(EXT4_DIR_REC_LEN(de));
++ dot_reclen = cpu_to_le16(de->rec_len);
+ de = ext4_next_entry(de, blocksize);
+ de->inode = cpu_to_le32(parent_ino);
+ de->name_len = 2;
++ strcpy(de->name, "..");
++ ext4_set_de_type(inode->i_sb, de, S_IFDIR);
++ data2 = ext4_dentry_get_data(inode->i_sb,
++ (struct ext4_dentry_param *) data2);
++ if (data2) {
++ de->name[2] = 0;
++ memcpy(&de->name[3], data2, *(char *) data2);
++ de->file_type |= EXT4_DIRENT_LUFID;
++ }
+ if (!dotdot_real_len)
+ de->rec_len = ext4_rec_len_to_disk(blocksize -
+- (csum_size + EXT4_DIR_REC_LEN(1)),
++ (csum_size + dot_reclen),
+ blocksize);
+ else
+ de->rec_len = ext4_rec_len_to_disk(
+- EXT4_DIR_REC_LEN(de->name_len), blocksize);
++ EXT4_DIR_REC_LEN(de), blocksize);
+- strcpy(de->name, "..");
+- ext4_set_de_type(inode->i_sb, de, S_IFDIR);
+
+ return ext4_next_entry(de, blocksize);
+ }
+@@ -2457,8 +2540,10 @@ struct ext4_dir_entry_2 *ext4_init_dot_d
+ }
+
+ static int ext4_init_new_dir(handle_t *handle, struct inode *dir,
+- struct inode *inode)
++ struct inode *inode,
++ const void *data1, const void *data2)
+ {
++ struct tp_block param;
+ struct buffer_head *dir_block = NULL;
+ struct ext4_dir_entry_2 *de;
+ struct ext4_dir_entry_tail *t;
+@@ -2488,7 +2573,11 @@ static int ext4_init_new_dir(handle_t *h
+ if (err)
+ goto out;
+ de = (struct ext4_dir_entry_2 *)dir_block->b_data;
+- ext4_init_dot_dotdot(inode, de, blocksize, csum_size, dir->i_ino, 0);
++ param.inode = inode;
++ param.data1 = (void *)data1;
++ param.data2 = (void *)data2;
++ ext4_init_dot_dotdot((struct inode *)(¶m), de, blocksize,
++ csum_size, dir->i_ino, 10);
+ set_nlink(inode, 2);
+ if (csum_size) {
+ t = EXT4_DIRENT_TAIL(dir_block->b_data, blocksize);
+@@ -2508,7 +2597,8 @@ out:
+ /* Initialize @inode as a subdirectory of @dir, and add the
+ * "." and ".." entries into the first directory block. */
+ int ext4_add_dot_dotdot(handle_t *handle, struct inode * dir,
+- struct inode *inode)
++ struct inode *inode,
++ const void *data1, const void *data2)
+ {
+ if (IS_ERR(handle))
+ return PTR_ERR(handle);
+@@ -2518,7 +2608,7 @@ int ext4_add_dot_dotdot(handle_t *handle
+
+ inode->i_op = &ext4_dir_inode_operations;
+ inode->i_fop = &ext4_dir_operations;
+- return ext4_init_new_dir(handle, dir, inode);
++ return ext4_init_new_dir(handle, dir, inode, data1, data2);
+ }
+ EXPORT_SYMBOL(ext4_add_dot_dotdot);
+
+@@ -2546,7 +2636,7 @@ retry:
+
+ inode->i_op = &ext4_dir_inode_operations;
+ inode->i_fop = &ext4_dir_operations;
+- err = ext4_init_new_dir(handle, dir, inode);
++ err = ext4_init_new_dir(handle, dir, inode, NULL, NULL);
+ if (err)
+ goto out_clear_inode;
+ err = ext4_mark_inode_dirty(handle, inode);
+@@ -2598,7 +2688,7 @@ static int empty_dir(struct inode *inode
+ }
+
+ sb = inode->i_sb;
+- if (inode->i_size < EXT4_DIR_REC_LEN(1) + EXT4_DIR_REC_LEN(2)) {
++ if (inode->i_size < __EXT4_DIR_REC_LEN(1) + __EXT4_DIR_REC_LEN(2)) {
+ EXT4_ERROR_INODE(inode, "invalid size");
+ return 1;
+ }
+Index: linux-3.10.0-123.13.2.el7.x86_64/fs/ext4/inline.c
+===================================================================
+--- linux-3.10.0-123.13.2.el7.x86_64.orig/fs/ext4/inline.c
++++ linux-3.10.0-123.13.2.el7.x86_64/fs/ext4/inline.c
+@@ -988,7 +998,7 @@ static int ext4_add_dirent_to_inline(han
+
+ err = ext4_find_dest_de(dir, inode, iloc->bh,
+ inline_start, inline_size,
+- name, namelen, &de);
++ name, namelen, &de, NULL);
+ if (err)
+ return err;
+
+@@ -998,7 +998,7 @@ static int ext4_add_dirent_to_inline(han
+ err = ext4_journal_get_write_access(handle, iloc->bh);
+ if (err)
+ return err;
+- ext4_insert_dentry(inode, de, inline_size, name, namelen);
++ ext4_insert_dentry(inode, de, inline_size, name, namelen, NULL);
+
+ ext4_show_inline_dir(dir, iloc->bh, inline_start, inline_size);
+
+@@ -1078,7 +1078,7 @@ static int ext4_update_inline_dir(handle
+ int old_size = EXT4_I(dir)->i_inline_size - EXT4_MIN_INLINE_DATA_SIZE;
+ int new_size = get_max_inline_xattr_value_size(dir, iloc);
+
+- if (new_size - old_size <= EXT4_DIR_REC_LEN(1))
++ if (new_size - old_size <= __EXT4_DIR_REC_LEN(1))
+ return -ENOSPC;
+
+ ret = ext4_update_inline_data(handle, dir,
+@@ -1348,7 +1348,7 @@ int htree_inlinedir_to_tree(struct file
+ fake.name_len = 1;
+ strcpy(fake.name, ".");
+ fake.rec_len = ext4_rec_len_to_disk(
+- EXT4_DIR_REC_LEN(fake.name_len),
++ EXT4_DIR_REC_LEN(&fake),
+ inline_size);
+ ext4_set_de_type(inode->i_sb, &fake, S_IFDIR);
+ de = &fake;
+@@ -1358,7 +1358,7 @@ int htree_inlinedir_to_tree(struct file
+ fake.name_len = 2;
+ strcpy(fake.name, "..");
+ fake.rec_len = ext4_rec_len_to_disk(
+- EXT4_DIR_REC_LEN(fake.name_len),
++ EXT4_DIR_REC_LEN(&fake),
+ inline_size);
+ ext4_set_de_type(inode->i_sb, &fake, S_IFDIR);
+ de = &fake;
+@@ -1455,8 +1455,8 @@ int ext4_read_inline_dir(struct file *fi
+ * So we will use extra_offset and extra_size to indicate them
+ * during the inline dir iteration.
+ */
+- dotdot_offset = EXT4_DIR_REC_LEN(1);
+- dotdot_size = dotdot_offset + EXT4_DIR_REC_LEN(2);
++ dotdot_offset = __EXT4_DIR_REC_LEN(1);
++ dotdot_size = dotdot_offset + __EXT4_DIR_REC_LEN(2);
+ extra_offset = dotdot_size - EXT4_INLINE_DOTDOT_SIZE;
+ extra_size = extra_offset + inline_size;
+
+@@ -1493,7 +1493,7 @@ revalidate:
+ * failure will be detected in the
+ * dirent test below. */
+ if (ext4_rec_len_from_disk(de->rec_len,
+- extra_size) < EXT4_DIR_REC_LEN(1))
++ extra_size) < __EXT4_DIR_REC_LEN(1))
+ break;
+ i += ext4_rec_len_from_disk(de->rec_len,
+ extra_size);
+Index: linux-3.10.0-123.13.2.el7.x86_64/fs/ext4/super.c
+===================================================================
+--- linux-3.10.0-123.13.2.el7.x86_64.orig/fs/ext4/super.c
++++ linux-3.10.0-123.13.2.el7.x86_64/fs/ext4/super.c
+@@ -1151,7 +1151,7 @@ enum {
+ Opt_data_err_abort, Opt_data_err_ignore,
+ Opt_usrjquota, Opt_grpjquota, Opt_offusrjquota, Opt_offgrpjquota,
+ Opt_jqfmt_vfsold, Opt_jqfmt_vfsv0, Opt_jqfmt_vfsv1, Opt_quota,
+- Opt_noquota, Opt_barrier, Opt_nobarrier, Opt_err,
++ Opt_noquota, Opt_barrier, Opt_nobarrier, Opt_err, Opt_dirdata,
+ Opt_usrquota, Opt_grpquota, Opt_i_version,
+ Opt_stripe, Opt_delalloc, Opt_nodelalloc, Opt_mblk_io_submit,
+ Opt_nomblk_io_submit, Opt_block_validity, Opt_noblock_validity,
+@@ -1216,6 +1216,7 @@ static const match_table_t tokens = {
+ {Opt_stripe, "stripe=%u"},
+ {Opt_delalloc, "delalloc"},
+ {Opt_nodelalloc, "nodelalloc"},
++ {Opt_dirdata, "dirdata"},
+ {Opt_removed, "mblk_io_submit"},
+ {Opt_removed, "nomblk_io_submit"},
+ {Opt_block_validity, "block_validity"},
+@@ -1424,6 +1425,7 @@ static const struct mount_opts {
+ {Opt_usrjquota, 0, MOPT_Q},
+ {Opt_grpjquota, 0, MOPT_Q},
+ {Opt_offusrjquota, 0, MOPT_Q},
++ {Opt_dirdata, EXT4_MOUNT_DIRDATA, MOPT_SET},
+ {Opt_offgrpjquota, 0, MOPT_Q},
+ {Opt_jqfmt_vfsold, QFMT_VFS_OLD, MOPT_QFMT},
+ {Opt_jqfmt_vfsv0, QFMT_VFS_V0, MOPT_QFMT},
--- /dev/null
+mbcache provides absolutely no value for Lustre xattrs (because
+they are unique and cannot be shared between files), and as we can
+see it has a noticeable overhead in some cases. In the past there
+was a CONFIG_MBCACHE option that allowed it to be disabled, but
+this was removed in newer kernels, so we need to patch ldiskfs to
+make the cache optional again.
+
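+With the patch applied, the cache can be bypassed per mount using the new
+no_mbcache option added below, e.g. (illustrative; device and mount point are
+placeholders):
+
+	mount -t ldiskfs -o no_mbcache /dev/sdb1 /mnt/ost0
+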
+Index: linux-3.10.0-123.13.2.el7.x86_64/fs/ext4/ext4.h
+===================================================================
+--- linux-3.10.0-123.13.2.el7.x86_64.orig/fs/ext4/ext4.h
++++ linux-3.10.0-123.13.2.el7.x86_64/fs/ext4/ext4.h
+@@ -944,6 +944,7 @@ struct ext4_inode_info {
+ /*
+ * Mount flags set via mount options or defaults
+ */
++#define EXT4_MOUNT_NO_MBCACHE 0x00001 /* Disable mbcache */
+ #define EXT4_MOUNT_GRPID 0x00004 /* Create files with directory's group */
+ #define EXT4_MOUNT_DEBUG 0x00008 /* Some debugging messages */
+ #define EXT4_MOUNT_ERRORS_CONT 0x00010 /* Continue on errors */
+Index: linux-3.10.0-123.13.2.el7.x86_64/fs/ext4/super.c
+===================================================================
+--- linux-3.10.0-123.13.2.el7.x86_64.orig/fs/ext4/super.c
++++ linux-3.10.0-123.13.2.el7.x86_64/fs/ext4/super.c
+@@ -1157,6 +1157,7 @@ enum {
+ Opt_nomblk_io_submit, Opt_block_validity, Opt_noblock_validity,
+ Opt_inode_readahead_blks, Opt_journal_ioprio,
+ Opt_dioread_nolock, Opt_dioread_lock,
++ Opt_no_mbcache,
+ Opt_discard, Opt_nodiscard, Opt_init_itable, Opt_noinit_itable,
+ Opt_max_dir_size_kb,
+ };
+@@ -1231,6 +1232,7 @@ static const match_table_t tokens = {
+ {Opt_discard, "discard"},
+ {Opt_nodiscard, "nodiscard"},
+ {Opt_init_itable, "init_itable=%u"},
++ {Opt_no_mbcache, "no_mbcache"},
+ {Opt_init_itable, "init_itable"},
+ {Opt_noinit_itable, "noinit_itable"},
+ {Opt_max_dir_size_kb, "max_dir_size_kb=%u"},
+@@ -1390,6 +1392,7 @@ static const struct mount_opts {
+ {Opt_noauto_da_alloc, EXT4_MOUNT_NO_AUTO_DA_ALLOC, MOPT_SET},
+ {Opt_auto_da_alloc, EXT4_MOUNT_NO_AUTO_DA_ALLOC, MOPT_CLEAR},
+ {Opt_noinit_itable, EXT4_MOUNT_INIT_INODE_TABLE, MOPT_CLEAR},
++ {Opt_no_mbcache, EXT4_MOUNT_NO_MBCACHE, MOPT_SET},
+ {Opt_commit, 0, MOPT_GTE0},
+ {Opt_max_batch_time, 0, MOPT_GTE0},
+ {Opt_min_batch_time, 0, MOPT_GTE0},
+Index: linux-3.10.0-123.13.2.el7.x86_64/fs/ext4/xattr.c
+===================================================================
+--- linux-3.10.0-123.13.2.el7.x86_64.orig/fs/ext4/xattr.c
++++ linux-3.10.0-123.13.2.el7.x86_64/fs/ext4/xattr.c
+@@ -81,7 +81,8 @@
+ # define ea_bdebug(bh, fmt, ...) no_printk(fmt, ##__VA_ARGS__)
+ #endif
+
+-static void ext4_xattr_cache_insert(struct buffer_head *);
++static void ext4_xattr_cache_insert(struct super_block *,
++ struct buffer_head *);
+ static struct buffer_head *ext4_xattr_cache_find(struct inode *,
+ struct ext4_xattr_header *,
+ struct mb_cache_entry **);
+@@ -385,7 +386,7 @@ bad_block:
+ error = -EIO;
+ goto cleanup;
+ }
+- ext4_xattr_cache_insert(bh);
++ ext4_xattr_cache_insert(inode->i_sb, bh);
+ entry = BFIRST(bh);
+ error = ext4_xattr_find_entry(&entry, name_index, name, bh->b_size, 1,
+ inode);
+@@ -546,7 +547,7 @@ ext4_xattr_block_list(struct dentry *den
+ error = -EIO;
+ goto cleanup;
+ }
+- ext4_xattr_cache_insert(bh);
++ ext4_xattr_cache_insert(inode->i_sb, bh);
+ error = ext4_xattr_list_entries(dentry, BFIRST(bh), buffer, buffer_size);
+
+ cleanup:
+@@ -643,7 +644,9 @@ ext4_xattr_release_block(handle_t *handl
+ struct mb_cache_entry *ce = NULL;
+ int error = 0;
+
+- ce = mb_cache_entry_get(ext4_xattr_cache, bh->b_bdev, bh->b_blocknr);
++ if (!test_opt(inode->i_sb, NO_MBCACHE))
++ ce = mb_cache_entry_get(ext4_xattr_cache, bh->b_bdev,
++ bh->b_blocknr);
+ BUFFER_TRACE(bh, "get_write_access");
+ error = ext4_journal_get_write_access(handle, bh);
+ if (error)
+@@ -1037,8 +1040,10 @@ ext4_xattr_block_set(handle_t *handle, s
+ #define header(x) ((struct ext4_xattr_header *)(x))
+
+ if (s->base) {
+- ce = mb_cache_entry_get(ext4_xattr_cache, bs->bh->b_bdev,
+- bs->bh->b_blocknr);
++ if (!test_opt(inode->i_sb, NO_MBCACHE))
++ ce = mb_cache_entry_get(ext4_xattr_cache,
++ bs->bh->b_bdev,
++ bs->bh->b_blocknr);
+ BUFFER_TRACE(bs->bh, "get_write_access");
+ error = ext4_journal_get_write_access(handle, bs->bh);
+ if (error)
+@@ -1055,7 +1060,7 @@ ext4_xattr_block_set(handle_t *handle, s
+ if (!IS_LAST_ENTRY(s->first))
+ ext4_xattr_rehash(header(s->base),
+ s->here);
+- ext4_xattr_cache_insert(bs->bh);
++ ext4_xattr_cache_insert(sb, bs->bh);
+ }
+ unlock_buffer(bs->bh);
+ if (error == -EIO)
+@@ -1138,7 +1143,8 @@ inserted:
+ if (error)
+ goto cleanup_dquot;
+ }
+- mb_cache_entry_release(ce);
++ if (ce)
++ mb_cache_entry_release(ce);
+ ce = NULL;
+ } else if (bs->bh && s->base == bs->bh->b_data) {
+ /* We were modifying this block in-place. */
+@@ -1191,7 +1197,7 @@ getblk_failed:
+ memcpy(new_bh->b_data, s->base, new_bh->b_size);
+ set_buffer_uptodate(new_bh);
+ unlock_buffer(new_bh);
+- ext4_xattr_cache_insert(new_bh);
++ ext4_xattr_cache_insert(sb, new_bh);
+ error = ext4_handle_dirty_xattr_block(handle,
+ inode, new_bh);
+ if (error)
+@@ -1938,12 +1944,15 @@ ext4_xattr_put_super(struct super_block
+ * Returns 0, or a negative error number on failure.
+ */
+ static void
+-ext4_xattr_cache_insert(struct buffer_head *bh)
++ext4_xattr_cache_insert(struct super_block *sb, struct buffer_head *bh)
+ {
+ __u32 hash = le32_to_cpu(BHDR(bh)->h_hash);
+ struct mb_cache_entry *ce;
+ int error;
+
++ if (test_opt(sb, NO_MBCACHE))
++ return;
++
+ ce = mb_cache_entry_alloc(ext4_xattr_cache, GFP_NOFS);
+ if (!ce) {
+ ea_bdebug(bh, "out of memory");
+@@ -2016,6 +2025,8 @@ ext4_xattr_cache_find(struct inode *inod
+ __u32 hash = le32_to_cpu(header->h_hash);
+ struct mb_cache_entry *ce;
+
++ if (test_opt(inode->i_sb, NO_MBCACHE))
++ return NULL;
+ if (!header->h_hash)
+ return NULL; /* never share */
+ ea_idebug(inode, "looking for cached blocks [%x]", (int)hash);
--- /dev/null
+Index: linux-3.10.0-123.9.3.el7.x86_64/fs/ext4/namei.c
+===================================================================
+--- linux-3.10.0-123.9.3.el7.x86_64.orig/fs/ext4/namei.c
++++ linux-3.10.0-123.9.3.el7.x86_64/fs/ext4/namei.c
+@@ -1894,6 +1894,72 @@ static int make_indexed_dir(handle_t *ha
+ return retval;
+ }
+
++/* update ".." for hash-indexed directory, split the item "." if necessary */
++static int ext4_update_dotdot(handle_t *handle, struct dentry *dentry,
++ struct inode *inode)
++{
++ struct inode * dir = dentry->d_parent->d_inode;
++ struct buffer_head * dir_block;
++ struct ext4_dir_entry_2 * de;
++ int len, journal = 0, err = 0;
++
++ if (IS_ERR(handle))
++ return PTR_ERR(handle);
++
++ if (IS_DIRSYNC(dir))
++ handle->h_sync = 1;
++
++ dir_block = ext4_bread(handle, dir, 0, 0, &err);
++ if (!dir_block)
++ goto out;
++
++ de = (struct ext4_dir_entry_2 *)dir_block->b_data;
++ /* the first item must be "." */
++ assert(de->name_len == 1 && de->name[0] == '.');
++ len = le16_to_cpu(de->rec_len);
++ assert(len >= EXT4_DIR_REC_LEN(1));
++ if (len > EXT4_DIR_REC_LEN(1)) {
++ BUFFER_TRACE(dir_block, "get_write_access");
++ err = ext4_journal_get_write_access(handle, dir_block);
++ if (err)
++ goto out_journal;
++
++ journal = 1;
++ de->rec_len = cpu_to_le16(EXT4_DIR_REC_LEN(1));
++ }
++
++ len -= EXT4_DIR_REC_LEN(1);
++ assert(len == 0 || len >= EXT4_DIR_REC_LEN(2));
++ de = (struct ext4_dir_entry_2 *)
++ ((char *) de + le16_to_cpu(de->rec_len));
++ if (!journal) {
++ BUFFER_TRACE(dir_block, "get_write_access");
++ err = ext4_journal_get_write_access(handle, dir_block);
++ if (err)
++ goto out_journal;
++ }
++
++ de->inode = cpu_to_le32(inode->i_ino);
++ if (len > 0)
++ de->rec_len = cpu_to_le16(len);
++ else
++ assert(le16_to_cpu(de->rec_len) >= EXT4_DIR_REC_LEN(2));
++ de->name_len = 2;
++ strcpy (de->name, "..");
++ ext4_set_de_type(dir->i_sb, de, S_IFDIR);
++
++out_journal:
++ if (journal) {
++ BUFFER_TRACE(dir_block, "call ext4_handle_dirty_metadata");
++ err = ext4_handle_dirty_dirent_node(handle, dir, dir_block);
++ ext4_mark_inode_dirty(handle, dir);
++ }
++ brelse (dir_block);
++
++out:
++ return err;
++}
++
+ /*
+ * ext4_add_entry()
+ *
+@@ -1938,6 +2004,9 @@ int ext4_add_entry(handle_t *handle, str
+ }
+
+ if (is_dx(dir)) {
++ if (dentry->d_name.len == 2 &&
++ memcmp(dentry->d_name.name, "..", 2) == 0)
++ return ext4_update_dotdot(handle, dentry, inode);
+ retval = ext4_dx_add_entry(handle, dentry, inode);
+ if (!retval || (retval != ERR_BAD_DX_DIR))
+ return retval;
--- /dev/null
+Index: linux-3.10.0-123.el7.x86_64/fs/ext4/inode.c
+===================================================================
+--- linux-3.10.0-123.el7.x86_64.orig/fs/ext4/inode.c
++++ linux-3.10.0-123.el7.x86_64/fs/ext4/inode.c
+@@ -4286,10 +4286,10 @@ struct inode *ext4_iget(struct super_blo
+ EXT4_EINODE_GET_XTIME(i_crtime, ei, raw_inode);
+
+ if (likely(!test_opt2(inode->i_sb, HURD_COMPAT))) {
+- inode->i_version = le32_to_cpu(raw_inode->i_disk_version);
++ ei->i_fs_version = le32_to_cpu(raw_inode->i_disk_version);
+ if (EXT4_INODE_SIZE(inode->i_sb) > EXT4_GOOD_OLD_INODE_SIZE) {
+ if (EXT4_FITS_IN_INODE(raw_inode, ei, i_version_hi))
+- inode->i_version |=
++ ei->i_fs_version |=
+ (__u64)(le32_to_cpu(raw_inode->i_version_hi)) << 32;
+ }
+ }
+@@ -4506,11 +4506,11 @@ static int ext4_do_update_inode(handle_t
+ }
+
+ if (likely(!test_opt2(inode->i_sb, HURD_COMPAT))) {
+- raw_inode->i_disk_version = cpu_to_le32(inode->i_version);
++ raw_inode->i_disk_version = cpu_to_le32(ei->i_fs_version);
+ if (ei->i_extra_isize) {
+ if (EXT4_FITS_IN_INODE(raw_inode, ei, i_version_hi))
+ raw_inode->i_version_hi =
+- cpu_to_le32(inode->i_version >> 32);
++ cpu_to_le32(ei->i_fs_version >> 32);
+ raw_inode->i_extra_isize =
+ cpu_to_le16(ei->i_extra_isize);
+ }
+Index: linux-3.10.0-123.el7.x86_64/fs/ext4/ialloc.c
+===================================================================
+--- linux-3.10.0-123.el7.x86_64.orig/fs/ext4/ialloc.c
++++ linux-3.10.0-123.el7.x86_64/fs/ext4/ialloc.c
+@@ -898,6 +898,7 @@ got:
+ ei->i_dtime = 0;
+ ei->i_block_group = group;
+ ei->i_last_alloc_group = ~0;
++ ei->i_fs_version = 0;
+
+ ext4_set_inode_flags(inode);
+ if (IS_DIRSYNC(inode))
+Index: linux-3.10.0-123.el7.x86_64/fs/ext4/ext4.h
+===================================================================
+--- linux-3.10.0-123.el7.x86_64.orig/fs/ext4/ext4.h
++++ linux-3.10.0-123.el7.x86_64/fs/ext4/ext4.h
+@@ -921,6 +921,8 @@ struct ext4_inode_info {
+ tid_t i_sync_tid;
+ tid_t i_datasync_tid;
+
++ __u64 i_fs_version;
++
+ /* Precomputed uuid+inum+igen checksum for seeding inode checksums */
+ __u32 i_csum_seed;
+ };
--- /dev/null
+This patch removes the static definition of the dx_root struct so that the "."
+and ".." dirents can carry extra data. It does not change any functionality,
+but is required by the ext4_data_in_dirent patch.
+
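+For reference, after this change the dx root block is no longer described by a
+fixed struct; it is walked dirent by dirent, which is what dx_get_dx_info()
+below does (layout sketch):
+
+	/* block 0 of an indexed directory, after this patch:                */
+	/*   "." dirent          (real ext4_dir_entry_2, variable rec_len)   */
+	/*   ".." dirent         (real ext4_dir_entry_2, variable rec_len)   */
+	/*   struct dx_root_info (hash_version, info_length, levels, ...)    */
+	/*   struct dx_entry entries[]                                       */
+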
+Index: linux-3.10.0-123.9.3.el7.x86_64/fs/ext4/namei.c
+===================================================================
+--- linux-3.10.0-123.9.3.el7.x86_64.orig/fs/ext4/namei.c
++++ linux-3.10.0-123.9.3.el7.x86_64/fs/ext4/namei.c
+@@ -193,22 +193,13 @@ struct dx_entry
+ * hash version mod 4 should never be 0. Sincerely, the paranoia department.
+ */
+
+-struct dx_root
++struct dx_root_info
+ {
+- struct fake_dirent dot;
+- char dot_name[4];
+- struct fake_dirent dotdot;
+- char dotdot_name[4];
+- struct dx_root_info
+- {
+- __le32 reserved_zero;
+- u8 hash_version;
+- u8 info_length; /* 8 */
+- u8 indirect_levels;
+- u8 unused_flags;
+- }
+- info;
+- struct dx_entry entries[0];
++ __le32 reserved_zero;
++ u8 hash_version;
++ u8 info_length; /* 8 */
++ u8 indirect_levels;
++ u8 unused_flags;
+ };
+
+ struct dx_node
+@@ -511,6 +502,16 @@ ext4_next_entry(struct ext4_dir_entry_2
+ * Future: use high four bits of block for coalesce-on-delete flags
+ * Mask them off for now.
+ */
++struct dx_root_info * dx_get_dx_info(struct ext4_dir_entry_2 *de)
++{
++ /* get dotdot first */
++ de = (struct ext4_dir_entry_2 *)((char *)de + EXT4_DIR_REC_LEN(1));
++
++ /* dx root info is after dotdot entry */
++ de = (struct ext4_dir_entry_2 *)((char *)de + EXT4_DIR_REC_LEN(2));
++
++ return (struct dx_root_info *) de;
++}
+
+ static inline ext4_lblk_t dx_get_block(struct dx_entry *entry)
+ {
+@@ -673,7 +674,7 @@ dx_probe(const struct qstr *d_name, stru
+ {
+ unsigned count, indirect;
+ struct dx_entry *at, *entries, *p, *q, *m;
+- struct dx_root *root;
++ struct dx_root_info * info;
+ struct buffer_head *bh;
+ struct dx_frame *frame = frame_in;
+ u32 hash;
+@@ -684,17 +685,18 @@ dx_probe(const struct qstr *d_name, stru
+ *err = PTR_ERR(bh);
+ goto fail;
+ }
+- root = (struct dx_root *) bh->b_data;
+- if (root->info.hash_version != DX_HASH_TEA &&
+- root->info.hash_version != DX_HASH_HALF_MD4 &&
+- root->info.hash_version != DX_HASH_LEGACY) {
++
++ info = dx_get_dx_info((struct ext4_dir_entry_2*)bh->b_data);
++ if (info->hash_version != DX_HASH_TEA &&
++ info->hash_version != DX_HASH_HALF_MD4 &&
++ info->hash_version != DX_HASH_LEGACY) {
+ ext4_warning(dir->i_sb, "Unrecognised inode hash code %d for directory "
+- "#%lu", root->info.hash_version, dir->i_ino);
++ "#%lu", info->hash_version, dir->i_ino);
+ brelse(bh);
+ *err = ERR_BAD_DX_DIR;
+ goto fail;
+ }
+- hinfo->hash_version = root->info.hash_version;
++ hinfo->hash_version = info->hash_version;
+ if (hinfo->hash_version <= DX_HASH_TEA)
+ hinfo->hash_version += EXT4_SB(dir->i_sb)->s_hash_unsigned;
+ hinfo->seed = EXT4_SB(dir->i_sb)->s_hash_seed;
+@@ -702,27 +704,26 @@ dx_probe(const struct qstr *d_name, stru
+ ext4fs_dirhash(d_name->name, d_name->len, hinfo);
+ hash = hinfo->hash;
+
+- if (root->info.unused_flags & 1) {
++ if (info->unused_flags & 1) {
+ ext4_warning(dir->i_sb, "Unimplemented inode hash flags: %#06x",
+- root->info.unused_flags);
++ info->unused_flags);
+ brelse(bh);
+ *err = ERR_BAD_DX_DIR;
+ goto fail;
+ }
+
+- if ((indirect = root->info.indirect_levels) > 1) {
++ if ((indirect = info->indirect_levels) > 1) {
+ ext4_warning(dir->i_sb, "Unimplemented inode hash depth: %#06x",
+- root->info.indirect_levels);
++ info->indirect_levels);
+ brelse(bh);
+ *err = ERR_BAD_DX_DIR;
+ goto fail;
+ }
+
+- entries = (struct dx_entry *) (((char *)&root->info) +
+- root->info.info_length);
++ entries = (struct dx_entry *) (((char *)info) + info->info_length);
+
+ if (dx_get_limit(entries) != dx_root_limit(dir,
+- root->info.info_length)) {
++ info->info_length)) {
+ ext4_warning(dir->i_sb, "dx entry: limit != root limit");
+ brelse(bh);
+ *err = ERR_BAD_DX_DIR;
+@@ -807,10 +808,12 @@ fail:
+
+ static void dx_release (struct dx_frame *frames)
+ {
++ struct dx_root_info *info;
+ if (frames[0].bh == NULL)
+ return;
+
+- if (((struct dx_root *) frames[0].bh->b_data)->info.indirect_levels)
++ info = dx_get_dx_info((struct ext4_dir_entry_2*)frames[0].bh->b_data);
++ if (info->indirect_levels)
+ brelse(frames[1].bh);
+ brelse(frames[0].bh);
+ }
+@@ -1787,10 +1790,9 @@ static int make_indexed_dir(handle_t *ha
+ const char *name = dentry->d_name.name;
+ int namelen = dentry->d_name.len;
+ struct buffer_head *bh2;
+- struct dx_root *root;
+ struct dx_frame frames[2], *frame;
+ struct dx_entry *entries;
+- struct ext4_dir_entry_2 *de, *de2;
++ struct ext4_dir_entry_2 *de, *de2, *dot_de, *dotdot_de;
+ struct ext4_dir_entry_tail *t;
+ char *data1, *top;
+ unsigned len;
+@@ -1798,7 +1800,7 @@ static int make_indexed_dir(handle_t *ha
+ unsigned blocksize;
+ struct dx_hash_info hinfo;
+ ext4_lblk_t block;
+- struct fake_dirent *fde;
++ struct dx_root_info *dx_info;
+ int csum_size = 0;
+
+ if (EXT4_HAS_RO_COMPAT_FEATURE(inode->i_sb,
+@@ -1813,18 +1815,19 @@ static int make_indexed_dir(handle_t *ha
+ brelse(bh);
+ return retval;
+ }
+- root = (struct dx_root *) bh->b_data;
++
++ dot_de = (struct ext4_dir_entry_2 *) bh->b_data;
++ dotdot_de = ext4_next_entry(dot_de, blocksize);
+
+ /* The 0th block becomes the root, move the dirents out */
+- fde = &root->dotdot;
+- de = (struct ext4_dir_entry_2 *)((char *)fde +
+- ext4_rec_len_from_disk(fde->rec_len, blocksize));
+- if ((char *) de >= (((char *) root) + blocksize)) {
++ de = (struct ext4_dir_entry_2 *)((char *)dotdot_de +
++ ext4_rec_len_from_disk(dotdot_de->rec_len, blocksize));
++ if ((char *) de >= (((char *) dot_de) + blocksize)) {
+ EXT4_ERROR_INODE(dir, "invalid rec_len for '..'");
+ brelse(bh);
+ return -EIO;
+ }
+- len = ((char *) root) + (blocksize - csum_size) - (char *) de;
++ len = ((char *) dot_de) + (blocksize - csum_size) - (char *) de;
+
+ /* Allocate new block for the 0th block's dirents */
+ bh2 = ext4_append(handle, dir, &block);
+@@ -1850,19 +1853,23 @@ static int make_indexed_dir(handle_t *ha
+ }
+
+ /* Initialize the root; the dot dirents already exist */
+- de = (struct ext4_dir_entry_2 *) (&root->dotdot);
+- de->rec_len = ext4_rec_len_to_disk(blocksize - EXT4_DIR_REC_LEN(2),
+- blocksize);
+- memset (&root->info, 0, sizeof(root->info));
+- root->info.info_length = sizeof(root->info);
+- root->info.hash_version = EXT4_SB(dir->i_sb)->s_def_hash_version;
+- entries = root->entries;
++ dotdot_de->rec_len = ext4_rec_len_to_disk(blocksize -
++ le16_to_cpu(dot_de->rec_len), blocksize);
++
++ /* initialize hashing info */
++ dx_info = dx_get_dx_info(dot_de);
++ memset (dx_info, 0, sizeof(*dx_info));
++ dx_info->info_length = sizeof(*dx_info);
++ dx_info->hash_version = EXT4_SB(dir->i_sb)->s_def_hash_version;
++
++ entries = (void *)dx_info + sizeof(*dx_info);
++
+ dx_set_block(entries, 1);
+ dx_set_count(entries, 1);
+- dx_set_limit(entries, dx_root_limit(dir, sizeof(root->info)));
++ dx_set_limit(entries, dx_root_limit(dir, sizeof(*dx_info)));
+
+ /* Initialize as for dx_probe */
+- hinfo.hash_version = root->info.hash_version;
++ hinfo.hash_version = dx_info->hash_version;
+ if (hinfo.hash_version <= DX_HASH_TEA)
+ hinfo.hash_version += EXT4_SB(dir->i_sb)->s_hash_unsigned;
+ hinfo.seed = EXT4_SB(dir->i_sb)->s_hash_seed;
+@@ -2152,6 +2159,7 @@ static int ext4_dx_add_entry(handle_t *h
+ goto journal_error;
+ brelse (bh2);
+ } else {
++ struct dx_root_info * info;
+ dxtrace(printk(KERN_DEBUG
+ "Creating second level index...\n"));
+ memcpy((char *) entries2, (char *) entries,
+@@ -2161,7 +2169,9 @@ static int ext4_dx_add_entry(handle_t *h
+ /* Set up root */
+ dx_set_count(entries, 1);
+ dx_set_block(entries + 0, newblock);
+- ((struct dx_root *) frames[0].bh->b_data)->info.indirect_levels = 1;
++ info = dx_get_dx_info((struct ext4_dir_entry_2*)
++ frames[0].bh->b_data);
++ info->indirect_levels = 1;
+
+ /* Add new access path frame */
+ frame = frames + 1;
--- /dev/null
+This patch implements large EA support in ext4. If the size of an EA
+value is larger than the blocksize, the EA value is not saved in the
+external EA block; instead it is saved in a separate EA inode. As a
+side effect, the patch also allows a larger number of EAs to be stored.
+
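+For orientation, the core decision introduced by this patch is a size check in
+ext4_xattr_set_entry() (sketch using the names added below;
+EXT4_XATTR_MIN_LARGE_EA_SIZE() is the cutoff macro used by that function):
+
+	/* move the value into a dedicated EA inode once it is too large
+	 * to live comfortably in the in-block xattr area */
+	if (EXT4_HAS_INCOMPAT_FEATURE(inode->i_sb,
+				      EXT4_FEATURE_INCOMPAT_EA_INODE) &&
+	    EXT4_XATTR_SIZE(i->value_len) >
+	    EXT4_XATTR_MIN_LARGE_EA_SIZE(inode->i_sb->s_blocksize))
+		in_inode = 1;	/* value written via ext4_xattr_inode_set() */
+
+	/* values are capped at EXT4_XATTR_MAX_LARGE_EA_SIZE (1 MB) */
+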
+Index: linux-3.10.0-123.13.2.el7.x86_64/fs/ext4/ext4.h
+===================================================================
+--- linux-3.10.0-123.13.2.el7.x86_64.orig/fs/ext4/ext4.h
++++ linux-3.10.0-123.13.2.el7.x86_64/fs/ext4/ext4.h
+@@ -1534,6 +1534,7 @@ static inline void ext4_clear_state_flag
+ EXT4_FEATURE_INCOMPAT_EXTENTS| \
+ EXT4_FEATURE_INCOMPAT_64BIT| \
+ EXT4_FEATURE_INCOMPAT_FLEX_BG| \
++ EXT4_FEATURE_INCOMPAT_EA_INODE| \
+ EXT4_FEATURE_INCOMPAT_MMP | \
+ EXT4_FEATURE_INCOMPAT_DIRDATA| \
+ EXT4_FEATURE_INCOMPAT_INLINE_DATA)
+@@ -1934,6 +1935,12 @@ struct mmpd_data {
+ #define EXT4_MMP_MAX_CHECK_INTERVAL 300UL
+
+ /*
++ * Maximum size of xattr attributes for FEATURE_INCOMPAT_EA_INODE 1Mb
++ * This limit is arbitrary, but is reasonable for the xattr API.
++ */
++#define EXT4_XATTR_MAX_LARGE_EA_SIZE (1024 * 1024)
++
++/*
+ * Function prototypes
+ */
+
+@@ -1945,6 +1952,10 @@ struct mmpd_data {
+ # define ATTRIB_NORET __attribute__((noreturn))
+ # define NORET_AND noreturn,
+
++struct ext4_xattr_ino_array {
++ unsigned int xia_count; /* # of used item in the array */
++ unsigned int xia_inodes[0];
++};
+ /* bitmap.c */
+ extern unsigned int ext4_count_free(char *bitmap, unsigned numchars);
+ void ext4_inode_bitmap_csum_set(struct super_block *sb, ext4_group_t group,
+@@ -2157,6 +2168,7 @@ extern void ext4_set_inode_flags(struct
+ extern void ext4_get_inode_flags(struct ext4_inode_info *);
+ extern int ext4_alloc_da_blocks(struct inode *inode);
+ extern void ext4_set_aops(struct inode *inode);
++extern int ext4_meta_trans_blocks(struct inode *, int nrblocks, int chunk);
+ extern int ext4_writepage_trans_blocks(struct inode *);
+ extern int ext4_chunk_trans_blocks(struct inode *, int nrblocks);
+ extern int ext4_discard_partial_page_buffers(handle_t *handle,
+Index: linux-3.10.0-123.13.2.el7.x86_64/fs/ext4/inode.c
+===================================================================
+--- linux-3.10.0-123.13.2.el7.x86_64.orig/fs/ext4/inode.c
++++ linux-3.10.0-123.13.2.el7.x86_64/fs/ext4/inode.c
+@@ -133,8 +183,6 @@ void ext4_evict_inode(struct inode *inod
+ unsigned int length);
+ static int __ext4_journalled_writepage(struct page *page, unsigned int len);
+ static int ext4_bh_delay_or_unwritten(handle_t *handle, struct buffer_head *bh);
+-static int ext4_meta_trans_blocks(struct inode *inode, int lblocks,
+- int pextents);
+
+ /*
+ * Test whether an inode is a fast symlink.
+@@ -183,6 +183,8 @@ void ext4_evict_inode(struct inode *inod
+ {
+ handle_t *handle;
+ int err;
++ int extra_credits = 3;
++ struct ext4_xattr_ino_array *lea_ino_array = NULL;
+
+ trace_ext4_evict_inode(inode);
+
+@@ -235,8 +237,8 @@ void ext4_evict_inode(struct inode *inod
+ * protection against it
+ */
+ sb_start_intwrite(inode->i_sb);
+- handle = ext4_journal_start(inode, EXT4_HT_TRUNCATE,
+- ext4_blocks_for_truncate(inode)+3);
++
++ handle = ext4_journal_start(inode, EXT4_HT_TRUNCATE, extra_credits);
+ if (IS_ERR(handle)) {
+ ext4_std_error(inode->i_sb, PTR_ERR(handle));
+ /*
+@@ -248,9 +250,33 @@ void ext4_evict_inode(struct inode *inod
+ sb_end_intwrite(inode->i_sb);
+ goto no_delete;
+ }
+-
+ if (IS_SYNC(inode))
+ ext4_handle_sync(handle);
++
++ /*
++ * Delete xattr inode before deleting the main inode.
++ */
++ err = ext4_xattr_delete_inode(handle, inode, &lea_ino_array);
++ if (err) {
++ ext4_warning(inode->i_sb,
++ "couldn't delete inode's xattr (err %d)", err);
++ goto stop_handle;
++ }
++
++ if (!ext4_handle_has_enough_credits(handle,
++ ext4_blocks_for_truncate(inode) + extra_credits)) {
++ err = ext4_journal_extend(handle,
++ ext4_blocks_for_truncate(inode) + extra_credits);
++ if (err > 0)
++ err = ext4_journal_restart(handle,
++ ext4_blocks_for_truncate(inode) + extra_credits);
++ if (err != 0) {
++ ext4_warning(inode->i_sb,
++ "couldn't extend journal (err %d)", err);
++ goto stop_handle;
++ }
++ }
++
+ inode->i_size = 0;
+ err = ext4_mark_inode_dirty(handle, inode);
+ if (err) {
+@@ -305,8 +331,12 @@ void ext4_evict_inode(struct inode *inod
+ ext4_clear_inode(inode);
+ else
+ ext4_free_inode(handle, inode);
++
+ ext4_journal_stop(handle);
+ sb_end_intwrite(inode->i_sb);
++
++ if (lea_ino_array != NULL)
++ ext4_xattr_inode_array_free(inode, lea_ino_array);
+ return;
+ no_delete:
+ ext4_clear_inode(inode); /* We must guarantee clearing of inode... */
+@@ -4841,7 +4871,7 @@ static int ext4_index_trans_blocks(struc
+ *
+ * Also account for superblock, inode, quota and xattr blocks
+ */
+-static int ext4_meta_trans_blocks(struct inode *inode, int lblocks,
++int ext4_meta_trans_blocks(struct inode *inode, int lblocks,
+ int pextents)
+ {
+ ext4_group_t groups, ngroups = ext4_get_groups_count(inode->i_sb);
+ int gdpblocks;
+Index: linux-3.10.0-123.13.2.el7.x86_64/fs/ext4/xattr.c
+===================================================================
+--- linux-3.10.0-123.13.2.el7.x86_64.orig/fs/ext4/xattr.c
++++ linux-3.10.0-123.13.2.el7.x86_64/fs/ext4/xattr.c
+@@ -220,19 +220,26 @@ ext4_xattr_check_block(struct inode *ino
+ }
+
+ static inline int
+-ext4_xattr_check_entry(struct ext4_xattr_entry *entry, size_t size)
++ext4_xattr_check_entry(struct ext4_xattr_entry *entry, size_t size,
++ struct inode *inode)
+ {
+ size_t value_size = le32_to_cpu(entry->e_value_size);
+
+- if (entry->e_value_block != 0 || value_size > size ||
+- le16_to_cpu(entry->e_value_offs) + value_size > size)
++ if ((entry->e_value_inum == 0) &&
++ (le16_to_cpu(entry->e_value_offs) + value_size > size))
++ return -EIO;
++ if (entry->e_value_inum != 0 &&
++ (le32_to_cpu(entry->e_value_inum) < EXT4_FIRST_INO(inode->i_sb) ||
++ le32_to_cpu(entry->e_value_inum) >
++ le32_to_cpu(EXT4_SB(inode->i_sb)->s_es->s_inodes_count)))
+ return -EIO;
+ return 0;
+ }
+
+ static int
+ ext4_xattr_find_entry(struct ext4_xattr_entry **pentry, int name_index,
+- const char *name, size_t size, int sorted)
++ const char *name, size_t size, int sorted,
++ struct inode *inode)
+ {
+ struct ext4_xattr_entry *entry;
+ size_t name_len;
+@@ -252,11 +259,103 @@ ext4_xattr_find_entry(struct ext4_xattr_
+ break;
+ }
+ *pentry = entry;
+- if (!cmp && ext4_xattr_check_entry(entry, size))
++ if (!cmp && ext4_xattr_check_entry(entry, size, inode))
+ return -EIO;
+ return cmp ? -ENODATA : 0;
+ }
+
++/*
++ * Read the EA value from an inode.
++ */
++static int
++ext4_xattr_inode_read(struct inode *ea_inode, void *buf, size_t *size)
++{
++ unsigned long block = 0;
++ struct buffer_head *bh = NULL;
++ int err, blocksize;
++ size_t csize, ret_size = 0;
++
++ if (*size == 0)
++ return 0;
++
++ blocksize = ea_inode->i_sb->s_blocksize;
++
++ while (ret_size < *size) {
++ csize = (*size - ret_size) > blocksize ? blocksize :
++ *size - ret_size;
++ bh = ext4_bread(NULL, ea_inode, block, 0, &err);
++ if (!bh) {
++ *size = ret_size;
++ return err;
++ }
++ memcpy(buf, bh->b_data, csize);
++ brelse(bh);
++
++ buf += csize;
++ block += 1;
++ ret_size += csize;
++ }
++
++ *size = ret_size;
++
++ return err;
++}
++
++struct inode *ext4_xattr_inode_iget(struct inode *parent, int ea_ino, int *err)
++{
++ struct inode *ea_inode = NULL;
++
++ ea_inode = ext4_iget(parent->i_sb, ea_ino);
++ if (IS_ERR(ea_inode) || is_bad_inode(ea_inode)) {
++ ext4_error(parent->i_sb, "error while reading EA inode %d",
++ ea_ino);
++ *err = -EIO;
++ return NULL;
++ }
++
++ if (ea_inode->i_xattr_inode_parent != parent->i_ino ||
++ ea_inode->i_generation != parent->i_generation) {
++ ext4_error(parent->i_sb, "Backpointer from EA inode %d "
++ "to parent invalid.", ea_ino);
++ *err = -EINVAL;
++ goto error;
++ }
++
++ if (!(EXT4_I(ea_inode)->i_flags & EXT4_EA_INODE_FL)) {
++ ext4_error(parent->i_sb, "EA inode %d does not have "
++ "EXT4_EA_INODE_FL flag set.\n", ea_ino);
++ *err = -EINVAL;
++ goto error;
++ }
++
++ *err = 0;
++ return ea_inode;
++
++error:
++ iput(ea_inode);
++ return NULL;
++}
++
++/*
++ * Read the value from the EA inode.
++ */
++static int
++ext4_xattr_inode_get(struct inode *inode, int ea_ino, void *buffer,
++ size_t *size)
++{
++ struct inode *ea_inode = NULL;
++ int err;
++
++ ea_inode = ext4_xattr_inode_iget(inode, ea_ino, &err);
++ if (err)
++ return err;
++
++ err = ext4_xattr_inode_read(ea_inode, buffer, size);
++ iput(ea_inode);
++
++ return err;
++}
++
+ static int
+ ext4_xattr_block_get(struct inode *inode, int name_index, const char *name,
+ void *buffer, size_t buffer_size)
+@@ -288,7 +387,8 @@ bad_block:
+ }
+ ext4_xattr_cache_insert(bh);
+ entry = BFIRST(bh);
+- error = ext4_xattr_find_entry(&entry, name_index, name, bh->b_size, 1);
++ error = ext4_xattr_find_entry(&entry, name_index, name, bh->b_size, 1,
++ inode);
+ if (error == -EIO)
+ goto bad_block;
+ if (error)
+@@ -298,8 +398,16 @@ bad_block:
+ error = -ERANGE;
+ if (size > buffer_size)
+ goto cleanup;
+- memcpy(buffer, bh->b_data + le16_to_cpu(entry->e_value_offs),
+- size);
++ if (entry->e_value_inum != 0) {
++ error = ext4_xattr_inode_get(inode,
++ le32_to_cpu(entry->e_value_inum),
++ buffer, &size);
++ if (error)
++ goto cleanup;
++ } else {
++ memcpy(buffer, bh->b_data +
++ le16_to_cpu(entry->e_value_offs), size);
++ }
+ }
+ error = size;
+
+@@ -333,7 +441,7 @@ ext4_xattr_ibody_get(struct inode *inode
+ if (error)
+ goto cleanup;
+ error = ext4_xattr_find_entry(&entry, name_index, name,
+- end - (void *)entry, 0);
++ end - (void *)entry, 0, inode);
+ if (error)
+ goto cleanup;
+ size = le32_to_cpu(entry->e_value_size);
+@@ -341,8 +449,16 @@ ext4_xattr_ibody_get(struct inode *inode
+ error = -ERANGE;
+ if (size > buffer_size)
+ goto cleanup;
+- memcpy(buffer, (void *)IFIRST(header) +
+- le16_to_cpu(entry->e_value_offs), size);
++ if (entry->e_value_inum != 0) {
++ error = ext4_xattr_inode_get(inode,
++ le32_to_cpu(entry->e_value_inum),
++ buffer, &size);
++ if (error)
++ goto cleanup;
++ } else {
++ memcpy(buffer, (void *)IFIRST(header) +
++ le16_to_cpu(entry->e_value_offs), size);
++ }
+ }
+ error = size;
+
+@@ -568,7 +684,7 @@ static size_t ext4_xattr_free_space(stru
+ *total += EXT4_XATTR_LEN(last->e_name_len);
+ {
+ for (; !IS_LAST_ENTRY(last); last = EXT4_XATTR_NEXT(last)) {
+- if (!last->e_value_block && last->e_value_size) {
++ if (last->e_value_inum == 0 && last->e_value_size > 0) {
+ size_t offs = le16_to_cpu(last->e_value_offs);
+ if (offs < *min_offs)
+ *min_offs = offs;
+@@ -577,16 +693,171 @@ static size_t ext4_xattr_free_space(stru
+ return (*min_offs - ((void *)last - base) - sizeof(__u32));
+ }
+
++/*
++ * Write the value of the EA in an inode.
++ */
++static int
++ext4_xattr_inode_write(handle_t *handle, struct inode *ea_inode,
++ const void *buf, int bufsize)
++{
++ struct buffer_head *bh = NULL;
++ unsigned long block = 0;
++ unsigned blocksize = ea_inode->i_sb->s_blocksize;
++ unsigned max_blocks = (bufsize + blocksize - 1) >> ea_inode->i_blkbits;
++ int csize, wsize = 0;
++ int ret = 0;
++ int retries = 0;
++
++retry:
++ while (ret >= 0 && ret < max_blocks) {
++ struct ext4_map_blocks map;
++ map.m_lblk = block += ret;
++ map.m_len = max_blocks -= ret;
++
++ ret = ext4_map_blocks(handle, ea_inode, &map, EXT4_GET_BLOCKS_CREATE);
++ if (ret <= 0) {
++ ext4_mark_inode_dirty(handle, ea_inode);
++ if (ret == -ENOSPC &&
++ ext4_should_retry_alloc(ea_inode->i_sb, &retries)) {
++ ret = 0;
++ goto retry;
++ }
++ break;
++ }
++ }
++
++ if (ret < 0)
++ return ret;
++
++ block = 0;
++ while (wsize < bufsize) {
++ if (bh != NULL)
++ brelse(bh);
++ csize = (bufsize - wsize) > blocksize ? blocksize :
++ bufsize - wsize;
++ bh = ext4_getblk(handle, ea_inode, block, 0, &ret);
++ if (!bh)
++ goto out;
++ ret = ext4_journal_get_write_access(handle, bh);
++ if (ret)
++ goto out;
++
++ memcpy(bh->b_data, buf, csize);
++ set_buffer_uptodate(bh);
++ ext4_handle_dirty_metadata(handle, ea_inode, bh);
++
++ buf += csize;
++ wsize += csize;
++ block += 1;
++ }
++
++ i_size_write(ea_inode, wsize);
++ ext4_update_i_disksize(ea_inode, wsize);
++
++ ext4_mark_inode_dirty(handle, ea_inode);
++
++out:
++ brelse(bh);
++
++ return ret;
++}
++
++/*
++ * Create an inode to store the value of a large EA.
++ */
++static struct inode *
++ext4_xattr_inode_create(handle_t *handle, struct inode *inode)
++{
++ struct inode *ea_inode = NULL;
++
++ /*
++ * Let the next inode be the goal, so we try and allocate the EA inode
++ * in the same group, or nearby one.
++ */
++ ea_inode = ext4_new_inode(handle, inode->i_sb->s_root->d_inode,
++ S_IFREG|0600, NULL, inode->i_ino + 1, NULL);
++
++ if (!IS_ERR(ea_inode)) {
++ ea_inode->i_op = &ext4_file_inode_operations;
++ ea_inode->i_fop = &ext4_file_operations;
++ ext4_set_aops(ea_inode);
++ ea_inode->i_generation = inode->i_generation;
++ EXT4_I(ea_inode)->i_flags |= EXT4_EA_INODE_FL;
++
++ /*
++ * A back-pointer from EA inode to parent inode will be useful
++ * for e2fsck.
++ */
++ ea_inode->i_xattr_inode_parent = inode->i_ino;
++ unlock_new_inode(ea_inode);
++ }
++
++ return ea_inode;
++}
++
++/*
++ * Unlink the inode storing the value of the EA.
++ */
++int
++ext4_xattr_inode_unlink(struct inode *inode, int ea_ino)
++{
++ struct inode *ea_inode = NULL;
++ int err;
++
++ ea_inode = ext4_xattr_inode_iget(inode, ea_ino, &err);
++ if (err)
++ return err;
++
++ clear_nlink(ea_inode);
++ iput(ea_inode);
++
++ return 0;
++}
++
++/*
++ * Add value of the EA in an inode.
++ */
+ static int
+-ext4_xattr_set_entry(struct ext4_xattr_info *i, struct ext4_xattr_search *s)
++ext4_xattr_inode_set(handle_t *handle, struct inode *inode, int *ea_ino,
++ const void *value, size_t value_len)
++{
++ struct inode *ea_inode = NULL;
++ int err;
++
++ /* Create an inode for the EA value */
++ ea_inode = ext4_xattr_inode_create(handle, inode);
++ if (IS_ERR(ea_inode))
++ return -1;
++
++ err = ext4_xattr_inode_write(handle, ea_inode, value, value_len);
++ if (err)
++ clear_nlink(ea_inode);
++ else
++ *ea_ino = ea_inode->i_ino;
++
++ iput(ea_inode);
++
++ return err;
++}
++
++static int
++ext4_xattr_set_entry(struct ext4_xattr_info *i, struct ext4_xattr_search *s,
++ handle_t *handle, struct inode *inode)
+ {
+ struct ext4_xattr_entry *last;
+ size_t free, min_offs = s->end - s->base, name_len = strlen(i->name);
++ int in_inode = i->in_inode;
++
++ if (EXT4_HAS_INCOMPAT_FEATURE(inode->i_sb,
++ EXT4_FEATURE_INCOMPAT_EA_INODE) &&
++ (EXT4_XATTR_SIZE(i->value_len) >
++ EXT4_XATTR_MIN_LARGE_EA_SIZE(inode->i_sb->s_blocksize)))
++ in_inode = 1;
+
+ /* Compute min_offs and last. */
+ last = s->first;
+ for (; !IS_LAST_ENTRY(last); last = EXT4_XATTR_NEXT(last)) {
+- if (!last->e_value_block && last->e_value_size) {
++ if (last->e_value_inum == 0 && last->e_value_size > 0) {
+ size_t offs = le16_to_cpu(last->e_value_offs);
+ if (offs < min_offs)
+ min_offs = offs;
+@@ -594,16 +865,21 @@ ext4_xattr_set_entry(struct ext4_xattr_i
+ }
+ free = min_offs - ((void *)last - s->base) - sizeof(__u32);
+ if (!s->not_found) {
+- if (!s->here->e_value_block && s->here->e_value_size) {
++ if (!in_inode && s->here->e_value_inum == 0 &&
++ s->here->e_value_size > 0) {
+ size_t size = le32_to_cpu(s->here->e_value_size);
+ free += EXT4_XATTR_SIZE(size);
+ }
+ free += EXT4_XATTR_LEN(name_len);
+ }
+ if (i->value) {
+- if (free < EXT4_XATTR_SIZE(i->value_len) ||
+- free < EXT4_XATTR_LEN(name_len) +
+- EXT4_XATTR_SIZE(i->value_len))
++ size_t value_len = EXT4_XATTR_SIZE(i->value_len);
++
++ if (in_inode)
++ value_len = 0;
++
++ if (free < value_len ||
++ free < EXT4_XATTR_LEN(name_len) + value_len)
+ return -ENOSPC;
+ }
+
+@@ -617,7 +893,8 @@ ext4_xattr_set_entry(struct ext4_xattr_i
+ s->here->e_name_len = name_len;
+ memcpy(s->here->e_name, i->name, name_len);
+ } else {
+- if (!s->here->e_value_block && s->here->e_value_size) {
++ if (s->here->e_value_offs > 0 && s->here->e_value_inum == 0 &&
++ s->here->e_value_size > 0) {
+ void *first_val = s->base + min_offs;
+ size_t offs = le16_to_cpu(s->here->e_value_offs);
+ void *val = s->base + offs;
+@@ -651,13 +928,17 @@ ext4_xattr_set_entry(struct ext4_xattr_i
+ last = s->first;
+ while (!IS_LAST_ENTRY(last)) {
+ size_t o = le16_to_cpu(last->e_value_offs);
+- if (!last->e_value_block &&
+- last->e_value_size && o < offs)
++ if (last->e_value_size > 0 && o < offs)
+ last->e_value_offs =
+ cpu_to_le16(o + size);
+ last = EXT4_XATTR_NEXT(last);
+ }
+ }
++ if (s->here->e_value_inum != 0) {
++ ext4_xattr_inode_unlink(inode,
++ le32_to_cpu(s->here->e_value_inum));
++ s->here->e_value_inum = 0;
++ }
+ if (!i->value) {
+ /* Remove the old name. */
+ size_t size = EXT4_XATTR_LEN(name_len);
+@@ -671,10 +952,17 @@ ext4_xattr_set_entry(struct ext4_xattr_i
+ if (i->value) {
+ /* Insert the new value. */
+ s->here->e_value_size = cpu_to_le32(i->value_len);
+- if (i->value_len) {
++ if (in_inode) {
++ int ea_ino = le32_to_cpu(s->here->e_value_inum);
++ ext4_xattr_inode_set(handle, inode, &ea_ino, i->value,
++ i->value_len);
++ s->here->e_value_inum = cpu_to_le32(ea_ino);
++ s->here->e_value_offs = 0;
++ } else if (i->value_len) {
+ size_t size = EXT4_XATTR_SIZE(i->value_len);
+ void *val = s->base + min_offs - size;
+ s->here->e_value_offs = cpu_to_le16(min_offs - size);
++ s->here->e_value_inum = 0;
+ if (i->value == EXT4_ZERO_XATTR_VALUE) {
+ memset(val, 0, size);
+ } else {
+@@ -724,7 +1012,7 @@ ext4_xattr_block_find(struct inode *inod
+ bs->s.end = bs->bh->b_data + bs->bh->b_size;
+ bs->s.here = bs->s.first;
+ error = ext4_xattr_find_entry(&bs->s.here, i->name_index,
+- i->name, bs->bh->b_size, 1);
++ i->name, bs->bh->b_size, 1, inode);
+ if (error && error != -ENODATA)
+ goto cleanup;
+ bs->s.not_found = error;
+@@ -748,8 +1036,6 @@ ext4_xattr_block_set(handle_t *handle, s
+
+ #define header(x) ((struct ext4_xattr_header *)(x))
+
+- if (i->value && i->value_len > sb->s_blocksize)
+- return -ENOSPC;
+ if (s->base) {
+ ce = mb_cache_entry_get(ext4_xattr_cache, bs->bh->b_bdev,
+ bs->bh->b_blocknr);
+@@ -764,7 +1050,7 @@ ext4_xattr_block_set(handle_t *handle, s
+ ce = NULL;
+ }
+ ea_bdebug(bs->bh, "modifying in-place");
+- error = ext4_xattr_set_entry(i, s);
++ error = ext4_xattr_set_entry(i, s, handle, inode);
+ if (!error) {
+ if (!IS_LAST_ENTRY(s->first))
+ ext4_xattr_rehash(header(s->base),
+@@ -815,7 +1101,7 @@ ext4_xattr_block_set(handle_t *handle, s
+ s->end = s->base + sb->s_blocksize;
+ }
+
+- error = ext4_xattr_set_entry(i, s);
++ error = ext4_xattr_set_entry(i, s, handle, inode);
+ if (error == -EIO)
+ goto bad_block;
+ if (error)
+@@ -963,7 +1249,7 @@ int ext4_xattr_ibody_find(struct inode *
+ /* Find the named attribute. */
+ error = ext4_xattr_find_entry(&is->s.here, i->name_index,
+ i->name, is->s.end -
+- (void *)is->s.base, 0);
++ (void *)is->s.base, 0, inode);
+ if (error && error != -ENODATA)
+ return error;
+ is->s.not_found = error;
+@@ -981,7 +1267,7 @@ int ext4_xattr_ibody_inline_set(handle_t
+
+ if (EXT4_I(inode)->i_extra_isize == 0)
+ return -ENOSPC;
+- error = ext4_xattr_set_entry(i, s);
++ error = ext4_xattr_set_entry(i, s, handle, inode);
+ if (error) {
+ if (error == -ENOSPC &&
+ ext4_has_inline_data(inode)) {
+@@ -993,7 +1279,7 @@ int ext4_xattr_ibody_inline_set(handle_t
+ error = ext4_xattr_ibody_find(inode, i, is);
+ if (error)
+ return error;
+- error = ext4_xattr_set_entry(i, s);
++ error = ext4_xattr_set_entry(i, s, handle, inode);
+ }
+ if (error)
+ return error;
+@@ -1019,7 +1305,7 @@ static int ext4_xattr_ibody_set(handle_t
+
+ if (EXT4_I(inode)->i_extra_isize == 0)
+ return -ENOSPC;
+- error = ext4_xattr_set_entry(i, s);
++ error = ext4_xattr_set_entry(i, s, handle, inode);
+ if (error)
+ return error;
+ header = IHDR(inode, ext4_raw_inode(&is->iloc));
+@@ -1055,7 +1341,7 @@ ext4_xattr_set_handle(handle_t *handle,
+ .name = name,
+ .value = value,
+ .value_len = value_len,
+-
++ .in_inode = 0,
+ };
+ struct ext4_xattr_ibody_find is = {
+ .s = { .not_found = -ENODATA, },
+@@ -1120,6 +1406,15 @@ ext4_xattr_set_handle(handle_t *handle,
+ goto cleanup;
+ }
+ error = ext4_xattr_block_set(handle, inode, &i, &bs);
++ if (EXT4_HAS_INCOMPAT_FEATURE(inode->i_sb,
++ EXT4_FEATURE_INCOMPAT_EA_INODE) &&
++ error == -ENOSPC) {
++ /* xattr not fit to block, store at external
++ * inode */
++ i.in_inode = 1;
++ error = ext4_xattr_ibody_set(handle, inode,
++ &i, &is);
++ }
+ if (error)
+ goto cleanup;
+ if (!is.s.not_found) {
+@@ -1166,9 +1461,22 @@ ext4_xattr_set(struct inode *inode, int
+ const void *value, size_t value_len, int flags)
+ {
+ handle_t *handle;
++ struct super_block *sb = inode->i_sb;
+ int error, retries = 0;
+ int credits = ext4_jbd2_credits_xattr(inode);
+
++ if ((value_len >= EXT4_XATTR_MIN_LARGE_EA_SIZE(sb->s_blocksize)) &&
++ EXT4_HAS_INCOMPAT_FEATURE(sb, EXT4_FEATURE_INCOMPAT_EA_INODE)) {
++ int nrblocks = (value_len + sb->s_blocksize - 1) >>
++ sb->s_blocksize_bits;
++
++ /* For new inode */
++ credits += EXT4_SINGLEDATA_TRANS_BLOCKS(sb) + 3;
++
++ /* For data blocks of EA inode */
++ credits += ext4_meta_trans_blocks(inode, nrblocks, 0);
++ }
++
+ retry:
+ handle = ext4_journal_start(inode, EXT4_HT_XATTR, credits);
+ if (IS_ERR(handle)) {
+@@ -1180,7 +1488,7 @@ retry:
+ value, value_len, flags);
+ error2 = ext4_journal_stop(handle);
+ if (error == -ENOSPC &&
+- ext4_should_retry_alloc(inode->i_sb, &retries))
++ ext4_should_retry_alloc(sb, &retries))
+ goto retry;
+ if (error == 0)
+ error = error2;
+@@ -1202,7 +1510,7 @@ static void ext4_xattr_shift_entries(str
+
+ /* Adjust the value offsets of the entries */
+ for (; !IS_LAST_ENTRY(last); last = EXT4_XATTR_NEXT(last)) {
+- if (!last->e_value_block && last->e_value_size) {
++ if (last->e_value_inum == 0 && last->e_value_size > 0) {
+ new_offs = le16_to_cpu(last->e_value_offs) +
+ value_offs_shift;
+ BUG_ON(new_offs + le32_to_cpu(last->e_value_size)
+@@ -1441,19 +1749,91 @@ cleanup:
+ }
+
+
+-
++#define EIA_INCR 16 /* must be 2^n */
++#define EIA_MASK (EIA_INCR - 1)
++/* Add the large xattr @ino into @lea_ino_array for later deletion.
++ * If @lea_ino_array is new or full it will be grown and the old
++ * contents copied over.
++ */
++static int
++ext4_expand_ino_array(struct ext4_xattr_ino_array **lea_ino_array, __u32 ino)
++{
++ if (*lea_ino_array == NULL) {
++ /*
++ * Start with 15 inodes, so it fits into a power-of-two size.
++ * If *lea_ino_array is NULL, this is essentially offsetof()
++ */
++ (*lea_ino_array) =
++ kmalloc(offsetof(struct ext4_xattr_ino_array,
++ xia_inodes[EIA_MASK]),
++ GFP_NOFS);
++ if (*lea_ino_array == NULL)
++ return -ENOMEM;
++ (*lea_ino_array)->xia_count = 0;
++ } else if (((*lea_ino_array)->xia_count & EIA_MASK) == EIA_MASK) {
++ /* expand the array once all 15 + n * 16 slots are full */
++ struct ext4_xattr_ino_array *new_array = NULL;
++ int count = (*lea_ino_array)->xia_count;
++
++ /* if new_array is NULL, this is essentially offsetof() */
++ new_array = kmalloc(
++ offsetof(struct ext4_xattr_ino_array,
++ xia_inodes[count + EIA_INCR]),
++ GFP_NOFS);
++ if (new_array == NULL)
++ return -ENOMEM;
++ memcpy(new_array, *lea_ino_array,
++ offsetof(struct ext4_xattr_ino_array,
++ xia_inodes[count]));
++ kfree(*lea_ino_array);
++ *lea_ino_array = new_array;
++ }
++ (*lea_ino_array)->xia_inodes[(*lea_ino_array)->xia_count++] = ino;
++ return 0;
++}
+ /*
+ * ext4_xattr_delete_inode()
+ *
+- * Free extended attribute resources associated with this inode. This
++ * Free extended attribute resources associated with this inode. Traverse
++ * all entries and unlink any xattr inodes associated with this inode. This
+ * is called immediately before an inode is freed. We have exclusive
+- * access to the inode.
++ * access to the inode. If an orphan inode is deleted it will also delete any
++ * xattr block and all xattr inodes. They are checked by ext4_xattr_inode_iget()
++ * to ensure they belong to the parent inode and were not deleted already.
+ */
+-void
+-ext4_xattr_delete_inode(handle_t *handle, struct inode *inode)
++int
++ext4_xattr_delete_inode(handle_t *handle, struct inode *inode,
++ struct ext4_xattr_ino_array **lea_ino_array)
+ {
+ struct buffer_head *bh = NULL;
++ struct ext4_xattr_ibody_header *header;
++ struct ext4_inode *raw_inode;
++ struct ext4_iloc iloc;
++ struct ext4_xattr_entry *entry;
++ int error = 0;
++
++ if (!ext4_test_inode_state(inode, EXT4_STATE_XATTR))
++ goto delete_external_ea;
++
++ error = ext4_get_inode_loc(inode, &iloc);
++ if (error)
++ goto cleanup;
++ raw_inode = ext4_raw_inode(&iloc);
++ header = IHDR(inode, raw_inode);
++ entry = IFIRST(header);
++ for (; !IS_LAST_ENTRY(entry); entry = EXT4_XATTR_NEXT(entry)) {
++ if (entry->e_value_inum == 0)
++ continue;
++ if (ext4_expand_ino_array(lea_ino_array,
++ entry->e_value_inum) != 0) {
++ brelse(iloc.bh);
++ goto cleanup;
++ }
++ entry->e_value_inum = 0;
++ }
++ brelse(iloc.bh);
+
++delete_external_ea:
+ if (!EXT4_I(inode)->i_file_acl)
+ goto cleanup;
+ bh = sb_bread(inode->i_sb, EXT4_I(inode)->i_file_acl);
+@@ -1468,11 +1848,74 @@ ext4_xattr_delete_inode(handle_t *handle
+ EXT4_I(inode)->i_file_acl);
+ goto cleanup;
+ }
++
++ entry = BFIRST(bh);
++ for (; !IS_LAST_ENTRY(entry); entry = EXT4_XATTR_NEXT(entry)) {
++ if (entry->e_value_inum == 0)
++ continue;
++ if (ext4_expand_ino_array(lea_ino_array,
++ entry->e_value_inum) != 0)
++ goto cleanup;
++ entry->e_value_inum = 0;
++ }
++
++ /* add xattr inode to orphan list */
++ if (*lea_ino_array != NULL) {
++ struct inode *ea_inode = NULL;
++ int idx = 0;
++
++ for (; idx < (*lea_ino_array)->xia_count; ++idx) {
++ if (!ext4_handle_has_enough_credits(handle, 3)) {
++ error = ext4_journal_extend(handle, 3);
++ if (error > 0)
++ error = ext4_journal_restart(handle, 3);
++ if (error != 0) {
++ ext4_warning(inode->i_sb,
++ "couldn't extend journal "
++ "(err %d)", error);
++ goto cleanup;
++ }
++ }
++ ea_inode = ext4_xattr_inode_iget(inode,
++ (*lea_ino_array)->xia_inodes[idx], &error);
++ if (error)
++ continue;
++ ext4_orphan_add(handle, ea_inode);
++ /* the inode's i_count will be released by caller */
++ }
++ }
++
+ ext4_xattr_release_block(handle, inode, bh);
+ EXT4_I(inode)->i_file_acl = 0;
+
+ cleanup:
+ brelse(bh);
++
++ return error;
++}
++
++void
++ext4_xattr_inode_array_free(struct inode *inode,
++ struct ext4_xattr_ino_array *lea_ino_array)
++{
++ struct inode *ea_inode = NULL;
++ int idx = 0;
++ int err;
++
++ if (lea_ino_array == NULL)
++ return;
++
++ for (; idx < lea_ino_array->xia_count; ++idx) {
++ ea_inode = ext4_xattr_inode_iget(inode,
++ lea_ino_array->xia_inodes[idx], &err);
++ if (err)
++ continue;
++ clear_nlink(ea_inode);
++ iput(ea_inode);
++ /* drop the extra i_count reference taken in ext4_xattr_delete_inode */
++ iput(ea_inode);
++ }
++ kfree(lea_ino_array);
+ }
+
+ /*
+@@ -1542,10 +1985,9 @@ ext4_xattr_cmp(struct ext4_xattr_header
+ entry1->e_name_index != entry2->e_name_index ||
+ entry1->e_name_len != entry2->e_name_len ||
+ entry1->e_value_size != entry2->e_value_size ||
++ entry1->e_value_inum != entry2->e_value_inum ||
+ memcmp(entry1->e_name, entry2->e_name, entry1->e_name_len))
+ return 1;
+- if (entry1->e_value_block != 0 || entry2->e_value_block != 0)
+- return -EIO;
+ if (memcmp((char *)header1 + le16_to_cpu(entry1->e_value_offs),
+ (char *)header2 + le16_to_cpu(entry2->e_value_offs),
+ le32_to_cpu(entry1->e_value_size)))
+@@ -1629,7 +2071,7 @@ static inline void ext4_xattr_hash_entry
+ *name++;
+ }
+
+- if (entry->e_value_block == 0 && entry->e_value_size != 0) {
++ if (entry->e_value_inum == 0 && entry->e_value_size != 0) {
+ __le32 *value = (__le32 *)((char *)header +
+ le16_to_cpu(entry->e_value_offs));
+ for (n = (le32_to_cpu(entry->e_value_size) +
+Index: linux-3.10.0-123.13.2.el7.x86_64/fs/ext4/xattr.h
+===================================================================
+--- linux-3.10.0-123.13.2.el7.x86_64.orig/fs/ext4/xattr.h
++++ linux-3.10.0-123.13.2.el7.x86_64/fs/ext4/xattr.h
+@@ -42,7 +42,7 @@ struct ext4_xattr_entry {
+ __u8 e_name_len; /* length of name */
+ __u8 e_name_index; /* attribute name index */
+ __le16 e_value_offs; /* offset in disk block of value */
+- __le32 e_value_block; /* disk block attribute is stored on (n/i) */
++ __le32 e_value_inum; /* inode in which the value is stored */
+ __le32 e_value_size; /* size of attribute value */
+ __le32 e_hash; /* hash value of name and value */
+ char e_name[0]; /* attribute name */
+@@ -67,6 +67,15 @@ struct ext4_xattr_entry {
+ EXT4_I(inode)->i_extra_isize))
+ #define IFIRST(hdr) ((struct ext4_xattr_entry *)((hdr)+1))
+
++#define i_xattr_inode_parent i_mtime.tv_sec
++
++/*
++ * The minimum size of EA value when you start storing it in an external inode
++ * size of block - size of header - size of 1 entry - 4 null bytes
++*/
++#define EXT4_XATTR_MIN_LARGE_EA_SIZE(b) \
++ ((b) - EXT4_XATTR_LEN(3) - sizeof(struct ext4_xattr_header) - 4)
++
+ #define BHDR(bh) ((struct ext4_xattr_header *)((bh)->b_data))
+ #define ENTRY(ptr) ((struct ext4_xattr_entry *)(ptr))
+ #define BFIRST(bh) ENTRY(BHDR(bh)+1)
+@@ -75,10 +84,11 @@ struct ext4_xattr_entry {
+ #define EXT4_ZERO_XATTR_VALUE ((void *)-1)
+
+ struct ext4_xattr_info {
+- int name_index;
+ const char *name;
+ const void *value;
+ size_t value_len;
++ int name_index;
++ int in_inode;
+ };
+
+ struct ext4_xattr_search {
+@@ -106,7 +116,13 @@ extern int ext4_xattr_get(struct inode *
+ extern int ext4_xattr_set(struct inode *, int, const char *, const void *, size_t, int);
+ extern int ext4_xattr_set_handle(handle_t *, struct inode *, int, const char *, const void *, size_t, int);
+
+-extern void ext4_xattr_delete_inode(handle_t *, struct inode *);
++extern struct inode *ext4_xattr_inode_iget(struct inode *parent, int ea_ino,
++ int *err);
++extern int ext4_xattr_inode_unlink(struct inode *inode, int ea_ino);
++extern int ext4_xattr_delete_inode(handle_t *handle, struct inode *inode,
++ struct ext4_xattr_ino_array **array);
++extern void ext4_xattr_inode_array_free(struct inode *inode,
++ struct ext4_xattr_ino_array *array);
+ extern void ext4_xattr_put_super(struct super_block *);
+
+ extern int ext4_expand_extra_isize_ea(struct inode *inode, int new_extra_isize,
+Index: linux-3.10.0-123.13.2.el7.x86_64/fs/ext4/ialloc.c
+===================================================================
+--- linux-3.10.0-123.13.2.el7.x86_64.orig/fs/ext4/ialloc.c
++++ linux-3.10.0-123.13.2.el7.x86_64/fs/ext4/ialloc.c
+@@ -250,7 +250,6 @@ void ext4_free_inode(handle_t *handle, s
+ * as writing the quota to disk may need the lock as well.
+ */
+ dquot_initialize(inode);
+- ext4_xattr_delete_inode(handle, inode);
+ dquot_free_inode(inode);
+ dquot_drop(inode);
+
+Index: linux-3.10.0-123.13.2.el7.x86_64/fs/ext4/inline.c
+===================================================================
+--- linux-3.10.0-123.13.2.el7.x86_64.orig/fs/ext4/inline.c
++++ linux-3.10.0-123.13.2.el7.x86_64/fs/ext4/inline.c
+@@ -59,7 +59,7 @@ static int get_max_inline_xattr_value_si
+
+ /* Compute min_offs. */
+ for (; !IS_LAST_ENTRY(entry); entry = EXT4_XATTR_NEXT(entry)) {
+- if (!entry->e_value_block && entry->e_value_size) {
++ if (!entry->e_value_inum && entry->e_value_size) {
+ size_t offs = le16_to_cpu(entry->e_value_offs);
+ if (offs < min_offs)
+ min_offs = offs;
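+
+Note (editorial illustration, not part of the patch): with the
+EXT4_XATTR_MIN_LARGE_EA_SIZE() definition added to xattr.h above, and
+assuming the standard ext4 on-disk sizes (4096-byte block, 32-byte
+ext4_xattr_header, 16-byte ext4_xattr_entry, 4-byte xattr padding), the
+threshold works out to 4096 - 20 - 32 - 4 = 4040 bytes, where
+EXT4_XATTR_LEN(3) = (3 + 3 + 16) & ~3 = 20. EA values whose padded size
+exceeds this are written through ext4_xattr_inode_set() into a separate
+EA inode rather than into the xattr block, provided the EA_INODE incompat
+feature is enabled.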
--- /dev/null
+Index: linux-3.10.9-200.fc17.x86_64/fs/ext4/namei.c
+===================================================================
+--- linux-3.10.9-200.fc17.x86_64.orig/fs/ext4/namei.c
++++ linux-3.10.9-200.fc17.x86_64/fs/ext4/namei.c
+@@ -1438,6 +1438,32 @@ static struct dentry *ext4_lookup(struct
+ return ERR_PTR(-EIO);
+ }
+ }
++ /* ".." shouldn't go into dcache to preserve dcache hierarchy
++ * otherwise we'll get parent being a child of actual child.
++ * see bug 10458 for details -bzzz */
++ if (inode && (dentry->d_name.name[0] == '.' && (dentry->d_name.len == 1 ||
++ (dentry->d_name.len == 2 && dentry->d_name.name[1] == '.')))) {
++ struct dentry *goal = NULL;
++
++ /* first, look for an existing dentry - any one is good */
++ goal = d_find_any_alias(inode);
++ if (goal == NULL) {
++ spin_lock(&dentry->d_lock);
++ /* there is no alias, we need to make current dentry:
++ * a) inaccessible for __d_lookup()
++ * b) inaccessible for iopen */
++ J_ASSERT(hlist_unhashed(&dentry->d_alias));
++ dentry->d_flags |= DCACHE_NFSFS_RENAMED;
++ /* this is d_instantiate() ... */
++ hlist_add_head(&dentry->d_alias, &inode->i_dentry);
++ dentry->d_inode = inode;
++ spin_unlock(&dentry->d_lock);
++ }
++ if (goal)
++ iput(inode);
++ return goal;
++ }
++
+ return d_splice_alias(inode, dentry);
+ }
+
--- /dev/null
+Add a proc interface for max_dir_size.
+
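+Note (editorial illustration, not part of the patch): both attributes added
+below are backed by the same superblock field, s_max_dir_size_kb, which is
+kept in KiB. The "max_dir_size" name is exposed in bytes, hence the shift
+by 10 in the show/store handlers: writing 10485760 to max_dir_size stores
+10240 in s_max_dir_size_kb and reading it back reports 10485760 again,
+while max_dir_size_kb reads and writes the value in KiB directly.
+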
+Index: linux-3.10.0-123.13.2.el7.x86_64/fs/ext4/super.c
+===================================================================
+--- linux-3.10.0-123.13.2.el7.x86_64.orig/fs/ext4/super.c
++++ linux-3.10.0-123.13.2.el7.x86_64/fs/ext4/super.c
+@@ -2476,8 +2476,11 @@ static ssize_t sbi_ui_show(struct ext4_a
+ struct ext4_sb_info *sbi, char *buf)
+ {
+ unsigned int *ui = (unsigned int *) (((char *) sbi) + a->u.offset);
++ unsigned int v = *ui;
+
+- return snprintf(buf, PAGE_SIZE, "%u\n", *ui);
++ if (strcmp("max_dir_size", a->attr.name) == 0)
++ v <<= 10;
++ return snprintf(buf, PAGE_SIZE, "%u\n", v);
+ }
+
+ static ssize_t sbi_ui_store(struct ext4_attr *a,
+@@ -2491,6 +2494,8 @@ static ssize_t sbi_ui_store(struct ext4_
+ ret = kstrtoul(skip_spaces(buf), 0, &t);
+ if (ret)
+ return ret;
++ if (strcmp("max_dir_size", a->attr.name) == 0)
++ t >>= 10;
+ *ui = t;
+ return count;
+ }
+@@ -2557,6 +2562,8 @@ EXT4_RW_ATTR(reserved_clusters);
+ EXT4_ATTR_OFFSET(inode_readahead_blks, 0644, sbi_ui_show,
+ inode_readahead_blks_store, s_inode_readahead_blks);
+ EXT4_RW_ATTR_SBI_UI(inode_goal, s_inode_goal);
++EXT4_RW_ATTR_SBI_UI(max_dir_size, s_max_dir_size_kb);
++EXT4_RW_ATTR_SBI_UI(max_dir_size_kb, s_max_dir_size_kb);
+ EXT4_RW_ATTR_SBI_UI(mb_stats, s_mb_stats);
+ EXT4_RW_ATTR_SBI_UI(mb_max_to_scan, s_mb_max_to_scan);
+ EXT4_RW_ATTR_SBI_UI(mb_min_to_scan, s_mb_min_to_scan);
+@@ -2581,6 +2588,8 @@ static struct attribute *ext4_attrs[] =
+ ATTR_LIST(reserved_clusters),
+ ATTR_LIST(inode_readahead_blks),
+ ATTR_LIST(inode_goal),
++ ATTR_LIST(max_dir_size),
++ ATTR_LIST(max_dir_size_kb),
+ ATTR_LIST(mb_stats),
+ ATTR_LIST(mb_max_to_scan),
+ ATTR_LIST(mb_min_to_scan),
--- /dev/null
+Index: linux-3.10.0-123.el7.x86_64/fs/ext4/ext4.h
+===================================================================
+--- linux-3.10.0-123.el7.x86_64.orig/fs/ext4/ext4.h
++++ linux-3.10.0-123.el7.x86_64/fs/ext4/ext4.h
+@@ -2391,6 +2391,7 @@ struct ext4_group_info {
+ ext4_grpblk_t bb_fragments; /* nr of freespace fragments */
+ ext4_grpblk_t bb_largest_free_order;/* order of largest frag in BG */
+ struct list_head bb_prealloc_list;
++ unsigned long bb_prealloc_nr;
+ #ifdef DOUBLE_CHECK
+ void *bb_bitmap;
+ #endif
+Index: linux-3.10.0-123.el7.x86_64/fs/ext4/mballoc.c
+===================================================================
+--- linux-3.10.0-123.el7.x86_64.orig/fs/ext4/mballoc.c
++++ linux-3.10.0-123.el7.x86_64/fs/ext4/mballoc.c
+@@ -362,7 +362,7 @@ static const char *ext4_groupinfo_slab_n
+ "ext4_groupinfo_64k", "ext4_groupinfo_128k"
+ };
+
+-static void ext4_mb_generate_from_pa(struct super_block *sb, void *bitmap,
++static int ext4_mb_generate_from_pa(struct super_block *sb, void *bitmap,
+ ext4_group_t group);
+ static void ext4_mb_generate_from_freelist(struct super_block *sb, void *bitmap,
+ ext4_group_t group);
+@@ -718,7 +718,7 @@ mb_set_largest_free_order(struct super_b
+ }
+
+ static noinline_for_stack
+-void ext4_mb_generate_buddy(struct super_block *sb,
++int ext4_mb_generate_buddy(struct super_block *sb,
+ void *buddy, void *bitmap, ext4_group_t group)
+ {
+ struct ext4_group_info *grp = ext4_get_group_info(sb, group);
+@@ -750,19 +750,13 @@ void ext4_mb_generate_buddy(struct super
+ grp->bb_fragments = fragments;
+
+ if (free != grp->bb_free) {
+- ext4_grp_locked_error(sb, group, 0, 0,
+- "block bitmap and bg descriptor "
+- "inconsistent: %u vs %u free clusters",
+- free, grp->bb_free);
+- /*
+- * If we intend to continue, we consider group descriptor
+- * corrupt and update bb_free using bitmap value
+- */
+- grp->bb_free = free;
+- if (!EXT4_MB_GRP_BBITMAP_CORRUPT(grp))
+- percpu_counter_sub(&sbi->s_freeclusters_counter,
+- grp->bb_free);
+- set_bit(EXT4_GROUP_INFO_BBITMAP_CORRUPT_BIT, &grp->bb_state);
++ struct ext4_group_desc *gdp;
++ gdp = ext4_get_group_desc (sb, group, NULL);
++ ext4_error(sb, "group %lu: %u blocks in bitmap, %u in bb, "
++ "%u in gd, %lu pa's\n", (long unsigned int)group,
++ free, grp->bb_free, ext4_free_group_clusters(sb, gdp),
++ grp->bb_prealloc_nr);
++ return -EIO;
+ }
+ mb_set_largest_free_order(sb, grp);
+
+@@ -768,6 +767,8 @@ void ext4_mb_generate_buddy(struct super
+ EXT4_SB(sb)->s_mb_buddies_generated++;
+ EXT4_SB(sb)->s_mb_generation_time += period;
+ spin_unlock(&EXT4_SB(sb)->s_bal_lock);
++
++ return 0;
+ }
+
+ static void mb_regenerate_buddy(struct ext4_buddy *e4b)
+@@ -883,7 +884,7 @@ static int ext4_mb_init_cache(struct pag
+ }
+
+ first_block = page->index * blocks_per_page;
+- for (i = 0; i < blocks_per_page; i++) {
++ for (i = 0; i < blocks_per_page && err == 0; i++) {
+ group = (first_block + i) >> 1;
+ if (group >= ngroups)
+ break;
+@@ -922,7 +923,7 @@ static int ext4_mb_init_cache(struct pag
+ ext4_lock_group(sb, group);
+ /* init the buddy */
+ memset(data, 0xff, blocksize);
+- ext4_mb_generate_buddy(sb, data, incore, group);
++ err = ext4_mb_generate_buddy(sb, data, incore, group);
+ ext4_unlock_group(sb, group);
+ incore = NULL;
+ } else {
+@@ -937,7 +938,7 @@ static int ext4_mb_init_cache(struct pag
+ memcpy(data, bitmap, blocksize);
+
+ /* mark all preallocated blks used in in-core bitmap */
+- ext4_mb_generate_from_pa(sb, data, group);
++ err = ext4_mb_generate_from_pa(sb, data, group);
+ ext4_mb_generate_from_freelist(sb, data, group);
+ ext4_unlock_group(sb, group);
+
+@@ -947,7 +948,8 @@ static int ext4_mb_init_cache(struct pag
+ incore = data;
+ }
+ }
+- SetPageUptodate(page);
++ if (likely(err == 0))
++ SetPageUptodate(page);
+
+ out:
+ if (bh) {
+@@ -2224,9 +2226,11 @@ static void *ext4_mb_seq_groups_next(str
+ static int ext4_mb_seq_groups_show(struct seq_file *seq, void *v)
+ {
+ struct super_block *sb = seq->private;
++ struct ext4_group_desc *gdp;
+ ext4_group_t group = (ext4_group_t) ((unsigned long) v);
+ int i;
+ int err, buddy_loaded = 0;
++ int free = 0;
+ struct ext4_buddy e4b;
+ struct ext4_group_info *grinfo;
+ struct sg {
+@@ -2236,10 +2240,10 @@ static int ext4_mb_seq_groups_show(struc
+
+ group--;
+ if (group == 0)
+- seq_printf(seq, "#%-5s: %-5s %-5s %-5s "
++ seq_printf(seq, "#%-5s: %-5s %-5s %-5s %-5s %-5s"
+ "[ %-5s %-5s %-5s %-5s %-5s %-5s %-5s "
+ "%-5s %-5s %-5s %-5s %-5s %-5s %-5s ]\n",
+- "group", "free", "frags", "first",
++ "group", "bfree", "gfree", "frags", "first", "pa",
+ "2^0", "2^1", "2^2", "2^3", "2^4", "2^5", "2^6",
+ "2^7", "2^8", "2^9", "2^10", "2^11", "2^12", "2^13");
+
+@@ -2256,13 +2260,19 @@ static int ext4_mb_seq_groups_show(struc
+ buddy_loaded = 1;
+ }
+
++ gdp = ext4_get_group_desc(sb, group, NULL);
++ if (gdp != NULL)
++ free = ext4_free_group_clusters(sb, gdp);
++
+ memcpy(&sg, ext4_get_group_info(sb, group), i);
+
+ if (buddy_loaded)
+ ext4_mb_unload_buddy(&e4b);
+
+- seq_printf(seq, "#%-5u: %-5u %-5u %-5u [", group, sg.info.bb_free,
+- sg.info.bb_fragments, sg.info.bb_first_free);
++ seq_printf(seq, "#%-5lu: %-5u %-5u %-5u %-5u %-5lu [",
++ (long unsigned int)group, sg.info.bb_free, free,
++ sg.info.bb_fragments, sg.info.bb_first_free,
++ sg.info.bb_prealloc_nr);
+ for (i = 0; i <= 13; i++)
+ seq_printf(seq, " %-5u", i <= sb->s_blocksize_bits + 1 ?
+ sg.info.bb_counters[i] : 0);
+@@ -3507,22 +3517,67 @@ static void ext4_mb_generate_from_freeli
+ }
+
+ /*
++ * Check that the free blocks in the bitmap match the free block count
++ * in the group descriptor. Do this before taking preallocated blocks
++ * into account, to be able to detect on-disk corruption. The group lock
++ * must be held by the caller.
++ */
++int ext4_mb_check_ondisk_bitmap(struct super_block *sb, void *bitmap,
++ struct ext4_group_desc *gdp, int group)
++{
++ unsigned short max = EXT4_BLOCKS_PER_GROUP(sb);
++ unsigned short i, first, free = 0;
++
++ i = mb_find_next_zero_bit(bitmap, max, 0);
++
++ while (i < max) {
++ first = i;
++ i = mb_find_next_bit(bitmap, max, i);
++ if (i > max)
++ i = max;
++ free += i - first;
++ if (i < max)
++ i = mb_find_next_zero_bit(bitmap, max, i);
++ }
++
++ if (free != ext4_free_group_clusters(sb, gdp)) {
++ ext4_error(sb, "on-disk bitmap for group %d"
++ "corrupted: %u blocks free in bitmap, %u - in gd\n",
++ group, free, ext4_free_group_clusters(sb, gdp));
++ return -EIO;
++ }
++ return 0;
++}
++
++/*
+ * the function goes through all preallocation in this group and marks them
+ * used in in-core bitmap. buddy must be generated from this bitmap
+ * Need to be called with ext4 group lock held
+ */
+ static noinline_for_stack
+-void ext4_mb_generate_from_pa(struct super_block *sb, void *bitmap,
++int ext4_mb_generate_from_pa(struct super_block *sb, void *bitmap,
+ ext4_group_t group)
+ {
+ struct ext4_group_info *grp = ext4_get_group_info(sb, group);
+ struct ext4_prealloc_space *pa;
++ struct ext4_group_desc *gdp;
+ struct list_head *cur;
+ ext4_group_t groupnr;
+ ext4_grpblk_t start;
+ int preallocated = 0;
++ int skip = 0, count = 0;
++ int err;
+ int len;
+
++ gdp = ext4_get_group_desc (sb, group, NULL);
++ if (gdp == NULL)
++ return -EIO;
++
++ /* before applying preallocations, check bitmap consistency */
++ err = ext4_mb_check_ondisk_bitmap(sb, bitmap, gdp, group);
++ if (err)
++ return err;
++
+ /* all form of preallocation discards first load group,
+ * so the only competing code is preallocation use.
+ * we don't need any locking here
+@@ -3538,13 +3593,23 @@ void ext4_mb_generate_from_pa(struct sup
+ &groupnr, &start);
+ len = pa->pa_len;
+ spin_unlock(&pa->pa_lock);
+- if (unlikely(len == 0))
++ if (unlikely(len == 0)) {
++ skip++;
+ continue;
++ }
+ BUG_ON(groupnr != group);
+ ext4_set_bits(bitmap, start, len);
+ preallocated += len;
++ count ++;
++ }
++ if (count + skip != grp->bb_prealloc_nr) {
++ ext4_error(sb, "lost preallocations: "
++ "count %d, bb_prealloc_nr %lu, skip %d\n",
++ count, grp->bb_prealloc_nr, skip);
++ return -EIO;
+ }
+ mb_debug(1, "prellocated %u for group %u\n", preallocated, group);
++ return 0;
+ }
+
+ static void ext4_mb_pa_callback(struct rcu_head *head)
+@@ -3603,6 +3668,7 @@ static void ext4_mb_put_pa(struct ext4_a
+ */
+ ext4_lock_group(sb, grp);
+ list_del(&pa->pa_group_list);
++ ext4_get_group_info(sb, grp)->bb_prealloc_nr--;
+ ext4_unlock_group(sb, grp);
+
+ spin_lock(pa->pa_obj_lock);
+@@ -3697,6 +3763,7 @@ ext4_mb_new_inode_pa(struct ext4_allocat
+
+ ext4_lock_group(sb, ac->ac_b_ex.fe_group);
+ list_add(&pa->pa_group_list, &grp->bb_prealloc_list);
++ grp->bb_prealloc_nr++;
+ ext4_unlock_group(sb, ac->ac_b_ex.fe_group);
+
+ spin_lock(pa->pa_obj_lock);
+@@ -3758,6 +3825,7 @@ ext4_mb_new_group_pa(struct ext4_allocat
+
+ ext4_lock_group(sb, ac->ac_b_ex.fe_group);
+ list_add(&pa->pa_group_list, &grp->bb_prealloc_list);
++ grp->bb_prealloc_nr++;
+ ext4_unlock_group(sb, ac->ac_b_ex.fe_group);
+
+ /*
+@@ -3927,6 +3995,8 @@ repeat:
+
+ spin_unlock(&pa->pa_lock);
+
++ BUG_ON(grp->bb_prealloc_nr == 0);
++ grp->bb_prealloc_nr--;
+ list_del(&pa->pa_group_list);
+ list_add(&pa->u.pa_tmp_list, &list);
+ }
+@@ -4056,7 +4126,7 @@ repeat:
+ if (err) {
+ ext4_error(sb, "Error loading buddy information for %u",
+ group);
+- continue;
++ return;
+ }
+
+ bitmap_bh = ext4_read_block_bitmap(sb, group);
+@@ -4068,6 +4138,8 @@ repeat:
+ }
+
+ ext4_lock_group(sb, group);
++ BUG_ON(e4b.bd_info->bb_prealloc_nr == 0);
++ e4b.bd_info->bb_prealloc_nr--;
+ list_del(&pa->pa_group_list);
+ ext4_mb_release_inode_pa(&e4b, bitmap_bh, pa);
+ ext4_unlock_group(sb, group);
+@@ -4328,6 +4400,7 @@ ext4_mb_discard_lg_preallocations(struct
+ }
+ ext4_lock_group(sb, group);
+ list_del(&pa->pa_group_list);
++ ext4_get_group_info(sb, group)->bb_prealloc_nr--;
+ ext4_mb_release_group_pa(&e4b, pa);
+ ext4_unlock_group(sb, group);
+
+Index: linux-3.10.0-123.el7.x86_64/fs/ext4/mballoc.h
+===================================================================
+--- linux-3.10.0-123.el7.x86_64.orig/fs/ext4/mballoc.h
++++ linux-3.10.0-123.el7.x86_64/fs/ext4/mballoc.h
+@@ -82,7 +82,7 @@ extern ushort ext4_mballoc_debug;
+ /*
+ * for which requests use 2^N search using buddies
+ */
+-#define MB_DEFAULT_ORDER2_REQS 2
++#define MB_DEFAULT_ORDER2_REQS 8
+
+ /*
+ * default group prealloc size 512 blocks
--- /dev/null
+Index: linux-3.10.0-123.13.2.el7.x86_64/fs/ext4/mballoc.c
+===================================================================
+--- linux-3.10.0-123.13.2.el7.x86_64.orig/fs/ext4/mballoc.c
++++ linux-3.10.0-123.13.2.el7.x86_64/fs/ext4/mballoc.c
+@@ -3747,6 +3747,7 @@ ext4_mb_new_inode_pa(struct ext4_allocat
+ INIT_LIST_HEAD(&pa->pa_group_list);
+ pa->pa_deleted = 0;
+ pa->pa_type = MB_INODE_PA;
++ pa->pa_error = 0;
+
+ mb_debug(1, "new inode pa %p: %llu/%u for %u\n", pa,
+ pa->pa_pstart, pa->pa_len, pa->pa_lstart);
+@@ -3808,6 +3809,7 @@ ext4_mb_new_group_pa(struct ext4_allocat
+ INIT_LIST_HEAD(&pa->pa_group_list);
+ pa->pa_deleted = 0;
+ pa->pa_type = MB_GROUP_PA;
++ pa->pa_error = 0;
+
+ mb_debug(1, "new group pa %p: %llu/%u for %u\n", pa,
+ pa->pa_pstart, pa->pa_len, pa->pa_lstart);
+@@ -3868,7 +3870,9 @@ ext4_mb_release_inode_pa(struct ext4_bud
+ int err = 0;
+ int free = 0;
+
++ assert_spin_locked(ext4_group_lock_ptr(sb, e4b->bd_group));
+ BUG_ON(pa->pa_deleted == 0);
++ BUG_ON(pa->pa_inode == NULL);
+ ext4_get_group_no_and_offset(sb, pa->pa_pstart, &group, &bit);
+ grp_blk_start = pa->pa_pstart - EXT4_C2B(sbi, bit);
+ BUG_ON(group != e4b->bd_group && pa->pa_len != 0);
+@@ -3891,12 +3895,18 @@ ext4_mb_release_inode_pa(struct ext4_bud
+ mb_free_blocks(pa->pa_inode, e4b, bit, next - bit);
+ bit = next + 1;
+ }
+- if (free != pa->pa_free) {
+- ext4_msg(e4b->bd_sb, KERN_CRIT,
+- "pa %p: logic %lu, phys. %lu, len %lu",
+- pa, (unsigned long) pa->pa_lstart,
+- (unsigned long) pa->pa_pstart,
+- (unsigned long) pa->pa_len);
++
++ /* "free < pa->pa_free" means we maybe double alloc the same blocks,
++ * otherwise maybe leave some free blocks unavailable, no need to BUG.*/
++ if ((free > pa->pa_free && !pa->pa_error) || (free < pa->pa_free)) {
++ ext4_error(sb, "pa free mismatch: [pa %p] "
++ "[phy %lu] [logic %lu] [len %u] [free %u] "
++ "[error %u] [inode %lu] [freed %u]", pa,
++ (unsigned long)pa->pa_pstart,
++ (unsigned long)pa->pa_lstart,
++ (unsigned)pa->pa_len, (unsigned)pa->pa_free,
++ (unsigned)pa->pa_error, pa->pa_inode->i_ino,
++ free);
+ ext4_grp_locked_error(sb, group, 0, 0, "free %u, pa_free %u",
+ free, pa->pa_free);
+ /*
+@@ -3904,6 +3914,8 @@ ext4_mb_release_inode_pa(struct ext4_bud
+ * from the bitmap and continue.
+ */
+ }
++ /* do not verify if the file system is being umounted */
++ BUG_ON(atomic_read(&sb->s_active) > 0 && pa->pa_free != free);
+ atomic_add(free, &sbi->s_mb_discarded);
+
+ return err;
+@@ -4661,6 +4673,25 @@ errout:
+ ac->ac_b_ex.fe_len = 0;
+ ar->len = 0;
+ ext4_mb_show_ac(ac);
++ if (ac->ac_pa) {
++ struct ext4_prealloc_space *pa = ac->ac_pa;
++
++ /* We can not make sure whether the bitmap has
++ * been updated or not when fail case. So can
++ * not revert pa_free back, just mark pa_error*/
++ pa->pa_error++;
++ ext4_error(sb,
++ "Updating bitmap error: [err %d] "
++ "[pa %p] [phy %lu] [logic %lu] "
++ "[len %u] [free %u] [error %u] "
++ "[inode %lu]", *errp, pa,
++ (unsigned long)pa->pa_pstart,
++ (unsigned long)pa->pa_lstart,
++ (unsigned)pa->pa_len,
++ (unsigned)pa->pa_free,
++ (unsigned)pa->pa_error,
++ pa->pa_inode ? pa->pa_inode->i_ino : 0);
++ }
+ }
+ ext4_mb_release_context(ac);
+ out:
+Index: linux-3.10.0-123.13.2.el7.x86_64/fs/ext4/mballoc.h
+===================================================================
+--- linux-3.10.0-123.13.2.el7.x86_64.orig/fs/ext4/mballoc.h
++++ linux-3.10.0-123.13.2.el7.x86_64/fs/ext4/mballoc.h
+@@ -19,6 +19,7 @@
+ #include <linux/seq_file.h>
+ #include <linux/blkdev.h>
+ #include <linux/mutex.h>
++#include <linux/genhd.h>
+ #include "ext4_jbd2.h"
+ #include "ext4.h"
+
+@@ -125,6 +126,7 @@ struct ext4_prealloc_space {
+ ext4_grpblk_t pa_len; /* len of preallocated chunk */
+ ext4_grpblk_t pa_free; /* how many blocks are free */
+ unsigned short pa_type; /* pa type. inode or group */
++ unsigned short pa_error;
+ spinlock_t *pa_obj_lock;
+ struct inode *pa_inode; /* hack, for history only */
+ };
--- /dev/null
+Index: linux-3.10.0-123.13.2.el7.x86_64/fs/ext4/ext4.h
+===================================================================
+--- linux-3.10.0-123.13.2.el7.x86_64.orig/fs/ext4/ext4.h
++++ linux-3.10.0-123.13.2.el7.x86_64/fs/ext4/ext4.h
+@@ -1427,6 +1427,8 @@ static inline void ext4_clear_state_flag
+
+ #define NEXT_ORPHAN(inode) EXT4_I(inode)->i_dtime
+
++#define JOURNAL_START_HAS_3ARGS 1
++
+ /*
+ * Codes for operating systems
+ */
+@@ -2612,6 +2614,11 @@ struct ext4_extent;
+
+ extern int ext4_ext_tree_init(handle_t *handle, struct inode *);
+ extern int ext4_ext_writepage_trans_blocks(struct inode *, int);
++extern struct buffer_head *ext4_read_inode_bitmap(struct super_block *sb,
++ ext4_group_t block_group);
++extern struct buffer_head *ext4_append(handle_t *handle,
++ struct inode *inode,
++ ext4_lblk_t *block);
+ extern int ext4_ext_index_trans_blocks(struct inode *inode, int nrblocks,
+ int chunk);
+ extern int ext4_ext_map_blocks(handle_t *handle, struct inode *inode,
+Index: linux-3.10.0-123.13.2.el7.x86_64/fs/ext4/namei.c
+===================================================================
+--- linux-3.10.0-123.13.2.el7.x86_64.orig/fs/ext4/namei.c
++++ linux-3.10.0-123.13.2.el7.x86_64/fs/ext4/namei.c
+@@ -48,7 +48,7 @@
+ #define NAMEI_RA_BLOCKS 4
+ #define NAMEI_RA_SIZE (NAMEI_RA_CHUNKS * NAMEI_RA_BLOCKS)
+
+-static struct buffer_head *ext4_append(handle_t *handle,
++struct buffer_head *ext4_append(handle_t *handle,
+ struct inode *inode,
+ ext4_lblk_t *block)
+ {
+@@ -155,6 +155,7 @@ static struct buffer_head *__ext4_read_d
+ }
+ return bh;
+ }
++EXPORT_SYMBOL(ext4_append);
+
+ #ifndef assert
+ #define assert(test) J_ASSERT(test)
+@@ -2210,7 +2211,7 @@ out:
+ * DIR_NLINK feature is set if 1) nlinks > EXT4_LINK_MAX or 2) nlinks == 2,
+ * since this indicates that nlinks count was previously 1.
+ */
+-static void ext4_inc_count(handle_t *handle, struct inode *inode)
++void ext4_inc_count(handle_t *handle, struct inode *inode)
+ {
+ inc_nlink(inode);
+ if (is_dx(inode) && inode->i_nlink > 1) {
+@@ -2222,16 +2223,18 @@ static void ext4_inc_count(handle_t *han
+ }
+ }
+ }
++EXPORT_SYMBOL(ext4_inc_count);
+
+ /*
+ * If a directory had nlink == 1, then we should let it be 1. This indicates
+ * directory has >EXT4_LINK_MAX subdirs.
+ */
+-static void ext4_dec_count(handle_t *handle, struct inode *inode)
++void ext4_dec_count(handle_t *handle, struct inode *inode)
+ {
+ if (!S_ISDIR(inode->i_mode) || inode->i_nlink > 2)
+ drop_nlink(inode);
+ }
++EXPORT_SYMBOL(ext4_dec_count);
+
+
+ static int ext4_add_nondir(handle_t *handle,
+Index: linux-3.10.0-123.13.2.el7.x86_64/fs/ext4/ialloc.c
+===================================================================
+--- linux-3.10.0-123.13.2.el7.x86_64.orig/fs/ext4/ialloc.c
++++ linux-3.10.0-123.13.2.el7.x86_64/fs/ext4/ialloc.c
+@@ -111,7 +111,7 @@ void ext4_end_bitmap_read(struct buffer_
+ *
+ * Return buffer_head of bitmap on success or NULL.
+ */
+-static struct buffer_head *
++struct buffer_head *
+ ext4_read_inode_bitmap(struct super_block *sb, ext4_group_t block_group)
+ {
+ struct ext4_group_desc *desc;
+@@ -191,6 +191,7 @@ verify:
+ set_buffer_verified(bh);
+ return bh;
+ }
++EXPORT_SYMBOL(ext4_read_inode_bitmap);
+
+ /*
+ * NOTE! When we get the inode, we're the only people
+Index: linux-3.10.0-123.13.2.el7.x86_64/fs/ext4/inode.c
+===================================================================
+--- linux-3.10.0-123.13.2.el7.x86_64.orig/fs/ext4/inode.c
++++ linux-3.10.0-123.13.2.el7.x86_64/fs/ext4/inode.c
+@@ -5281,3 +5281,17 @@ out:
+ sb_end_pagefault(inode->i_sb);
+ return ret;
+ }
++EXPORT_SYMBOL(ext4_map_blocks);
++EXPORT_SYMBOL(ext4_truncate);
++EXPORT_SYMBOL(ext4_iget);
++EXPORT_SYMBOL(ext4_bread);
++EXPORT_SYMBOL(ext4_itable_unused_count);
++EXPORT_SYMBOL(ext4_force_commit);
++EXPORT_SYMBOL(ext4_mark_inode_dirty);
++EXPORT_SYMBOL(ext4_get_group_desc);
++EXPORT_SYMBOL(__ext4_journal_get_write_access);
++EXPORT_SYMBOL(__ext4_journal_start_sb);
++EXPORT_SYMBOL(__ext4_journal_stop);
++EXPORT_SYMBOL(__ext4_handle_dirty_metadata);
++EXPORT_SYMBOL(__ext4_std_error);
++EXPORT_SYMBOL(ext4fs_dirhash);
+Index: linux-3.10.0-123.13.2.el7.x86_64/fs/ext4/mballoc.c
+===================================================================
+--- linux-3.10.0-123.13.2.el7.x86_64.orig/fs/ext4/mballoc.c
++++ linux-3.10.0-123.13.2.el7.x86_64/fs/ext4/mballoc.c
+@@ -5281,7 +5281,6 @@ out:
+ void *buddy, void *bitmap, ext4_group_t group)
+ {
+ struct ext4_group_info *grp = ext4_get_group_info(sb, group);
+- struct ext4_sb_info *sbi = EXT4_SB(sb);
+ ext4_grpblk_t max = EXT4_CLUSTERS_PER_GROUP(sb);
+ ext4_grpblk_t i = 0;
+ ext4_grpblk_t first;
--- /dev/null
+We do not want i_xtime (i_atime/i_mtime/i_ctime) to be changed in the
+ldiskfs code path when it is invoked from Lustre, but the normal behaviour
+must be kept for use outside of Lustre. The S_NOCMTIME flag is therefore
+used to indicate that an inode is being driven by Lustre; for such inodes
+ext4_current_time() returns the existing i_ctime unchanged.
+
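+Editorial illustration (not part of the patch): a caller that manages
+timestamps itself would set the flag on the inode before dirtying it, so
+that the change below makes ext4_current_time() return the existing
+i_ctime instead of the current time. The osd_mark_inode() helper name is
+made up for this example:
+
+	/* hypothetical caller-side helper, e.g. in the Lustre osd layer */
+	static void osd_mark_inode(struct inode *inode)
+	{
+		/* IS_NOCMTIME(inode) is now true, so ldiskfs leaves
+		 * i_atime/i_mtime/i_ctime alone for this inode */
+		inode->i_flags |= S_NOCMTIME;
+	}
+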
+Index: linux-3.10.0-123.13.2.el7.x86_64/fs/ext4/ext4.h
+===================================================================
+--- linux-3.10.0-123.13.2.el7.x86_64.orig/fs/ext4/ext4.h
++++ linux-3.10.0-123.13.2.el7.x86_64/fs/ext4/ext4.h
+@@ -1336,6 +1336,8 @@ static inline struct ext4_inode_info *EX
+
+ static inline struct timespec ext4_current_time(struct inode *inode)
+ {
++ if (IS_NOCMTIME(inode))
++ return inode->i_ctime;
+ return (inode->i_sb->s_time_gran < NSEC_PER_SEC) ?
+ current_fs_time(inode->i_sb) : CURRENT_TIME_SEC;
+ }
--- /dev/null
+Index: linux-3.10.0-123.13.2.el7.x86_64/fs/ext4/ext4.h
+===================================================================
+--- linux-3.10.0-123.13.2.el7.x86_64.orig/fs/ext4/ext4.h
++++ linux-3.10.0-123.13.2.el7.x86_64/fs/ext4/ext4.h
+@@ -2145,6 +2145,19 @@ extern int ext4_orphan_add(handle_t *, s
+ extern int ext4_orphan_del(handle_t *, struct inode *);
+ extern int ext4_htree_fill_tree(struct file *dir_file, __u32 start_hash,
+ __u32 start_minor_hash, __u32 *next_hash);
++extern struct inode *ext4_create_inode(handle_t *handle,
++ struct inode * dir, int mode);
++extern int ext4_add_entry(handle_t *handle, struct dentry *dentry,
++ struct inode *inode);
++extern int ext4_delete_entry(handle_t *handle, struct inode * dir,
++ struct ext4_dir_entry_2 * de_del,
++ struct buffer_head * bh);
++extern struct buffer_head * ext4_find_entry(struct inode *dir,
++ const struct qstr *d_name,
++ struct ext4_dir_entry_2 ** res_dir,
++ int *inlined);
++extern int ext4_add_dot_dotdot(handle_t *handle, struct inode *dir,
++ struct inode *inode);
+ extern int search_dir(struct buffer_head *bh,
+ char *search_buf,
+ int buf_size,
+Index: linux-3.10.0-123.13.2.el7.x86_64/fs/ext4/namei.c
+===================================================================
+--- linux-3.10.0-123.13.2.el7.x86_64.orig/fs/ext4/namei.c
++++ linux-3.10.0-123.13.2.el7.x86_64/fs/ext4/namei.c
+@@ -1211,7 +1211,7 @@ static int is_dx_internal_node(struct in
+ * The returned buffer_head has ->b_count elevated. The caller is expected
+ * to brelse() it when appropriate.
+ */
+-static struct buffer_head * ext4_find_entry (struct inode *dir,
++struct buffer_head * ext4_find_entry(struct inode *dir,
+ const struct qstr *d_name,
+ struct ext4_dir_entry_2 **res_dir,
+ int *inlined)
+@@ -1355,6 +1355,7 @@ cleanup_and_exit:
+ brelse(bh_use[ra_ptr]);
+ return ret;
+ }
++EXPORT_SYMBOL(ext4_find_entry);
+
+ static struct buffer_head * ext4_dx_find_entry(struct inode *dir, const struct qstr *d_name,
+ struct ext4_dir_entry_2 **res_dir, int *err)
+@@ -1903,8 +1904,8 @@ static int make_indexed_dir(handle_t *ha
+ * may not sleep between calling this and putting something into
+ * the entry, as someone else might have used it while you slept.
+ */
+-static int ext4_add_entry(handle_t *handle, struct dentry *dentry,
+- struct inode *inode)
++int ext4_add_entry(handle_t *handle, struct dentry *dentry,
++ struct inode *inode)
+ {
+ struct inode *dir = dentry->d_parent->d_inode;
+ struct buffer_head *bh;
+@@ -1979,6 +1980,7 @@ static int ext4_add_entry(handle_t *hand
+ ext4_set_inode_state(inode, EXT4_STATE_NEWENTRY);
+ return retval;
+ }
++EXPORT_SYMBOL(ext4_add_entry);
+
+ /*
+ * Returns 0 for success, or a negative error value
+@@ -2165,7 +2167,7 @@ int ext4_generic_delete_entry(handle_t *
+ return -ENOENT;
+ }
+
+-static int ext4_delete_entry(handle_t *handle,
++int ext4_delete_entry(handle_t *handle,
+ struct inode *dir,
+ struct ext4_dir_entry_2 *de_del,
+ struct buffer_head *bh)
+@@ -2206,7 +2208,7 @@ out:
+ ext4_std_error(dir->i_sb, err);
+ return err;
+ }
+-
++EXPORT_SYMBOL(ext4_delete_entry);
+ /*
+ * DIR_NLINK feature is set if 1) nlinks > EXT4_LINK_MAX or 2) nlinks == 2,
+ * since this indicates that nlinks count was previously 1.
+@@ -2253,6 +2255,28 @@ static int ext4_add_nondir(handle_t *han
+ return err;
+ }
+
++ /* Return a locked inode, so the caller can modify the inode's state/flags
++ * before others can find it. The caller should unlock the inode by itself. */
++struct inode * ext4_create_inode(handle_t *handle, struct inode * dir, int mode)
++{
++ struct inode *inode;
++
++ inode = ext4_new_inode(handle, dir, mode, NULL, 0, NULL);
++ if (!IS_ERR(inode)) {
++ if (S_ISCHR(mode) || S_ISBLK(mode) || S_ISFIFO(mode)) {
++#ifdef CONFIG_LDISKFS_FS_XATTR
++ inode->i_op = &ext4_special_inode_operations;
++#endif
++ } else {
++ inode->i_op = &ext4_file_inode_operations;
++ inode->i_fop = &ext4_file_operations;
++ ext4_set_aops(inode);
++ }
++ }
++ return inode;
++}
++EXPORT_SYMBOL(ext4_create_inode);
++
+ /*
+ * By the time this is called, we already have created
+ * the directory cache entry for the new file, but it
+@@ -2402,6 +2426,23 @@ out:
+ return err;
+ }
+
++/* Initialize @inode as a subdirectory of @dir, and add the
++ * "." and ".." entries into the first directory block. */
++int ext4_add_dot_dotdot(handle_t *handle, struct inode * dir,
++ struct inode *inode)
++{
++ if (IS_ERR(handle))
++ return PTR_ERR(handle);
++
++ if (IS_DIRSYNC(dir))
++ ext4_handle_sync(handle);
++
++ inode->i_op = &ext4_dir_inode_operations.ops;
++ inode->i_fop = &ext4_dir_operations;
++ return ext4_init_new_dir(handle, dir, inode);
++}
++EXPORT_SYMBOL(ext4_add_dot_dotdot);
++
+ static int ext4_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode)
+ {
+ handle_t *handle;
--- /dev/null
+Single directory performance is critical for HPC workloads. In a
+typical use case an application creates a separate output file for
+each node and task in a job. As nodes and tasks increase, hundreds
+of thousands of files may be created in a single directory within
+a short window of time.
+Today, both filename lookups and filesystem-modifying operations
+(such as create and unlink) are protected by a single lock for an
+entire ldiskfs directory. The PDO project removes this bottleneck
+by introducing a parallel locking mechanism for ldiskfs
+directories, enabling multiple application threads to look up,
+create and unlink entries in the same directory in parallel.
+
+This patch contains:
+ - pdirops support for ldiskfs
+ - N-level htree directory
+ - integration with osd-ldiskfs (a usage sketch of the htree_lock API follows below)
+
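+Editorial illustration (not part of the patch): a minimal usage sketch of
+the htree_lock API declared in htree_lock.h below. One lock head is created
+per shared resource (e.g. per directory); each thread allocates its own
+htree_lock handle, takes a shared mode on the whole tree and then a per-key
+lock on the block it modifies. htree_lock is a sleeping lock, so the calls
+must be made from a context that may sleep; all names other than the
+htree_* calls are made up for the example.
+
+	#include <linux/htree_lock.h>
+
+	static struct htree_lock_head *dir_lhead;
+
+	static int example_init(void)
+	{
+		/* one key level, default hash bits, no private bytes */
+		dir_lhead = htree_lock_head_alloc(1, HTREE_HBITS_DEF, 0);
+		return dir_lhead != NULL ? 0 : -ENOMEM;
+	}
+
+	static void example_modify_block(u32 block_key)
+	{
+		/* one level deep, no per-lock private data */
+		struct htree_lock *lck = htree_lock_alloc(1, 0);
+
+		if (lck == NULL)
+			return;
+		/* shared (concurrent write) lock on the whole directory */
+		htree_lock(lck, dir_lhead, HTREE_LOCK_CW);
+		/* protected write lock on the specific key at level 0 */
+		htree_node_lock(lck, HTREE_LOCK_PW, block_key, 0);
+
+		/* ... modify the block protected by block_key ... */
+
+		htree_node_unlock(lck, 0, NULL);
+		htree_unlock(lck);
+		htree_lock_free(lck);
+	}
+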
+Index: linux-3.10.0-123.13.2.el7.x86_64/include/linux/htree_lock.h
+===================================================================
+--- /dev/null
++++ linux-3.10.0-123.13.2.el7.x86_64/include/linux/htree_lock.h
+@@ -0,0 +1,187 @@
++/*
++ * include/linux/htree_lock.h
++ *
++ * Copyright (c) 2011, 2012, Intel Corporation.
++ *
++ * Author: Liang Zhen <liang@whamcloud.com>
++ */
++
++/*
++ * htree lock
++ *
++ * htree_lock is an advanced lock, it can support five lock modes (concept is
++ * taken from DLM) and it's a sleeping lock.
++ *
++ * most common use case is:
++ * - create a htree_lock_head for data
++ * - each thread (contender) creates its own htree_lock
++ * - contender needs to call htree_lock(lock_node, mode) to protect data and
++ * call htree_unlock to release lock
++ *
++ * There is also a more advanced use-case: a user can take a PW/PR lock on
++ * a particular key; this is mostly used while the user is holding a shared
++ * lock on the htree (CW, CR)
++ *
++ * htree_lock(lock_node, HTREE_LOCK_CR); lock the htree with CR
++ * htree_node_lock(lock_node, HTREE_LOCK_PR, key...); lock @key with PR
++ * ...
++ * htree_node_unlock(lock_node); unlock the key
++ *
++ * We can also have N levels of this kind of key: specify the number of
++ * levels when creating the htree_lock_head, then lock/unlock a specific
++ * level by:
++ * htree_node_lock(lock_node, mode1, key1, level1...);
++ * do something;
++ * htree_node_lock(lock_node, mode1, key2, level2...);
++ * do something;
++ * htree_node_unlock(lock_node, level2);
++ * htree_node_unlock(lock_node, level1);
++ *
++ * NB: for multi-level, should be careful about locking order to avoid deadlock
++ */
++
++#ifndef _LINUX_HTREE_LOCK_H
++#define _LINUX_HTREE_LOCK_H
++
++#include <linux/list.h>
++#include <linux/spinlock.h>
++#include <linux/sched.h>
++
++/*
++ * Lock Modes
++ * more details can be found here:
++ * http://en.wikipedia.org/wiki/Distributed_lock_manager
++ */
++typedef enum {
++ HTREE_LOCK_EX = 0, /* exclusive lock: incompatible with all others */
++ HTREE_LOCK_PW, /* protected write: allows only CR users */
++ HTREE_LOCK_PR, /* protected read: allow PR, CR users */
++ HTREE_LOCK_CW, /* concurrent write: allow CR, CW users */
++ HTREE_LOCK_CR, /* concurrent read: allow all but EX users */
++ HTREE_LOCK_MAX, /* number of lock modes */
++} htree_lock_mode_t;
++
++#define HTREE_LOCK_NL HTREE_LOCK_MAX
++#define HTREE_LOCK_INVAL 0xdead10c
++
++enum {
++ HTREE_HBITS_MIN = 2,
++ HTREE_HBITS_DEF = 14,
++ HTREE_HBITS_MAX = 32,
++};
++
++enum {
++ HTREE_EVENT_DISABLE = (0),
++ HTREE_EVENT_RD = (1 << HTREE_LOCK_PR),
++ HTREE_EVENT_WR = (1 << HTREE_LOCK_PW),
++ HTREE_EVENT_RDWR = (HTREE_EVENT_RD | HTREE_EVENT_WR),
++};
++
++struct htree_lock;
++
++typedef void (*htree_event_cb_t)(void *target, void *event);
++
++struct htree_lock_child {
++ struct list_head lc_list; /* granted list */
++ htree_event_cb_t lc_callback; /* event callback */
++ unsigned lc_events; /* event types */
++};
++
++struct htree_lock_head {
++ unsigned long lh_lock; /* bits lock */
++ /* blocked lock list (htree_lock) */
++ struct list_head lh_blocked_list;
++ /* # key levels */
++ u16 lh_depth;
++ /* hash bits for key and limit number of locks */
++ u16 lh_hbits;
++ /* counters for blocked locks */
++ u16 lh_nblocked[HTREE_LOCK_MAX];
++ /* counters for granted locks */
++ u16 lh_ngranted[HTREE_LOCK_MAX];
++ /* private data */
++ void *lh_private;
++ /* array of children locks */
++ struct htree_lock_child lh_children[0];
++};
++
++/* htree_lock_node_t is child-lock for a specific key (ln_value) */
++struct htree_lock_node {
++ htree_lock_mode_t ln_mode;
++ /* major hash key */
++ u16 ln_major_key;
++ /* minor hash key */
++ u16 ln_minor_key;
++ struct list_head ln_major_list;
++ struct list_head ln_minor_list;
++ /* alive list, all locks (granted, blocked, listening) are on it */
++ struct list_head ln_alive_list;
++ /* blocked list */
++ struct list_head ln_blocked_list;
++ /* granted list */
++ struct list_head ln_granted_list;
++ void *ln_ev_target;
++};
++
++struct htree_lock {
++ struct task_struct *lk_task;
++ struct htree_lock_head *lk_head;
++ void *lk_private;
++ unsigned lk_depth;
++ htree_lock_mode_t lk_mode;
++ struct list_head lk_blocked_list;
++ struct htree_lock_node lk_nodes[0];
++};
++
++/* create a lock head, which stands for a resource */
++struct htree_lock_head *htree_lock_head_alloc(unsigned depth,
++ unsigned hbits, unsigned priv);
++/* free a lock head */
++void htree_lock_head_free(struct htree_lock_head *lhead);
++/* register event callback for child lock at level @depth */
++void htree_lock_event_attach(struct htree_lock_head *lhead, unsigned depth,
++ unsigned events, htree_event_cb_t callback);
++/* create a lock handle, which stands for a thread */
++struct htree_lock *htree_lock_alloc(unsigned depth, unsigned pbytes);
++/* free a lock handle */
++void htree_lock_free(struct htree_lock *lck);
++/* lock htree, when @wait is true, 0 is returned if the lock can't
++ * be granted immediately */
++int htree_lock_try(struct htree_lock *lck, struct htree_lock_head *lhead,
++ htree_lock_mode_t mode, int wait);
++/* unlock htree */
++void htree_unlock(struct htree_lock *lck);
++/* unlock and relock htree with @new_mode */
++int htree_change_lock_try(struct htree_lock *lck,
++ htree_lock_mode_t new_mode, int wait);
++void htree_change_mode(struct htree_lock *lck, htree_lock_mode_t mode);
++/* require child lock (key) of htree at level @dep, @event will be sent to all
++ * listeners on this @key while lock being granted */
++int htree_node_lock_try(struct htree_lock *lck, htree_lock_mode_t mode,
++ u32 key, unsigned dep, int wait, void *event);
++/* release child lock at level @dep, this lock will listen on its key
++ * if @event isn't NULL, event_cb will be called against @lck while granting
++ * any other lock at level @dep with the same key */
++void htree_node_unlock(struct htree_lock *lck, unsigned dep, void *event);
++/* stop listening on child lock at level @dep */
++void htree_node_stop_listen(struct htree_lock *lck, unsigned dep);
++/* for debug */
++void htree_lock_stat_print(int depth);
++void htree_lock_stat_reset(void);
++
++#define htree_lock(lck, lh, mode) htree_lock_try(lck, lh, mode, 1)
++#define htree_change_lock(lck, mode) htree_change_lock_try(lck, mode, 1)
++
++#define htree_lock_mode(lck) ((lck)->lk_mode)
++
++#define htree_node_lock(lck, mode, key, dep) \
++ htree_node_lock_try(lck, mode, key, dep, 1, NULL)
++/* this is only safe in thread context of lock owner */
++#define htree_node_is_granted(lck, dep) \
++ ((lck)->lk_nodes[dep].ln_mode != HTREE_LOCK_INVAL && \
++ (lck)->lk_nodes[dep].ln_mode != HTREE_LOCK_NL)
++/* this is only safe in thread context of lock owner */
++#define htree_node_is_listening(lck, dep) \
++ ((lck)->lk_nodes[dep].ln_mode == HTREE_LOCK_NL)
++
++#endif
+Index: linux-3.10.0-123.13.2.el7.x86_64/fs/ext4/htree_lock.c
+===================================================================
+--- /dev/null
++++ linux-3.10.0-123.13.2.el7.x86_64/fs/ext4/htree_lock.c
+@@ -0,0 +1,880 @@
++/*
++ * fs/ext4/htree_lock.c
++ *
++ * Copyright (c) 2011, 2012, Intel Corporation.
++ *
++ * Author: Liang Zhen <liang@whamcloud.com>
++ */
++#include <linux/jbd2.h>
++#include <linux/hash.h>
++#include <linux/module.h>
++#include <linux/htree_lock.h>
++
++enum {
++ HTREE_LOCK_BIT_EX = (1 << HTREE_LOCK_EX),
++ HTREE_LOCK_BIT_PW = (1 << HTREE_LOCK_PW),
++ HTREE_LOCK_BIT_PR = (1 << HTREE_LOCK_PR),
++ HTREE_LOCK_BIT_CW = (1 << HTREE_LOCK_CW),
++ HTREE_LOCK_BIT_CR = (1 << HTREE_LOCK_CR),
++};
++
++enum {
++ HTREE_LOCK_COMPAT_EX = 0,
++ HTREE_LOCK_COMPAT_PW = HTREE_LOCK_COMPAT_EX | HTREE_LOCK_BIT_CR,
++ HTREE_LOCK_COMPAT_PR = HTREE_LOCK_COMPAT_PW | HTREE_LOCK_BIT_PR,
++ HTREE_LOCK_COMPAT_CW = HTREE_LOCK_COMPAT_PW | HTREE_LOCK_BIT_CW,
++ HTREE_LOCK_COMPAT_CR = HTREE_LOCK_COMPAT_CW | HTREE_LOCK_BIT_PR |
++ HTREE_LOCK_BIT_PW,
++};
++
++static int htree_lock_compat[] = {
++ [HTREE_LOCK_EX] HTREE_LOCK_COMPAT_EX,
++ [HTREE_LOCK_PW] HTREE_LOCK_COMPAT_PW,
++ [HTREE_LOCK_PR] HTREE_LOCK_COMPAT_PR,
++ [HTREE_LOCK_CW] HTREE_LOCK_COMPAT_CW,
++ [HTREE_LOCK_CR] HTREE_LOCK_COMPAT_CR,
++};
++
++/* max allowed htree-lock depth.
++ * We only need depth=3 for ext4 although user can have higher value. */
++#define HTREE_LOCK_DEP_MAX 16
++
++#ifdef HTREE_LOCK_DEBUG
++
++static char *hl_name[] = {
++ [HTREE_LOCK_EX] "EX",
++ [HTREE_LOCK_PW] "PW",
++ [HTREE_LOCK_PR] "PR",
++ [HTREE_LOCK_CW] "CW",
++ [HTREE_LOCK_CR] "CR",
++};
++
++/* lock stats */
++struct htree_lock_node_stats {
++ unsigned long long blocked[HTREE_LOCK_MAX];
++ unsigned long long granted[HTREE_LOCK_MAX];
++ unsigned long long retried[HTREE_LOCK_MAX];
++ unsigned long long events;
++};
++
++struct htree_lock_stats {
++ struct htree_lock_node_stats nodes[HTREE_LOCK_DEP_MAX];
++ unsigned long long granted[HTREE_LOCK_MAX];
++ unsigned long long blocked[HTREE_LOCK_MAX];
++};
++
++static struct htree_lock_stats hl_stats;
++
++void htree_lock_stat_reset(void)
++{
++ memset(&hl_stats, 0, sizeof(hl_stats));
++}
++
++void htree_lock_stat_print(int depth)
++{
++ int i;
++ int j;
++
++ printk(KERN_DEBUG "HTREE LOCK STATS:\n");
++ for (i = 0; i < HTREE_LOCK_MAX; i++) {
++ printk(KERN_DEBUG "[%s]: G [%10llu], B [%10llu]\n",
++ hl_name[i], hl_stats.granted[i], hl_stats.blocked[i]);
++ }
++ for (i = 0; i < depth; i++) {
++ printk(KERN_DEBUG "HTREE CHILD [%d] STATS:\n", i);
++ for (j = 0; j < HTREE_LOCK_MAX; j++) {
++ printk(KERN_DEBUG
++ "[%s]: G [%10llu], B [%10llu], R [%10llu]\n",
++ hl_name[j], hl_stats.nodes[i].granted[j],
++ hl_stats.nodes[i].blocked[j],
++ hl_stats.nodes[i].retried[j]);
++ }
++ }
++}
++
++#define lk_grant_inc(m) do { hl_stats.granted[m]++; } while (0)
++#define lk_block_inc(m) do { hl_stats.blocked[m]++; } while (0)
++#define ln_grant_inc(d, m) do { hl_stats.nodes[d].granted[m]++; } while (0)
++#define ln_block_inc(d, m) do { hl_stats.nodes[d].blocked[m]++; } while (0)
++#define ln_retry_inc(d, m) do { hl_stats.nodes[d].retried[m]++; } while (0)
++#define ln_event_inc(d) do { hl_stats.nodes[d].events++; } while (0)
++
++#else /* !DEBUG */
++
++void htree_lock_stat_reset(void) {}
++void htree_lock_stat_print(int depth) {}
++
++#define lk_grant_inc(m) do {} while (0)
++#define lk_block_inc(m) do {} while (0)
++#define ln_grant_inc(d, m) do {} while (0)
++#define ln_block_inc(d, m) do {} while (0)
++#define ln_retry_inc(d, m) do {} while (0)
++#define ln_event_inc(d) do {} while (0)
++
++#endif /* DEBUG */
++
++EXPORT_SYMBOL(htree_lock_stat_reset);
++EXPORT_SYMBOL(htree_lock_stat_print);
++
++#define HTREE_DEP_ROOT (-1)
++
++#define htree_spin_lock(lhead, dep) \
++ bit_spin_lock((dep) + 1, &(lhead)->lh_lock)
++#define htree_spin_unlock(lhead, dep) \
++ bit_spin_unlock((dep) + 1, &(lhead)->lh_lock)
++
++#define htree_key_event_ignore(child, ln) \
++ (!((child)->lc_events & (1 << (ln)->ln_mode)))
++
++static int
++htree_key_list_empty(struct htree_lock_node *ln)
++{
++ return list_empty(&ln->ln_major_list) && list_empty(&ln->ln_minor_list);
++}
++
++static void
++htree_key_list_del_init(struct htree_lock_node *ln)
++{
++ struct htree_lock_node *tmp = NULL;
++
++ if (!list_empty(&ln->ln_minor_list)) {
++ tmp = list_entry(ln->ln_minor_list.next,
++ struct htree_lock_node, ln_minor_list);
++ list_del_init(&ln->ln_minor_list);
++ }
++
++ if (list_empty(&ln->ln_major_list))
++ return;
++
++ if (tmp == NULL) { /* not on minor key list */
++ list_del_init(&ln->ln_major_list);
++ } else {
++ BUG_ON(!list_empty(&tmp->ln_major_list));
++ list_replace_init(&ln->ln_major_list, &tmp->ln_major_list);
++ }
++}
++
++static void
++htree_key_list_replace_init(struct htree_lock_node *old,
++ struct htree_lock_node *new)
++{
++ if (!list_empty(&old->ln_major_list))
++ list_replace_init(&old->ln_major_list, &new->ln_major_list);
++
++ if (!list_empty(&old->ln_minor_list))
++ list_replace_init(&old->ln_minor_list, &new->ln_minor_list);
++}
++
++static void
++htree_key_event_enqueue(struct htree_lock_child *child,
++ struct htree_lock_node *ln, int dep, void *event)
++{
++ struct htree_lock_node *tmp;
++
++ /* NB: ALWAYS called holding lhead::lh_lock(dep) */
++ BUG_ON(ln->ln_mode == HTREE_LOCK_NL);
++ if (event == NULL || htree_key_event_ignore(child, ln))
++ return;
++
++ /* shouldn't be a very long list */
++ list_for_each_entry(tmp, &ln->ln_alive_list, ln_alive_list) {
++ if (tmp->ln_mode == HTREE_LOCK_NL) {
++ ln_event_inc(dep);
++ if (child->lc_callback != NULL)
++ child->lc_callback(tmp->ln_ev_target, event);
++ }
++ }
++}
++
++static int
++htree_node_lock_enqueue(struct htree_lock *newlk, struct htree_lock *curlk,
++ unsigned dep, int wait, void *event)
++{
++ struct htree_lock_child *child = &newlk->lk_head->lh_children[dep];
++ struct htree_lock_node *newln = &newlk->lk_nodes[dep];
++ struct htree_lock_node *curln = &curlk->lk_nodes[dep];
++
++ /* NB: ALWAYS called holding lhead::lh_lock(dep) */
++ /* NB: we only expect PR/PW lock modes here; only these two modes are
++ * allowed for htree_node_lock (asserted in htree_node_lock_internal).
++ * NL is only used for listeners, a user can't directly request NL mode */
++ if ((curln->ln_mode == HTREE_LOCK_NL) ||
++ (curln->ln_mode != HTREE_LOCK_PW &&
++ newln->ln_mode != HTREE_LOCK_PW)) {
++ /* no conflict, attach it on granted list of @curlk */
++ if (curln->ln_mode != HTREE_LOCK_NL) {
++ list_add(&newln->ln_granted_list,
++ &curln->ln_granted_list);
++ } else {
++ /* replace key owner */
++ htree_key_list_replace_init(curln, newln);
++ }
++
++ list_add(&newln->ln_alive_list, &curln->ln_alive_list);
++ htree_key_event_enqueue(child, newln, dep, event);
++ ln_grant_inc(dep, newln->ln_mode);
++ return 1; /* still hold lh_lock */
++ }
++
++ if (!wait) { /* can't grant and don't want to wait */
++ ln_retry_inc(dep, newln->ln_mode);
++ newln->ln_mode = HTREE_LOCK_INVAL;
++ return -1; /* don't wait and just return -1 */
++ }
++
++ newlk->lk_task = current;
++ set_current_state(TASK_UNINTERRUPTIBLE);
++ /* conflict, attach it on blocked list of curlk */
++ list_add_tail(&newln->ln_blocked_list, &curln->ln_blocked_list);
++ list_add(&newln->ln_alive_list, &curln->ln_alive_list);
++ ln_block_inc(dep, newln->ln_mode);
++
++ htree_spin_unlock(newlk->lk_head, dep);
++ /* wait to be given the lock */
++ if (newlk->lk_task != NULL)
++ schedule();
++ /* granted, no doubt, wake up will set me RUNNING */
++ if (event == NULL || htree_key_event_ignore(child, newln))
++ return 0; /* granted without lh_lock */
++
++ htree_spin_lock(newlk->lk_head, dep);
++ htree_key_event_enqueue(child, newln, dep, event);
++ return 1; /* still hold lh_lock */
++}
++
++/*
++ * get PR/PW access to a particular tree-node according to @dep and @key,
++ * it will return -1 if @wait is false and the lock can't be granted
++ * immediately. All listeners (HTREE_LOCK_NL) on @dep with the same @key
++ * will get @event if it's not NULL.
++ * NB: ALWAYS called holding lhead::lh_lock
++ */
++static int
++htree_node_lock_internal(struct htree_lock_head *lhead, struct htree_lock *lck,
++ htree_lock_mode_t mode, u32 key, unsigned dep,
++ int wait, void *event)
++{
++ LIST_HEAD (list);
++ struct htree_lock *tmp;
++ struct htree_lock *tmp2;
++ u16 major;
++ u16 minor;
++ u8 reverse;
++ u8 ma_bits;
++ u8 mi_bits;
++
++ BUG_ON(mode != HTREE_LOCK_PW && mode != HTREE_LOCK_PR);
++ BUG_ON(htree_node_is_granted(lck, dep));
++
++ key = hash_long(key, lhead->lh_hbits);
++
++ mi_bits = lhead->lh_hbits >> 1;
++ ma_bits = lhead->lh_hbits - mi_bits;
++
++ lck->lk_nodes[dep].ln_major_key = major = key & ((1U << ma_bits) - 1);
++ lck->lk_nodes[dep].ln_minor_key = minor = key >> ma_bits;
++ lck->lk_nodes[dep].ln_mode = mode;
++
++ /*
++ * The major key list is an ordered list, so searches are started
++ * at the end of the list that is numerically closer to major_key,
++ * so at most half of the list will be walked (for well-distributed
++ * keys). The list traversal aborts early if the expected key
++ * location is passed.
++ */
++ reverse = (major >= (1 << (ma_bits - 1)));
++
++ if (reverse) {
++ list_for_each_entry_reverse(tmp,
++ &lhead->lh_children[dep].lc_list,
++ lk_nodes[dep].ln_major_list) {
++ if (tmp->lk_nodes[dep].ln_major_key == major) {
++ goto search_minor;
++
++ } else if (tmp->lk_nodes[dep].ln_major_key < major) {
++ /* attach _after_ @tmp */
++ list_add(&lck->lk_nodes[dep].ln_major_list,
++ &tmp->lk_nodes[dep].ln_major_list);
++ goto out_grant_major;
++ }
++ }
++
++ list_add(&lck->lk_nodes[dep].ln_major_list,
++ &lhead->lh_children[dep].lc_list);
++ goto out_grant_major;
++
++ } else {
++ list_for_each_entry(tmp, &lhead->lh_children[dep].lc_list,
++ lk_nodes[dep].ln_major_list) {
++ if (tmp->lk_nodes[dep].ln_major_key == major) {
++ goto search_minor;
++
++ } else if (tmp->lk_nodes[dep].ln_major_key > major) {
++ /* insert _before_ @tmp */
++ list_add_tail(&lck->lk_nodes[dep].ln_major_list,
++ &tmp->lk_nodes[dep].ln_major_list);
++ goto out_grant_major;
++ }
++ }
++
++ list_add_tail(&lck->lk_nodes[dep].ln_major_list,
++ &lhead->lh_children[dep].lc_list);
++ goto out_grant_major;
++ }
++
++ search_minor:
++ /*
++ * NB: the minor_key list doesn't have a "head", @list is just a
++ * temporary stub to help the list search; make sure it's removed
++ * after searching.
++ * The minor_key list is an ordered list too.
++ */
++ list_add_tail(&list, &tmp->lk_nodes[dep].ln_minor_list);
++
++ reverse = (minor >= (1 << (mi_bits - 1)));
++
++ if (reverse) {
++ list_for_each_entry_reverse(tmp2, &list,
++ lk_nodes[dep].ln_minor_list) {
++ if (tmp2->lk_nodes[dep].ln_minor_key == minor) {
++ goto out_enqueue;
++
++ } else if (tmp2->lk_nodes[dep].ln_minor_key < minor) {
++ /* attach _after_ @tmp2 */
++ list_add(&lck->lk_nodes[dep].ln_minor_list,
++ &tmp2->lk_nodes[dep].ln_minor_list);
++ goto out_grant_minor;
++ }
++ }
++
++ list_add(&lck->lk_nodes[dep].ln_minor_list, &list);
++
++ } else {
++ list_for_each_entry(tmp2, &list,
++ lk_nodes[dep].ln_minor_list) {
++ if (tmp2->lk_nodes[dep].ln_minor_key == minor) {
++ goto out_enqueue;
++
++ } else if (tmp2->lk_nodes[dep].ln_minor_key > minor) {
++ /* insert _before_ @tmp2 */
++ list_add_tail(&lck->lk_nodes[dep].ln_minor_list,
++ &tmp2->lk_nodes[dep].ln_minor_list);
++ goto out_grant_minor;
++ }
++ }
++
++ list_add_tail(&lck->lk_nodes[dep].ln_minor_list, &list);
++ }
++
++ out_grant_minor:
++ if (list.next == &lck->lk_nodes[dep].ln_minor_list) {
++ /* new lock @lck is the first one on minor_key list, which
++ * means it has the smallest minor_key and it should
++ * replace @tmp as minor_key owner */
++ list_replace_init(&tmp->lk_nodes[dep].ln_major_list,
++ &lck->lk_nodes[dep].ln_major_list);
++ }
++ /* remove the temporary head */
++ list_del(&list);
++
++ out_grant_major:
++ ln_grant_inc(dep, lck->lk_nodes[dep].ln_mode);
++ return 1; /* granted with holding lh_lock */
++
++ out_enqueue:
++ list_del(&list); /* remove temporary head */
++ return htree_node_lock_enqueue(lck, tmp2, dep, wait, event);
++}
++
++/*
++ * release the key of @lck at level @dep, and grant any blocked locks.
++ * the caller will still listen on @key if @event is not NULL, which means
++ * the caller can see an event (by event_cb) while granting any lock with
++ * the same key at level @dep.
++ * NB: ALWAYS called holding lhead::lh_lock
++ * NB: listener will not block anyone because listening mode is HTREE_LOCK_NL
++ */
++static void
++htree_node_unlock_internal(struct htree_lock_head *lhead,
++ struct htree_lock *curlk, unsigned dep, void *event)
++{
++ struct htree_lock_node *curln = &curlk->lk_nodes[dep];
++ struct htree_lock *grtlk = NULL;
++ struct htree_lock_node *grtln;
++ struct htree_lock *poslk;
++ struct htree_lock *tmplk;
++
++ if (!htree_node_is_granted(curlk, dep))
++ return;
++
++ if (!list_empty(&curln->ln_granted_list)) {
++ /* there is another granted lock */
++ grtlk = list_entry(curln->ln_granted_list.next,
++ struct htree_lock,
++ lk_nodes[dep].ln_granted_list);
++ list_del_init(&curln->ln_granted_list);
++ }
++
++ if (grtlk == NULL && !list_empty(&curln->ln_blocked_list)) {
++ /*
++ * @curlk is the only granted lock, so we confirmed:
++ * a) curln is key owner (attached on major/minor_list),
++ * so if there is any blocked lock, it should be attached
++ * on curln->ln_blocked_list
++ * b) we always can grant the first blocked lock
++ */
++ grtlk = list_entry(curln->ln_blocked_list.next,
++ struct htree_lock,
++ lk_nodes[dep].ln_blocked_list);
++ BUG_ON(grtlk->lk_task == NULL);
++ wake_up_process(grtlk->lk_task);
++ }
++
++ if (event != NULL &&
++ lhead->lh_children[dep].lc_events != HTREE_EVENT_DISABLE) {
++ curln->ln_ev_target = event;
++ curln->ln_mode = HTREE_LOCK_NL; /* listen! */
++ } else {
++ curln->ln_mode = HTREE_LOCK_INVAL;
++ }
++
++ if (grtlk == NULL) { /* I must be the only one locking this key */
++ struct htree_lock_node *tmpln;
++
++ BUG_ON(htree_key_list_empty(curln));
++
++ if (curln->ln_mode == HTREE_LOCK_NL) /* listening */
++ return;
++
++ /* not listening */
++ if (list_empty(&curln->ln_alive_list)) { /* no more listener */
++ htree_key_list_del_init(curln);
++ return;
++ }
++
++ tmpln = list_entry(curln->ln_alive_list.next,
++ struct htree_lock_node, ln_alive_list);
++
++ BUG_ON(tmpln->ln_mode != HTREE_LOCK_NL);
++
++ htree_key_list_replace_init(curln, tmpln);
++ list_del_init(&curln->ln_alive_list);
++
++ return;
++ }
++
++ /* have a granted lock */
++ grtln = &grtlk->lk_nodes[dep];
++ if (!list_empty(&curln->ln_blocked_list)) {
++ /* only key owner can be on both lists */
++ BUG_ON(htree_key_list_empty(curln));
++
++ if (list_empty(&grtln->ln_blocked_list)) {
++ list_add(&grtln->ln_blocked_list,
++ &curln->ln_blocked_list);
++ }
++ list_del_init(&curln->ln_blocked_list);
++ }
++ /*
++ * NB: this is the tricky part:
++ * We have only two modes for child-lock (PR and PW), also,
++ * only owner of the key (attached on major/minor_list) can be on
++ * both blocked_list and granted_list, so @grtlk must be one
++ * of these two cases:
++ *
++ * a) @grtlk is taken from granted_list, which means we've granted
++ * more than one lock so @grtlk has to be PR, the first blocked
++ * lock must be PW and we can't grant it at all.
++ * So even if @grtlk is not the owner of the key (empty blocked_list),
++ * we don't care because we can't grant any lock.
++ * b) we just grant a new lock which is taken from head of blocked
++ * list, and it should be the first granted lock, and it should
++ * be the first one linked on blocked_list.
++ *
++ * Either way, we can get the correct result by iterating the blocked_list
++ * of @grtlk, and don't have to bother finding out the owner of the
++ * current key.
++ */
++ list_for_each_entry_safe(poslk, tmplk, &grtln->ln_blocked_list,
++ lk_nodes[dep].ln_blocked_list) {
++ if (grtlk->lk_nodes[dep].ln_mode == HTREE_LOCK_PW ||
++ poslk->lk_nodes[dep].ln_mode == HTREE_LOCK_PW)
++ break;
++ /* grant all readers */
++ list_del_init(&poslk->lk_nodes[dep].ln_blocked_list);
++ list_add(&poslk->lk_nodes[dep].ln_granted_list,
++ &grtln->ln_granted_list);
++
++ BUG_ON(poslk->lk_task == NULL);
++ wake_up_process(poslk->lk_task);
++ }
++
++ /* if @curln is the owner of this key, replace it with @grtln */
++ if (!htree_key_list_empty(curln))
++ htree_key_list_replace_init(curln, grtln);
++
++ if (curln->ln_mode == HTREE_LOCK_INVAL)
++ list_del_init(&curln->ln_alive_list);
++}
++
++/*
++ * it's just a wrapper of htree_node_lock_internal; it returns 1 when granted
++ * and 0 only if @wait is false and the lock can't be granted immediately
++ */
++int
++htree_node_lock_try(struct htree_lock *lck, htree_lock_mode_t mode,
++ u32 key, unsigned dep, int wait, void *event)
++{
++ struct htree_lock_head *lhead = lck->lk_head;
++ int rc;
++
++ BUG_ON(dep >= lck->lk_depth);
++ BUG_ON(lck->lk_mode == HTREE_LOCK_INVAL);
++
++ htree_spin_lock(lhead, dep);
++ rc = htree_node_lock_internal(lhead, lck, mode, key, dep, wait, event);
++ if (rc != 0)
++ htree_spin_unlock(lhead, dep);
++ return rc >= 0;
++}
++EXPORT_SYMBOL(htree_node_lock_try);
++
++/* it's wrapper of htree_node_unlock_internal */
++void
++htree_node_unlock(struct htree_lock *lck, unsigned dep, void *event)
++{
++ struct htree_lock_head *lhead = lck->lk_head;
++
++ BUG_ON(dep >= lck->lk_depth);
++ BUG_ON(lck->lk_mode == HTREE_LOCK_INVAL);
++
++ htree_spin_lock(lhead, dep);
++ htree_node_unlock_internal(lhead, lck, dep, event);
++ htree_spin_unlock(lhead, dep);
++}
++EXPORT_SYMBOL(htree_node_unlock);
++
++/* stop listening on child-lock level @dep */
++void
++htree_node_stop_listen(struct htree_lock *lck, unsigned dep)
++{
++ struct htree_lock_node *ln = &lck->lk_nodes[dep];
++ struct htree_lock_node *tmp;
++
++ BUG_ON(htree_node_is_granted(lck, dep));
++ BUG_ON(!list_empty(&ln->ln_blocked_list));
++ BUG_ON(!list_empty(&ln->ln_granted_list));
++
++ if (!htree_node_is_listening(lck, dep))
++ return;
++
++ htree_spin_lock(lck->lk_head, dep);
++ ln->ln_mode = HTREE_LOCK_INVAL;
++ ln->ln_ev_target = NULL;
++
++ if (htree_key_list_empty(ln)) { /* not owner */
++ list_del_init(&ln->ln_alive_list);
++ goto out;
++ }
++
++ /* I'm the owner... */
++ if (list_empty(&ln->ln_alive_list)) { /* no more listener */
++ htree_key_list_del_init(ln);
++ goto out;
++ }
++
++ tmp = list_entry(ln->ln_alive_list.next,
++ struct htree_lock_node, ln_alive_list);
++
++ BUG_ON(tmp->ln_mode != HTREE_LOCK_NL);
++ htree_key_list_replace_init(ln, tmp);
++ list_del_init(&ln->ln_alive_list);
++ out:
++ htree_spin_unlock(lck->lk_head, dep);
++}
++EXPORT_SYMBOL(htree_node_stop_listen);
++
++/* release all child-locks if we have any */
++static void
++htree_node_release_all(struct htree_lock *lck)
++{
++ int i;
++
++ for (i = 0; i < lck->lk_depth; i++) {
++ if (htree_node_is_granted(lck, i))
++ htree_node_unlock(lck, i, NULL);
++ else if (htree_node_is_listening(lck, i))
++ htree_node_stop_listen(lck, i);
++ }
++}
++
++/*
++ * obtain the htree lock; it can block inside if there's a conflict
++ * with any granted or blocked lock and @wait is true.
++ * NB: ALWAYS called holding lhead::lh_lock
++ */
++static int
++htree_lock_internal(struct htree_lock *lck, int wait)
++{
++ struct htree_lock_head *lhead = lck->lk_head;
++ int granted = 0;
++ int blocked = 0;
++ int i;
++
++ for (i = 0; i < HTREE_LOCK_MAX; i++) {
++ if (lhead->lh_ngranted[i] != 0)
++ granted |= 1 << i;
++ if (lhead->lh_nblocked[i] != 0)
++ blocked |= 1 << i;
++ }
++ if ((htree_lock_compat[lck->lk_mode] & granted) != granted ||
++ (htree_lock_compat[lck->lk_mode] & blocked) != blocked) {
++ /* will block the current lock even if it just conflicts with any
++ * other blocked lock, so locks like EX won't starve */
++ if (!wait)
++ return -1;
++ lhead->lh_nblocked[lck->lk_mode]++;
++ lk_block_inc(lck->lk_mode);
++
++ lck->lk_task = current;
++ list_add_tail(&lck->lk_blocked_list, &lhead->lh_blocked_list);
++
++ set_current_state(TASK_UNINTERRUPTIBLE);
++ htree_spin_unlock(lhead, HTREE_DEP_ROOT);
++ /* wait to be given the lock */
++ if (lck->lk_task != NULL)
++ schedule();
++ /* granted, no doubt. wake up will set me RUNNING */
++ return 0; /* without lh_lock */
++ }
++ lhead->lh_ngranted[lck->lk_mode]++;
++ lk_grant_inc(lck->lk_mode);
++ return 1;
++}
++
++/* release htree lock. NB: ALWAYS called holding lhead::lh_lock */
++static void
++htree_unlock_internal(struct htree_lock *lck)
++{
++ struct htree_lock_head *lhead = lck->lk_head;
++ struct htree_lock *tmp;
++ struct htree_lock *tmp2;
++ int granted = 0;
++ int i;
++
++ BUG_ON(lhead->lh_ngranted[lck->lk_mode] == 0);
++
++ lhead->lh_ngranted[lck->lk_mode]--;
++ lck->lk_mode = HTREE_LOCK_INVAL;
++
++ for (i = 0; i < HTREE_LOCK_MAX; i++) {
++ if (lhead->lh_ngranted[i] != 0)
++ granted |= 1 << i;
++ }
++ list_for_each_entry_safe(tmp, tmp2,
++ &lhead->lh_blocked_list, lk_blocked_list) {
++ /* conflict with any granted lock? */
++ if ((htree_lock_compat[tmp->lk_mode] & granted) != granted)
++ break;
++
++ list_del_init(&tmp->lk_blocked_list);
++
++ BUG_ON(lhead->lh_nblocked[tmp->lk_mode] == 0);
++
++ lhead->lh_nblocked[tmp->lk_mode]--;
++ lhead->lh_ngranted[tmp->lk_mode]++;
++ granted |= 1 << tmp->lk_mode;
++
++ BUG_ON(tmp->lk_task == NULL);
++ wake_up_process(tmp->lk_task);
++ }
++}
++
++/* it's a wrapper of htree_lock_internal and the exported interface.
++ * It always returns 1 with the lock granted if @wait is true; it can return 0
++ * if @wait is false and the locking request can't be granted immediately */
++int
++htree_lock_try(struct htree_lock *lck, struct htree_lock_head *lhead,
++ htree_lock_mode_t mode, int wait)
++{
++ int rc;
++
++ BUG_ON(lck->lk_depth > lhead->lh_depth);
++ BUG_ON(lck->lk_head != NULL);
++ BUG_ON(lck->lk_task != NULL);
++
++ lck->lk_head = lhead;
++ lck->lk_mode = mode;
++
++ htree_spin_lock(lhead, HTREE_DEP_ROOT);
++ rc = htree_lock_internal(lck, wait);
++ if (rc != 0)
++ htree_spin_unlock(lhead, HTREE_DEP_ROOT);
++ return rc >= 0;
++}
++EXPORT_SYMBOL(htree_lock_try);
++
++/* it's a wrapper of htree_unlock_internal and the exported interface.
++ * It releases all htree_node_locks and the htree_lock */
++void
++htree_unlock(struct htree_lock *lck)
++{
++ BUG_ON(lck->lk_head == NULL);
++ BUG_ON(lck->lk_mode == HTREE_LOCK_INVAL);
++
++ htree_node_release_all(lck);
++
++ htree_spin_lock(lck->lk_head, HTREE_DEP_ROOT);
++ htree_unlock_internal(lck);
++ htree_spin_unlock(lck->lk_head, HTREE_DEP_ROOT);
++ lck->lk_head = NULL;
++ lck->lk_task = NULL;
++}
++EXPORT_SYMBOL(htree_unlock);
++
++/* change lock mode */
++void
++htree_change_mode(struct htree_lock *lck, htree_lock_mode_t mode)
++{
++ BUG_ON(lck->lk_mode == HTREE_LOCK_INVAL);
++ lck->lk_mode = mode;
++}
++EXPORT_SYMBOL(htree_change_mode);
++
++/* release the htree lock, and lock it again with a new mode.
++ * This function will first release all htree_node_locks and the htree_lock,
++ * then try to gain the htree_lock with the new @mode.
++ * It always returns 1 with the lock granted if @wait is true; it can return 0
++ * if @wait is false and the locking request can't be granted immediately */
++int
++htree_change_lock_try(struct htree_lock *lck, htree_lock_mode_t mode, int wait)
++{
++ struct htree_lock_head *lhead = lck->lk_head;
++ int rc;
++
++ BUG_ON(lhead == NULL);
++ BUG_ON(lck->lk_mode == mode);
++ BUG_ON(lck->lk_mode == HTREE_LOCK_INVAL || mode == HTREE_LOCK_INVAL);
++
++ htree_node_release_all(lck);
++
++ htree_spin_lock(lhead, HTREE_DEP_ROOT);
++ htree_unlock_internal(lck);
++ lck->lk_mode = mode;
++ rc = htree_lock_internal(lck, wait);
++ if (rc != 0)
++ htree_spin_unlock(lhead, HTREE_DEP_ROOT);
++ return rc >= 0;
++}
++EXPORT_SYMBOL(htree_change_lock_try);
++
++/* create an htree_lock head with @depth levels (number of child-locks),
++ * it is a per-resource structure */
++struct htree_lock_head *
++htree_lock_head_alloc(unsigned depth, unsigned hbits, unsigned priv)
++{
++ struct htree_lock_head *lhead;
++ int i;
++
++ if (depth > HTREE_LOCK_DEP_MAX) {
++ printk(KERN_ERR "%d is larger than max htree_lock depth %d\n",
++ depth, HTREE_LOCK_DEP_MAX);
++ return NULL;
++ }
++
++ lhead = kzalloc(offsetof(struct htree_lock_head,
++ lh_children[depth]) + priv, GFP_NOFS);
++ if (lhead == NULL)
++ return NULL;
++
++ if (hbits < HTREE_HBITS_MIN)
++ lhead->lh_hbits = HTREE_HBITS_MIN;
++ else if (hbits > HTREE_HBITS_MAX)
++ lhead->lh_hbits = HTREE_HBITS_MAX;
++ else
++ lhead->lh_hbits = hbits; /* in-range value is used as-is */
++
++ lhead->lh_lock = 0;
++ lhead->lh_depth = depth;
++ INIT_LIST_HEAD(&lhead->lh_blocked_list);
++ if (priv > 0) {
++ lhead->lh_private = (void *)lhead +
++ offsetof(struct htree_lock_head, lh_children[depth]);
++ }
++
++ for (i = 0; i < depth; i++) {
++ INIT_LIST_HEAD(&lhead->lh_children[i].lc_list);
++ lhead->lh_children[i].lc_events = HTREE_EVENT_DISABLE;
++ }
++ return lhead;
++}
++EXPORT_SYMBOL(htree_lock_head_alloc);
++
++/* free the htree_lock head */
++void
++htree_lock_head_free(struct htree_lock_head *lhead)
++{
++ int i;
++
++ BUG_ON(!list_empty(&lhead->lh_blocked_list));
++ for (i = 0; i < lhead->lh_depth; i++)
++ BUG_ON(!list_empty(&lhead->lh_children[i].lc_list));
++ kfree(lhead);
++}
++EXPORT_SYMBOL(htree_lock_head_free);
++
++/* register event callback for @events of child-lock at level @dep */
++void
++htree_lock_event_attach(struct htree_lock_head *lhead, unsigned dep,
++ unsigned events, htree_event_cb_t callback)
++{
++ BUG_ON(lhead->lh_depth <= dep);
++ lhead->lh_children[dep].lc_events = events;
++ lhead->lh_children[dep].lc_callback = callback;
++}
++EXPORT_SYMBOL(htree_lock_event_attach);
++
++/* allocate an htree_lock, which is a per-thread structure; @pbytes is the
++ * number of extra bytes reserved as private data for the caller */
++struct htree_lock *
++htree_lock_alloc(unsigned depth, unsigned pbytes)
++{
++ struct htree_lock *lck;
++ int i = offsetof(struct htree_lock, lk_nodes[depth]);
++
++ if (depth > HTREE_LOCK_DEP_MAX) {
++ printk(KERN_ERR "%d is larger than max htree_lock depth %d\n",
++ depth, HTREE_LOCK_DEP_MAX);
++ return NULL;
++ }
++ lck = kzalloc(i + pbytes, GFP_NOFS);
++ if (lck == NULL)
++ return NULL;
++
++ if (pbytes != 0)
++ lck->lk_private = (void *)lck + i;
++ lck->lk_mode = HTREE_LOCK_INVAL;
++ lck->lk_depth = depth;
++ INIT_LIST_HEAD(&lck->lk_blocked_list);
++
++ for (i = 0; i < depth; i++) {
++ struct htree_lock_node *node = &lck->lk_nodes[i];
++
++ node->ln_mode = HTREE_LOCK_INVAL;
++ INIT_LIST_HEAD(&node->ln_major_list);
++ INIT_LIST_HEAD(&node->ln_minor_list);
++ INIT_LIST_HEAD(&node->ln_alive_list);
++ INIT_LIST_HEAD(&node->ln_blocked_list);
++ INIT_LIST_HEAD(&node->ln_granted_list);
++ }
++
++ return lck;
++}
++EXPORT_SYMBOL(htree_lock_alloc);
++
++/* free htree_lock node */
++void
++htree_lock_free(struct htree_lock *lck)
++{
++ BUG_ON(lck->lk_mode != HTREE_LOCK_INVAL);
++ kfree(lck);
++}
++EXPORT_SYMBOL(htree_lock_free);
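The HTREE_LOCK_COMPAT_* values defined near the top of this file give, per mode, the set of modes it can coexist with. Expanded into a matrix ('+' means the two modes may be granted at the same time) they read as follows; this table is only a reading aid derived from those definitions, not part of the patch:

	          EX   PW   PR   CW   CR
	    EX     -    -    -    -    -
	    PW     -    -    -    -    +
	    PR     -    -    +    -    +
	    CW     -    -    -    +    +
	    CR     -    +    +    +    +

In other words, EX excludes everything, CR coexists with everything except EX, PR and PW act like shared/exclusive against each other while both tolerating CR, and CW coexists only with CW and CR.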
+Index: linux-3.10.0-123.13.2.el7.x86_64/fs/ext4/ext4.h
+===================================================================
+--- linux-3.10.0-123.13.2.el7.x86_64.orig/fs/ext4/ext4.h
++++ linux-3.10.0-123.13.2.el7.x86_64/fs/ext4/ext4.h
+@@ -27,6 +27,7 @@
+ #include <linux/mutex.h>
+ #include <linux/timer.h>
+ #include <linux/wait.h>
++#include <linux/htree_lock.h>
+ #include <linux/blockgroup_lock.h>
+ #include <linux/percpu_counter.h>
+ #include <linux/ratelimit.h>
+@@ -810,6 +811,9 @@ struct ext4_inode_info {
+ __u32 i_dtime;
+ ext4_fsblk_t i_file_acl;
+
++ /* following fields for parallel directory operations -bzzz */
++ struct semaphore i_append_sem;
++
+ /*
+ * i_block_group is the number of the block group which contains
+ * this file's inode. Constant across the lifetime of the inode,
+@@ -1536,6 +1540,7 @@ static inline void ext4_clear_state_flag
+ EXT4_FEATURE_INCOMPAT_META_BG| \
+ EXT4_FEATURE_INCOMPAT_EXTENTS| \
+ EXT4_FEATURE_INCOMPAT_64BIT| \
++ EXT4_FEATURE_INCOMPAT_LARGEDIR|\
+ EXT4_FEATURE_INCOMPAT_FLEX_BG| \
+ EXT4_FEATURE_INCOMPAT_EA_INODE| \
+ EXT4_FEATURE_INCOMPAT_MMP | \
+@@ -1954,6 +1959,76 @@ struct mmpd_data {
+ # define NORET_TYPE /**/
+ # define ATTRIB_NORET __attribute__((noreturn))
+ # define NORET_AND noreturn,
++/* htree levels for ext4 */
++#define EXT4_HTREE_LEVEL_COMPAT 2
++#define EXT4_HTREE_LEVEL 3
++
++static inline int
++ext4_dir_htree_level(struct super_block *sb)
++{
++ return EXT4_HAS_INCOMPAT_FEATURE(sb, EXT4_FEATURE_INCOMPAT_LARGEDIR) ?
++ EXT4_HTREE_LEVEL : EXT4_HTREE_LEVEL_COMPAT;
++}
++
++/* assume name-hash is protected by upper layer */
++#define EXT4_HTREE_LOCK_HASH 0
++
++enum ext4_pdo_lk_types {
++#if EXT4_HTREE_LOCK_HASH
++ EXT4_LK_HASH,
++#endif
++ EXT4_LK_DX, /* index block */
++ EXT4_LK_DE, /* directory entry block */
++ EXT4_LK_SPIN, /* spinlock */
++ EXT4_LK_MAX,
++};
++
++/* read-only bit */
++#define EXT4_LB_RO(b) (1 << (b))
++/* read + write, high bits for writer */
++#define EXT4_LB_RW(b) ((1 << (b)) | (1 << (EXT4_LK_MAX + (b))))
++
++enum ext4_pdo_lock_bits {
++ /* DX lock bits */
++ EXT4_LB_DX_RO = EXT4_LB_RO(EXT4_LK_DX),
++ EXT4_LB_DX = EXT4_LB_RW(EXT4_LK_DX),
++ /* DE lock bits */
++ EXT4_LB_DE_RO = EXT4_LB_RO(EXT4_LK_DE),
++ EXT4_LB_DE = EXT4_LB_RW(EXT4_LK_DE),
++ /* DX spinlock bits */
++ EXT4_LB_SPIN_RO = EXT4_LB_RO(EXT4_LK_SPIN),
++ EXT4_LB_SPIN = EXT4_LB_RW(EXT4_LK_SPIN),
++ /* accurate searching */
++ EXT4_LB_EXACT = EXT4_LB_RO(EXT4_LK_MAX << 1),
++};
++
++enum ext4_pdo_lock_opc {
++ /* external */
++ EXT4_HLOCK_READDIR = (EXT4_LB_DE_RO | EXT4_LB_DX_RO),
++ EXT4_HLOCK_LOOKUP = (EXT4_LB_DE_RO | EXT4_LB_SPIN_RO |
++ EXT4_LB_EXACT),
++ EXT4_HLOCK_DEL = (EXT4_LB_DE | EXT4_LB_SPIN_RO |
++ EXT4_LB_EXACT),
++ EXT4_HLOCK_ADD = (EXT4_LB_DE | EXT4_LB_SPIN_RO),
++
++ /* internal */
++ EXT4_HLOCK_LOOKUP_SAFE = (EXT4_LB_DE_RO | EXT4_LB_DX_RO |
++ EXT4_LB_EXACT),
++ EXT4_HLOCK_DEL_SAFE = (EXT4_LB_DE | EXT4_LB_DX_RO | EXT4_LB_EXACT),
++ EXT4_HLOCK_SPLIT = (EXT4_LB_DE | EXT4_LB_DX | EXT4_LB_SPIN),
++};
++
++extern struct htree_lock_head *ext4_htree_lock_head_alloc(unsigned hbits);
++#define ext4_htree_lock_head_free(lhead) htree_lock_head_free(lhead)
++
++extern struct htree_lock *ext4_htree_lock_alloc(void);
++#define ext4_htree_lock_free(lck) htree_lock_free(lck)
++
++extern void ext4_htree_lock(struct htree_lock *lck,
++ struct htree_lock_head *lhead,
++ struct inode *dir, unsigned flags);
++#define ext4_htree_unlock(lck) htree_unlock(lck)
++
+
+ struct ext4_xattr_ino_array {
+ unsigned int xia_count; /* # of used item in the array */
+@@ -2050,9 +2125,17 @@ void ext4_insert_dentry(struct inode *in
+ const char *name, int namelen, void *data);
+ static inline void ext4_update_dx_flag(struct inode *inode)
+ {
++ /* Disable it for ldiskfs, because going from a DX directory to
++ * a non-DX directory while it is in use will completely break
++ * the htree-locking.
++ * If we really want to support this operation in the future,
++ * we would need to exclusively lock the directory here, which would
++ * increase the complexity of the code */
++#if 0
+ if (!EXT4_HAS_COMPAT_FEATURE(inode->i_sb,
+ EXT4_FEATURE_COMPAT_DIR_INDEX))
+ ext4_clear_inode_flag(inode, EXT4_INODE_INDEX);
++#endif
+ }
+ static unsigned char ext4_filetype_table[] = {
+ DT_UNKNOWN, DT_REG, DT_DIR, DT_CHR, DT_BLK, DT_FIFO, DT_SOCK, DT_LNK
+@@ -2212,14 +2295,14 @@ extern int ext4_htree_fill_tree(struct f
+ extern struct inode *ext4_create_inode(handle_t *handle,
+ struct inode * dir, int mode);
+ extern int ext4_add_entry(handle_t *handle, struct dentry *dentry,
+- struct inode *inode);
++ struct inode *inode, struct htree_lock *lck);
+ extern int ext4_delete_entry(handle_t *handle, struct inode * dir,
+ struct ext4_dir_entry_2 * de_del,
+ struct buffer_head * bh);
+ extern struct buffer_head * ext4_find_entry(struct inode *dir,
+ const struct qstr *d_name,
+ struct ext4_dir_entry_2 ** res_dir,
+- int *inlined);
++ int *inlined, struct htree_lock *lck);
+ extern int ext4_add_dot_dotdot(handle_t *handle, struct inode *dir,
+ struct inode *inode, const void *, const void *);
+ extern int search_dir(struct buffer_head *bh,
+@@ -2382,13 +2465,15 @@ static inline void ext4_r_blocks_count_s
+ es->s_r_blocks_count_hi = cpu_to_le32(blk >> 32);
+ }
+
+-static inline loff_t ext4_isize(struct ext4_inode *raw_inode)
++static inline loff_t ext4_isize(struct super_block *sb,
++ struct ext4_inode *raw_inode)
+ {
+- if (S_ISREG(le16_to_cpu(raw_inode->i_mode)))
++ if (EXT4_HAS_INCOMPAT_FEATURE(sb, EXT4_FEATURE_INCOMPAT_LARGEDIR) ||
++ S_ISREG(le16_to_cpu(raw_inode->i_mode)))
+ return ((loff_t)le32_to_cpu(raw_inode->i_size_high) << 32) |
+ le32_to_cpu(raw_inode->i_size_lo);
+- else
+- return (loff_t) le32_to_cpu(raw_inode->i_size_lo);
++
++ return (loff_t) le32_to_cpu(raw_inode->i_size_lo);
+ }
+
+ static inline void ext4_isize_set(struct ext4_inode *raw_inode, loff_t i_size)
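The declarations added to ext4.h above form the interface that an external user of parallel directory operations drives (in practice Lustre's server-side osd layer): one lock head per directory, one htree_lock per thread or operation, lock with one of the EXT4_HLOCK_* opcodes, and pass the lock down to the now lock-aware lookup/add paths. With EXT4_HTREE_LOCK_HASH defined to 0, the levels are EXT4_LK_DX=0, EXT4_LK_DE=1, EXT4_LK_SPIN=2 and EXT4_LK_MAX=3, so for instance EXT4_HLOCK_LOOKUP expands to EXT4_LB_DE_RO | EXT4_LB_SPIN_RO | EXT4_LB_EXACT = 0x02 | 0x04 | 0x40 = 0x46. The sketch below shows the expected calling sequence for a shared lookup; it is illustrative only and not part of the patch: the function name is invented, and a real caller caches the lock head on the directory instead of allocating it per call.

	static struct buffer_head *pdirop_lookup_sketch(struct inode *dir,
							const struct qstr *name,
							struct ext4_dir_entry_2 **de)
	{
		struct htree_lock_head *lhead;
		struct htree_lock *lck;
		struct buffer_head *bh;

		lhead = ext4_htree_lock_head_alloc(0);	/* hbits below the minimum get clamped */
		if (lhead == NULL)
			return ERR_PTR(-ENOMEM);
		lck = ext4_htree_lock_alloc();
		if (lck == NULL) {
			ext4_htree_lock_head_free(lhead);
			return ERR_PTR(-ENOMEM);
		}

		/* shared lookup: child locks are taken only on the probed index/leaf
		 * blocks instead of excluding all other directory operations */
		ext4_htree_lock(lck, lhead, dir, EXT4_HLOCK_LOOKUP);
		bh = ext4_find_entry(dir, name, de, NULL, lck);
		ext4_htree_unlock(lck);

		ext4_htree_lock_free(lck);
		ext4_htree_lock_head_free(lhead);
		return bh;
	}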
+Index: linux-3.10.0-123.13.2.el7.x86_64/fs/ext4/namei.c
+===================================================================
+--- linux-3.10.0-123.13.2.el7.x86_64.orig/fs/ext4/namei.c
++++ linux-3.10.0-123.13.2.el7.x86_64/fs/ext4/namei.c
+@@ -53,6 +53,7 @@ struct buffer_head *ext4_append(handle_t
+ ext4_lblk_t *block)
+ {
+ struct buffer_head *bh;
++ struct ext4_inode_info *ei = EXT4_I(inode);
+ int err = 0;
+
+ if (unlikely(EXT4_SB(inode->i_sb)->s_max_dir_size_kb &&
+@@ -60,15 +61,22 @@ struct buffer_head *ext4_append(handle_t
+ EXT4_SB(inode->i_sb)->s_max_dir_size_kb)))
+ return ERR_PTR(-ENOSPC);
+
++ /* with parallel dir operations all appends
++ * have to be serialized -bzzz */
++ down(&ei->i_append_sem);
++
+ *block = inode->i_size >> inode->i_sb->s_blocksize_bits;
+
+ bh = ext4_bread(handle, inode, *block, 1, &err);
+- if (!bh)
++ if (!bh) {
++ up(&ei->i_append_sem);
+ return ERR_PTR(err);
++ }
+ inode->i_size += inode->i_sb->s_blocksize;
+ EXT4_I(inode)->i_disksize = inode->i_size;
+ BUFFER_TRACE(bh, "get_write_access");
+ err = ext4_journal_get_write_access(handle, bh);
++ up(&ei->i_append_sem);
+ if (err) {
+ brelse(bh);
+ ext4_std_error(inode->i_sb, err);
+@@ -246,7 +254,7 @@ static struct dx_frame *dx_probe(const s
+ struct inode *dir,
+ struct dx_hash_info *hinfo,
+ struct dx_frame *frame,
+- int *err);
++ struct htree_lock *lck, int *err);
+ static void dx_release(struct dx_frame *frames);
+ static int dx_make_map(struct ext4_dir_entry_2 *de, unsigned blocksize,
+ struct dx_hash_info *hinfo, struct dx_map_entry map[]);
+@@ -259,13 +267,13 @@ static void dx_insert_block(struct dx_fr
+ static int ext4_htree_next_block(struct inode *dir, __u32 hash,
+ struct dx_frame *frame,
+ struct dx_frame *frames,
+- __u32 *start_hash);
++ __u32 *start_hash, struct htree_lock *lck);
+ static struct buffer_head * ext4_dx_find_entry(struct inode *dir,
+ const struct qstr *d_name,
+ struct ext4_dir_entry_2 **res_dir,
+- int *err);
++ struct htree_lock *lck, int *err);
+ static int ext4_dx_add_entry(handle_t *handle, struct dentry *dentry,
+- struct inode *inode);
++ struct inode *inode, struct htree_lock *lck);
+
+ /* checksumming functions */
+ void initialize_dirent_tail(struct ext4_dir_entry_tail *t,
+@@ -517,7 +525,7 @@ struct dx_root_info * dx_get_dx_info(str
+
+ static inline ext4_lblk_t dx_get_block(struct dx_entry *entry)
+ {
+- return le32_to_cpu(entry->block) & 0x00ffffff;
++ return le32_to_cpu(entry->block) & 0x0fffffff;
+ }
+
+ static inline void dx_set_block(struct dx_entry *entry, ext4_lblk_t value)
+@@ -667,6 +675,223 @@ struct stats dx_show_entries(struct dx_h
+ }
+ #endif /* DX_DEBUG */
+
++/* private data for htree_lock */
++struct ext4_dir_lock_data {
++ unsigned ld_flags; /* bits-map for lock types */
++ unsigned ld_count; /* # entries of the last DX block */
++ struct dx_entry ld_at_entry; /* copy of leaf dx_entry */
++ struct dx_entry *ld_at; /* position of leaf dx_entry */
++};
++
++#define ext4_htree_lock_data(l) ((struct ext4_dir_lock_data *)(l)->lk_private)
++
++/* NB: ext4_lblk_t is 32 bits so we use high bits to identify invalid blk */
++#define EXT4_HTREE_NODE_CHANGED (0xcafeULL << 32)
++
++static void ext4_htree_event_cb(void *target, void *event)
++{
++ u64 *block = (u64 *)target;
++
++ if (*block == dx_get_block((struct dx_entry *)event))
++ *block = EXT4_HTREE_NODE_CHANGED;
++}
++
++struct htree_lock_head *ext4_htree_lock_head_alloc(unsigned hbits)
++{
++ struct htree_lock_head *lhead;
++
++ lhead = htree_lock_head_alloc(EXT4_LK_MAX, hbits, 0);
++ if (lhead != NULL) {
++ htree_lock_event_attach(lhead, EXT4_LK_SPIN, HTREE_EVENT_WR,
++ ext4_htree_event_cb);
++ }
++ return lhead;
++}
++EXPORT_SYMBOL(ext4_htree_lock_head_alloc);
++
++struct htree_lock *ext4_htree_lock_alloc(void)
++{
++ return htree_lock_alloc(EXT4_LK_MAX,
++ sizeof(struct ext4_dir_lock_data));
++}
++EXPORT_SYMBOL(ext4_htree_lock_alloc);
++
++static htree_lock_mode_t ext4_htree_mode(unsigned flags)
++{
++ switch (flags) {
++ default: /* 0 or unknown flags require EX lock */
++ return HTREE_LOCK_EX;
++ case EXT4_HLOCK_READDIR:
++ return HTREE_LOCK_PR;
++ case EXT4_HLOCK_LOOKUP:
++ return HTREE_LOCK_CR;
++ case EXT4_HLOCK_DEL:
++ case EXT4_HLOCK_ADD:
++ return HTREE_LOCK_CW;
++ }
++}
++
++/* return PR for read-only operations, otherwise return EX */
++static inline htree_lock_mode_t ext4_htree_safe_mode(unsigned flags)
++{
++ int writer = (flags & EXT4_LB_DE) == EXT4_LB_DE;
++
++ /* 0 requires EX lock */
++ return (flags == 0 || writer) ? HTREE_LOCK_EX : HTREE_LOCK_PR;
++}
++
++static int ext4_htree_safe_locked(struct htree_lock *lck)
++{
++ int writer;
++
++ if (lck == NULL || lck->lk_mode == HTREE_LOCK_EX)
++ return 1;
++
++ writer = (ext4_htree_lock_data(lck)->ld_flags & EXT4_LB_DE) ==
++ EXT4_LB_DE;
++ if (writer) /* all readers & writers are excluded? */
++ return lck->lk_mode == HTREE_LOCK_EX;
++
++ /* all writers are excluded? */
++ return lck->lk_mode == HTREE_LOCK_PR ||
++ lck->lk_mode == HTREE_LOCK_PW ||
++ lck->lk_mode == HTREE_LOCK_EX;
++}
++
++/* relock htree_lock with EX mode if it's a change operation, otherwise
++ * relock it with PR mode. It's a no-op if PDO is disabled. */
++static void ext4_htree_safe_relock(struct htree_lock *lck)
++{
++ if (!ext4_htree_safe_locked(lck)) {
++ unsigned flags = ext4_htree_lock_data(lck)->ld_flags;
++
++ htree_change_lock(lck, ext4_htree_safe_mode(flags));
++ }
++}
++
++void ext4_htree_lock(struct htree_lock *lck, struct htree_lock_head *lhead,
++ struct inode *dir, unsigned flags)
++{
++ htree_lock_mode_t mode = is_dx(dir) ? ext4_htree_mode(flags) :
++ ext4_htree_safe_mode(flags);
++
++ ext4_htree_lock_data(lck)->ld_flags = flags;
++ htree_lock(lck, lhead, mode);
++ if (!is_dx(dir))
++ ext4_htree_safe_relock(lck); /* make sure it's safe locked */
++}
++EXPORT_SYMBOL(ext4_htree_lock);
++
++static int ext4_htree_node_lock(struct htree_lock *lck, struct dx_entry *at,
++ unsigned lmask, int wait, void *ev)
++{
++ u32 key = (at == NULL) ? 0 : dx_get_block(at);
++ u32 mode;
++
++ /* NOOP if htree is well protected or caller doesn't require the lock */
++ if (ext4_htree_safe_locked(lck) ||
++ !(ext4_htree_lock_data(lck)->ld_flags & lmask))
++ return 1;
++
++ mode = (ext4_htree_lock_data(lck)->ld_flags & lmask) == lmask ?
++ HTREE_LOCK_PW : HTREE_LOCK_PR;
++ while (1) {
++ if (htree_node_lock_try(lck, mode, key, ffz(~lmask), wait, ev))
++ return 1;
++ if (!(lmask & EXT4_LB_SPIN)) /* not a spinlock */
++ return 0;
++ cpu_relax(); /* spin until granted */
++ }
++}
++
++static int ext4_htree_node_locked(struct htree_lock *lck, unsigned lmask)
++{
++ return ext4_htree_safe_locked(lck) ||
++ htree_node_is_granted(lck, ffz(~lmask));
++}
++
++static void ext4_htree_node_unlock(struct htree_lock *lck,
++ unsigned lmask, void *buf)
++{
++ /* NB: it's safe to call multiple times, even if it's not locked */
++ if (!ext4_htree_safe_locked(lck) &&
++ htree_node_is_granted(lck, ffz(~lmask)))
++ htree_node_unlock(lck, ffz(~lmask), buf);
++}
++
++#define ext4_htree_dx_lock(lck, key) \
++ ext4_htree_node_lock(lck, key, EXT4_LB_DX, 1, NULL)
++#define ext4_htree_dx_lock_try(lck, key) \
++ ext4_htree_node_lock(lck, key, EXT4_LB_DX, 0, NULL)
++#define ext4_htree_dx_unlock(lck) \
++ ext4_htree_node_unlock(lck, EXT4_LB_DX, NULL)
++#define ext4_htree_dx_locked(lck) \
++ ext4_htree_node_locked(lck, EXT4_LB_DX)
++
++static void ext4_htree_dx_need_lock(struct htree_lock *lck)
++{
++ struct ext4_dir_lock_data *ld;
++
++ if (ext4_htree_safe_locked(lck))
++ return;
++
++ ld = ext4_htree_lock_data(lck);
++ switch (ld->ld_flags) {
++ default:
++ return;
++ case EXT4_HLOCK_LOOKUP:
++ ld->ld_flags = EXT4_HLOCK_LOOKUP_SAFE;
++ return;
++ case EXT4_HLOCK_DEL:
++ ld->ld_flags = EXT4_HLOCK_DEL_SAFE;
++ return;
++ case EXT4_HLOCK_ADD:
++ ld->ld_flags = EXT4_HLOCK_SPLIT;
++ return;
++ }
++}
++
++#define ext4_htree_de_lock(lck, key) \
++ ext4_htree_node_lock(lck, key, EXT4_LB_DE, 1, NULL)
++#define ext4_htree_de_unlock(lck) \
++ ext4_htree_node_unlock(lck, EXT4_LB_DE, NULL)
++
++#define ext4_htree_spin_lock(lck, key, event) \
++ ext4_htree_node_lock(lck, key, EXT4_LB_SPIN, 0, event)
++#define ext4_htree_spin_unlock(lck) \
++ ext4_htree_node_unlock(lck, EXT4_LB_SPIN, NULL)
++#define ext4_htree_spin_unlock_listen(lck, p) \
++ ext4_htree_node_unlock(lck, EXT4_LB_SPIN, p)
++
++static void ext4_htree_spin_stop_listen(struct htree_lock *lck)
++{
++ if (!ext4_htree_safe_locked(lck) &&
++ htree_node_is_listening(lck, ffz(~EXT4_LB_SPIN)))
++ htree_node_stop_listen(lck, ffz(~EXT4_LB_SPIN));
++}
++
++enum {
++ DX_HASH_COL_IGNORE, /* ignore collision while probing frames */
++ DX_HASH_COL_YES, /* there is collision and it does matter */
++ DX_HASH_COL_NO, /* there is no collision */
++};
++
++static int dx_probe_hash_collision(struct htree_lock *lck,
++ struct dx_entry *entries,
++ struct dx_entry *at, u32 hash)
++{
++ if (!(ext4_htree_lock_data(lck)->ld_flags & EXT4_LB_EXACT)) {
++ return DX_HASH_COL_IGNORE; /* don't care about collision */
++
++ } else if (at == entries + dx_get_count(entries) - 1) {
++ return DX_HASH_COL_IGNORE; /* not in any leaf of this DX */
++
++ } else { /* hash collision? */
++ return ((dx_get_hash(at + 1) & ~1) == hash) ?
++ DX_HASH_COL_YES : DX_HASH_COL_NO;
++ }
++}
++
+ /*
+ * Probe for a directory leaf block to search.
+ *
+@@ -678,16 +903,17 @@ struct stats dx_show_entries(struct dx_h
+ */
+ static struct dx_frame *
+ dx_probe(const struct qstr *d_name, struct inode *dir,
+- struct dx_hash_info *hinfo, struct dx_frame *frame_in, int *err)
++ struct dx_hash_info *hinfo, struct dx_frame *frame_in,
++ struct htree_lock *lck, int *err)
+ {
+ unsigned count, indirect;
+- struct dx_entry *at, *entries, *p, *q, *m;
++ struct dx_entry *at, *entries, *p, *q, *m, *dx = NULL;
+ struct dx_root_info * info;
+ struct buffer_head *bh;
+ struct dx_frame *frame = frame_in;
+ u32 hash;
+
+- frame->bh = NULL;
++ memset(frame_in, 0, EXT4_HTREE_LEVEL * sizeof(frame_in[0]));
+ bh = ext4_read_dirblock(dir, 0, INDEX);
+ if (IS_ERR(bh)) {
+ *err = PTR_ERR(bh);
+@@ -720,9 +946,16 @@ dx_probe(const struct qstr *d_name, stru
+ goto fail;
+ }
+
+- if ((indirect = info->indirect_levels) > 1) {
+- ext4_warning(dir->i_sb, "Unimplemented inode hash depth: %#06x",
+- info->indirect_levels);
++ indirect = info->indirect_levels;
++ if (indirect >= ext4_dir_htree_level(dir->i_sb)) {
++ ext4_warning(dir->i_sb,
++ "Directory (ino: %lu) htree depth %#06x exceed "
++ "supported value", dir->i_ino,
++ ext4_dir_htree_level(dir->i_sb));
++ if (ext4_dir_htree_level(dir->i_sb) < EXT4_HTREE_LEVEL) {
++ ext4_warning(dir->i_sb, "Enable large directory "
++ "feature to access it");
++ }
+ brelse(bh);
+ *err = ERR_BAD_DX_DIR;
+ goto fail;
+@@ -742,8 +975,15 @@ dx_probe(const struct qstr *d_name, stru
+ dxtrace(printk("Look up %x", hash));
+ while (1)
+ {
++ if (indirect == 0) { /* the last index level */
++ /* NB: ext4_htree_dx_lock() could be noop if
++ * DX-lock flag is not set for current operation */
++ ext4_htree_dx_lock(lck, dx);
++ ext4_htree_spin_lock(lck, dx, NULL);
++ }
+ count = dx_get_count(entries);
+- if (!count || count > dx_get_limit(entries)) {
++ if (count == 0 || count > dx_get_limit(entries)) {
++ ext4_htree_spin_unlock(lck); /* release spin */
+ ext4_warning(dir->i_sb,
+ "dx entry: no count or count > limit");
+ brelse(bh);
+@@ -784,7 +1024,70 @@ dx_probe(const struct qstr *d_name, stru
+ frame->bh = bh;
+ frame->entries = entries;
+ frame->at = at;
+- if (!indirect--) return frame;
++
++ if (indirect == 0) { /* the last index level */
++ struct ext4_dir_lock_data *ld;
++ u64 myblock;
++
++ /* By default we only lock DE-block, however, we will
++ * also lock the last level DX-block if:
++ * a) there is hash collision
++ * we will set DX-lock flag (a few lines below)
++ * and redo to lock DX-block
++ * see detail in dx_probe_hash_collision()
++ * b) it's a retry from splitting
++ * we need to lock the last level DX-block so nobody
++ * else can split any leaf blocks under the same
++ * DX-block, see detail in ext4_dx_add_entry()
++ */
++ if (ext4_htree_dx_locked(lck)) {
++ /* DX-block is locked, just lock DE-block
++ * and return */
++ ext4_htree_spin_unlock(lck);
++ if (!ext4_htree_safe_locked(lck))
++ ext4_htree_de_lock(lck, frame->at);
++ return frame;
++ }
++ /* it's pdirop and no DX lock */
++ if (dx_probe_hash_collision(lck, entries, at, hash) ==
++ DX_HASH_COL_YES) {
++ /* found hash collision, set DX-lock flag
++ * and retry to obtain DX-lock */
++ ext4_htree_spin_unlock(lck);
++ ext4_htree_dx_need_lock(lck);
++ continue;
++ }
++ ld = ext4_htree_lock_data(lck);
++ /* because I don't lock DX, @at can't be trusted
++ * after I release the spinlock, so I have to save it */
++ ld->ld_at = at;
++ ld->ld_at_entry = *at;
++ ld->ld_count = dx_get_count(entries);
++
++ frame->at = &ld->ld_at_entry;
++ myblock = dx_get_block(at);
++
++ /* NB: ordering locking */
++ ext4_htree_spin_unlock_listen(lck, &myblock);
++ /* another thread can split this DE-block because:
++ * a) I don't have the lock for the DE-block yet
++ * b) I released the spinlock on the DX-block
++ * if that happens I can detect it by listening for the
++ * splitting event on this DE-block */
++ ext4_htree_de_lock(lck, frame->at);
++ ext4_htree_spin_stop_listen(lck);
++
++ if (myblock == EXT4_HTREE_NODE_CHANGED) {
++ /* someone split this DE-block before
++ * I locked it, I need to retry and lock
++ * valid DE-block */
++ ext4_htree_de_unlock(lck);
++ continue;
++ }
++ return frame;
++ }
++ dx = at;
++ indirect--;
+ bh = ext4_read_dirblock(dir, dx_get_block(at), INDEX);
+ if (IS_ERR(bh)) {
+ *err = PTR_ERR(bh);
+@@ -818,13 +1121,18 @@ fail:
+ static void dx_release (struct dx_frame *frames)
+ {
+ struct dx_root_info *info;
++ int i;
++
+ if (frames[0].bh == NULL)
+ return;
+
+ info = dx_get_dx_info((struct ext4_dir_entry_2*)frames[0].bh->b_data);
+- if (info->indirect_levels)
+- brelse(frames[1].bh);
+- brelse(frames[0].bh);
++ for (i = 0; i <= info->indirect_levels; i++) {
++ if (frames[i].bh == NULL)
++ break;
++ brelse(frames[i].bh);
++ frames[i].bh = NULL;
++ }
+ }
+
+ /*
+@@ -847,7 +1155,7 @@ static void dx_release (struct dx_frame
+ static int ext4_htree_next_block(struct inode *dir, __u32 hash,
+ struct dx_frame *frame,
+ struct dx_frame *frames,
+- __u32 *start_hash)
++ __u32 *start_hash, struct htree_lock *lck)
+ {
+ struct dx_frame *p;
+ struct buffer_head *bh;
+@@ -862,12 +1170,22 @@ static int ext4_htree_next_block(struct
+ * this loop, num_frames indicates the number of interior
+ * nodes need to be read.
+ */
++ ext4_htree_de_unlock(lck);
+ while (1) {
+- if (++(p->at) < p->entries + dx_get_count(p->entries))
+- break;
++ if (num_frames > 0 || ext4_htree_dx_locked(lck)) {
++ /* num_frames > 0 :
++ * DX block
++ * ext4_htree_dx_locked:
++ * frame->at is reliable pointer returned by dx_probe,
++ * otherwise dx_probe already knew no collision */
++ if (++(p->at) < p->entries + dx_get_count(p->entries))
++ break;
++ }
+ if (p == frames)
+ return 0;
+ num_frames++;
++ if (num_frames == 1)
++ ext4_htree_dx_unlock(lck);
+ p--;
+ }
+
+@@ -890,6 +1208,13 @@ static int ext4_htree_next_block(struct
+ * block so no check is necessary
+ */
+ while (num_frames--) {
++ if (num_frames == 0) {
++ /* it's not always necessary, we just don't want to
++ * detect hash collision again */
++ ext4_htree_dx_need_lock(lck);
++ ext4_htree_dx_lock(lck, p->at);
++ }
++
+ bh = ext4_read_dirblock(dir, dx_get_block(p->at), INDEX);
+ if (IS_ERR(bh))
+ return PTR_ERR(bh);
+@@ -898,6 +1223,7 @@ static int ext4_htree_next_block(struct
+ p->bh = bh;
+ p->at = p->entries = ((struct dx_node *) bh->b_data)->entries;
+ }
++ ext4_htree_de_lock(lck, p->at);
+ return 1;
+ }
+
+@@ -966,7 +1292,7 @@ int ext4_htree_fill_tree(struct file *di
+ {
+ struct dx_hash_info hinfo;
+ struct ext4_dir_entry_2 *de;
+- struct dx_frame frames[2], *frame;
++ struct dx_frame frames[EXT4_HTREE_LEVEL], *frame;
+ struct inode *dir;
+ ext4_lblk_t block;
+ int count = 0;
+@@ -1000,10 +1326,10 @@ int ext4_htree_fill_tree(struct file *di
+ }
+ hinfo.hash = start_hash;
+ hinfo.minor_hash = 0;
+- frame = dx_probe(NULL, dir, &hinfo, frames, &err);
++ /* assume it's PR locked */
++ frame = dx_probe(NULL, dir, &hinfo, frames, NULL, &err);
+ if (!frame)
+ return err;
+-
+ /* Add '.' and '..' from the htree header */
+ if (!start_hash && !start_minor_hash) {
+ de = (struct ext4_dir_entry_2 *) frames[0].bh->b_data;
+@@ -1030,7 +1356,7 @@ int ext4_htree_fill_tree(struct file *di
+ count += ret;
+ hashval = ~0;
+ ret = ext4_htree_next_block(dir, HASH_NB_ALWAYS,
+- frame, frames, &hashval);
++ frame, frames, &hashval, NULL);
+ *next_hash = hashval;
+ if (ret < 0) {
+ err = ret;
+@@ -1226,7 +1552,7 @@ static int is_dx_internal_node(struct in
+ struct buffer_head * ext4_find_entry(struct inode *dir,
+ const struct qstr *d_name,
+ struct ext4_dir_entry_2 **res_dir,
+- int *inlined)
++ int *inlined, struct htree_lock *lck)
+ {
+ struct super_block *sb;
+ struct buffer_head *bh_use[NAMEI_RA_SIZE];
+@@ -1270,7 +1596,7 @@ struct buffer_head * ext4_find_entry(str
+ goto restart;
+ }
+ if (is_dx(dir)) {
+- bh = ext4_dx_find_entry(dir, d_name, res_dir, &err);
++ bh = ext4_dx_find_entry(dir, d_name, res_dir, lck, &err);
+ /*
+ * On success, or if the error was file not found,
+ * return. Otherwise, fall back to doing a search the
+@@ -1280,6 +1606,7 @@ struct buffer_head * ext4_find_entry(str
+ return bh;
+ dxtrace(printk(KERN_DEBUG "ext4_find_entry: dx failed, "
+ "falling back\n"));
++ ext4_htree_safe_relock(lck);
+ }
+ nblocks = dir->i_size >> EXT4_BLOCK_SIZE_BITS(sb);
+ start = EXT4_I(dir)->i_dir_start_lookup;
+@@ -1369,17 +1696,19 @@ cleanup_and_exit:
+ }
+ EXPORT_SYMBOL(ext4_find_entry);
+
+-static struct buffer_head * ext4_dx_find_entry(struct inode *dir, const struct qstr *d_name,
+- struct ext4_dir_entry_2 **res_dir, int *err)
++static struct buffer_head * ext4_dx_find_entry(struct inode *dir,
++ const struct qstr *d_name,
++ struct ext4_dir_entry_2 **res_dir,
++ struct htree_lock *lck, int *err)
+ {
+ struct super_block * sb = dir->i_sb;
+ struct dx_hash_info hinfo;
+- struct dx_frame frames[2], *frame;
++ struct dx_frame frames[EXT4_HTREE_LEVEL], *frame;
+ struct buffer_head *bh;
+ ext4_lblk_t block;
+ int retval;
+
+- if (!(frame = dx_probe(d_name, dir, &hinfo, frames, err)))
++ if (!(frame = dx_probe(d_name, dir, &hinfo, frames, lck, err)))
+ return NULL;
+ do {
+ block = dx_get_block(frame->at);
+@@ -1403,7 +1732,7 @@ static struct buffer_head * ext4_dx_find
+
+ /* Check to see if we should continue to search */
+ retval = ext4_htree_next_block(dir, hinfo.hash, frame,
+- frames, NULL);
++ frames, NULL, lck);
+ if (retval < 0) {
+ ext4_warning(sb,
+ "error reading index page in directory #%lu",
+@@ -1429,7 +1758,7 @@ static struct dentry *ext4_lookup(struct
+ if (dentry->d_name.len > EXT4_NAME_LEN)
+ return ERR_PTR(-ENAMETOOLONG);
+
+- bh = ext4_find_entry(dir, &dentry->d_name, &de, NULL);
++ bh = ext4_find_entry(dir, &dentry->d_name, &de, NULL, NULL);
+ if (IS_ERR(bh))
+ return (struct dentry *) bh;
+ inode = NULL;
+@@ -1489,7 +1818,7 @@ struct dentry *ext4_get_parent(struct de
+ struct ext4_dir_entry_2 * de;
+ struct buffer_head *bh;
+
+- bh = ext4_find_entry(child->d_inode, &dotdot, &de, NULL);
++ bh = ext4_find_entry(child->d_inode, &dotdot, &de, NULL, NULL);
+ if (IS_ERR(bh))
+ return (struct dentry *) bh;
+ if (!bh)
+@@ -1559,8 +1888,9 @@ static struct ext4_dir_entry_2* dx_pack_
+ * Returns pointer to de in block into which the new entry will be inserted.
+ */
+ static struct ext4_dir_entry_2 *do_split(handle_t *handle, struct inode *dir,
+- struct buffer_head **bh,struct dx_frame *frame,
+- struct dx_hash_info *hinfo, int *error)
++ struct buffer_head **bh, struct dx_frame *frames,
++ struct dx_frame *frame, struct dx_hash_info *hinfo,
++ struct htree_lock *lck, int *error)
+ {
+ unsigned blocksize = dir->i_sb->s_blocksize;
+ unsigned count, continued;
+@@ -1624,7 +1954,14 @@ static struct ext4_dir_entry_2 *do_split
+ hash2, split, count-split));
+
+ /* Fancy dance to stay within two buffers */
+- de2 = dx_move_dirents(data1, data2, map + split, count - split, blocksize);
++ if (hinfo->hash < hash2) {
++ de2 = dx_move_dirents(data1, data2, map + split,
++ count - split, blocksize);
++ } else {
++ /* make sure we will add the entry to the same block
++ * which we have already locked */
++ de2 = dx_move_dirents(data1, data2, map, split, blocksize);
++ }
+ de = dx_pack_dirents(data1, blocksize);
+ de->rec_len = ext4_rec_len_to_disk(data1 + (blocksize - csum_size) -
+ (char *) de,
+@@ -1643,13 +1980,21 @@ static struct ext4_dir_entry_2 *do_split
+ dxtrace(dx_show_leaf (hinfo, (struct ext4_dir_entry_2 *) data1, blocksize, 1));
+ dxtrace(dx_show_leaf (hinfo, (struct ext4_dir_entry_2 *) data2, blocksize, 1));
+
+- /* Which block gets the new entry? */
+- if (hinfo->hash >= hash2)
+- {
+- swap(*bh, bh2);
+- de = de2;
++ ext4_htree_spin_lock(lck, frame > frames ? (frame - 1)->at : NULL,
++ frame->at); /* notify block is being split */
++ if (hinfo->hash < hash2) {
++ dx_insert_block(frame, hash2 + continued, newblock);
++
++ } else {
++ /* switch block number */
++ dx_insert_block(frame, hash2 + continued,
++ dx_get_block(frame->at));
++ dx_set_block(frame->at, newblock);
++ (frame->at)++;
+ }
+- dx_insert_block(frame, hash2 + continued, newblock);
++ ext4_htree_spin_unlock(lck);
++ ext4_htree_dx_unlock(lck);
++
+ err = ext4_handle_dirty_dirent_node(handle, dir, bh2);
+ if (err)
+ goto journal_error;
+@@ -1809,7 +2154,7 @@ static int add_dirent_to_buf(handle_t *h
+ */
+ dir->i_mtime = dir->i_ctime = ext4_current_time(dir);
+ ext4_update_dx_flag(dir);
+- dir->i_version++;
++ inode_inc_iversion(dir);
+ ext4_mark_inode_dirty(handle, dir);
+ BUFFER_TRACE(bh, "call ext4_handle_dirty_metadata");
+ err = ext4_handle_dirty_dirent_node(handle, dir, bh);
+@@ -1829,7 +2174,7 @@ static int make_indexed_dir(handle_t *ha
+ const char *name = dentry->d_name.name;
+ int namelen = dentry->d_name.len;
+ struct buffer_head *bh2;
+- struct dx_frame frames[2], *frame;
++ struct dx_frame frames[EXT4_HTREE_LEVEL], *frame;
+ struct dx_entry *entries;
+ struct ext4_dir_entry_2 *de, *de2, *dot_de, *dotdot_de;
+ struct ext4_dir_entry_tail *t;
+@@ -1923,7 +2268,7 @@ static int make_indexed_dir(handle_t *ha
+ ext4_handle_dirty_dx_node(handle, dir, frame->bh);
+ ext4_handle_dirty_dirent_node(handle, dir, bh);
+
+- de = do_split(handle,dir, &bh, frame, &hinfo, &retval);
++ de = do_split(handle,dir, &bh, frames, frame, &hinfo, NULL, &retval);
+ if (!de) {
+ /*
+ * Even if the block split failed, we have to properly write
+@@ -2030,7 +2375,7 @@ out:
+ * the entry, as someone else might have used it while you slept.
+ */
+ int ext4_add_entry(handle_t *handle, struct dentry *dentry,
+- struct inode *inode)
++ struct inode *inode, struct htree_lock *lck)
+ {
+ struct inode *dir = dentry->d_parent->d_inode;
+ struct buffer_head *bh;
+@@ -2066,9 +2411,10 @@ int ext4_add_entry(handle_t *handle, str
+ if (dentry->d_name.len == 2 &&
+ memcmp(dentry->d_name.name, "..", 2) == 0)
+ return ext4_update_dotdot(handle, dentry, inode);
+- retval = ext4_dx_add_entry(handle, dentry, inode);
++ retval = ext4_dx_add_entry(handle, dentry, inode, lck);
+ if (!retval || (retval != ERR_BAD_DX_DIR))
+ return retval;
++ ext4_htree_safe_relock(lck);
+ ext4_clear_inode_flag(dir, EXT4_INODE_INDEX);
+ dx_fallback++;
+ ext4_mark_inode_dirty(handle, dir);
+@@ -2114,18 +2460,21 @@ EXPORT_SYMBOL(ext4_add_entry);
+ * Returns 0 for success, or a negative error value
+ */
+ static int ext4_dx_add_entry(handle_t *handle, struct dentry *dentry,
+- struct inode *inode)
++ struct inode *inode, struct htree_lock *lck)
+ {
+- struct dx_frame frames[2], *frame;
++ struct dx_frame frames[EXT4_HTREE_LEVEL], *frame;
+ struct dx_entry *entries, *at;
+ struct dx_hash_info hinfo;
+ struct buffer_head *bh;
+ struct inode *dir = dentry->d_parent->d_inode;
+ struct super_block *sb = dir->i_sb;
+ struct ext4_dir_entry_2 *de;
++ int restart;
+ int err;
+
+- frame = dx_probe(&dentry->d_name, dir, &hinfo, frames, &err);
++again:
++ restart = 0;
++ frame = dx_probe(&dentry->d_name, dir, &hinfo, frames, lck, &err);
+ if (!frame)
+ return err;
+ entries = frame->entries;
+@@ -2137,33 +2486,53 @@ static int ext4_dx_add_entry(handle_t *h
+ goto cleanup;
+ }
+
+- BUFFER_TRACE(bh, "get_write_access");
+- err = ext4_journal_get_write_access(handle, bh);
+- if (err)
+- goto journal_error;
+-
+ err = add_dirent_to_buf(handle, dentry, inode, NULL, bh);
+ if (err != -ENOSPC)
+ goto cleanup;
+
++ err = 0;
+ /* Block full, should compress but for now just split */
+ dxtrace(printk(KERN_DEBUG "using %u of %u node entries\n",
+ dx_get_count(entries), dx_get_limit(entries)));
+ /* Need to split index? */
+ if (dx_get_count(entries) == dx_get_limit(entries)) {
+ ext4_lblk_t newblock;
+- unsigned icount = dx_get_count(entries);
+- int levels = frame - frames;
++ int levels = frame - frames + 1;
++ unsigned icount;
++ int add_level = 1;
+ struct dx_entry *entries2;
+ struct dx_node *node2;
+ struct buffer_head *bh2;
+
+- if (levels && (dx_get_count(frames->entries) ==
+- dx_get_limit(frames->entries))) {
+- ext4_warning(sb, "Directory index full!");
++ if (!ext4_htree_safe_locked(lck)) { /* retry with EX lock */
++ ext4_htree_safe_relock(lck);
++ restart = 1;
++ goto cleanup;
++ }
++ while (frame > frames) {
++ if (dx_get_count((frame - 1)->entries) <
++ dx_get_limit((frame - 1)->entries)) {
++ add_level = 0;
++ break;
++ }
++ frame--; /* split higher index block */
++ at = frame->at;
++ entries = frame->entries;
++ restart = 1;
++ }
++ if (add_level && levels == ext4_dir_htree_level(sb)) {
++ ext4_warning(sb, "Directory (ino: %lu) index full, "
++ "reach max htree level :%d",
++ dir->i_ino, levels);
++ if (ext4_dir_htree_level(sb) < EXT4_HTREE_LEVEL) {
++ ext4_warning(sb, "Large directory feature is"
++ "not enabled on this "
++ "filesystem");
++ }
+ err = -ENOSPC;
+ goto cleanup;
+ }
++ icount = dx_get_count(entries);
+ bh2 = ext4_append(handle, dir, &newblock);
+ if (IS_ERR(bh2)) {
+ err = PTR_ERR(bh2);
+@@ -2178,7 +2547,7 @@ static int ext4_dx_add_entry(handle_t *h
+ err = ext4_journal_get_write_access(handle, frame->bh);
+ if (err)
+ goto journal_error;
+- if (levels) {
++ if (!add_level) {
+ unsigned icount1 = icount/2, icount2 = icount - icount1;
+ unsigned hash2 = dx_get_hash(entries + icount1);
+ dxtrace(printk(KERN_DEBUG "Split index %i/%i\n",
+@@ -2186,7 +2555,7 @@ static int ext4_dx_add_entry(handle_t *h
+
+ BUFFER_TRACE(frame->bh, "get_write_access"); /* index root */
+ err = ext4_journal_get_write_access(handle,
+- frames[0].bh);
++ (frame - 1)->bh);
+ if (err)
+ goto journal_error;
+
+@@ -2202,18 +2571,24 @@ static int ext4_dx_add_entry(handle_t *h
+ frame->entries = entries = entries2;
+ swap(frame->bh, bh2);
+ }
+- dx_insert_block(frames + 0, hash2, newblock);
+- dxtrace(dx_show_index("node", frames[1].entries));
++ dx_insert_block((frame - 1), hash2, newblock);
++ dxtrace(dx_show_index("node", frame->entries));
+ dxtrace(dx_show_index("node",
+ ((struct dx_node *) bh2->b_data)->entries));
+ err = ext4_handle_dirty_dx_node(handle, dir, bh2);
+ if (err)
+ goto journal_error;
+ brelse (bh2);
++ ext4_handle_dirty_metadata(handle, inode,
++ (frame - 1)->bh);
++ if (restart) {
++ ext4_handle_dirty_metadata(handle, inode,
++ frame->bh);
++ goto cleanup;
++ }
+ } else {
+ struct dx_root_info * info;
+- dxtrace(printk(KERN_DEBUG
+- "Creating second level index...\n"));
++
+ memcpy((char *) entries2, (char *) entries,
+ icount * sizeof(struct dx_entry));
+ dx_set_limit(entries2, dx_node_limit(dir));
+@@ -2223,35 +2598,63 @@ static int ext4_dx_add_entry(handle_t *h
+ dx_set_block(entries + 0, newblock);
+ info = dx_get_dx_info((struct ext4_dir_entry_2*)
+ frames[0].bh->b_data);
+- info->indirect_levels = 1;
++ info->indirect_levels += 1;
++ dxtrace(printk(KERN_DEBUG
++ "Creating %d level index...\n",
++ info->indirect_levels));
++ ext4_handle_dirty_metadata(handle, inode, frame->bh);
++ ext4_handle_dirty_metadata(handle, inode, bh2);
++ brelse(bh2);
++ restart = 1;
++ goto cleanup;
++ }
++ } else if (!ext4_htree_dx_locked(lck)) {
++ struct ext4_dir_lock_data *ld = ext4_htree_lock_data(lck);
+
+- /* Add new access path frame */
+- frame = frames + 1;
+- frame->at = at = at - entries + entries2;
+- frame->entries = entries = entries2;
+- frame->bh = bh2;
+- err = ext4_journal_get_write_access(handle,
+- frame->bh);
+- if (err)
+- goto journal_error;
++ /* not well protected, require DX lock */
++ ext4_htree_dx_need_lock(lck);
++ at = frame > frames ? (frame - 1)->at : NULL;
++
++ /* NB: no risk of deadlock because it's just a try.
++ *
++		 * NB: we check ld_count twice, first before taking the
++		 * DX lock and again after holding it.
++		 *
++		 * NB: we never free directory blocks so far, which means
++		 * the value returned by dx_get_count() should equal
++		 * ld->ld_count if nobody split any DE-block under @at,
++		 * and ld->ld_at still points to a valid dx_entry. */
++ if ((ld->ld_count != dx_get_count(entries)) ||
++ !ext4_htree_dx_lock_try(lck, at) ||
++ (ld->ld_count != dx_get_count(entries))) {
++ restart = 1;
++ goto cleanup;
+ }
+- err = ext4_handle_dirty_dx_node(handle, dir, frames[0].bh);
++ /* OK, I've got DX lock and nothing changed */
++ frame->at = ld->ld_at;
+ if (err) {
+ ext4_std_error(inode->i_sb, err);
+ goto cleanup;
+ }
+ }
+- de = do_split(handle, dir, &bh, frame, &hinfo, &err);
++ de = do_split(handle, dir, &bh, frames, frame, &hinfo, lck, &err);
+ if (!de)
+ goto cleanup;
++
+ err = add_dirent_to_buf(handle, dentry, inode, de, bh);
+ goto cleanup;
+
+ journal_error:
+ ext4_std_error(dir->i_sb, err);
+ cleanup:
++ ext4_htree_dx_unlock(lck);
++ ext4_htree_de_unlock(lck);
+ brelse(bh);
+ dx_release(frames);
++	/* @restart being true means the htree path has changed; we need
++	 * to repeat dx_probe() to find a valid htree path again */
++ if (restart && err == 0)
++ goto again;
+ return err;
+ }
+
+@@ -2288,7 +2691,7 @@ int ext4_generic_delete_entry(handle_t *
+ blocksize);
+ else
+ de->inode = 0;
+- dir->i_version++;
++ inode_inc_iversion(dir);
+ return 0;
+ }
+ i += ext4_rec_len_from_disk(de->rec_len, blocksize);
+@@ -2373,7 +2776,7 @@ EXPORT_SYMBOL(ext4_dec_count);
+ static int ext4_add_nondir(handle_t *handle,
+ struct dentry *dentry, struct inode *inode)
+ {
+- int err = ext4_add_entry(handle, dentry, inode);
++ int err = ext4_add_entry(handle, dentry, inode, NULL);
+ if (!err) {
+ ext4_mark_inode_dirty(handle, inode);
+ unlock_new_inode(inode);
+@@ -2641,7 +3044,7 @@ retry:
+ goto out_clear_inode;
+ err = ext4_mark_inode_dirty(handle, inode);
+ if (!err)
+- err = ext4_add_entry(handle, dentry, inode);
++ err = ext4_add_entry(handle, dentry, inode, NULL);
+ if (err) {
+ out_clear_inode:
+ clear_nlink(inode);
+@@ -2907,7 +3310,7 @@ static int ext4_rmdir(struct inode *dir,
+ dquot_initialize(dentry->d_inode);
+
+ retval = -ENOENT;
+- bh = ext4_find_entry(dir, &dentry->d_name, &de, NULL);
++ bh = ext4_find_entry(dir, &dentry->d_name, &de, NULL, NULL);
+ if (IS_ERR(bh))
+ return PTR_ERR(bh);
+ if (!bh)
+@@ -2974,7 +3377,7 @@ static int ext4_unlink(struct inode *dir
+ dquot_initialize(dentry->d_inode);
+
+ retval = -ENOENT;
+- bh = ext4_find_entry(dir, &dentry->d_name, &de, NULL);
++ bh = ext4_find_entry(dir, &dentry->d_name, &de, NULL, NULL);
+ if (IS_ERR(bh))
+ return PTR_ERR(bh);
+ if (!bh)
+@@ -3153,7 +3556,7 @@ retry:
+ ext4_inc_count(handle, inode);
+ ihold(inode);
+
+- err = ext4_add_entry(handle, dentry, inode);
++ err = ext4_add_entry(handle, dentry, inode, NULL);
+ if (!err) {
+ ext4_mark_inode_dirty(handle, inode);
+ d_instantiate(dentry, inode);
+@@ -3183,7 +3556,7 @@ retry:
+ struct buffer_head *bh;
+ struct ext4_dir_entry_2 *de;
+
+- bh = ext4_find_entry(dir, d_name, &de, NULL);
++ bh = ext4_find_entry(dir, d_name, &de, NULL, NULL);
+ if (IS_ERR(bh))
+ return PTR_ERR(bh);
+ if (bh) {
+@@ -3230,7 +3633,7 @@ static int ext4_rename(struct inode *old
+ if (IS_DIRSYNC(old_dir) || IS_DIRSYNC(new_dir))
+ ext4_handle_sync(handle);
+
+- old.bh = ext4_find_entry(old.dir, &old.dentry->d_name, &old.de, NULL);
++ old.bh = ext4_find_entry(old.dir, &old.dentry->d_name, &old.de, NULL, NULL);
+ if (IS_ERR(old.bh))
+ return PTR_ERR(old.bh);
+ /*
+@@ -3244,7 +3647,7 @@ static int ext4_rename(struct inode *old
+
+ new_inode = new_dentry->d_inode;
+ new.bh = ext4_find_entry(new.dir, &new.dentry->d_name,
+- &new.de, &new.inlined);
++ &new.de, &new.inlined, NULL);
+ if (IS_ERR(new.bh)) {
+ if (!new_inode) {
+ brelse(new_bh);
+@@ -3275,7 +3678,7 @@ static int ext4_rename(struct inode *old
+ goto end_rename;
+ }
+ if (!new.bh) {
+- retval = ext4_add_entry(handle, new.dentry, old.inode);
++ retval = ext4_add_entry(handle, new.dentry, old.inode, NULL);
+ if (retval)
+ goto end_rename;
+ } else {
+@@ -3375,7 +3678,7 @@ static int ext4_rename(struct inode *old
+ dquot_initialize(new.dir);
+
+ old.bh = ext4_find_entry(old.dir, &old.dentry->d_name,
+- &old.de, &old.inlined);
++ &old.de, &old.inlined, NULL);
+ /*
+ * Check for inode number is _not_ due to possible IO errors.
+ * We might rmdir the source, keep it as pwd of some process
+@@ -3475,7 +3678,7 @@ static int ext4_rename(struct inode *old
+ goto end_rename;
+
+ new.bh = ext4_find_entry(new.dir, &new.dentry->d_name,
+- &new.de, &new.inlined);
++ &new.de, &new.inlined, NULL);
+
+ /* RENAME_EXCHANGE case: old *and* new must both exist */
+ if (!new.bh || le32_to_cpu(new.de->inode) != new.inode->i_ino)
+Index: linux-3.10.0-123.13.2.el7.x86_64/fs/ext4/inode.c
+===================================================================
+--- linux-3.10.0-123.13.2.el7.x86_64.orig/fs/ext4/inode.c
++++ linux-3.10.0-123.13.2.el7.x86_64/fs/ext4/inode.c
+@@ -4264,7 +4264,7 @@ struct inode *ext4_iget(struct super_blo
+ if (EXT4_HAS_INCOMPAT_FEATURE(sb, EXT4_FEATURE_INCOMPAT_64BIT))
+ ei->i_file_acl |=
+ ((__u64)le16_to_cpu(raw_inode->i_file_acl_high)) << 32;
+- inode->i_size = ext4_isize(raw_inode);
++ inode->i_size = ext4_isize(sb, raw_inode);
+ ei->i_disksize = inode->i_size;
+ #ifdef CONFIG_QUOTA
+ ei->i_reserved_quota = 0;
+@@ -4499,7 +4499,7 @@ static int ext4_do_update_inode(handle_t
+ raw_inode->i_file_acl_high =
+ cpu_to_le16(ei->i_file_acl >> 32);
+ raw_inode->i_file_acl_lo = cpu_to_le32(ei->i_file_acl);
+- if (ei->i_disksize != ext4_isize(raw_inode)) {
++ if (ei->i_disksize != ext4_isize(inode->i_sb, raw_inode)) {
+ ext4_isize_set(raw_inode, ei->i_disksize);
+ need_datasync = 1;
+ }
+Index: linux-3.10.0-123.13.2.el7.x86_64/fs/ext4/Makefile
+===================================================================
+--- linux-3.10.0-123.13.2.el7.x86_64.orig/fs/ext4/Makefile
++++ linux-3.10.0-123.13.2.el7.x86_64/fs/ext4/Makefile
+@@ -8,7 +8,7 @@ ext4-y := balloc.o bitmap.o dir.o file.o
+ ioctl.o namei.o super.o symlink.o hash.o resize.o extents.o \
+ ext4_jbd2.o migrate.o mballoc.o block_validity.o move_extent.o \
+ mmp.o indirect.o extents_status.o xattr.o xattr_user.o \
+- xattr_trusted.o inline.o
++ xattr_trusted.o inline.o htree_lock.o
+
+ ext4-$(CONFIG_EXT4_FS_POSIX_ACL) += acl.o
+ ext4-$(CONFIG_EXT4_FS_SECURITY) += xattr_security.o
+Index: linux-3.10.0-123.13.2.el7.x86_64/fs/ext4/super.c
+===================================================================
+--- linux-3.10.0-123.13.2.el7.x86_64.orig/fs/ext4/super.c
++++ linux-3.10.0-123.13.2.el7.x86_64/fs/ext4/super.c
+@@ -872,6 +872,7 @@ static struct inode *ext4_alloc_inode(st
+
+ ei->vfs_inode.i_version = 1;
+ spin_lock_init(&ei->i_raw_lock);
++ sema_init(&ei->i_append_sem, 1);
+ INIT_LIST_HEAD(&ei->i_prealloc_list);
+ spin_lock_init(&ei->i_prealloc_lock);
+ ext4_es_init_tree(&ei->i_es_tree);
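Note (editorial, not part of the patch): the hunks above rely on an optimistic
"check, try-lock, re-check" pattern around the DX lock in ext4_dx_add_entry():
the dx_entry count acts as a change indicator, and if it differs either before
or after the try-lock, the cached htree position may be stale and the caller
restarts dx_probe(). A minimal stand-alone sketch of that pattern follows; all
names (dx_block, dx_try_lock_validate) are illustrative and pthreads stands in
for the htree_lock primitives.

	#include <pthread.h>
	#include <stdbool.h>

	struct dx_block {
		pthread_mutex_t lock;	/* stands in for the htree DX lock */
		unsigned int count;	/* stands in for dx_get_count(entries) */
	};

	/* Returns true if the cached snapshot is still valid and the lock is
	 * held on return; false means the caller should redo the probe. */
	static bool dx_try_lock_validate(struct dx_block *blk,
					 unsigned int cached_count)
	{
		if (cached_count != blk->count)
			return false;			/* changed before locking */
		if (pthread_mutex_trylock(&blk->lock) != 0)
			return false;			/* contended: restart rather than block */
		if (cached_count != blk->count) {
			pthread_mutex_unlock(&blk->lock);
			return false;			/* changed while racing for the lock */
		}
		return true;				/* snapshot valid, lock held */
	}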
--- /dev/null
+Index: linux-3.10.0-123.el7.x86_64/fs/ext4/ext4.h
+===================================================================
+--- linux-3.10.0-123.el7.x86_64.orig/fs/ext4/ext4.h
++++ linux-3.10.0-123.el7.x86_64/fs/ext4/ext4.h
+@@ -1243,11 +1243,14 @@ struct ext4_sb_info {
+
+ /* tunables */
+ unsigned long s_stripe;
+- unsigned int s_mb_stream_request;
++ unsigned long s_mb_small_req;
++ unsigned long s_mb_large_req;
+ unsigned int s_mb_max_to_scan;
+ unsigned int s_mb_min_to_scan;
+ unsigned int s_mb_stats;
+ unsigned int s_mb_order2_reqs;
++ unsigned long *s_mb_prealloc_table;
++ unsigned long s_mb_prealloc_table_size;
+ unsigned int s_mb_group_prealloc;
+ unsigned int s_max_writeback_mb_bump;
+ unsigned int s_max_dir_size_kb;
+Index: linux-3.10.0-123.el7.x86_64/fs/ext4/mballoc.c
+===================================================================
+--- linux-3.10.0-123.el7.x86_64.orig/fs/ext4/mballoc.c
++++ linux-3.10.0-123.el7.x86_64/fs/ext4/mballoc.c
+@@ -1828,6 +1828,25 @@ int ext4_mb_find_by_goal(struct ext4_all
+ return 0;
+ }
+
++static void ext4_mb_prealloc_table_add(struct ext4_sb_info *sbi, int value)
++{
++ int i;
++
++ if (value > (sbi->s_blocks_per_group - 1 - 1 - sbi->s_itb_per_group))
++ return;
++
++ for (i = 0; i < sbi->s_mb_prealloc_table_size; i++) {
++ if (sbi->s_mb_prealloc_table[i] == 0) {
++ sbi->s_mb_prealloc_table[i] = value;
++ return;
++ }
++
++		/* callers must add values in ascending order */
++ if (value <= sbi->s_mb_prealloc_table[i])
++ return;
++ }
++}
++
+ /*
+ * The routine scans buddy structures (not bitmap!) from given order
+ * to max order and tries to find big enough chunk to satisfy the req
+@@ -2263,6 +2282,86 @@ static const struct seq_operations ext4_
+ .show = ext4_mb_seq_groups_show,
+ };
+
++#define EXT4_MB_PREALLOC_TABLE "prealloc_table"
++
++static ssize_t ext4_mb_prealloc_table_proc_write(struct file *file,
++ const char __user *buf,
++ size_t cnt, loff_t *pos)
++{
++ struct ext4_sb_info *sbi = EXT4_SB(PDE_DATA(file_inode(file)));
++ unsigned long value;
++ unsigned long prev = 0;
++ char str[128];
++ char *cur;
++ char *end;
++ unsigned long *new_table;
++ int num = 0;
++ int i = 0;
++
++ if (cnt >= sizeof(str))
++ return -EINVAL;
++ if (copy_from_user(str, buf, cnt))
++ return -EFAULT;
++
++ num = 0;
++ cur = str;
++ end = str + cnt;
++ while (cur < end) {
++ while ((cur < end) && (*cur == ' ')) cur++;
++ value = simple_strtol(cur, &cur, 0);
++ if (value == 0)
++ break;
++ if (value <= prev)
++ return -EINVAL;
++ prev = value;
++ num++;
++ }
++
++ new_table = kmalloc(num * sizeof(*new_table), GFP_KERNEL);
++ if (new_table == NULL)
++ return -ENOMEM;
++ kfree(sbi->s_mb_prealloc_table);
++ memset(new_table, 0, num * sizeof(*new_table));
++ sbi->s_mb_prealloc_table = new_table;
++ sbi->s_mb_prealloc_table_size = num;
++ cur = str;
++ end = str + cnt;
++ while (cur < end && i < num) {
++ while ((cur < end) && (*cur == ' ')) cur++;
++ value = simple_strtol(cur, &cur, 0);
++ ext4_mb_prealloc_table_add(sbi, value);
++ i++;
++ }
++
++ return cnt;
++}
++
++static int mb_prealloc_table_seq_show(struct seq_file *m, void *v)
++{
++ struct ext4_sb_info *sbi = EXT4_SB(m->private);
++ int i;
++
++ for (i = 0; i < sbi->s_mb_prealloc_table_size; i++)
++ seq_printf(m, "%ld ", sbi->s_mb_prealloc_table[i]);
++ seq_printf(m, "\n");
++
++ return 0;
++}
++
++static int mb_prealloc_table_seq_open(struct inode *inode, struct file *file)
++{
++ return single_open(file, mb_prealloc_table_seq_show, PDE_DATA(inode));
++}
++
++struct file_operations ext4_mb_prealloc_seq_fops = {
++ .owner = THIS_MODULE,
++ .open = mb_prealloc_table_seq_open,
++ .read = seq_read,
++ .llseek = seq_lseek,
++ .release = single_release,
++ .write = ext4_mb_prealloc_table_proc_write,
++};
++
+ static int ext4_mb_seq_groups_open(struct inode *inode, struct file *file)
+ {
+ struct super_block *sb = PDE_DATA(inode);
+@@ -2557,7 +2656,6 @@ int ext4_mb_init(struct super_block *sb)
+ sbi->s_mb_max_to_scan = MB_DEFAULT_MAX_TO_SCAN;
+ sbi->s_mb_min_to_scan = MB_DEFAULT_MIN_TO_SCAN;
+ sbi->s_mb_stats = MB_DEFAULT_STATS;
+- sbi->s_mb_stream_request = MB_DEFAULT_STREAM_THRESHOLD;
+ sbi->s_mb_order2_reqs = MB_DEFAULT_ORDER2_REQS;
+ /*
+ * The default group preallocation is 512, which for 4k block
+@@ -2581,9 +2679,48 @@ int ext4_mb_init(struct super_block *sb)
+ * RAID stripe size so that preallocations don't fragment
+ * the stripes.
+ */
+- if (sbi->s_stripe > 1) {
+- sbi->s_mb_group_prealloc = roundup(
+- sbi->s_mb_group_prealloc, sbi->s_stripe);
++
++ if (sbi->s_stripe == 0) {
++ sbi->s_mb_prealloc_table_size = 10;
++ i = sbi->s_mb_prealloc_table_size * sizeof(unsigned long);
++ sbi->s_mb_prealloc_table = kmalloc(i, GFP_NOFS);
++ if (sbi->s_mb_prealloc_table == NULL) {
++ ret = -ENOMEM;
++ goto out;
++ }
++ memset(sbi->s_mb_prealloc_table, 0, i);
++
++ ext4_mb_prealloc_table_add(sbi, 4);
++ ext4_mb_prealloc_table_add(sbi, 8);
++ ext4_mb_prealloc_table_add(sbi, 16);
++ ext4_mb_prealloc_table_add(sbi, 32);
++ ext4_mb_prealloc_table_add(sbi, 64);
++ ext4_mb_prealloc_table_add(sbi, 128);
++ ext4_mb_prealloc_table_add(sbi, 256);
++ ext4_mb_prealloc_table_add(sbi, 512);
++ ext4_mb_prealloc_table_add(sbi, 1024);
++ ext4_mb_prealloc_table_add(sbi, 2048);
++
++ sbi->s_mb_small_req = 256;
++ sbi->s_mb_large_req = 1024;
++ sbi->s_mb_group_prealloc = 512;
++ } else {
++ sbi->s_mb_prealloc_table_size = 3;
++ i = sbi->s_mb_prealloc_table_size * sizeof(unsigned long);
++ sbi->s_mb_prealloc_table = kmalloc(i, GFP_NOFS);
++ if (sbi->s_mb_prealloc_table == NULL) {
++ ret = -ENOMEM;
++ goto out;
++ }
++ memset(sbi->s_mb_prealloc_table, 0, i);
++
++ ext4_mb_prealloc_table_add(sbi, sbi->s_stripe);
++ ext4_mb_prealloc_table_add(sbi, sbi->s_stripe * 2);
++ ext4_mb_prealloc_table_add(sbi, sbi->s_stripe * 4);
++
++ sbi->s_mb_small_req = sbi->s_stripe;
++ sbi->s_mb_large_req = sbi->s_stripe * 8;
++ sbi->s_mb_group_prealloc = sbi->s_stripe * 4;
+ }
+
+ sbi->s_locality_groups = alloc_percpu(struct ext4_locality_group);
+@@ -2605,9 +2742,13 @@ int ext4_mb_init(struct super_block *sb)
+ if (ret != 0)
+ goto out_free_locality_groups;
+
+- if (sbi->s_proc)
++ if (sbi->s_proc) {
+ proc_create_data("mb_groups", S_IRUGO, sbi->s_proc,
+ &ext4_mb_seq_groups_fops, sb);
++ proc_create_data(EXT4_MB_PREALLOC_TABLE, S_IFREG | S_IRUGO |
++ S_IWUSR, sbi->s_proc,
++ &ext4_mb_prealloc_seq_fops, sb);
++ }
+
+ return 0;
+
+@@ -2615,6 +2756,7 @@ out_free_locality_groups:
+ free_percpu(sbi->s_locality_groups);
+ sbi->s_locality_groups = NULL;
+ out:
++ kfree(sbi->s_mb_prealloc_table);
+ kfree(sbi->s_mb_offsets);
+
+
+@@ -2651,8 +2793,10 @@ int ext4_mb_release(struct super_block *
+ struct ext4_sb_info *sbi = EXT4_SB(sb);
+ struct kmem_cache *cachep = get_groupinfo_cache(sb->s_blocksize_bits);
+
+- if (sbi->s_proc)
++ if (sbi->s_proc) {
+ remove_proc_entry("mb_groups", sbi->s_proc);
++ remove_proc_entry(EXT4_MB_PREALLOC_TABLE, sbi->s_proc);
++ }
+
+ if (sbi->s_group_info) {
+ for (i = 0; i < ngroups; i++) {
+@@ -2963,9 +3107,9 @@ ext4_mb_normalize_request(struct ext4_al
+ struct ext4_allocation_request *ar)
+ {
+ struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb);
+- int bsbits, max;
++ int bsbits, i, wind;
+ ext4_lblk_t end;
+- loff_t size, start_off;
++ loff_t size;
+ loff_t orig_size __maybe_unused;
+ ext4_lblk_t start;
+ struct ext4_inode_info *ei = EXT4_I(ac->ac_inode);
+@@ -2998,51 +3142,34 @@ ext4_mb_normalize_request(struct ext4_al
+ size = size << bsbits;
+ if (size < i_size_read(ac->ac_inode))
+ size = i_size_read(ac->ac_inode);
+- orig_size = size;
++ size = (size + ac->ac_sb->s_blocksize - 1) >> bsbits;
+
+- /* max size of free chunks */
+- max = 2 << bsbits;
++ start = wind = 0;
+
+-#define NRL_CHECK_SIZE(req, size, max, chunk_size) \
+- (req <= (size) || max <= (chunk_size))
++ /* let's choose preallocation window depending on file size */
++ for (i = 0; i < sbi->s_mb_prealloc_table_size; i++) {
++ if (size <= sbi->s_mb_prealloc_table[i]) {
++ wind = sbi->s_mb_prealloc_table[i];
++ break;
++ }
++ }
++ size = wind;
+
+- /* first, try to predict filesize */
+- /* XXX: should this table be tunable? */
+- start_off = 0;
+- if (size <= 16 * 1024) {
+- size = 16 * 1024;
+- } else if (size <= 32 * 1024) {
+- size = 32 * 1024;
+- } else if (size <= 64 * 1024) {
+- size = 64 * 1024;
+- } else if (size <= 128 * 1024) {
+- size = 128 * 1024;
+- } else if (size <= 256 * 1024) {
+- size = 256 * 1024;
+- } else if (size <= 512 * 1024) {
+- size = 512 * 1024;
+- } else if (size <= 1024 * 1024) {
+- size = 1024 * 1024;
+- } else if (NRL_CHECK_SIZE(size, 4 * 1024 * 1024, max, 2 * 1024)) {
+- start_off = ((loff_t)ac->ac_o_ex.fe_logical >>
+- (21 - bsbits)) << 21;
+- size = 2 * 1024 * 1024;
+- } else if (NRL_CHECK_SIZE(size, 8 * 1024 * 1024, max, 4 * 1024)) {
+- start_off = ((loff_t)ac->ac_o_ex.fe_logical >>
+- (22 - bsbits)) << 22;
+- size = 4 * 1024 * 1024;
+- } else if (NRL_CHECK_SIZE(ac->ac_o_ex.fe_len,
+- (8<<20)>>bsbits, max, 8 * 1024)) {
+- start_off = ((loff_t)ac->ac_o_ex.fe_logical >>
+- (23 - bsbits)) << 23;
+- size = 8 * 1024 * 1024;
+- } else {
+- start_off = (loff_t) ac->ac_o_ex.fe_logical << bsbits;
+- size = (loff_t) EXT4_C2B(EXT4_SB(ac->ac_sb),
+- ac->ac_o_ex.fe_len) << bsbits;
++ if (wind == 0) {
++ __u64 tstart, tend;
++		/* file is quite large, so preallocate with the
++		 * biggest configured window, aligned with regard to
++		 * the logical offset */
++ wind = sbi->s_mb_prealloc_table[i - 1];
++ tstart = ac->ac_o_ex.fe_logical;
++ do_div(tstart, wind);
++ start = tstart * wind;
++ tend = ac->ac_o_ex.fe_logical + ac->ac_o_ex.fe_len - 1;
++ do_div(tend, wind);
++ tend = tend * wind + wind;
++ size = tend - start;
+ }
+- size = size >> bsbits;
+- start = start_off >> bsbits;
++ orig_size = size;
+
+ /* don't cover already allocated blocks in selected range */
+ if (ar->pleft && start <= ar->lleft) {
+@@ -3117,7 +3245,6 @@ ext4_mb_normalize_request(struct ext4_al
+ BUG_ON(start + size <= ac->ac_o_ex.fe_logical &&
+ start > ac->ac_o_ex.fe_logical);
+ }
+- BUG_ON(size <= 0 || size > EXT4_BLOCKS_PER_GROUP(ac->ac_sb));
+
+ /* now prepare goal request */
+
+@@ -4056,11 +4183,19 @@ static void ext4_mb_group_or_file(struct
+
+ /* don't use group allocation for large files */
+ size = max(size, isize);
+- if (size > sbi->s_mb_stream_request) {
++ if ((ac->ac_o_ex.fe_len >= sbi->s_mb_small_req) ||
++ (size >= sbi->s_mb_large_req)) {
+ ac->ac_flags |= EXT4_MB_STREAM_ALLOC;
+ return;
+ }
+
++ /*
++ * request is so large that we don't care about
++	 * streaming - it outweighs any possible seek
++ */
++ if (ac->ac_o_ex.fe_len >= sbi->s_mb_large_req)
++ return;
++
+ BUG_ON(ac->ac_lg != NULL);
+ /*
+ * locality group prealloc space are per cpu. The reason for having
+Index: linux-3.10.0-123.el7.x86_64/fs/ext4/super.c
+===================================================================
+--- linux-3.10.0-123.el7.x86_64.orig/fs/ext4/super.c
++++ linux-3.10.0-123.el7.x86_64/fs/ext4/super.c
+@@ -2555,7 +2555,8 @@ EXT4_RW_ATTR_SBI_UI(mb_stats, s_mb_stats
+ EXT4_RW_ATTR_SBI_UI(mb_max_to_scan, s_mb_max_to_scan);
+ EXT4_RW_ATTR_SBI_UI(mb_min_to_scan, s_mb_min_to_scan);
+ EXT4_RW_ATTR_SBI_UI(mb_order2_req, s_mb_order2_reqs);
+-EXT4_RW_ATTR_SBI_UI(mb_stream_req, s_mb_stream_request);
++EXT4_RW_ATTR_SBI_UI(mb_small_req, s_mb_small_req);
++EXT4_RW_ATTR_SBI_UI(mb_large_req, s_mb_large_req);
+ EXT4_RW_ATTR_SBI_UI(mb_group_prealloc, s_mb_group_prealloc);
+ EXT4_RW_ATTR_SBI_UI(max_writeback_mb_bump, s_max_writeback_mb_bump);
+ EXT4_RW_ATTR_SBI_UI(extent_max_zeroout_kb, s_extent_max_zeroout_kb);
+@@ -2578,7 +2579,8 @@ static struct attribute *ext4_attrs[] =
+ ATTR_LIST(mb_max_to_scan),
+ ATTR_LIST(mb_min_to_scan),
+ ATTR_LIST(mb_order2_req),
+- ATTR_LIST(mb_stream_req),
++ ATTR_LIST(mb_small_req),
++ ATTR_LIST(mb_large_req),
+ ATTR_LIST(mb_group_prealloc),
+ ATTR_LIST(max_writeback_mb_bump),
+ ATTR_LIST(extent_max_zeroout_kb),
+Index: linux-3.10.0-123.el7.x86_64/fs/ext4/inode.c
+===================================================================
+--- linux-3.10.0-123.el7.x86_64.orig/fs/ext4/inode.c
++++ linux-3.10.0-123.el7.x86_64/fs/ext4/inode.c
+@@ -2476,6 +2476,10 @@ static int ext4_da_writepages(struct add
+ if (unlikely(sbi->s_mount_flags & EXT4_MF_FS_ABORTED))
+ return -EROFS;
+
++ if (wbc->nr_to_write < sbi->s_mb_small_req) {
++ wbc->nr_to_write = sbi->s_mb_small_req;
++ }
++
+ if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX)
+ range_whole = 1;
+
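Note (editorial, not part of the patch): the ext4_mb_normalize_request() hunks
above pick a preallocation window by rounding the request size up to the first
prealloc-table entry that can hold it; requests larger than every entry are
instead aligned to the largest window around the logical offset. A stand-alone
sketch of that selection logic follows; pick_window() and main() are
illustrative only.

	#include <stdio.h>

	static unsigned long pick_window(const unsigned long *table, int n,
					 unsigned long size_blocks,
					 unsigned long logical, unsigned long len,
					 unsigned long *start)
	{
		int i;

		*start = 0;
		for (i = 0; i < n; i++) {
			if (size_blocks <= table[i])
				return table[i];	/* smallest window that fits */
		}
		/* too large for any window: align to the biggest one */
		{
			unsigned long wind = table[n - 1];
			unsigned long tstart = (logical / wind) * wind;
			unsigned long tend = ((logical + len - 1) / wind) * wind + wind;

			*start = tstart;
			return tend - tstart;
		}
	}

	int main(void)
	{
		/* default table installed when s_stripe == 0 */
		unsigned long table[] = { 4, 8, 16, 32, 64, 128, 256, 512, 1024, 2048 };
		unsigned long start;
		unsigned long size = pick_window(table, 10, 100, 0, 100, &start);

		printf("window=%lu start=%lu\n", size, start);	/* window=128 start=0 */
		return 0;
	}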
--- /dev/null
+Taking i_mutex around ext4_truncate would cause a deadlock in Lustre.
+Since Lustre has its own lock to provide this protection, we don't need
+this check at all.
+
+Index: linux-3.10.0-123.13.2.el7.x86_64/fs/ext4/inode.c
+===================================================================
+--- linux-3.10.0-123.13.2.el7.x86_64.orig/fs/ext4/inode.c
++++ linux-3.10.0-123.13.2.el7.x86_64/fs/ext4/inode.c
+@@ -3847,8 +3847,6 @@ void ext4_truncate(struct inode *inode)
+ * or it completely new indode. In those cases we might not
+ * have i_mutex locked because it's not necessary.
+ */
+- if (!(inode->i_state & (I_NEW|I_FREEING)))
+- WARN_ON(!mutex_is_locked(&inode->i_mutex));
+ trace_ext4_truncate_enter(inode);
+
+ if (!ext4_can_truncate(inode))
--- /dev/null
+rhel7/ext4-inode-version.patch
+rhel7/ext4-lookup-dotdot.patch
+rhel6.3/ext4-print-inum-in-htree-warning.patch
+rhel7/ext4-prealloc.patch
+rhel7/ext4-mballoc-extra-checks.patch
+rhel7/ext4-misc.patch
+rhel7/ext4-osd-iop-common.patch
+rhel7/ext4-hash-indexed-dir-dotdot-update.patch
+rhel7/ext4-kill-dx-root.patch
+rhel7/ext4-mballoc-pa-free-mismatch.patch
+rhel7/ext4-data-in-dirent.patch
+rhel7/ext4-large-eas.patch
+rhel7/ext4-disable-mb-cache.patch
+rhel7/ext4-nocmtime.patch
+rhel7/ext4-pdirop.patch
+rhel7/ext4-max-dir-size.patch
+rhel7/ext4-remove-truncate-warning.patch
/* XATTR_{REPLACE,CREATE} */
#include <linux/xattr.h>
+#include <ldiskfs/ldiskfs.h>
+#include <ldiskfs/xattr.h>
+#undef ENTRY
/*
* struct OBD_{ALLOC,FREE}*()
* OBD_FAIL_CHECK
#include <md_object.h>
#include <lustre_quota.h>
-#include <ldiskfs/xattr.h>
#include <lustre_linkea.h>
int ldiskfs_pdo = 1;
#include <linux/string.h>
#include <linux/quotaops.h>
#include <linux/buffer_head.h>
+
+#include <ldiskfs/ldiskfs.h>
+#include <ldiskfs/xattr.h>
+#undef ENTRY
+
#include "osd_internal.h"
-#include "xattr.h"
-#include "acl.h"
+#include <ldiskfs/acl.h>
/*
* List of all registered formats.