+++ /dev/null
-Index: linux-stage/fs/ext4/super.c
-===================================================================
---- linux-stage.orig/fs/ext4/super.c
-+++ linux-stage/fs/ext4/super.c
-@@ -4642,6 +4646,11 @@ static void __exit exit_ext4_fs(void)
- exit_ext4_system_zone();
- }
-
-+EXPORT_SYMBOL(ext4_bread);
-+EXPORT_SYMBOL(ext4_journal_start_sb);
-+EXPORT_SYMBOL(__ext4_journal_stop);
-+EXPORT_SYMBOL(ext4_force_commit);
-+
- MODULE_AUTHOR("Remy Card, Stephen Tweedie, Andrew Morton, Andreas Dilger, Theodore Ts'o and others");
- MODULE_DESCRIPTION("Fourth Extended Filesystem");
- MODULE_LICENSE("GPL");
-Index: linux-stage/fs/ext4/ext4.h
-===================================================================
---- linux-stage.orig/fs/ext4/ext4.h
-+++ linux-stage/fs/ext4/ext4.h
-@@ -1643,6 +1643,8 @@ extern unsigned ext4_init_inode_bitmap(s
- struct buffer_head *bh,
- ext4_group_t group,
- struct ext4_group_desc *desc);
-+extern struct buffer_head *ext4_read_inode_bitmap(struct super_block *sb,
-+ ext4_group_t block_group);
- extern void mark_bitmap_end(int start_bit, int end_bit, char *bitmap);
- extern int ext4_init_inode_table(struct super_block *sb,
- ext4_group_t group, int barrier);
-Index: linux-stage/fs/ext4/ialloc.c
-===================================================================
---- linux-stage.orig/fs/ext4/ialloc.c
-+++ linux-stage/fs/ext4/ialloc.c
-@@ -97,7 +97,7 @@ unsigned ext4_init_inode_bitmap(struct s
- *
- * Return buffer_head of bitmap on success or NULL.
- */
--static struct buffer_head *
-+struct buffer_head *
- ext4_read_inode_bitmap(struct super_block *sb, ext4_group_t block_group)
- {
- struct ext4_group_desc *desc;
-@@ -161,6 +161,7 @@ ext4_read_inode_bitmap(struct super_bloc
- }
- return bh;
- }
-+EXPORT_SYMBOL(ext4_read_inode_bitmap);
-
- /*
- * NOTE! When we get the inode, we're the only people
-Index: linux-stage/fs/ext4/balloc.c
-===================================================================
---- linux-stage.orig/fs/ext4/balloc.c
-+++ linux-stage/fs/ext4/balloc.c
-@@ -229,6 +229,7 @@ struct ext4_group_desc * ext4_get_group_
- *bh = sbi->s_group_desc[group_desc];
- return desc;
- }
-+EXPORT_SYMBOL(ext4_get_group_desc);
-
- static int ext4_valid_block_bitmap(struct super_block *sb,
- struct ext4_group_desc *desc,
-Index: linux-stage/fs/ext4/inode.c
-===================================================================
---- linux-stage.orig/fs/ext4/inode.c
-+++ linux-stage/fs/ext4/inode.c
-@@ -5131,6 +5131,7 @@ out_stop:
-
- ext4_journal_stop(handle);
- }
-+EXPORT_SYMBOL(ext4_truncate);
-
- /*
- * ext4_get_inode_loc returns with an extra refcount against the inode's
+++ /dev/null
-From c49bafa3842751b8955a962859f42d307673d75d Mon Sep 17 00:00:00 2001
-From: Dan Carpenter <error27@gmail.com>
-Date: Sat, 30 Jul 2011 12:58:41 -0400
-Subject: ext4: add missing kfree() on error return path in add_new_gdb()
-Git-commit: c49bafa3
-Patch-mainline: v3.1-rc1
-
-We added some more error handling in b40971426a "ext4: add error
-checking to calls to ext4_handle_dirty_metadata()". But we need to
-call kfree() as well to avoid a memory leak.
-
-Signed-off-by: Dan Carpenter <error27@gmail.com>
-Signed-off-by: "Theodore Ts'o" <tytso@mit.edu>
-Acked-by: Jeff Mahoney <jeffm@suse.com>
----
- fs/ext4/resize.c | 1 +
- 1 file changed, 1 insertion(+)
-
---- a/fs/ext4/resize.c
-+++ b/fs/ext4/resize.c
-@@ -475,6 +475,7 @@ static int add_new_gdb(handle_t *handle,
- return 0;
-
- exit_inode:
-+ kfree(n_group_desc);
- /* ext4_journal_release_buffer(handle, iloc.bh); */
- brelse(iloc.bh);
- exit_dindj:
+++ /dev/null
-From dabd991f9d8e3232bb4531c920daddac8d10d313 Mon Sep 17 00:00:00 2001
-From: Namhyung Kim <namhyung@gmail.com>
-Date: Mon, 10 Jan 2011 12:11:16 -0500
-Subject: ext4: add more error checks to ext4_mkdir()
-Git-commit: dabd991f
-Patch-mainline: v2.6.38-rc1
-
-Check return value of ext4_journal_get_write_access,
-ext4_journal_dirty_metadata and ext4_mark_inode_dirty. Move brelse()
-under 'out_stop' to release bh properly in case of journal error.
-
-Upstream-Signed-off-by: Namhyung Kim <namhyung@gmail.com>
-Upstream-Signed-off-by: "Theodore Ts'o" <tytso@mit.edu>
-Upstream-Acked-by: Jeff Mahoney <jeffm@suse.com>
----
- fs/ext4/namei.c | 21 ++++++++++++++-------
- 1 files changed, 14 insertions(+), 7 deletions(-)
-
-diff --git a/fs/ext4/namei.c b/fs/ext4/namei.c
-index 96a594d..6dfc5b9 100644
---- a/fs/ext4/namei.c
-+++ b/fs/ext4/namei.c
-@@ -1789,7 +1789,7 @@ static int ext4_mkdir(struct inode *dir, struct dentry *dentry, int mode)
- {
- handle_t *handle;
- struct inode *inode;
-- struct buffer_head *dir_block;
-+ struct buffer_head *dir_block = NULL;
- struct ext4_dir_entry_2 *de;
- unsigned int blocksize = dir->i_sb->s_blocksize;
- int err, retries = 0;
-@@ -1822,7 +1822,9 @@ retry:
- if (!dir_block)
- goto out_clear_inode;
- BUFFER_TRACE(dir_block, "get_write_access");
-- ext4_journal_get_write_access(handle, dir_block);
-+ err = ext4_journal_get_write_access(handle, dir_block);
-+ if (err)
-+ goto out_clear_inode;
- de = (struct ext4_dir_entry_2 *) dir_block->b_data;
- de->inode = cpu_to_le32(inode->i_ino);
- de->name_len = 1;
-@@ -1839,10 +1841,12 @@ retry:
- ext4_set_de_type(dir->i_sb, de, S_IFDIR);
- inode->i_nlink = 2;
- BUFFER_TRACE(dir_block, "call ext4_handle_dirty_metadata");
-- ext4_handle_dirty_metadata(handle, inode, dir_block);
-- brelse(dir_block);
-- ext4_mark_inode_dirty(handle, inode);
-- err = ext4_add_entry(handle, dentry, inode);
-+ err = ext4_handle_dirty_metadata(handle, inode, dir_block);
-+ if (err)
-+ goto out_clear_inode;
-+ err = ext4_mark_inode_dirty(handle, inode);
-+ if (!err)
-+ err = ext4_add_entry(handle, dentry, inode);
- if (err) {
- out_clear_inode:
- clear_nlink(inode);
-@@ -1853,10 +1857,13 @@ out_clear_inode:
- }
- ext4_inc_count(handle, dir);
- ext4_update_dx_flag(dir);
-- ext4_mark_inode_dirty(handle, dir);
-+ err = ext4_mark_inode_dirty(handle, dir);
-+ if (err)
-+ goto out_clear_inode;
- d_instantiate(dentry, inode);
- unlock_new_inode(inode);
- out_stop:
-+ brelse(dir_block);
- ext4_journal_stop(handle);
- if (err == -ENOSPC && ext4_should_retry_alloc(dir->i_sb, &retries))
- goto retry;
-
+++ /dev/null
-Index: linux-stage/fs/ext4/super.c
-===================================================================
---- linux-stage.orig/fs/ext4/super.c
-+++ linux-stage/fs/ext4/super.c
-@@ -1117,9 +1117,53 @@ static ssize_t ext4_quota_read(struct su
- static ssize_t ext4_quota_write(struct super_block *sb, int type,
- const char *data, size_t len, loff_t off);
-
-+static int ext4_dquot_initialize(struct inode *inode, int type)
-+{
-+ handle_t *handle;
-+ int ret, err;
-+
-+ if (IS_NOQUOTA(inode))
-+ return 0;
-+
-+ /* We may create quota structure so we need to reserve enough blocks */
-+ handle = ext4_journal_start(inode, 2*EXT4_QUOTA_INIT_BLOCKS(inode->i_sb));
-+ if (IS_ERR(handle))
-+ return PTR_ERR(handle);
-+ ret = dquot_initialize(inode, type);
-+ err = ext4_journal_stop(handle);
-+ if (!ret)
-+ ret = err;
-+ return ret;
-+}
-+
-+static int ext4_dquot_drop(struct inode *inode)
-+{
-+ handle_t *handle;
-+ int ret, err;
-+
-+ if (IS_NOQUOTA(inode))
-+ return 0;
-+
-+ /* We may delete quota structure so we need to reserve enough blocks */
-+ handle = ext4_journal_start(inode, 2*EXT4_QUOTA_DEL_BLOCKS(inode->i_sb));
-+ if (IS_ERR(handle)) {
-+ /*
-+ * We call dquot_drop() anyway to at least release references
-+ * to quota structures so that umount does not hang.
-+ */
-+ dquot_drop(inode);
-+ return PTR_ERR(handle);
-+ }
-+ ret = dquot_drop(inode);
-+ err = ext4_journal_stop(handle);
-+ if (!ret)
-+ ret = err;
-+ return ret;
-+}
-+
- static const struct dquot_operations ext4_quota_operations = {
-- .initialize = dquot_initialize,
-- .drop = dquot_drop,
-+ .initialize = ext4_dquot_initialize,
-+ .drop = ext4_dquot_drop,
- .alloc_space = dquot_alloc_space,
- .reserve_space = dquot_reserve_space,
- .claim_space = dquot_claim_space,
+++ /dev/null
-This patch implements a feature which allows ext4 filesystem users (e.g.
-Lustre) to store data in an ext4 dirent.
-The data is stored in the ext4 dirent after the file name, and this space is
-accounted for in de->rec_len. The flag EXT4_DIRENT_LUFID is added to d_type
-when extra data is present.
-
-dentry->d_fsdata is used to pass the fid to ext4, so no changes to the
-ext4_add_entry() interface are required.
-
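-For illustration only (not part of this patch): a minimal caller-side sketch of
-how a dirdata user such as Lustre could hand the extra payload to ext4 through
-dentry->d_fsdata, using the ext4_dentry_param layout defined later in this
-patch. The names attach_dirent_data, fid and fid_len are hypothetical
-placeholders for the caller's own data.
-
-	/* hypothetical helper on the caller side */
-	static int attach_dirent_data(struct dentry *dentry,
-				      const void *fid, __u8 fid_len)
-	{
-		struct ext4_dentry_param *param;
-
-		param = kzalloc(sizeof(*param) + fid_len, GFP_NOFS);
-		if (param == NULL)
-			return -ENOMEM;
-		param->edp_magic = EXT4_LUFID_MAGIC;
-		param->edp_len = fid_len;		/* size of edp_data in bytes */
-		memcpy(param->edp_data, fid, fid_len);	/* packed payload */
-		dentry->d_fsdata = param;	/* consumed by add_dirent_to_buf() */
-		return 0;
-	}
-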
-Index: linux-2.6.32-504.3.3.el6.x86_64/fs/ext4/dir.c
-===================================================================
---- linux-2.6.32-504.3.3.el6.x86_64.orig/fs/ext4/dir.c
-+++ linux-2.6.32-504.3.3.el6.x86_64/fs/ext4/dir.c
-@@ -37,11 +37,18 @@ static int ext4_dx_readdir(struct file *
-
- static unsigned char get_dtype(struct super_block *sb, int filetype)
- {
-+ int fl_index = filetype & EXT4_FT_MASK;
-+
- if (!EXT4_HAS_INCOMPAT_FEATURE(sb, EXT4_FEATURE_INCOMPAT_FILETYPE) ||
-- (filetype >= EXT4_FT_MAX))
-+ (fl_index >= EXT4_FT_MAX))
- return DT_UNKNOWN;
-
-- return (ext4_filetype_table[filetype]);
-+ if (!test_opt(sb, DIRDATA))
-+ return (ext4_filetype_table[fl_index]);
-+
-+ return (ext4_filetype_table[fl_index]) |
-+ (filetype & EXT4_DIRENT_LUFID);
-+
- }
-
- /**
-@@ -73,11 +80,11 @@ int ext4_check_dir_entry(const char *fun
- const int rlen = ext4_rec_len_from_disk(de->rec_len,
- dir->i_sb->s_blocksize);
-
-- if (unlikely(rlen < EXT4_DIR_REC_LEN(1)))
-+ if (unlikely(rlen < __EXT4_DIR_REC_LEN(1)))
- error_msg = "rec_len is smaller than minimal";
- else if (unlikely(rlen % 4 != 0))
- error_msg = "rec_len % 4 != 0";
-- else if (unlikely(rlen < EXT4_DIR_REC_LEN(de->name_len)))
-+ else if (unlikely(rlen < EXT4_DIR_REC_LEN(de)))
- error_msg = "rec_len is too small for name_len";
- else if (unlikely(((char *) de - bh->b_data) + rlen > dir->i_sb->s_blocksize))
- error_msg = "directory entry across blocks";
-@@ -181,7 +188,7 @@ revalidate:
- * failure will be detected in the
- * dirent test below. */
- if (ext4_rec_len_from_disk(de->rec_len,
-- sb->s_blocksize) < EXT4_DIR_REC_LEN(1))
-+ sb->s_blocksize) < __EXT4_DIR_REC_LEN(1))
- break;
- i += ext4_rec_len_from_disk(de->rec_len,
- sb->s_blocksize);
-@@ -410,12 +417,17 @@ int ext4_htree_store_dirent(struct file
- struct fname *fname, *new_fn;
- struct dir_private_info *info;
- int len;
-+ int extra_data = 0;
-
- info = (struct dir_private_info *) dir_file->private_data;
- p = &info->root.rb_node;
-
- /* Create and allocate the fname structure */
-- len = sizeof(struct fname) + dirent->name_len + 1;
-+ if (dirent->file_type & EXT4_DIRENT_LUFID)
-+ extra_data = ext4_get_dirent_data_len(dirent);
-+
-+ len = sizeof(struct fname) + dirent->name_len + extra_data + 1;
-+
- new_fn = kzalloc(len, GFP_KERNEL);
- if (!new_fn)
- return -ENOMEM;
-@@ -424,7 +436,7 @@ int ext4_htree_store_dirent(struct file
- new_fn->inode = le32_to_cpu(dirent->inode);
- new_fn->name_len = dirent->name_len;
- new_fn->file_type = dirent->file_type;
-- memcpy(new_fn->name, dirent->name, dirent->name_len);
-+ memcpy(new_fn->name, dirent->name, dirent->name_len + extra_data);
- new_fn->name[dirent->name_len] = 0;
-
- while (*p) {
-Index: linux-2.6.32-504.3.3.el6.x86_64/fs/ext4/ext4.h
-===================================================================
---- linux-2.6.32-504.3.3.el6.x86_64.orig/fs/ext4/ext4.h
-+++ linux-2.6.32-504.3.3.el6.x86_64/fs/ext4/ext4.h
-@@ -881,6 +881,7 @@ struct ext4_inode_info {
- #define EXT4_MOUNT_ERRORS_PANIC 0x00040 /* Panic on errors */
- #define EXT4_MOUNT_MINIX_DF 0x00080 /* Mimics the Minix statfs */
- #define EXT4_MOUNT_NOLOAD 0x00100 /* Don't use existing journal*/
-+#define EXT4_MOUNT_DIRDATA 0x00200 /* Data in directory entries*/
- #define EXT4_MOUNT_DATA_FLAGS 0x00C00 /* Mode for data writes: */
- #define EXT4_MOUNT_JOURNAL_DATA 0x00400 /* Write data to journal */
- #define EXT4_MOUNT_ORDERED_DATA 0x00800 /* Flush data before commit */
-@@ -1337,6 +1338,7 @@ EXT4_INODE_BIT_FNS(state, state_flags)
- #define EXT4_FEATURE_INCOMPAT_64BIT 0x0080
- #define EXT4_FEATURE_INCOMPAT_MMP 0x0100
- #define EXT4_FEATURE_INCOMPAT_FLEX_BG 0x0200
-+#define EXT4_FEATURE_INCOMPAT_DIRDATA 0x1000
-
- #define EXT4_FEATURE_COMPAT_SUPP EXT2_FEATURE_COMPAT_EXT_ATTR
- #define EXT4_FEATURE_INCOMPAT_SUPP (EXT4_FEATURE_INCOMPAT_FILETYPE| \
-@@ -1345,7 +1347,9 @@ EXT4_INODE_BIT_FNS(state, state_flags)
- EXT4_FEATURE_INCOMPAT_EXTENTS| \
- EXT4_FEATURE_INCOMPAT_64BIT| \
- EXT4_FEATURE_INCOMPAT_FLEX_BG| \
-- EXT4_FEATURE_INCOMPAT_MMP)
-+ EXT4_FEATURE_INCOMPAT_MMP| \
-+ EXT4_FEATURE_INCOMPAT_DIRDATA)
-+
- #define EXT4_FEATURE_RO_COMPAT_SUPP (EXT4_FEATURE_RO_COMPAT_SPARSE_SUPER| \
- EXT4_FEATURE_RO_COMPAT_LARGE_FILE| \
- EXT4_FEATURE_RO_COMPAT_GDT_CSUM| \
-@@ -1431,6 +1435,43 @@ struct ext4_dir_entry_2 {
- #define EXT4_FT_SYMLINK 7
-
- #define EXT4_FT_MAX 8
-+#define EXT4_FT_MASK 0xf
-+
-+#if EXT4_FT_MAX > EXT4_FT_MASK
-+#error "conflicting EXT4_FT_MAX and EXT4_FT_MASK"
-+#endif
-+
-+/*
-+ * d_type has 4 unused bits, so it can hold four types of data. These
-+ * different types of data (e.g. Lustre data, high 32 bits of a 64-bit inode
-+ * number) can be stored, in flag order, after the file name in the ext4 dirent.
-+ */
-+/*
-+ * This flag is added to d_type if the ext4 dirent has extra data after the
-+ * filename. The data length is variable and is stored in the first byte of
-+ * the data; the data starts after the filename's NUL byte.
-+ * This is used by the Lustre filesystem.
-+ */
-+#define EXT4_DIRENT_LUFID 0x10
-+
-+#define EXT4_LUFID_MAGIC 0xAD200907UL
-+struct ext4_dentry_param {
-+ __u32 edp_magic; /* EXT4_LUFID_MAGIC */
-+ char edp_len; /* size of edp_data in bytes */
-+ char edp_data[0]; /* packed array of data */
-+} __attribute__((packed));
-+
-+static inline unsigned char *ext4_dentry_get_data(struct super_block *sb,
-+ struct ext4_dentry_param* p)
-+
-+{
-+ if (!EXT4_HAS_INCOMPAT_FEATURE(sb, EXT4_FEATURE_INCOMPAT_DIRDATA))
-+ return NULL;
-+ if (p && p->edp_magic == EXT4_LUFID_MAGIC)
-+ return &p->edp_len;
-+ else
-+ return NULL;
-+}
-
- /*
- * EXT4_DIR_PAD defines the directory entries boundaries
-@@ -1439,8 +1480,11 @@ struct ext4_dir_entry_2 {
- */
- #define EXT4_DIR_PAD 4
- #define EXT4_DIR_ROUND (EXT4_DIR_PAD - 1)
--#define EXT4_DIR_REC_LEN(name_len) (((name_len) + 8 + EXT4_DIR_ROUND) & \
-+#define __EXT4_DIR_REC_LEN(name_len) (((name_len) + 8 + EXT4_DIR_ROUND) & \
- ~EXT4_DIR_ROUND)
-+#define EXT4_DIR_REC_LEN(de) (__EXT4_DIR_REC_LEN(de->name_len +\
-+ ext4_get_dirent_data_len(de)))
-+
- #define EXT4_MAX_REC_LEN ((1<<16)-1)
-
- static inline unsigned int
-@@ -1841,7 +1885,7 @@ extern struct buffer_head * ext4_find_en
- struct ext4_dir_entry_2 ** res_dir);
- #define ll_ext4_find_entry(inode, dentry, res_dir) ext4_find_entry(inode, &(dentry)->d_name, res_dir)
- extern int ext4_add_dot_dotdot(handle_t *handle, struct inode *dir,
-- struct inode *inode);
-+ struct inode *inode, const void *, const void *);
- extern struct buffer_head *ext4_append(handle_t *handle,
- struct inode *inode,
- ext4_lblk_t *block, int *err);
-@@ -2198,6 +2242,28 @@ extern wait_queue_head_t aio_wq[];
- #define to_aio_wq(v) (&aio_wq[((unsigned long)v) % WQ_HASH_SZ])
- extern void ext4_aio_wait(struct inode *inode);
-
-+/*
-+ * Compute the total directory entry data length.
-+ * This includes the filename and an implicit NUL terminator (always present),
-+ * and optional extensions. Each extension has a bit set in the high 4 bits of
-+ * de->file_type, and the extension length is the first byte in each entry.
-+ */
-+static inline int ext4_get_dirent_data_len(struct ext4_dir_entry_2 *de)
-+{
-+ char *len = de->name + de->name_len + 1 /* NUL terminator */;
-+ int dlen = 0;
-+ __u8 extra_data_flags = (de->file_type & ~EXT4_FT_MASK) >> 4;
-+
-+ while (extra_data_flags) {
-+ if (extra_data_flags & 1) {
-+ dlen += *len + (dlen == 0);
-+ len += *len;
-+ }
-+ extra_data_flags >>= 1;
-+ }
-+ return dlen;
-+}
-+
- #endif /* __KERNEL__ */
-
- #endif /* _EXT4_H */
-Index: linux-2.6.32-504.3.3.el6.x86_64/fs/ext4/namei.c
-===================================================================
---- linux-2.6.32-504.3.3.el6.x86_64.orig/fs/ext4/namei.c
-+++ linux-2.6.32-504.3.3.el6.x86_64/fs/ext4/namei.c
-@@ -169,7 +169,8 @@ static unsigned dx_get_count(struct dx_e
- static unsigned dx_get_limit(struct dx_entry *entries);
- static void dx_set_count(struct dx_entry *entries, unsigned value);
- static void dx_set_limit(struct dx_entry *entries, unsigned value);
--static unsigned dx_root_limit(struct inode *dir, unsigned infosize);
-+static inline unsigned dx_root_limit(__u32 blocksize,
-+ struct ext4_dir_entry_2 *dot_de, unsigned infosize);
- static unsigned dx_node_limit(struct inode *dir);
- static struct dx_frame *dx_probe(const struct qstr *d_name,
- struct inode *dir,
-@@ -212,11 +213,12 @@ ext4_next_entry(struct ext4_dir_entry_2
- */
- struct dx_root_info * dx_get_dx_info(struct ext4_dir_entry_2 *de)
- {
-- /* get dotdot first */
-- de = (struct ext4_dir_entry_2 *)((char *)de + EXT4_DIR_REC_LEN(1));
-+ BUG_ON(de->name_len != 1);
-+ /* get dotdot first */
-+ de = (struct ext4_dir_entry_2 *)((char *)de + EXT4_DIR_REC_LEN(de));
-
-- /* dx root info is after dotdot entry */
-- de = (struct ext4_dir_entry_2 *)((char *)de + EXT4_DIR_REC_LEN(2));
-+ /* dx root info is after dotdot entry */
-+ de = (struct ext4_dir_entry_2 *)((char *)de + EXT4_DIR_REC_LEN(de));
-
- return (struct dx_root_info *) de;
- }
-@@ -261,16 +263,23 @@ static inline void dx_set_limit(struct d
- ((struct dx_countlimit *) entries)->limit = cpu_to_le16(value);
- }
-
--static inline unsigned dx_root_limit(struct inode *dir, unsigned infosize)
-+static inline unsigned dx_root_limit(__u32 blocksize,
-+ struct ext4_dir_entry_2 *dot_de, unsigned infosize)
- {
-- unsigned entry_space = dir->i_sb->s_blocksize - EXT4_DIR_REC_LEN(1) -
-- EXT4_DIR_REC_LEN(2) - infosize;
-+ struct ext4_dir_entry_2 *dotdot_de;
-+ unsigned entry_space;
-+
-+ BUG_ON(dot_de->name_len != 1);
-+ dotdot_de = ext4_next_entry(dot_de, blocksize);
-+ entry_space = blocksize - EXT4_DIR_REC_LEN(dot_de) -
-+ EXT4_DIR_REC_LEN(dotdot_de) - infosize;
-+
- return entry_space / sizeof(struct dx_entry);
- }
-
- static inline unsigned dx_node_limit(struct inode *dir)
- {
-- unsigned entry_space = dir->i_sb->s_blocksize - EXT4_DIR_REC_LEN(0);
-+ unsigned entry_space = dir->i_sb->s_blocksize - __EXT4_DIR_REC_LEN(0);
- return entry_space / sizeof(struct dx_entry);
- }
-
-@@ -317,7 +326,7 @@ static struct stats dx_show_leaf(struct
- printk(":%x.%u ", h.hash,
- ((char *) de - base));
- }
-- space += EXT4_DIR_REC_LEN(de->name_len);
-+ space += EXT4_DIR_REC_LEN(de);
- names++;
- }
- de = ext4_next_entry(de, size);
-@@ -419,7 +428,8 @@ dx_probe(const struct qstr *d_name, stru
-
- entries = (struct dx_entry *) (((char *)info) + info->info_length);
-
-- if (dx_get_limit(entries) != dx_root_limit(dir,
-+ if (dx_get_limit(entries) != dx_root_limit(dir->i_sb->s_blocksize,
-+ (struct ext4_dir_entry_2*)bh->b_data,
- info->info_length)) {
- ext4_warning(dir->i_sb, "dx entry: limit != root limit");
- brelse(bh);
-@@ -608,7 +618,7 @@ static int htree_dirblock_to_tree(struct
- de = (struct ext4_dir_entry_2 *) bh->b_data;
- top = (struct ext4_dir_entry_2 *) ((char *) de +
- dir->i_sb->s_blocksize -
-- EXT4_DIR_REC_LEN(0));
-+ __EXT4_DIR_REC_LEN(0));
- for (; de < top; de = ext4_next_entry(de, dir->i_sb->s_blocksize)) {
- if (!ext4_check_dir_entry("htree_dirblock_to_tree", dir, de, bh,
- (block<<EXT4_BLOCK_SIZE_BITS(dir->i_sb))
-@@ -1020,7 +1030,7 @@ static struct buffer_head * ext4_dx_find
- goto errout;
- de = (struct ext4_dir_entry_2 *) bh->b_data;
- top = (struct ext4_dir_entry_2 *) ((char *) de + sb->s_blocksize -
-- EXT4_DIR_REC_LEN(0));
-+ __EXT4_DIR_REC_LEN(0));
- for (; de < top; de = ext4_next_entry(de, sb->s_blocksize)) {
- int off = (block << EXT4_BLOCK_SIZE_BITS(sb))
- + ((char *) de - bh->b_data);
-@@ -1181,7 +1191,7 @@ dx_move_dirents(char *from, char *to, st
- while (count--) {
- struct ext4_dir_entry_2 *de = (struct ext4_dir_entry_2 *)
- (from + (map->offs<<2));
-- rec_len = EXT4_DIR_REC_LEN(de->name_len);
-+ rec_len = EXT4_DIR_REC_LEN(de);
- memcpy (to, de, rec_len);
- ((struct ext4_dir_entry_2 *) to)->rec_len =
- ext4_rec_len_to_disk(rec_len, blocksize);
-@@ -1205,7 +1215,7 @@ static struct ext4_dir_entry_2* dx_pack_
- while ((char*)de < base + blocksize) {
- next = ext4_next_entry(de, blocksize);
- if (de->inode && de->name_len) {
-- rec_len = EXT4_DIR_REC_LEN(de->name_len);
-+ rec_len = EXT4_DIR_REC_LEN(de);
- if (de > to)
- memmove(to, de, rec_len);
- to->rec_len = ext4_rec_len_to_disk(rec_len, blocksize);
-@@ -1334,11 +1344,28 @@ static int add_dirent_to_buf(handle_t *h
- int namelen = dentry->d_name.len;
- unsigned int offset = 0;
- unsigned int blocksize = dir->i_sb->s_blocksize;
-- unsigned short reclen;
-- int nlen, rlen, err;
-+ unsigned short reclen, dotdot_reclen = 0;
-+ int nlen, rlen, err, dlen = 0;
-+ bool is_dotdot = false, write_short_dotdot = false;
-+ unsigned char *data;
- char *top;
-
-- reclen = EXT4_DIR_REC_LEN(namelen);
-+ data = ext4_dentry_get_data(inode->i_sb, (struct ext4_dentry_param *)
-+ dentry->d_fsdata);
-+ if (data)
-+ dlen = (*data) + 1;
-+
-+ is_dotdot = (namelen == 2 &&
-+ memcmp(dentry->d_name.name, "..", 2) == 0);
-+
-+ /* dotdot entries must be in the second place in a directory block,
-+ * so calculate an alternate length without the FID so they can
-+ * always be made to fit in the existing slot - LU-5626 */
-+ if (is_dotdot)
-+ dotdot_reclen = __EXT4_DIR_REC_LEN(namelen);
-+
-+ reclen = __EXT4_DIR_REC_LEN(namelen + dlen);
-+
- if (!de) {
- de = (struct ext4_dir_entry_2 *)bh->b_data;
- top = bh->b_data + blocksize - reclen;
-@@ -1348,10 +1375,25 @@ static int add_dirent_to_buf(handle_t *h
- return -EIO;
- if (ext4_match(namelen, name, de))
- return -EEXIST;
-- nlen = EXT4_DIR_REC_LEN(de->name_len);
-+ nlen = EXT4_DIR_REC_LEN(de);
- rlen = ext4_rec_len_from_disk(de->rec_len, blocksize);
-- if ((de->inode? rlen - nlen: rlen) >= reclen)
-+ /* Check first for enough space for the full entry */
-+ if ((de->inode ? rlen - nlen : rlen) >= reclen)
- break;
-+ /* Then for dotdot entries, check for the smaller space
-+ * required for just the entry, no FID */
-+ if (is_dotdot) {
-+ if ((de->inode ? rlen - nlen : rlen) >=
-+ dotdot_reclen) {
-+ write_short_dotdot = true;
-+ break;
-+ }
-+			/* The new ".." entry must be written over the
-+ * previous ".." entry, which is the first
-+ * entry traversed by this scan. If it doesn't
-+ * fit, something is badly wrong, so -EIO. */
-+ return -EIO;
-+ }
- de = (struct ext4_dir_entry_2 *)((char *)de + rlen);
- offset += rlen;
- }
-@@ -1366,7 +1408,7 @@ static int add_dirent_to_buf(handle_t *h
- }
-
- /* By now the buffer is marked for journaling */
-- nlen = EXT4_DIR_REC_LEN(de->name_len);
-+ nlen = EXT4_DIR_REC_LEN(de);
- rlen = ext4_rec_len_from_disk(de->rec_len, blocksize);
- if (de->inode) {
- struct ext4_dir_entry_2 *de1 = (struct ext4_dir_entry_2 *)((char *)de + nlen);
-@@ -1382,6 +1424,13 @@ static int add_dirent_to_buf(handle_t *h
- de->inode = 0;
- de->name_len = namelen;
- memcpy(de->name, name, namelen);
-+ /* If we're writing the short form of "dotdot", don't add the data section */
-+ if (data && !write_short_dotdot) {
-+ de->name[namelen] = 0;
-+ memcpy(&de->name[namelen + 1], data, *(char *) data);
-+ de->file_type |= EXT4_DIRENT_LUFID;
-+ }
-+
- /*
- * XXX shouldn't update any times until successful
- * completion of syscall, but too many callers depend
-@@ -1480,7 +1529,8 @@ static int make_indexed_dir(handle_t *ha
-
- dx_set_block(entries, 1);
- dx_set_count(entries, 1);
-- dx_set_limit(entries, dx_root_limit(dir, sizeof(*dx_info)));
-+ dx_set_limit(entries, dx_root_limit(dir->i_sb->s_blocksize,
-+ dot_de, sizeof(*dx_info)));
-
- /* Initialize as for dx_probe */
- hinfo.hash_version = dx_info->hash_version;
-@@ -1523,6 +1573,8 @@ static int ext4_update_dotdot(handle_t *
- struct buffer_head * dir_block;
- struct ext4_dir_entry_2 * de;
- int len, journal = 0, err = 0;
-+ int dlen = 0;
-+ char *data;
-
- if (IS_ERR(handle))
- return PTR_ERR(handle);
-@@ -1538,19 +1590,24 @@ static int ext4_update_dotdot(handle_t *
- /* the first item must be "." */
- assert(de->name_len == 1 && de->name[0] == '.');
- len = le16_to_cpu(de->rec_len);
-- assert(len >= EXT4_DIR_REC_LEN(1));
-- if (len > EXT4_DIR_REC_LEN(1)) {
-+ assert(len >= __EXT4_DIR_REC_LEN(1));
-+ if (len > __EXT4_DIR_REC_LEN(1)) {
- BUFFER_TRACE(dir_block, "get_write_access");
- err = ext4_journal_get_write_access(handle, dir_block);
- if (err)
- goto out_journal;
-
- journal = 1;
-- de->rec_len = cpu_to_le16(EXT4_DIR_REC_LEN(1));
-+ de->rec_len = cpu_to_le16(EXT4_DIR_REC_LEN(de));
- }
-
-- len -= EXT4_DIR_REC_LEN(1);
-- assert(len == 0 || len >= EXT4_DIR_REC_LEN(2));
-+ len -= EXT4_DIR_REC_LEN(de);
-+ data = ext4_dentry_get_data(dir->i_sb,
-+ (struct ext4_dentry_param *) dentry->d_fsdata);
-+ if (data)
-+ dlen = *data + 1;
-+ assert(len == 0 || len >= __EXT4_DIR_REC_LEN(2 + dlen));
-+
- de = (struct ext4_dir_entry_2 *)
- ((char *) de + le16_to_cpu(de->rec_len));
- if (!journal) {
-@@ -1564,10 +1621,15 @@ static int ext4_update_dotdot(handle_t *
- if (len > 0)
- de->rec_len = cpu_to_le16(len);
- else
-- assert(le16_to_cpu(de->rec_len) >= EXT4_DIR_REC_LEN(2));
-+ assert(le16_to_cpu(de->rec_len) >= __EXT4_DIR_REC_LEN(2));
- de->name_len = 2;
- strcpy (de->name, "..");
-- ext4_set_de_type(dir->i_sb, de, S_IFDIR);
-+ if (data != NULL && ext4_get_dirent_data_len(de) >= dlen) {
-+ de->name[2] = 0;
-+ memcpy(&de->name[2 + 1], data, *data);
-+ ext4_set_de_type(dir->i_sb, de, S_IFDIR);
-+ de->file_type |= EXT4_DIRENT_LUFID;
-+ }
-
- out_journal:
- if (journal) {
-@@ -1993,12 +2055,13 @@ retry:
- /* Initialize @inode as a subdirectory of @dir, and add the
- * "." and ".." entries into the first directory block. */
- int ext4_add_dot_dotdot(handle_t *handle, struct inode * dir,
-- struct inode *inode)
-+ struct inode *inode,
-+ const void *data1, const void *data2)
- {
- struct buffer_head * dir_block;
- struct ext4_dir_entry_2 * de;
- unsigned int blocksize = dir->i_sb->s_blocksize;
-- int err = 0;
-+ int err = 0, dot_reclen;
-
- if (IS_ERR(handle))
- return PTR_ERR(handle);
-@@ -2019,17 +2082,32 @@ int ext4_add_dot_dotdot(handle_t *handle
- de = (struct ext4_dir_entry_2 *) dir_block->b_data;
- de->inode = cpu_to_le32(inode->i_ino);
- de->name_len = 1;
-- de->rec_len = ext4_rec_len_to_disk(EXT4_DIR_REC_LEN(de->name_len),
-- blocksize);
- strcpy(de->name, ".");
- ext4_set_de_type(dir->i_sb, de, S_IFDIR);
-+ /* get packed fid data*/
-+ data1 = ext4_dentry_get_data(dir->i_sb,
-+ (struct ext4_dentry_param *) data1);
-+ if (data1) {
-+ de->name[1] = 0;
-+ memcpy(&de->name[2], data1, *(char *) data1);
-+ de->file_type |= EXT4_DIRENT_LUFID;
-+ }
-+ de->rec_len = cpu_to_le16(EXT4_DIR_REC_LEN(de));
-+ dot_reclen = cpu_to_le16(de->rec_len);
- de = ext4_next_entry(de, blocksize);
- de->inode = cpu_to_le32(dir->i_ino);
-- de->rec_len = ext4_rec_len_to_disk(blocksize - EXT4_DIR_REC_LEN(1),
-+ de->rec_len = ext4_rec_len_to_disk(blocksize - dot_reclen,
- blocksize);
- de->name_len = 2;
- strcpy(de->name, "..");
- ext4_set_de_type(dir->i_sb, de, S_IFDIR);
-+ data2 = ext4_dentry_get_data(dir->i_sb,
-+ (struct ext4_dentry_param *) data2);
-+ if (data2) {
-+ de->name[2] = 0;
-+ memcpy(&de->name[3], data2, *(char *) data2);
-+ de->file_type |= EXT4_DIRENT_LUFID;
-+ }
- inode->i_nlink = 2;
- BUFFER_TRACE(dir_block, "call ext4_handle_dirty_metadata");
- err = ext4_handle_dirty_metadata(handle, inode, dir_block);
-@@ -2068,7 +2146,7 @@ retry:
- if (IS_ERR(inode))
- goto out_stop;
-
-- err = ext4_add_dot_dotdot(handle, dir, inode);
-+ err = ext4_add_dot_dotdot(handle, dir, inode, NULL, NULL);
- if (err)
- goto out_clear_inode;
-
-@@ -2107,7 +2185,7 @@ static int empty_dir(struct inode *inode
- int err = 0;
-
- sb = inode->i_sb;
-- if (inode->i_size < EXT4_DIR_REC_LEN(1) + EXT4_DIR_REC_LEN(2) ||
-+ if (inode->i_size < __EXT4_DIR_REC_LEN(1) + __EXT4_DIR_REC_LEN(2) ||
- !(bh = ext4_bread(NULL, inode, 0, 0, &err))) {
- if (err)
- ext4_error(inode->i_sb,
-Index: linux-2.6.32-504.3.3.el6.x86_64/fs/ext4/super.c
-===================================================================
---- linux-2.6.32-504.3.3.el6.x86_64.orig/fs/ext4/super.c
-+++ linux-2.6.32-504.3.3.el6.x86_64/fs/ext4/super.c
-@@ -1203,6 +1203,7 @@ enum {
- Opt_usrjquota, Opt_grpjquota, Opt_offusrjquota, Opt_offgrpjquota,
- Opt_jqfmt_vfsold, Opt_jqfmt_vfsv0, Opt_jqfmt_vfsv1, Opt_quota,
- Opt_noquota, Opt_ignore, Opt_barrier, Opt_nobarrier, Opt_err,
-+ Opt_dirdata,
- Opt_resize, Opt_usrquota, Opt_grpquota, Opt_i_version,
- Opt_stripe, Opt_delalloc, Opt_nodelalloc,
- Opt_block_validity, Opt_noblock_validity,
-@@ -1259,6 +1260,7 @@ static const match_table_t tokens = {
- {Opt_noquota, "noquota"},
- {Opt_quota, "quota"},
- {Opt_usrquota, "usrquota"},
-+ {Opt_dirdata, "dirdata"},
- {Opt_barrier, "barrier=%u"},
- {Opt_barrier, "barrier"},
- {Opt_nobarrier, "nobarrier"},
-@@ -1634,6 +1636,9 @@ set_qf_format:
- else
- clear_opt(sbi->s_mount_opt, BARRIER);
- break;
-+ case Opt_dirdata:
-+ set_opt(sbi->s_mount_opt, DIRDATA);
-+ break;
- case Opt_ignore:
- break;
- case Opt_resize:
+++ /dev/null
-Index: linux-2.6.32-504.3.3.el6.x86_64/fs/ext4/ext4.h
-===================================================================
---- linux-2.6.32-504.3.3.el6.x86_64.orig/fs/ext4/ext4.h
-+++ linux-2.6.32-504.3.3.el6.x86_64/fs/ext4/ext4.h
-@@ -873,7 +873,8 @@ struct ext4_inode_info {
- /*
- * Mount flags
- */
--#define EXT4_MOUNT_OLDALLOC 0x00002 /* Don't use the new Orlov allocator */
-+#define EXT4_MOUNT_NO_MBCACHE 0x00001 /* Disable mbcache */
-+#define EXT4_MOUNT_OLDALLOC 0x00002 /* Don't use the new Orlov allocator */
- #define EXT4_MOUNT_GRPID 0x00004 /* Create files with directory's group */
- #define EXT4_MOUNT_DEBUG 0x00008 /* Some debugging messages */
- #define EXT4_MOUNT_ERRORS_CONT 0x00010 /* Continue on errors */
-Index: linux-2.6.32-504.3.3.el6.x86_64/fs/ext4/super.c
-===================================================================
---- linux-2.6.32-504.3.3.el6.x86_64.orig/fs/ext4/super.c
-+++ linux-2.6.32-504.3.3.el6.x86_64/fs/ext4/super.c
-@@ -1252,6 +1252,7 @@ enum {
- Opt_stripe, Opt_delalloc, Opt_nodelalloc,
- Opt_block_validity, Opt_noblock_validity,
- Opt_inode_readahead_blks, Opt_journal_ioprio,
-+ Opt_no_mbcache,
- Opt_discard, Opt_nodiscard, Opt_init_itable, Opt_noinit_itable,
- };
-
-@@ -1320,6 +1321,7 @@ static const match_table_t tokens = {
- {Opt_auto_da_alloc, "auto_da_alloc=%u"},
- {Opt_auto_da_alloc, "auto_da_alloc"},
- {Opt_noauto_da_alloc, "noauto_da_alloc"},
-+ {Opt_no_mbcache, "no_mbcache"},
- {Opt_discard, "discard"},
- {Opt_nodiscard, "nodiscard"},
- {Opt_init_itable, "init_itable=%u"},
-@@ -1780,6 +1782,9 @@ set_qf_format:
- case Opt_noinit_itable:
- clear_opt(sbi->s_mount_opt, INIT_INODE_TABLE);
- break;
-+ case Opt_no_mbcache:
-+ set_opt(sbi->s_mount_opt, NO_MBCACHE);
-+ break;
- default:
- ext4_msg(sb, KERN_ERR,
- "Unrecognized mount option \"%s\" "
-Index: linux-2.6.32-504.3.3.el6.x86_64/fs/ext4/xattr.c
-===================================================================
---- linux-2.6.32-504.3.3.el6.x86_64.orig/fs/ext4/xattr.c
-+++ linux-2.6.32-504.3.3.el6.x86_64/fs/ext4/xattr.c
-@@ -86,7 +86,8 @@
- # define ea_bdebug(f...)
- #endif
-
--static void ext4_xattr_cache_insert(struct buffer_head *);
-+static void ext4_xattr_cache_insert(struct super_block *,
-+ struct buffer_head *);
- static struct buffer_head *ext4_xattr_cache_find(struct inode *,
- struct ext4_xattr_header *,
- struct mb_cache_entry **);
-@@ -333,7 +334,7 @@ bad_block:
- error = -EIO;
- goto cleanup;
- }
-- ext4_xattr_cache_insert(bh);
-+ ext4_xattr_cache_insert(inode->i_sb, bh);
- entry = BFIRST(bh);
- error = ext4_xattr_find_entry(&entry, name_index, name, bh->b_size, 1,
- inode);
-@@ -492,7 +493,7 @@ ext4_xattr_block_list(struct inode *inod
- error = -EIO;
- goto cleanup;
- }
-- ext4_xattr_cache_insert(bh);
-+ ext4_xattr_cache_insert(inode->i_sb, bh);
- error = ext4_xattr_list_entries(inode, BFIRST(bh), buffer, buffer_size);
-
- cleanup:
-@@ -589,7 +590,9 @@ ext4_xattr_release_block(handle_t *handl
- struct mb_cache_entry *ce = NULL;
- int error = 0;
-
-- ce = mb_cache_entry_get(ext4_xattr_cache, bh->b_bdev, bh->b_blocknr);
-+ if (!test_opt(inode->i_sb, NO_MBCACHE))
-+ ce = mb_cache_entry_get(ext4_xattr_cache, bh->b_bdev,
-+ bh->b_blocknr);
- error = ext4_journal_get_write_access(handle, bh);
- if (error)
- goto out;
-@@ -988,8 +991,10 @@ ext4_xattr_block_set(handle_t *handle, s
- #define header(x) ((struct ext4_xattr_header *)(x))
-
- if (s->base) {
-- ce = mb_cache_entry_get(ext4_xattr_cache, bs->bh->b_bdev,
-- bs->bh->b_blocknr);
-+ if (!test_opt(inode->i_sb, NO_MBCACHE))
-+ ce = mb_cache_entry_get(ext4_xattr_cache,
-+ bs->bh->b_bdev,
-+ bs->bh->b_blocknr);
- error = ext4_journal_get_write_access(handle, bs->bh);
- if (error)
- goto cleanup;
-@@ -1006,7 +1011,7 @@ ext4_xattr_block_set(handle_t *handle, s
- if (!IS_LAST_ENTRY(s->first))
- ext4_xattr_rehash(header(s->base),
- s->here);
-- ext4_xattr_cache_insert(bs->bh);
-+ ext4_xattr_cache_insert(sb, bs->bh);
- }
- unlock_buffer(bs->bh);
- if (error == -EIO)
-@@ -1089,7 +1094,8 @@ inserted:
- if (error)
- goto cleanup_dquot;
- }
-- mb_cache_entry_release(ce);
-+ if (ce)
-+ mb_cache_entry_release(ce);
- ce = NULL;
- } else if (bs->bh && s->base == bs->bh->b_data) {
- /* We were modifying this block in-place. */
-@@ -1140,7 +1146,7 @@ getblk_failed:
- memcpy(new_bh->b_data, s->base, new_bh->b_size);
- set_buffer_uptodate(new_bh);
- unlock_buffer(new_bh);
-- ext4_xattr_cache_insert(new_bh);
-+ ext4_xattr_cache_insert(sb, new_bh);
- error = ext4_handle_dirty_metadata(handle,
- inode, new_bh);
- if (error)
-@@ -1857,12 +1863,15 @@ ext4_xattr_put_super(struct super_block
- * Returns 0, or a negative error number on failure.
- */
- static void
--ext4_xattr_cache_insert(struct buffer_head *bh)
-+ext4_xattr_cache_insert(struct super_block *sb, struct buffer_head *bh)
- {
- __u32 hash = le32_to_cpu(BHDR(bh)->h_hash);
- struct mb_cache_entry *ce;
- int error;
-
-+ if (test_opt(sb, NO_MBCACHE))
-+ return;
-+
- ce = mb_cache_entry_alloc(ext4_xattr_cache, GFP_NOFS);
- if (!ce) {
- ea_bdebug(bh, "out of memory");
-@@ -1935,6 +1944,8 @@ ext4_xattr_cache_find(struct inode *inod
- __u32 hash = le32_to_cpu(header->h_hash);
- struct mb_cache_entry *ce;
-
-+ if (test_opt(inode->i_sb, NO_MBCACHE))
-+ return NULL;
- if (!header->h_hash)
- return NULL; /* never share */
- ea_idebug(inode, "looking for cached blocks [%x]", (int)hash);
+++ /dev/null
-When ldiskfs runs in failover mode with a read-only disk,
-part of the allocation updates are lost and ldiskfs may
-fail while mounting, due to an inconsistent group
-descriptor state. Move the group-descriptor check to
-after journal replay.
-
-Index: linux-stage/fs/ext4/super.c
-===================================================================
---- linux-stage/fs/ext4/super.c 2016-11-24 20:50:46.736527130 +0530
-+++ linux-stage.orig/fs/ext4/super.c 2016-11-24 20:54:14.941779453 +0530
-@@ -3429,10 +3429,6 @@
- goto failed_mount2;
- }
- }
-- if (!ext4_check_descriptors(sb, &first_not_zeroed)) {
-- ext4_msg(sb, KERN_ERR, "group descriptors corrupted!");
-- goto failed_mount2;
-- }
- if (EXT4_HAS_INCOMPAT_FEATURE(sb, EXT4_FEATURE_INCOMPAT_FLEX_BG))
- if (!ext4_fill_flex_info(sb)) {
- ext4_msg(sb, KERN_ERR,
-@@ -3609,6 +3605,10 @@
- sbi->s_journal->j_commit_callback = ext4_journal_commit_callback;
-
- no_journal:
-+ if (!ext4_check_descriptors(sb, &first_not_zeroed)) {
-+ ext4_msg(sb, KERN_ERR, "group descriptors corrupted!");
-+ goto failed_mount_wq;
-+ }
- if (test_opt(sb, NOBH)) {
- if (!(test_opt(sb, DATA_FLAGS) == EXT4_MOUNT_WRITEBACK_DATA)) {
- ext4_msg(sb, KERN_WARNING, "Ignoring nobh option - "
+++ /dev/null
-commit 4538821993f4486c76090dfb377c60c0a0e71ba3
-Author: Theodore Ts'o <tytso@mit.edu>
-Date: Thu Jul 29 15:06:10 2010 -0400
-
- ext4: drop inode from orphan list if ext4_delete_inode() fails
-
-    There were some error paths in ext4_delete_inode() which were not
- dropping the inode from the orphan list. This could lead to a BUG_ON
- on umount when the orphan list is discovered to be non-empty.
-
-Signed-off-by: "Theodore Ts'o" <tytso@mit.edu>
-Signed-off-by: Wang Shilong <wshilong@ddn.com>
---- linux-stage.orig/fs/ext4/inode.c 2014-10-20 20:13:39.689001531 +0800
-+++ linux-stage/fs/ext4/inode.c 2014-10-20 20:12:14.224997168 +0800
-@@ -279,6 +279,7 @@
- "couldn't extend journal (err %d)", err);
- stop_handle:
- ext4_journal_stop(handle);
-+ ext4_orphan_del(NULL, inode);
- sb_end_intwrite(inode->i_sb);
- goto no_delete;
- }
+++ /dev/null
-Index: linux-stage/fs/ext4/mballoc.c
-===================================================================
---- linux-stage.orig/fs/ext4/mballoc.c
-+++ linux-stage/fs/ext4/mballoc.c
-@@ -4352,7 +4352,7 @@ static void ext4_mb_add_n_trim(struct ex
- /* The max size of hash table is PREALLOC_TB_SIZE */
- order = PREALLOC_TB_SIZE - 1;
- /* Add the prealloc space to lg */
-- rcu_read_lock();
-+ spin_lock(&lg->lg_prealloc_lock);
- list_for_each_entry_rcu(tmp_pa, &lg->lg_prealloc_list[order],
- pa_inode_list) {
- spin_lock(&tmp_pa->pa_lock);
-@@ -4376,7 +4376,7 @@ static void ext4_mb_add_n_trim(struct ex
- if (!added)
- list_add_tail_rcu(&pa->pa_inode_list,
- &lg->lg_prealloc_list[order]);
-- rcu_read_unlock();
-+ spin_unlock(&lg->lg_prealloc_lock);
-
- /* Now trim the list to be not more than 8 elements */
- if (lg_prealloc_count > 8) {
+++ /dev/null
-Index: linux-stage/fs/ext4/mballoc.c
-===================================================================
---- linux-stage.orig/fs/ext4/mballoc.c 2012-11-21 11:22:19.000000000 +0200
-+++ linux-stage/fs/ext4/mballoc.c 2012-11-21 11:24:33.000000000 +0200
-@@ -2622,6 +2622,9 @@ int ext4_mb_release(struct super_block *
- struct ext4_group_info *grinfo;
- struct ext4_sb_info *sbi = EXT4_SB(sb);
-
-+ if (sbi->s_proc)
-+ remove_proc_entry("mb_groups", sbi->s_proc);
-+
- if (sbi->s_group_info) {
- for (i = 0; i < ngroups; i++) {
- grinfo = ext4_get_group_info(sb, i);
-@@ -2673,7 +2676,6 @@ int ext4_mb_release(struct super_block *
-
- free_percpu(sbi->s_locality_groups);
- if (sbi->s_proc) {
-- remove_proc_entry("mb_groups", sbi->s_proc);
- remove_proc_entry(EXT4_MB_PREALLOC_TABLE, sbi->s_proc);
- }
-
-@@ -4801,6 +4803,11 @@ do_more:
- * be used until this transaction is committed
- */
- new_entry = kmem_cache_alloc(ext4_free_data_cachep, GFP_NOFS);
-+ if (!new_entry) {
-+ ext4_mb_release_desc(&e4b);
-+ err = -ENOMEM;
-+ goto error_return;
-+ }
- new_entry->efd_start_blk = bit;
- new_entry->efd_group = block_group;
- new_entry->efd_count = count;
+++ /dev/null
-Index: linux-stage/fs/ext4/ext4.h
-===================================================================
---- linux-stage.orig/fs/ext4/ext4.h 2012-08-07 14:16:06.331203480 -0700
-+++ linux-stage/fs/ext4/ext4.h 2012-08-10 10:08:47.854206335 -0700
-@@ -713,6 +713,7 @@
- /* following fields for parallel directory operations -bzzz */
- struct semaphore i_append_sem;
-
-+ ext4_lblk_t i_dir_start_lookup;
- /*
- * i_block_group is the number of the block group which contains
- * this file's inode. Constant across the lifetime of the inode,
-@@ -724,7 +725,6 @@
- unsigned long i_state_flags; /* Dynamic state flags */
- unsigned long i_flags;
-
-- ext4_lblk_t i_dir_start_lookup;
- #ifdef CONFIG_EXT4_FS_XATTR
- /*
- * Extended attributes can be read independently of the main file
-@@ -788,10 +788,12 @@
- unsigned int i_reserved_data_blocks;
- unsigned int i_reserved_meta_blocks;
- unsigned int i_allocated_meta_blocks;
-- unsigned short i_delalloc_reserved_flag;
- sector_t i_da_metadata_calc_last_lblock;
- int i_da_metadata_calc_len;
-
-+ /* allocation reservation info for delalloc */
-+ unsigned short i_delalloc_reserved_flag;
-+
- /* on-disk additional length */
- __u16 i_extra_isize;
-
-@@ -807,16 +809,22 @@
- /* current io_end structure for async DIO write*/
- ext4_io_end_t *cur_aio_dio;
- atomic_t i_aiodio_unwritten; /* Number of inflight conversions pending */
-- struct mutex i_aio_mutex; /* big hammer for unaligned AIO */
-
- /*
- * Transactions that contain inode's metadata needed to complete
- * fsync and fdatasync, respectively.
- */
-+
- tid_t i_sync_tid;
-- tid_t i_datasync_tid;
-+
-+ struct mutex i_aio_mutex; /* big hammer for unaligned AIO */
-
- __u64 i_fs_version;
-+ /*
-+ * Transactions that contain inode's metadata needed to complete
-+ * fsync and fdatasync, respectively.
-+ */
-+ tid_t i_datasync_tid;
- };
-
- #define HAVE_DISK_INODE_VERSION
+++ /dev/null
-From 9933fc0ac1ac14b795819cd63d05ea92112f690a Mon Sep 17 00:00:00 2001
-From: Theodore Ts'o <tytso@mit.edu>
-Date: Mon, 1 Aug 2011 08:45:02 -0400
-Subject: ext4: introduce ext4_kvmalloc(), ext4_kzalloc(), and ext4_kvfree()
-Git-commit: 9933fc0a
-Patch-mainline: v3.1-rc1
-
-Introduce new helper functions which try kmalloc, and then fall back
-to vmalloc if necessary, and use them for allocating and deallocating
-s_flex_groups.
-
-Upstream-Signed-off-by: "Theodore Ts'o" <tytso@mit.edu>
-Signed-off-by: Jeff Mahoney <jeffm@suse.com>
----
- fs/ext4/ext4.h | 3 +++
- fs/ext4/super.c | 54 ++++++++++++++++++++++++++++++++++++------------------
- 2 files changed, 39 insertions(+), 18 deletions(-)
-
---- a/fs/ext4/ext4.h
-+++ b/fs/ext4/ext4.h
-@@ -1686,6 +1686,9 @@ extern int ext4_group_extend(struct supe
- ext4_fsblk_t n_blocks_count);
-
- /* super.c */
-+extern void *ext4_kvmalloc(size_t size, gfp_t flags);
-+extern void *ext4_kvzalloc(size_t size, gfp_t flags);
-+extern void ext4_kvfree(void *ptr);
- extern void __ext4_error(struct super_block *, const char *, const char *, ...)
- __attribute__ ((format (printf, 3, 4)));
- #define ext4_error(sb, message...) __ext4_error(sb, __func__, ## message)
---- a/fs/ext4/super.c
-+++ b/fs/ext4/super.c
-@@ -80,6 +80,35 @@ static void ext4_clear_request_list(void
-
- wait_queue_head_t aio_wq[WQ_HASH_SZ];
-
-+void *ext4_kvmalloc(size_t size, gfp_t flags)
-+{
-+ void *ret;
-+
-+ ret = kmalloc(size, flags | __GFP_NOWARN);
-+ if (!ret)
-+ ret = __vmalloc(size, flags, PAGE_KERNEL);
-+ return ret;
-+}
-+
-+void *ext4_kvzalloc(size_t size, gfp_t flags)
-+{
-+ void *ret;
-+
-+ ret = kzalloc(size, flags | __GFP_NOWARN);
-+ if (!ret)
-+ ret = __vmalloc(size, flags | __GFP_ZERO, PAGE_KERNEL);
-+ return ret;
-+}
-+
-+void ext4_kvfree(void *ptr)
-+{
-+ if (is_vmalloc_addr(ptr))
-+ vfree(ptr);
-+ else
-+ kfree(ptr);
-+
-+}
-+
- ext4_fsblk_t ext4_block_bitmap(struct super_block *sb,
- struct ext4_group_desc *bg)
- {
-@@ -677,10 +706,7 @@ static void ext4_put_super(struct super_
- for (i = 0; i < sbi->s_gdb_count; i++)
- brelse(sbi->s_group_desc[i]);
- kfree(sbi->s_group_desc);
-- if (is_vmalloc_addr(sbi->s_flex_groups))
-- vfree(sbi->s_flex_groups);
-- else
-- kfree(sbi->s_flex_groups);
-+ ext4_kvfree(sbi->s_flex_groups);
- percpu_counter_destroy(&sbi->s_freeblocks_counter);
- percpu_counter_destroy(&sbi->s_freeinodes_counter);
- percpu_counter_destroy(&sbi->s_dirs_counter);
-@@ -1815,15 +1841,11 @@ static int ext4_fill_flex_info(struct su
- ((le16_to_cpu(sbi->s_es->s_reserved_gdt_blocks) + 1) <<
- EXT4_DESC_PER_BLOCK_BITS(sb))) / groups_per_flex;
- size = flex_group_count * sizeof(struct flex_groups);
-- sbi->s_flex_groups = kzalloc(size, GFP_KERNEL);
-+ sbi->s_flex_groups = ext4_kvzalloc(size, GFP_KERNEL);
- if (sbi->s_flex_groups == NULL) {
-- sbi->s_flex_groups = vzalloc(size);
-- if (sbi->s_flex_groups == NULL) {
-- ext4_msg(sb, KERN_ERR,
-- "not enough memory for %u flex groups",
-- flex_group_count);
-- goto failed;
-- }
-+ ext4_msg(sb, KERN_ERR, "not enough memory for %u flex groups",
-+ flex_group_count);
-+ goto failed;
- }
-
- for (i = 0; i < sbi->s_groups_count; i++) {
-@@ -3464,12 +3486,8 @@ failed_mount_wq:
- sbi->s_journal = NULL;
- }
- failed_mount3:
-- if (sbi->s_flex_groups) {
-- if (is_vmalloc_addr(sbi->s_flex_groups))
-- vfree(sbi->s_flex_groups);
-- else
-- kfree(sbi->s_flex_groups);
-- }
-+ if (sbi->s_flex_groups)
-+ ext4_kvfree(sbi->s_flex_groups);
- percpu_counter_destroy(&sbi->s_freeblocks_counter);
- percpu_counter_destroy(&sbi->s_freeinodes_counter);
- percpu_counter_destroy(&sbi->s_dirs_counter);
+++ /dev/null
-commit 18aadd47f88464928b5ce57791c2e8f9f2aaece0 (v3.3-rc2-7-g18aadd4)
-Author: Bobi Jam <bobijam@whamcloud.com>
-Date: Mon Feb 20 17:53:02 2012 -0500
-
-ext4: expand commit callback and use it for mballoc
-
-The per-commit callback was used by mballoc code to manage free space
-bitmaps after deleted blocks have been released. This patch expands
-it to support multiple different callbacks, to allow other things to
-be done after the commit has been completed.
-
-Signed-off-by: Bobi Jam <bobijam@whamcloud.com>
-Signed-off-by: Andreas Dilger <adilger@whamcloud.com>
-Signed-off-by: "Theodore Ts'o" <tytso@mit.edu>
-
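-For illustration only (not part of this patch): a minimal sketch of how a
-caller uses the expanded per-commit callback interface. The names
-my_commit_work and my_commit_done are hypothetical; the pattern of embedding
-ext4_journal_cb_entry as the first member is the same one mballoc's
-ext4_free_data follows below.
-
-	struct my_commit_work {
-		struct ext4_journal_cb_entry jce;	/* must be first */
-		int private_data;
-	};
-
-	static void my_commit_done(struct super_block *sb,
-				   struct ext4_journal_cb_entry *jce, int rc)
-	{
-		struct my_commit_work *work = (struct my_commit_work *)jce;
-
-		/* called from the journal thread after the commit completes */
-		kfree(work);
-	}
-
-	/* with an active handle and an allocated work item: */
-	ext4_journal_callback_add(handle, my_commit_done, &work->jce);
-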
-Index: linux-2.6.32-504.3.3.el6.x86_64/fs/ext4/ext4_jbd2.h
-===================================================================
---- linux-2.6.32-504.3.3.el6.x86_64.orig/fs/ext4/ext4_jbd2.h
-+++ linux-2.6.32-504.3.3.el6.x86_64/fs/ext4/ext4_jbd2.h
-@@ -104,6 +104,80 @@
- #define EXT4_MAXQUOTAS_INIT_BLOCKS(sb) (MAXQUOTAS*EXT4_QUOTA_INIT_BLOCKS(sb))
- #define EXT4_MAXQUOTAS_DEL_BLOCKS(sb) (MAXQUOTAS*EXT4_QUOTA_DEL_BLOCKS(sb))
-
-+/**
-+ * struct ext4_journal_cb_entry - Base structure for callback information.
-+ *
-+ * This struct is a 'seed' structure for use with your own callback
-+ * structs. If you are using callbacks you must allocate one of these
-+ * or another struct of your own definition which has this struct
-+ * as its first element and pass it to ext4_journal_callback_add().
-+ */
-+struct ext4_journal_cb_entry {
-+ /* list information for other callbacks attached to the same handle */
-+ struct list_head jce_list;
-+
-+ /* Function to call with this callback structure */
-+ void (*jce_func)(struct super_block *sb,
-+ struct ext4_journal_cb_entry *jce, int error);
-+
-+ /* user data goes here */
-+};
-+
-+/**
-+ * ext4_journal_callback_add: add a function to call after transaction commit
-+ * @handle: active journal transaction handle to register callback on
-+ * @func: callback function to call after the transaction has committed:
-+ * @sb: superblock of current filesystem for transaction
-+ * @jce: returned journal callback data
-+ * @rc: journal state at commit (0 = transaction committed properly)
-+ * @jce: journal callback data (internal and function private data struct)
-+ *
-+ * The registered function will be called in the context of the journal thread
-+ * after the transaction for which the handle was created has completed.
-+ *
-+ * No locks are held when the callback function is called, so it is safe to
-+ * call blocking functions from within the callback, but the callback should
-+ * not block or run for too long, or the filesystem will be blocked waiting for
-+ * the next transaction to commit. No journaling functions can be used, or
-+ * there is a risk of deadlock.
-+ *
-+ * There is no guaranteed calling order of multiple registered callbacks on
-+ * the same transaction.
-+ */
-+static inline void ext4_journal_callback_add(handle_t *handle,
-+ void (*func)(struct super_block *sb,
-+ struct ext4_journal_cb_entry *jce,
-+ int rc),
-+ struct ext4_journal_cb_entry *jce)
-+{
-+ struct ext4_sb_info *sbi =
-+ EXT4_SB(handle->h_transaction->t_journal->j_private);
-+
-+ /* Add the jce to transaction's private list */
-+ jce->jce_func = func;
-+ spin_lock(&sbi->s_md_lock);
-+ list_add(&jce->jce_list, &handle->h_transaction->t_private_list);
-+ spin_unlock(&sbi->s_md_lock);
-+}
-+
-+/**
-+ * ext4_journal_callback_del: delete a registered callback
-+ * @handle: active journal transaction handle on which callback was registered
-+ * @jce: registered journal callback entry to unregister
-+ */
-+static inline void ext4_journal_callback_del(handle_t *handle,
-+ struct ext4_journal_cb_entry *jce)
-+{
-+ struct ext4_sb_info *sbi =
-+ EXT4_SB(handle->h_transaction->t_journal->j_private);
-+
-+ spin_lock(&sbi->s_md_lock);
-+ list_del_init(&jce->jce_list);
-+ spin_unlock(&sbi->s_md_lock);
-+}
-+
-+#define HAVE_EXT4_JOURNAL_CALLBACK_ADD
-+
- int
- ext4_mark_iloc_dirty(handle_t *handle,
- struct inode *inode,
-Index: linux-2.6.32-504.3.3.el6.x86_64/fs/ext4/mballoc.h
-===================================================================
---- linux-2.6.32-504.3.3.el6.x86_64.orig/fs/ext4/mballoc.h
-+++ linux-2.6.32-504.3.3.el6.x86_64/fs/ext4/mballoc.h
-@@ -96,23 +96,24 @@ extern u8 mb_enable_debug;
- */
- #define MB_DEFAULT_GROUP_PREALLOC 512
-
--
- struct ext4_free_data {
-- /* this links the free block information from group_info */
-- struct rb_node node;
-+ /* MUST be the first member */
-+ struct ext4_journal_cb_entry efd_jce;
-
-- /* this links the free block information from ext4_sb_info */
-- struct list_head list;
-+ /* ext4_free_data private data starts from here */
-+
-+ /* this links the free block information from group_info */
-+ struct rb_node efd_node;
-
- /* group which free block extent belongs */
-- ext4_group_t group;
-+ ext4_group_t efd_group;
-
- /* free block extent */
-- ext4_grpblk_t start_blk;
-- ext4_grpblk_t count;
-+ ext4_grpblk_t efd_start_blk;
-+ ext4_grpblk_t efd_count;
-
- /* transaction which freed this extent */
-- tid_t t_tid;
-+ tid_t efd_tid;
- };
-
- struct ext4_prealloc_space {
-Index: linux-2.6.32-504.3.3.el6.x86_64/fs/ext4/mballoc.c
-===================================================================
---- linux-2.6.32-504.3.3.el6.x86_64.orig/fs/ext4/mballoc.c
-+++ linux-2.6.32-504.3.3.el6.x86_64/fs/ext4/mballoc.c
-@@ -21,6 +21,7 @@
- * mballoc.c contains the multiblocks allocation routines
- */
-
-+#include "ext4_jbd2.h"
- #include "mballoc.h"
- #include <linux/debugfs.h>
- #include <trace/events/ext4.h>
-@@ -336,12 +337,12 @@
- */
- static struct kmem_cache *ext4_pspace_cachep;
- static struct kmem_cache *ext4_ac_cachep;
--static struct kmem_cache *ext4_free_ext_cachep;
-+static struct kmem_cache *ext4_free_data_cachep;
- static int ext4_mb_generate_from_pa(struct super_block *sb, void *bitmap,
- ext4_group_t group);
- static void ext4_mb_generate_from_freelist(struct super_block *sb, void *bitmap,
- ext4_group_t group);
--static void release_blocks_on_commit(journal_t *journal, transaction_t *txn);
-+static void ext4_free_data_callback(struct super_block *sb, struct ext4_journal_cb_entry *jce, int error);
-
- static inline void *mb_correct_addr_and_bit(int *bit, void *addr)
- {
-@@ -2581,8 +2582,6 @@ int ext4_mb_init(struct super_block *sb,
- }
- }
-
-- if (sbi->s_journal)
-- sbi->s_journal->j_commit_callback = release_blocks_on_commit;
- return 0;
- }
-
-@@ -2684,58 +2683,54 @@ static inline int ext4_issue_discard(str
- * This function is called by the jbd2 layer once the commit has finished,
- * so we know we can free the blocks that were released with that commit.
- */
--static void release_blocks_on_commit(journal_t *journal, transaction_t *txn)
-+static void ext4_free_data_callback(struct super_block *sb,
-+ struct ext4_journal_cb_entry *jce,
-+ int rc)
- {
-- struct super_block *sb = journal->j_private;
-+ struct ext4_free_data *entry = (struct ext4_free_data *)jce;
- struct ext4_buddy e4b;
- struct ext4_group_info *db;
- int err, count = 0, count2 = 0;
-- struct ext4_free_data *entry;
-- struct list_head *l, *ltmp;
-
-- list_for_each_safe(l, ltmp, &txn->t_private_list) {
-- entry = list_entry(l, struct ext4_free_data, list);
-+ mb_debug(1, "gonna free %u blocks in group %u (0x%p):",
-+ entry->efd_count, entry->efd_group, entry);
-
-- mb_debug(1, "gonna free %u blocks in group %u (0x%p):",
-- entry->count, entry->group, entry);
-+ if (test_opt(sb, DISCARD))
-+ ext4_issue_discard(sb, entry->efd_group,
-+ entry->efd_start_blk, entry->efd_count);
-+
-+ err = ext4_mb_load_buddy(sb, entry->efd_group, &e4b);
-+ /* we expect to find existing buddy because it's pinned */
-+ BUG_ON(err != 0);
-+
-+ db = e4b.bd_info;
-+ /* there are blocks to put in buddy to make them really free */
-+ count += entry->efd_count;
-+ count2++;
-+ ext4_lock_group(sb, entry->efd_group);
-+ /* Take it out of per group rb tree */
-+ rb_erase(&entry->efd_node, &(db->bb_free_root));
-+ mb_free_blocks(NULL, &e4b, entry->efd_start_blk, entry->efd_count);
-
-- if (test_opt(sb, DISCARD))
-- ext4_issue_discard(sb, entry->group,
-- entry->start_blk, entry->count);
--
-- err = ext4_mb_load_buddy(sb, entry->group, &e4b);
-- /* we expect to find existing buddy because it's pinned */
-- BUG_ON(err != 0);
--
-- db = e4b.bd_info;
-- /* there are blocks to put in buddy to make them really free */
-- count += entry->count;
-- count2++;
-- ext4_lock_group(sb, entry->group);
-- /* Take it out of per group rb tree */
-- rb_erase(&entry->node, &(db->bb_free_root));
-- mb_free_blocks(NULL, &e4b, entry->start_blk, entry->count);
-+ /*
-+ * Clear the trimmed flag for the group so that the next
-+ * ext4_trim_fs can trim it.
-+ * If the volume is mounted with -o discard, online discard
-+ * is supported and the free blocks will be trimmed online.
-+ */
-+ if (!test_opt(sb, DISCARD))
-+ EXT4_MB_GRP_CLEAR_TRIMMED(db);
-
-- /*
-- * Clear the trimmed flag for the group so that the next
-- * ext4_trim_fs can trim it.
-- * If the volume is mounted with -o discard, online discard
-- * is supported and the free blocks will be trimmed online.
-+ if (!db->bb_free_root.rb_node) {
-+ /* No more items in the per group rb tree
-+ * balance refcounts from ext4_mb_free_metadata()
- */
-- if (!test_opt(sb, DISCARD))
-- EXT4_MB_GRP_CLEAR_TRIMMED(db);
--
-- if (!db->bb_free_root.rb_node) {
-- /* No more items in the per group rb tree
-- * balance refcounts from ext4_mb_free_metadata()
-- */
-- page_cache_release(e4b.bd_buddy_page);
-- page_cache_release(e4b.bd_bitmap_page);
-- }
-- ext4_unlock_group(sb, entry->group);
-- kmem_cache_free(ext4_free_ext_cachep, entry);
-- ext4_mb_release_desc(&e4b);
-+ page_cache_release(e4b.bd_buddy_page);
-+ page_cache_release(e4b.bd_bitmap_page);
- }
-+ ext4_unlock_group(sb, entry->efd_group);
-+ kmem_cache_free(ext4_free_data_cachep, entry);
-+ ext4_mb_release_desc(&e4b);
-
- mb_debug(1, "freed %u blocks in %u structures\n", count, count2);
- }
-@@ -2787,22 +2782,22 @@ int __init init_ext4_mballoc(void)
- kmem_cache_create("ext4_alloc_context",
- sizeof(struct ext4_allocation_context),
- 0, SLAB_RECLAIM_ACCOUNT, NULL);
-- if (ext4_ac_cachep == NULL) {
-- kmem_cache_destroy(ext4_pspace_cachep);
-- return -ENOMEM;
-- }
-+ if (ext4_ac_cachep == NULL)
-+ goto out_err;
-+
-+ ext4_free_data_cachep =
-+ KMEM_CACHE(ext4_free_data, SLAB_RECLAIM_ACCOUNT);
-+ if (ext4_free_data_cachep == NULL)
-+ goto out1_err;
-
-- ext4_free_ext_cachep =
-- kmem_cache_create("ext4_free_block_extents",
-- sizeof(struct ext4_free_data),
-- 0, SLAB_RECLAIM_ACCOUNT, NULL);
-- if (ext4_free_ext_cachep == NULL) {
-- kmem_cache_destroy(ext4_pspace_cachep);
-- kmem_cache_destroy(ext4_ac_cachep);
-- return -ENOMEM;
-- }
- ext4_create_debugfs_entry();
- return 0;
-+
-+out1_err:
-+ kmem_cache_destroy(ext4_ac_cachep);
-+out_err:
-+ kmem_cache_destroy(ext4_pspace_cachep);
-+ return -ENOMEM;
- }
-
- void exit_ext4_mballoc(void)
-@@ -2814,7 +2809,7 @@ void exit_ext4_mballoc(void)
- rcu_barrier();
- kmem_cache_destroy(ext4_pspace_cachep);
- kmem_cache_destroy(ext4_ac_cachep);
-- kmem_cache_destroy(ext4_free_ext_cachep);
-+ kmem_cache_destroy(ext4_free_data_cachep);
- ext4_remove_debugfs_entry();
- }
-
-@@ -3355,8 +3350,8 @@ static void ext4_mb_generate_from_freeli
- n = rb_first(&(grp->bb_free_root));
-
- while (n) {
-- entry = rb_entry(n, struct ext4_free_data, node);
-- mb_set_bits(bitmap, entry->start_blk, entry->count);
-+ entry = rb_entry(n, struct ext4_free_data, efd_node);
-+ mb_set_bits(bitmap, entry->efd_start_blk, entry->efd_count);
- n = rb_next(n);
- }
- return;
-@@ -4606,11 +4601,11 @@ out:
- * AND the blocks are associated with the same group.
- */
- static int can_merge(struct ext4_free_data *entry1,
-- struct ext4_free_data *entry2)
-+ struct ext4_free_data *entry2)
- {
-- if ((entry1->t_tid == entry2->t_tid) &&
-- (entry1->group == entry2->group) &&
-- ((entry1->start_blk + entry1->count) == entry2->start_blk))
-+ if ((entry1->efd_tid == entry2->efd_tid) &&
-+ (entry1->efd_group == entry2->efd_group) &&
-+ ((entry1->efd_start_blk + entry1->efd_count) == entry2->efd_start_blk))
- return 1;
- return 0;
- }
-@@ -4623,7 +4618,6 @@ ext4_mb_free_metadata(handle_t *handle,
- struct ext4_free_data *entry;
- struct ext4_group_info *db = e4b->bd_info;
- struct super_block *sb = e4b->bd_sb;
-- struct ext4_sb_info *sbi = EXT4_SB(sb);
- struct rb_node **n = &db->bb_free_root.rb_node, *node;
- struct rb_node *parent = NULL, *new_node;
-
-@@ -4631,8 +4625,8 @@ ext4_mb_free_metadata(handle_t *handle,
- BUG_ON(e4b->bd_bitmap_page == NULL);
- BUG_ON(e4b->bd_buddy_page == NULL);
-
-- new_node = &new_entry->node;
-- block = new_entry->start_blk;
-+ new_node = &new_entry->efd_node;
-+ block = new_entry->efd_start_blk;
-
- if (!*n) {
- /* first free block exent. We need to
-@@ -4645,15 +4639,15 @@ ext4_mb_free_metadata(handle_t *handle,
- }
- while (*n) {
- parent = *n;
-- entry = rb_entry(parent, struct ext4_free_data, node);
-- if (block < entry->start_blk)
-+ entry = rb_entry(parent, struct ext4_free_data, efd_node);
-+ if (block < entry->efd_start_blk)
- n = &(*n)->rb_left;
-- else if (block >= (entry->start_blk + entry->count))
-+ else if (block >= (entry->efd_start_blk + entry->efd_count))
- n = &(*n)->rb_right;
- else {
- ext4_grp_locked_error(sb, e4b->bd_group, __func__,
- "Double free of blocks %d (%d %d)",
-- block, entry->start_blk, entry->count);
-+ block, entry->efd_start_blk, entry->efd_count);
- return 0;
- }
- }
-@@ -4664,34 +4658,29 @@ ext4_mb_free_metadata(handle_t *handle,
- /* Now try to see the extent can be merged to left and right */
- node = rb_prev(new_node);
- if (node) {
-- entry = rb_entry(node, struct ext4_free_data, node);
-+ entry = rb_entry(node, struct ext4_free_data, efd_node);
- if (can_merge(entry, new_entry)) {
-- new_entry->start_blk = entry->start_blk;
-- new_entry->count += entry->count;
-+ new_entry->efd_start_blk = entry->efd_start_blk;
-+ new_entry->efd_count += entry->efd_count;
- rb_erase(node, &(db->bb_free_root));
-- spin_lock(&sbi->s_md_lock);
-- list_del(&entry->list);
-- spin_unlock(&sbi->s_md_lock);
-- kmem_cache_free(ext4_free_ext_cachep, entry);
-+ ext4_journal_callback_del(handle, &entry->efd_jce);
-+ kmem_cache_free(ext4_free_data_cachep, entry);
- }
- }
-
- node = rb_next(new_node);
- if (node) {
-- entry = rb_entry(node, struct ext4_free_data, node);
-+ entry = rb_entry(node, struct ext4_free_data, efd_node);
- if (can_merge(new_entry, entry)) {
-- new_entry->count += entry->count;
-+ new_entry->efd_count += entry->efd_count;
- rb_erase(node, &(db->bb_free_root));
-- spin_lock(&sbi->s_md_lock);
-- list_del(&entry->list);
-- spin_unlock(&sbi->s_md_lock);
-- kmem_cache_free(ext4_free_ext_cachep, entry);
-+ ext4_journal_callback_del(handle, &entry->efd_jce);
-+ kmem_cache_free(ext4_free_data_cachep, entry);
- }
- }
- /* Add the extent to transaction's private list */
-- spin_lock(&sbi->s_md_lock);
-- list_add(&new_entry->list, &handle->h_transaction->t_private_list);
-- spin_unlock(&sbi->s_md_lock);
-+ ext4_journal_callback_add(handle, ext4_free_data_callback,
-+ &new_entry->efd_jce);
- return 0;
- }
-
-@@ -4825,11 +4814,11 @@ do_more:
- * blocks being freed are metadata. these blocks shouldn't
- * be used until this transaction is committed
- */
-- new_entry = kmem_cache_alloc(ext4_free_ext_cachep, GFP_NOFS);
-- new_entry->start_blk = bit;
-- new_entry->group = block_group;
-- new_entry->count = count;
-- new_entry->t_tid = handle->h_transaction->t_tid;
-+ new_entry = kmem_cache_alloc(ext4_free_data_cachep, GFP_NOFS);
-+ new_entry->efd_start_blk = bit;
-+ new_entry->efd_group = block_group;
-+ new_entry->efd_count = count;
-+ new_entry->efd_tid = handle->h_transaction->t_tid;
-
- ext4_lock_group(sb, block_group);
- mb_clear_bits(bitmap_bh->b_data, bit, count);
-Index: linux-2.6.32-504.3.3.el6.x86_64/fs/ext4/super.c
-===================================================================
---- linux-2.6.32-504.3.3.el6.x86_64.orig/fs/ext4/super.c
-+++ linux-2.6.32-504.3.3.el6.x86_64/fs/ext4/super.c
-@@ -338,6 +338,18 @@ void ext4_journal_abort_handle(const cha
-
- EXPORT_SYMBOL(ext4_journal_abort_handle);
-
-+static void ext4_journal_commit_callback(journal_t *journal, transaction_t *txn)
-+{
-+ struct super_block *sb = journal->j_private;
-+ int error = is_journal_aborted(journal);
-+ struct ext4_journal_cb_entry *jce, *tmp;
-+
-+ list_for_each_entry_safe(jce, tmp, &txn->t_private_list, jce_list) {
-+ list_del_init(&jce->jce_list);
-+ jce->jce_func(sb, jce, error);
-+ }
-+}
-+
- /* Deal with the reporting of failure conditions on a filesystem such as
- * inconsistencies detected or read IO failures.
- *
-@@ -3500,6 +3517,8 @@ static int ext4_fill_super(struct super_
- ext4_count_dirs(sb));
- percpu_counter_set(&sbi->s_dirtyblocks_counter, 0);
-
-+ sbi->s_journal->j_commit_callback = ext4_journal_commit_callback;
-+
- no_journal:
- if (test_opt(sb, NOBH)) {
- if (!(test_opt(sb, DATA_FLAGS) == EXT4_MOUNT_WRITEBACK_DATA)) {
+++ /dev/null
-Removes the static definition of the dx_root struct so that the "." and ".."
-dirents can have extra data. This patch does not change any functionality, but
-it is required for the ext4_data_in_dirent patch.
-
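As a rough illustration of the layout change described above: once the fixed
dx_root struct is gone, dx_root_info is found by stepping over the "." and ".."
entries with the record-length macro instead of through a hard-coded struct
offset, which is what leaves room for extra per-dirent data. The sketch below
is a minimal userspace model of that walk, not the kernel code: fake_dirent_2,
DIR_REC_LEN and get_dx_info are simplified stand-ins for the kernel's
ext4_dir_entry_2, EXT4_DIR_REC_LEN and dx_get_dx_info.

/*
 * Minimal userspace sketch of locating dx_root_info without the static
 * dx_root struct.  fake_dirent_2, DIR_REC_LEN and get_dx_info are
 * simplified stand-ins for the kernel's ext4_dir_entry_2,
 * EXT4_DIR_REC_LEN and dx_get_dx_info.
 */
#include <stdint.h>
#include <stdio.h>

struct fake_dirent_2 {		/* stand-in for the ext4_dir_entry_2 header */
	uint32_t inode;
	uint16_t rec_len;
	uint8_t  name_len;
	uint8_t  file_type;
};
_Static_assert(sizeof(struct fake_dirent_2) == 8, "dirent header is 8 bytes");

struct dx_root_info {
	uint32_t reserved_zero;
	uint8_t  hash_version;
	uint8_t  info_length;	/* 8 */
	uint8_t  indirect_levels;
	uint8_t  unused_flags;
};

/* simplified rec-len: 8-byte header + name, rounded up to 4 bytes */
#define DIR_REC_LEN(name_len)	(((name_len) + 8 + 3) & ~3)

static struct dx_root_info *get_dx_info(void *block0)
{
	char *p = block0;

	p += DIR_REC_LEN(1);	/* step over "."  */
	p += DIR_REC_LEN(2);	/* step over ".." */
	return (struct dx_root_info *)p;
}

int main(void)
{
	char block[64] = { 0 };
	struct dx_root_info *info = get_dx_info(block);

	/* with these minimal sizes the info lands at offset 12 + 12 = 24 */
	printf("dx_root_info offset = %td\n", (char *)info - block);
	return 0;
}

With the minimal 8-byte header and 4-byte rounding assumed here the info sits
at offset 24, the same offset the old static dx_root layout implied; a
dirent-data patch that enlarges EXT4_DIR_REC_LEN simply pushes it further out.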
-Index: linux-2.6.32.i386/fs/ext4/namei.c
-===================================================================
---- linux-2.6.32.i386.orig/fs/ext4/namei.c 2010-04-16 05:35:06.000000000 +0530
-+++ linux-2.6.32.i386/fs/ext4/namei.c 2010-04-16 05:47:41.000000000 +0530
-@@ -115,22 +115,13 @@
- * hash version mod 4 should never be 0. Sincerely, the paranoia department.
- */
-
--struct dx_root
-+struct dx_root_info
- {
-- struct fake_dirent dot;
-- char dot_name[4];
-- struct fake_dirent dotdot;
-- char dotdot_name[4];
-- struct dx_root_info
-- {
-- __le32 reserved_zero;
-- u8 hash_version;
-- u8 info_length; /* 8 */
-- u8 indirect_levels;
-- u8 unused_flags;
-- }
-- info;
-- struct dx_entry entries[0];
-+ __le32 reserved_zero;
-+ u8 hash_version;
-+ u8 info_length; /* 8 */
-+ u8 indirect_levels;
-+ u8 unused_flags;
- };
-
- struct dx_node
-@@ -244,6 +235,16 @@
- * Future: use high four bits of block for coalesce-on-delete flags
- * Mask them off for now.
- */
-+struct dx_root_info * dx_get_dx_info(struct ext4_dir_entry_2 *de)
-+{
-+ /* get dotdot first */
-+ de = (struct ext4_dir_entry_2 *)((char *)de + EXT4_DIR_REC_LEN(1));
-+
-+ /* dx root info is after dotdot entry */
-+ de = (struct ext4_dir_entry_2 *)((char *)de + EXT4_DIR_REC_LEN(2));
-+
-+ return (struct dx_root_info *) de;
-+}
-
- static inline ext4_lblk_t dx_get_block(struct dx_entry *entry)
- {
-@@ -398,7 +399,7 @@
- {
- unsigned count, indirect;
- struct dx_entry *at, *entries, *p, *q, *m;
-- struct dx_root *root;
-+ struct dx_root_info * info;
- struct buffer_head *bh;
- struct dx_frame *frame = frame_in;
- u32 hash;
-@@ -406,17 +407,18 @@
- frame->bh = NULL;
- if (!(bh = ext4_bread (NULL,dir, 0, 0, err)))
- goto fail;
-- root = (struct dx_root *) bh->b_data;
-- if (root->info.hash_version != DX_HASH_TEA &&
-- root->info.hash_version != DX_HASH_HALF_MD4 &&
-- root->info.hash_version != DX_HASH_LEGACY) {
-+
-+ info = dx_get_dx_info((struct ext4_dir_entry_2*)bh->b_data);
-+ if (info->hash_version != DX_HASH_TEA &&
-+ info->hash_version != DX_HASH_HALF_MD4 &&
-+ info->hash_version != DX_HASH_LEGACY) {
- ext4_warning(dir->i_sb, "Unrecognised inode hash code %d for directory "
-- "#%lu", root->info.hash_version, dir->i_ino);
-+ "#%lu", info->hash_version, dir->i_ino);
- brelse(bh);
- *err = ERR_BAD_DX_DIR;
- goto fail;
- }
-- hinfo->hash_version = root->info.hash_version;
-+ hinfo->hash_version = info->hash_version;
- if (hinfo->hash_version <= DX_HASH_TEA)
- hinfo->hash_version += EXT4_SB(dir->i_sb)->s_hash_unsigned;
- hinfo->seed = EXT4_SB(dir->i_sb)->s_hash_seed;
-@@ -425,27 +427,26 @@
- ext4fs_dirhash(d_name->name, d_name->len, hinfo);
- hash = hinfo->hash;
-
-- if (root->info.unused_flags & 1) {
-+ if (info->unused_flags & 1) {
- ext4_warning(dir->i_sb, "Unimplemented inode hash flags: %#06x",
-- root->info.unused_flags);
-+ info->unused_flags);
- brelse(bh);
- *err = ERR_BAD_DX_DIR;
- goto fail;
- }
-
-- if ((indirect = root->info.indirect_levels) > 1) {
-+ if ((indirect = info->indirect_levels) > 1) {
- ext4_warning(dir->i_sb, "Unimplemented inode hash depth: %#06x",
-- root->info.indirect_levels);
-+ info->indirect_levels);
- brelse(bh);
- *err = ERR_BAD_DX_DIR;
- goto fail;
- }
-
-- entries = (struct dx_entry *) (((char *)&root->info) +
-- root->info.info_length);
-+ entries = (struct dx_entry *) (((char *)info) + info->info_length);
-
- if (dx_get_limit(entries) != dx_root_limit(dir,
-- root->info.info_length)) {
-+ info->info_length)) {
- ext4_warning(dir->i_sb, "dx entry: limit != root limit");
- brelse(bh);
- *err = ERR_BAD_DX_DIR;
-@@ -525,10 +526,12 @@ fail:
-
- static void dx_release (struct dx_frame *frames)
- {
-+ struct dx_root_info *info;
- if (frames[0].bh == NULL)
- return;
-
-- if (((struct dx_root *) frames[0].bh->b_data)->info.indirect_levels)
-+ info = dx_get_dx_info((struct ext4_dir_entry_2*)frames[0].bh->b_data);
-+ if (info->indirect_levels)
- brelse(frames[1].bh);
- brelse(frames[0].bh);
- }
-@@ -1447,17 +1450,16 @@
- const char *name = dentry->d_name.name;
- int namelen = dentry->d_name.len;
- struct buffer_head *bh2;
-- struct dx_root *root;
- struct dx_frame frames[2], *frame;
- struct dx_entry *entries;
-- struct ext4_dir_entry_2 *de, *de2;
-+ struct ext4_dir_entry_2 *de, *de2, *dot_de, *dotdot_de;
- char *data1, *top;
- unsigned len;
- int retval;
- unsigned blocksize;
- struct dx_hash_info hinfo;
- ext4_lblk_t block;
-- struct fake_dirent *fde;
-+ struct dx_root_info *dx_info;
-
- blocksize = dir->i_sb->s_blocksize;
- dxtrace(printk(KERN_DEBUG "Creating index: inode %lu\n", dir->i_ino));
-@@ -1467,20 +1469,21 @@
- brelse(bh);
- return retval;
- }
-- root = (struct dx_root *) bh->b_data;
-+
-+ dot_de = (struct ext4_dir_entry_2 *) bh->b_data;
-+ dotdot_de = ext4_next_entry(dot_de, blocksize);
-
- /* The 0th block becomes the root, move the dirents out */
-- fde = &root->dotdot;
-- de = (struct ext4_dir_entry_2 *)((char *)fde +
-- ext4_rec_len_from_disk(fde->rec_len, blocksize));
-- if ((char *) de >= (((char *) root) + blocksize)) {
-+ de = (struct ext4_dir_entry_2 *)((char *)dotdot_de +
-+ ext4_rec_len_from_disk(dotdot_de->rec_len, blocksize));
-+ if ((char *) de >= (((char *) dot_de) + blocksize)) {
- ext4_error(dir->i_sb,
- "invalid rec_len for '..' in inode %lu",
- dir->i_ino);
- brelse(bh);
- return -EIO;
- }
-- len = ((char *) root) + blocksize - (char *) de;
-+ len = ((char *) dot_de) + blocksize - (char *) de;
-
- /* Allocate new block for the 0th block's dirents */
- bh2 = ext4_append(handle, dir, &block, &retval);
-@@ -1499,19 +1502,23 @@
- de->rec_len = ext4_rec_len_to_disk(data1 + blocksize - (char *) de,
- blocksize);
- /* Initialize the root; the dot dirents already exist */
-- de = (struct ext4_dir_entry_2 *) (&root->dotdot);
-- de->rec_len = ext4_rec_len_to_disk(blocksize - EXT4_DIR_REC_LEN(2),
-- blocksize);
-- memset (&root->info, 0, sizeof(root->info));
-- root->info.info_length = sizeof(root->info);
-- root->info.hash_version = EXT4_SB(dir->i_sb)->s_def_hash_version;
-- entries = root->entries;
-+ dotdot_de->rec_len = ext4_rec_len_to_disk(blocksize -
-+ le16_to_cpu(dot_de->rec_len), blocksize);
-+
-+ /* initialize hashing info */
-+ dx_info = dx_get_dx_info(dot_de);
-+ memset (dx_info, 0, sizeof(*dx_info));
-+ dx_info->info_length = sizeof(*dx_info);
-+ dx_info->hash_version = EXT4_SB(dir->i_sb)->s_def_hash_version;
-+
-+ entries = (void *)dx_info + sizeof(*dx_info);
-+
- dx_set_block(entries, 1);
- dx_set_count(entries, 1);
-- dx_set_limit(entries, dx_root_limit(dir, sizeof(root->info)));
-+ dx_set_limit(entries, dx_root_limit(dir, sizeof(*dx_info)));
-
- /* Initialize as for dx_probe */
-- hinfo.hash_version = root->info.hash_version;
-+ hinfo.hash_version = dx_info->hash_version;
- if (hinfo.hash_version <= DX_HASH_TEA)
- hinfo.hash_version += EXT4_SB(dir->i_sb)->s_hash_unsigned;
- hinfo.seed = EXT4_SB(dir->i_sb)->s_hash_seed;
-@@ -1759,6 +1766,7 @@
- goto journal_error;
- brelse (bh2);
- } else {
-+ struct dx_root_info * info;
- dxtrace(printk(KERN_DEBUG
- "Creating second level index...\n"));
- memcpy((char *) entries2, (char *) entries,
-@@ -1768,7 +1776,9 @@
- /* Set up root */
- dx_set_count(entries, 1);
- dx_set_block(entries + 0, newblock);
-- ((struct dx_root *) frames[0].bh->b_data)->info.indirect_levels = 1;
-+ info = dx_get_dx_info((struct ext4_dir_entry_2*)
-+ frames[0].bh->b_data);
-+ info->indirect_levels = 1;
-
- /* Add new access path frame */
- frame = frames + 1;
+++ /dev/null
-This INCOMPAT_LARGEDIR feature allows larger directories
-to be created in ldiskfs, both with directory sizes over
-2GB and a maximum htree depth of 3 instead of the
-current limit of 2. These features are needed in order
-to exceed the current limit of approximately 10M entries
-in a single directory.
-
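To put the ~10M figure and the 2GB limit in context, here is a
back-of-the-envelope capacity estimate. The index fan-outs mirror what
dx_root_limit()/dx_node_limit() give for 4 KiB blocks; the "usable dirents per
leaf block" value is an assumption (typical name lengths and post-split fill
level), not a number taken from this patch, so treat the totals as orders of
magnitude only. It also shows why raising the htree depth pushes the directory
i_size past 2 GiB, which is what the ext4_isize() change below accounts for.

/*
 * Rough htree capacity estimate, assuming 4 KiB blocks.  Fan-outs follow
 * dx_root_limit()/dx_node_limit(); the per-leaf dirent count is an assumed
 * average, not a value from the patch.
 */
#include <stdio.h>

int main(void)
{
	const long blocksize = 4096;
	const long dx_entry  = 8;		/* sizeof(struct dx_entry) */
	/* root block also carries ".", ".." and dx_root_info: 12+12+8 bytes */
	const long root_fanout = (blocksize - 32) / dx_entry;	/* 508 */
	const long node_fanout = (blocksize - 8) / dx_entry;	/* 511 */
	const long per_leaf = 40;	/* assumed usable dirents per leaf */

	long leaves2 = root_fanout * node_fanout;	/* depth 2 */
	long leaves3 = leaves2 * node_fanout;		/* depth 3 */

	printf("depth 2: %ld leaves, ~%.1fM entries, ~%.2f GiB\n", leaves2,
	       (double)leaves2 * per_leaf / 1e6,
	       (double)leaves2 * blocksize / (1UL << 30));
	printf("depth 3: %ld leaves, ~%.1f billion entries, ~%.0f GiB\n",
	       leaves3,
	       (double)leaves3 * per_leaf / 1e9,
	       (double)leaves3 * blocksize / (1UL << 30));
	return 0;
}

Under these assumptions a 2-level tree tops out around the ~10M entries quoted
above while staying under 2 GiB, whereas a 3-level tree can address hundreds of
GiB of leaf blocks, hence the paired changes to ext4_dir_htree_level() and
ext4_isize().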
-Index: linux-2.6.32-504.3.3.el6.x86_64/fs/ext4/ext4.h
-===================================================================
---- linux-2.6.32-504.3.3.el6.x86_64.orig/fs/ext4/ext4.h
-+++ linux-2.6.32-504.3.3.el6.x86_64/fs/ext4/ext4.h
-@@ -1344,6 +1344,7 @@ EXT4_INODE_BIT_FNS(state, state_flags)
- #define EXT4_FEATURE_INCOMPAT_FLEX_BG 0x0200
- #define EXT4_FEATURE_INCOMPAT_EA_INODE 0x0400
- #define EXT4_FEATURE_INCOMPAT_DIRDATA 0x1000
-+#define EXT4_FEATURE_INCOMPAT_LARGEDIR 0x4000
-
- #define EXT4_FEATURE_COMPAT_SUPP EXT2_FEATURE_COMPAT_EXT_ATTR
- #define EXT4_FEATURE_INCOMPAT_SUPP (EXT4_FEATURE_INCOMPAT_FILETYPE| \
-@@ -1354,7 +1355,8 @@ EXT4_INODE_BIT_FNS(state, state_flags)
- EXT4_FEATURE_INCOMPAT_FLEX_BG| \
- EXT4_FEATURE_INCOMPAT_EA_INODE| \
- EXT4_FEATURE_INCOMPAT_MMP| \
-- EXT4_FEATURE_INCOMPAT_DIRDATA)
-+ EXT4_FEATURE_INCOMPAT_DIRDATA| \
-+ EXT4_FEATURE_INCOMPAT_LARGEDIR)
-
- #define EXT4_FEATURE_RO_COMPAT_SUPP (EXT4_FEATURE_RO_COMPAT_SPARSE_SUPER| \
- EXT4_FEATURE_RO_COMPAT_LARGE_FILE| \
-@@ -1612,6 +1614,17 @@ ext4_group_first_block_no(struct super_b
- */
- #define ERR_BAD_DX_DIR -75000
-
-+/* htree levels for ext4 */
-+#define EXT4_HTREE_LEVEL_COMPAT 2
-+#define EXT4_HTREE_LEVEL 3
-+
-+static inline int
-+ext4_dir_htree_level(struct super_block *sb)
-+{
-+ return EXT4_HAS_INCOMPAT_FEATURE(sb, EXT4_FEATURE_INCOMPAT_LARGEDIR) ?
-+ EXT4_HTREE_LEVEL : EXT4_HTREE_LEVEL_COMPAT;
-+}
-+
- void ext4_get_group_no_and_offset(struct super_block *sb, ext4_fsblk_t blocknr,
- ext4_group_t *blockgrpp, ext4_grpblk_t *offsetp);
-
-@@ -2005,13 +2018,15 @@ static inline void ext4_r_blocks_count_s
- es->s_r_blocks_count_hi = cpu_to_le32(blk >> 32);
- }
-
--static inline loff_t ext4_isize(struct ext4_inode *raw_inode)
-+static inline loff_t ext4_isize(struct super_block *sb,
-+ struct ext4_inode *raw_inode)
- {
-- if (S_ISREG(le16_to_cpu(raw_inode->i_mode)))
-+ if (EXT4_HAS_INCOMPAT_FEATURE(sb, EXT4_FEATURE_INCOMPAT_LARGEDIR) ||
-+ S_ISREG(le16_to_cpu(raw_inode->i_mode)))
- return ((loff_t)le32_to_cpu(raw_inode->i_size_high) << 32) |
- le32_to_cpu(raw_inode->i_size_lo);
-- else
-- return (loff_t) le32_to_cpu(raw_inode->i_size_lo);
-+
-+ return (loff_t) le32_to_cpu(raw_inode->i_size_lo);
- }
-
- static inline void ext4_isize_set(struct ext4_inode *raw_inode, loff_t i_size)
-Index: linux-2.6.32-504.3.3.el6.x86_64/fs/ext4/inode.c
-===================================================================
---- linux-2.6.32-504.3.3.el6.x86_64.orig/fs/ext4/inode.c
-+++ linux-2.6.32-504.3.3.el6.x86_64/fs/ext4/inode.c
-@@ -5470,7 +5470,7 @@ struct inode *ext4_iget(struct super_blo
- if (EXT4_HAS_INCOMPAT_FEATURE(sb, EXT4_FEATURE_INCOMPAT_64BIT))
- ei->i_file_acl |=
- ((__u64)le16_to_cpu(raw_inode->i_file_acl_high)) << 32;
-- inode->i_size = ext4_isize(raw_inode);
-+ inode->i_size = ext4_isize(sb, raw_inode);
- ei->i_disksize = inode->i_size;
- #ifdef CONFIG_QUOTA
- ei->i_reserved_quota = 0;
-Index: linux-2.6.32-504.3.3.el6.x86_64/fs/ext4/namei.c
-===================================================================
---- linux-2.6.32-504.3.3.el6.x86_64.orig/fs/ext4/namei.c
-+++ linux-2.6.32-504.3.3.el6.x86_64/fs/ext4/namei.c
-@@ -225,7 +225,7 @@ struct dx_root_info * dx_get_dx_info(str
-
- static inline ext4_lblk_t dx_get_block(struct dx_entry *entry)
- {
-- return le32_to_cpu(entry->block) & 0x00ffffff;
-+ return le32_to_cpu(entry->block) & 0x0fffffff;
- }
-
- static inline void dx_set_block(struct dx_entry *entry, ext4_lblk_t value)
-@@ -388,7 +388,7 @@ dx_probe(const struct qstr *d_name, stru
- struct dx_frame *frame = frame_in;
- u32 hash;
-
-- frame->bh = NULL;
-+ memset(frame_in, 0, EXT4_HTREE_LEVEL * sizeof(frame_in[0]));
- if (!(bh = ext4_bread (NULL,dir, 0, 0, err)))
- goto fail;
-
-@@ -418,9 +418,16 @@ dx_probe(const struct qstr *d_name, stru
- goto fail;
- }
-
-- if ((indirect = info->indirect_levels) > 1) {
-- ext4_warning(dir->i_sb, "Unimplemented inode hash depth: %#06x",
-- info->indirect_levels);
-+ indirect = info->indirect_levels;
-+ if (indirect >= ext4_dir_htree_level(dir->i_sb)) {
-+ ext4_warning(dir->i_sb,
-+ "Directory (ino: %lu) htree depth %#06x exceed "
-+ "supported value", dir->i_ino,
-+ ext4_dir_htree_level(dir->i_sb));
-+ if (ext4_dir_htree_level(dir->i_sb) < EXT4_HTREE_LEVEL) {
-+ ext4_warning(dir->i_sb, "Enable large directory "
-+ "feature to access it");
-+ }
- brelse(bh);
- *err = ERR_BAD_DX_DIR;
- goto fail;
-@@ -512,13 +519,18 @@ fail:
- static void dx_release (struct dx_frame *frames)
- {
- struct dx_root_info *info;
-+ int i;
-+
- if (frames[0].bh == NULL)
- return;
-
- info = dx_get_dx_info((struct ext4_dir_entry_2*)frames[0].bh->b_data);
-- if (info->indirect_levels)
-- brelse(frames[1].bh);
-- brelse(frames[0].bh);
-+ for (i = 0; i <= info->indirect_levels; i++) {
-+ if (frames[i].bh == NULL)
-+ break;
-+ brelse(frames[i].bh);
-+ frames[i].bh = NULL;
-+ }
- }
-
- /*
-@@ -661,7 +673,7 @@ int ext4_htree_fill_tree(struct file *di
- {
- struct dx_hash_info hinfo;
- struct ext4_dir_entry_2 *de;
-- struct dx_frame frames[2], *frame;
-+ struct dx_frame frames[EXT4_HTREE_LEVEL], *frame;
- struct inode *dir;
- ext4_lblk_t block;
- int count = 0;
-@@ -1003,7 +1015,7 @@ static struct buffer_head * ext4_dx_find
- struct super_block * sb;
- struct dx_hash_info hinfo;
- u32 hash;
-- struct dx_frame frames[2], *frame;
-+ struct dx_frame frames[EXT4_HTREE_LEVEL], *frame;
- struct ext4_dir_entry_2 *de, *top;
- struct buffer_head *bh;
- ext4_lblk_t block;
-@@ -1443,7 +1455,7 @@ static int add_dirent_to_buf(handle_t *h
- */
- dir->i_mtime = dir->i_ctime = ext4_current_time(dir);
- ext4_update_dx_flag(dir);
-- dir->i_version++;
-+ inode_inc_iversion(dir);
- ext4_mark_inode_dirty(handle, dir);
- BUFFER_TRACE(bh, "call ext4_handle_dirty_metadata");
- err = ext4_handle_dirty_metadata(handle, dir, bh);
-@@ -1463,7 +1475,7 @@ static int make_indexed_dir(handle_t *ha
- const char *name = dentry->d_name.name;
- int namelen = dentry->d_name.len;
- struct buffer_head *bh2;
-- struct dx_frame frames[2], *frame;
-+ struct dx_frame frames[EXT4_HTREE_LEVEL], *frame;
- struct dx_entry *entries;
- struct ext4_dir_entry_2 *de, *de2, *dot_de, *dotdot_de;
- char *data1, *top;
-@@ -1712,15 +1724,18 @@ static int ext4_add_entry(handle_t *hand
- static int ext4_dx_add_entry(handle_t *handle, struct dentry *dentry,
- struct inode *inode)
- {
-- struct dx_frame frames[2], *frame;
-+ struct dx_frame frames[EXT4_HTREE_LEVEL], *frame;
- struct dx_entry *entries, *at;
- struct dx_hash_info hinfo;
- struct buffer_head *bh;
- struct inode *dir = dentry->d_parent->d_inode;
- struct super_block *sb = dir->i_sb;
- struct ext4_dir_entry_2 *de;
-+ int restart;
- int err;
-
-+again:
-+ restart = 0;
- frame = dx_probe(&dentry->d_name, dir, &hinfo, frames, &err);
- if (!frame)
- return err;
-@@ -1730,33 +1745,48 @@ static int ext4_dx_add_entry(handle_t *h
- if (!(bh = ext4_bread(handle,dir, dx_get_block(frame->at), 0, &err)))
- goto cleanup;
-
-- BUFFER_TRACE(bh, "get_write_access");
-- err = ext4_journal_get_write_access(handle, bh);
-- if (err)
-- goto journal_error;
--
- err = add_dirent_to_buf(handle, dentry, inode, NULL, bh);
- if (err != -ENOSPC)
- goto cleanup;
-
-+ err = 0;
- /* Block full, should compress but for now just split */
- dxtrace(printk(KERN_DEBUG "using %u of %u node entries\n",
- dx_get_count(entries), dx_get_limit(entries)));
- /* Need to split index? */
- if (dx_get_count(entries) == dx_get_limit(entries)) {
- ext4_lblk_t newblock;
-- unsigned icount = dx_get_count(entries);
-- int levels = frame - frames;
-+ int levels = frame - frames + 1;
-+ unsigned icount;
-+ int add_level = 1;
- struct dx_entry *entries2;
- struct dx_node *node2;
- struct buffer_head *bh2;
-
-- if (levels && (dx_get_count(frames->entries) ==
-- dx_get_limit(frames->entries))) {
-- ext4_warning(sb, "Directory index full!");
-+ while (frame > frames) {
-+ if (dx_get_count((frame - 1)->entries) <
-+ dx_get_limit((frame - 1)->entries)) {
-+ add_level = 0;
-+ break;
-+ }
-+ frame--; /* split higher index block */
-+ at = frame->at;
-+ entries = frame->entries;
-+ restart = 1;
-+ }
-+ if (add_level && levels == ext4_dir_htree_level(sb)) {
-+ ext4_warning(sb, "Directory (ino: %lu) index full, "
-+ "reach max htree level :%d",
-+ dir->i_ino, levels);
-+ if (ext4_dir_htree_level(sb) < EXT4_HTREE_LEVEL) {
-+ ext4_warning(sb, "Large directory feature is"
-+ "not enabled on this "
-+ "filesystem");
-+ }
- err = -ENOSPC;
- goto cleanup;
- }
-+ icount = dx_get_count(entries);
- bh2 = ext4_append (handle, dir, &newblock, &err);
- if (!(bh2))
- goto cleanup;
-@@ -1769,7 +1799,7 @@ static int ext4_dx_add_entry(handle_t *h
- err = ext4_journal_get_write_access(handle, frame->bh);
- if (err)
- goto journal_error;
-- if (levels) {
-+ if (!add_level) {
- unsigned icount1 = icount/2, icount2 = icount - icount1;
- unsigned hash2 = dx_get_hash(entries + icount1);
- dxtrace(printk(KERN_DEBUG "Split index %i/%i\n",
-@@ -1777,7 +1807,7 @@ static int ext4_dx_add_entry(handle_t *h
-
- BUFFER_TRACE(frame->bh, "get_write_access"); /* index root */
- err = ext4_journal_get_write_access(handle,
-- frames[0].bh);
-+ (frame - 1)->bh);
- if (err)
- goto journal_error;
-
-@@ -1793,18 +1823,24 @@ static int ext4_dx_add_entry(handle_t *h
- frame->entries = entries = entries2;
- swap(frame->bh, bh2);
- }
-- dx_insert_block(frames + 0, hash2, newblock);
-- dxtrace(dx_show_index("node", frames[1].entries));
-+ dx_insert_block((frame - 1), hash2, newblock);
-+ dxtrace(dx_show_index("node", frame->entries));
- dxtrace(dx_show_index("node",
- ((struct dx_node *) bh2->b_data)->entries));
- err = ext4_handle_dirty_metadata(handle, dir, bh2);
- if (err)
- goto journal_error;
- brelse (bh2);
-+ ext4_handle_dirty_metadata(handle, dir,
-+ (frame - 1)->bh);
-+ if (restart) {
-+ ext4_handle_dirty_metadata(handle, dir,
-+ frame->bh);
-+ goto cleanup;
-+ }
- } else {
- struct dx_root_info * info;
-- dxtrace(printk(KERN_DEBUG
-- "Creating second level index...\n"));
-+
- memcpy((char *) entries2, (char *) entries,
- icount * sizeof(struct dx_entry));
- dx_set_limit(entries2, dx_node_limit(dir));
-@@ -1814,19 +1850,16 @@ static int ext4_dx_add_entry(handle_t *h
- dx_set_block(entries + 0, newblock);
- info = dx_get_dx_info((struct ext4_dir_entry_2*)
- frames[0].bh->b_data);
-- info->indirect_levels = 1;
--
-- /* Add new access path frame */
-- frame = frames + 1;
-- frame->at = at = at - entries + entries2;
-- frame->entries = entries = entries2;
-- frame->bh = bh2;
-- err = ext4_journal_get_write_access(handle,
-- frame->bh);
-- if (err)
-- goto journal_error;
-+ info->indirect_levels += 1;
-+ dxtrace(printk(KERN_DEBUG
-+ "Creating %d level index...\n",
-+ info->indirect_levels));
-+ ext4_handle_dirty_metadata(handle, dir, frame->bh);
-+ ext4_handle_dirty_metadata(handle, dir, bh2);
-+ brelse(bh2);
-+ restart = 1;
-+ goto cleanup;
- }
-- err = ext4_handle_dirty_metadata(handle, dir, frames[0].bh);
- if (err) {
- ext4_std_error(inode->i_sb, err);
- goto cleanup;
-@@ -1840,6 +1873,10 @@ cleanup:
- if (bh)
- brelse(bh);
- dx_release(frames);
-+ /* @restart is true means htree-path has been changed, we need to
-+ * repeat dx_probe() to find out valid htree-path */
-+ if (restart && err == 0)
-+ goto again;
- return err;
- }
-
-@@ -1874,7 +1911,7 @@ int ext4_delete_entry(handle_t *handle,
- blocksize);
- else
- de->inode = 0;
-- dir->i_version++;
-+ inode_inc_iversion(dir);
- BUFFER_TRACE(bh, "call ext4_handle_dirty_metadata");
- ext4_handle_dirty_metadata(handle, dir, bh);
- return 0;
+++ /dev/null
-Index: linux-stage/fs/ext4/ext4.h
-===================================================================
---- linux-stage.orig/fs/ext4/ext4.h
-+++ linux-stage/fs/ext4/ext4.h
-@@ -1329,6 +1329,7 @@ EXT4_INODE_BIT_FNS(state, state_flags)
- #define EXT4_FEATURE_INCOMPAT_64BIT 0x0080
- #define EXT4_FEATURE_INCOMPAT_MMP 0x0100
- #define EXT4_FEATURE_INCOMPAT_FLEX_BG 0x0200
-+#define EXT4_FEATURE_INCOMPAT_EA_INODE 0x0400
- #define EXT4_FEATURE_INCOMPAT_DIRDATA 0x1000
-
- #define EXT4_FEATURE_COMPAT_SUPP EXT2_FEATURE_COMPAT_EXT_ATTR
-@@ -1338,6 +1339,7 @@ EXT4_INODE_BIT_FNS(state, state_flags)
- EXT4_FEATURE_INCOMPAT_EXTENTS| \
- EXT4_FEATURE_INCOMPAT_64BIT| \
- EXT4_FEATURE_INCOMPAT_FLEX_BG| \
-+ EXT4_FEATURE_INCOMPAT_EA_INODE| \
- EXT4_FEATURE_INCOMPAT_MMP| \
- EXT4_FEATURE_INCOMPAT_DIRDATA)
-
-@@ -1706,6 +1714,10 @@ struct mmpd_data {
- # define ATTRIB_NORET __attribute__((noreturn))
- # define NORET_AND noreturn,
-
-+struct ext4_xattr_ino_array {
-+ unsigned int xia_count; /* # of used item in the array */
-+ unsigned int xia_inodes[0];
-+};
- /* bitmap.c */
- extern unsigned int ext4_count_free(struct buffer_head *, unsigned);
-
-Index: linux-stage/fs/ext4/xattr.c
-===================================================================
---- linux-stage.orig/fs/ext4/xattr.c
-+++ linux-stage/fs/ext4/xattr.c
-@@ -168,19 +168,26 @@ ext4_xattr_check_block(struct buffer_hea
- }
-
- static inline int
--ext4_xattr_check_entry(struct ext4_xattr_entry *entry, size_t size)
-+ext4_xattr_check_entry(struct ext4_xattr_entry *entry, size_t size,
-+ struct inode *inode)
- {
- size_t value_size = le32_to_cpu(entry->e_value_size);
-
-- if (entry->e_value_block != 0 || value_size > size ||
-+ if (!entry->e_value_inum &&
- le16_to_cpu(entry->e_value_offs) + value_size > size)
-+ return -EIO;
-+ if (entry->e_value_inum &&
-+ (le32_to_cpu(entry->e_value_inum) < EXT4_FIRST_INO(inode->i_sb) ||
-+ le32_to_cpu(entry->e_value_inum) >
-+ le32_to_cpu(EXT4_SB(inode->i_sb)->s_es->s_inodes_count)))
- return -EIO;
- return 0;
- }
-
- static int
- ext4_xattr_find_entry(struct ext4_xattr_entry **pentry, int name_index,
-- const char *name, size_t size, int sorted)
-+ const char *name, size_t size, int sorted,
-+ struct inode *inode)
- {
- struct ext4_xattr_entry *entry;
- size_t name_len;
-@@ -200,11 +207,104 @@ ext4_xattr_find_entry(struct ext4_xattr_
- break;
- }
- *pentry = entry;
-- if (!cmp && ext4_xattr_check_entry(entry, size))
-+ if (!cmp && ext4_xattr_check_entry(entry, size, inode))
- return -EIO;
- return cmp ? -ENODATA : 0;
- }
-
-+/*
-+ * Read the EA value from an inode.
-+ */
-+static int
-+ext4_xattr_inode_read(struct inode *ea_inode, void *buf, size_t *size)
-+{
-+ unsigned long block = 0;
-+ struct buffer_head *bh = NULL;
-+ int err, blocksize;
-+ size_t csize, ret_size = 0;
-+
-+ if (*size == 0)
-+ return 0;
-+
-+ blocksize = ea_inode->i_sb->s_blocksize;
-+
-+ while (ret_size < *size) {
-+ csize = (*size - ret_size) > blocksize ? blocksize :
-+ *size - ret_size;
-+ bh = ext4_bread(NULL, ea_inode, block, 0, &err);
-+ if (!bh) {
-+ *size = ret_size;
-+ return err;
-+ }
-+ memcpy(buf, bh->b_data, csize);
-+ brelse(bh);
-+
-+ buf += csize;
-+ block += 1;
-+ ret_size += csize;
-+ }
-+
-+ *size = ret_size;
-+
-+ return err;
-+}
-+
-+struct inode *ext4_xattr_inode_iget(struct inode *parent, unsigned long ea_ino, int *err)
-+{
-+ struct inode *ea_inode = NULL;
-+
-+ ea_inode = ext4_iget(parent->i_sb, ea_ino);
-+ if (IS_ERR(ea_inode) || is_bad_inode(ea_inode)) {
-+ int rc = IS_ERR(ea_inode) ? PTR_ERR(ea_inode) : 0;
-+ ext4_error(parent->i_sb, "error while reading EA inode %lu "
-+ "/ %d %d", ea_ino, rc, is_bad_inode(ea_inode));
-+ *err = rc != 0 ? rc : -EIO;
-+ return NULL;
-+ }
-+
-+ if (EXT4_XATTR_INODE_GET_PARENT(ea_inode) != parent->i_ino ||
-+ ea_inode->i_generation != parent->i_generation) {
-+ ext4_error(parent->i_sb, "Backpointer from EA inode %lu "
-+ "to parent invalid.", ea_ino);
-+ *err = -EINVAL;
-+ goto error;
-+ }
-+
-+ if (!(EXT4_I(ea_inode)->i_flags & EXT4_EA_INODE_FL)) {
-+ ext4_error(parent->i_sb, "EA inode %lu does not have "
-+ "EXT4_EA_INODE_FL flag set.\n", ea_ino);
-+ *err = -EINVAL;
-+ goto error;
-+ }
-+
-+ *err = 0;
-+ return ea_inode;
-+
-+error:
-+ iput(ea_inode);
-+ return NULL;
-+}
-+
-+/*
-+ * Read the value from the EA inode.
-+ */
-+static int
-+ext4_xattr_inode_get(struct inode *inode, unsigned long ea_ino, void *buffer,
-+ size_t *size)
-+{
-+ struct inode *ea_inode = NULL;
-+ int err;
-+
-+ ea_inode = ext4_xattr_inode_iget(inode, ea_ino, &err);
-+ if (err)
-+ return err;
-+
-+ err = ext4_xattr_inode_read(ea_inode, buffer, size);
-+ iput(ea_inode);
-+
-+ return err;
-+}
-+
- static int
- ext4_xattr_block_get(struct inode *inode, int name_index, const char *name,
- void *buffer, size_t buffer_size)
-@@ -236,7 +335,8 @@ bad_block:
- }
- ext4_xattr_cache_insert(bh);
- entry = BFIRST(bh);
-- error = ext4_xattr_find_entry(&entry, name_index, name, bh->b_size, 1);
-+ error = ext4_xattr_find_entry(&entry, name_index, name, bh->b_size, 1,
-+ inode);
- if (error == -EIO)
- goto bad_block;
- if (error)
-@@ -246,8 +346,16 @@ bad_block:
- error = -ERANGE;
- if (size > buffer_size)
- goto cleanup;
-- memcpy(buffer, bh->b_data + le16_to_cpu(entry->e_value_offs),
-- size);
-+ if (entry->e_value_inum) {
-+ error = ext4_xattr_inode_get(inode,
-+ le32_to_cpu(entry->e_value_inum),
-+ buffer, &size);
-+ if (error)
-+ goto cleanup;
-+ } else {
-+ memcpy(buffer, bh->b_data +
-+ le16_to_cpu(entry->e_value_offs), size);
-+ }
- }
- error = size;
-
-@@ -281,7 +389,7 @@ ext4_xattr_ibody_get(struct inode *inode
- if (error)
- goto cleanup;
- error = ext4_xattr_find_entry(&entry, name_index, name,
-- end - (void *)entry, 0);
-+ end - (void *)entry, 0, inode);
- if (error)
- goto cleanup;
- size = le32_to_cpu(entry->e_value_size);
-@@ -289,8 +397,16 @@ ext4_xattr_ibody_get(struct inode *inode
- error = -ERANGE;
- if (size > buffer_size)
- goto cleanup;
-- memcpy(buffer, (void *)IFIRST(header) +
-- le16_to_cpu(entry->e_value_offs), size);
-+ if (entry->e_value_inum) {
-+ error = ext4_xattr_inode_get(inode,
-+ le32_to_cpu(entry->e_value_inum),
-+ buffer, &size);
-+ if (error)
-+ goto cleanup;
-+ } else {
-+ memcpy(buffer, (void *)IFIRST(header) +
-+ le16_to_cpu(entry->e_value_offs), size);
-+ }
- }
- error = size;
-
-@@ -513,7 +629,7 @@ static size_t ext4_xattr_free_space(stru
- {
- for (; !IS_LAST_ENTRY(last); last = EXT4_XATTR_NEXT(last)) {
- *total += EXT4_XATTR_LEN(last->e_name_len);
-- if (!last->e_value_block && last->e_value_size) {
-+ if (!last->e_value_inum && last->e_value_size) {
- size_t offs = le16_to_cpu(last->e_value_offs);
- if (offs < *min_offs)
- *min_offs = offs;
-@@ -522,11 +638,159 @@ static size_t ext4_xattr_free_space(stru
- return (*min_offs - ((void *)last - base) - sizeof(__u32));
- }
-
-+/*
-+ * Write the value of the EA in an inode.
-+ */
-+static int
-+ext4_xattr_inode_write(handle_t *handle, struct inode *ea_inode,
-+ const void *buf, int bufsize)
-+{
-+ struct buffer_head *bh = NULL, dummy;
-+ unsigned long block = 0;
-+ unsigned blocksize = ea_inode->i_sb->s_blocksize;
-+ unsigned max_blocks = (bufsize + blocksize - 1) >> ea_inode->i_blkbits;
-+ int csize, wsize = 0;
-+ int ret = 0;
-+ int retries = 0;
-+
-+retry:
-+ while (ret >= 0 && ret < max_blocks) {
-+ block += ret;
-+ max_blocks -= ret;
-+
-+ ret = ext4_get_blocks(handle, ea_inode, block, max_blocks,
-+ &dummy, EXT4_GET_BLOCKS_CREATE);
-+ if (ret <= 0) {
-+ ext4_mark_inode_dirty(handle, ea_inode);
-+ if (ret == -ENOSPC &&
-+ ext4_should_retry_alloc(ea_inode->i_sb, &retries)) {
-+ ret = 0;
-+ goto retry;
-+ }
-+ break;
-+ }
-+ }
-+
-+ if (ret < 0)
-+ return ret;
-+
-+ block = 0;
-+ while (wsize < bufsize) {
-+ if (bh != NULL)
-+ brelse(bh);
-+ csize = (bufsize - wsize) > blocksize ? blocksize :
-+ bufsize - wsize;
-+ bh = ext4_getblk(handle, ea_inode, block, 0, &ret);
-+ if (!bh)
-+ goto out;
-+ ret = ext4_journal_get_write_access(handle, bh);
-+ if (ret)
-+ goto out;
-+
-+ memcpy(bh->b_data, buf, csize);
-+ set_buffer_uptodate(bh);
-+ ext4_handle_dirty_metadata(handle, ea_inode, bh);
-+
-+ buf += csize;
-+ wsize += csize;
-+ block += 1;
-+ }
-+
-+ i_size_write(ea_inode, wsize);
-+ ext4_update_i_disksize(ea_inode, wsize);
-+
-+ ext4_mark_inode_dirty(handle, ea_inode);
-+
-+out:
-+ brelse(bh);
-+
-+ return ret;
-+}
-+
-+/*
-+ * Create an inode to store the value of a large EA.
-+ */
-+static struct inode *
-+ext4_xattr_inode_create(handle_t *handle, struct inode *inode)
-+{
-+ struct inode *ea_inode = NULL;
-+
-+ /*
-+ * Let the next inode be the goal, so we try and allocate the EA inode
-+ * in the same group, or nearby one.
-+ */
-+ ea_inode = ext4_new_inode(handle, inode->i_sb->s_root->d_inode,
-+ S_IFREG|0600, NULL, inode->i_ino + 1);
-+
-+ if (!IS_ERR(ea_inode)) {
-+ ea_inode->i_op = &ext4_file_inode_operations;
-+ ea_inode->i_fop = &ext4_file_operations;
-+ ext4_set_aops(ea_inode);
-+ ea_inode->i_generation = inode->i_generation;
-+ EXT4_I(ea_inode)->i_flags |= EXT4_EA_INODE_FL;
-+
-+ /*
-+ * A back-pointer from EA inode to parent inode will be useful
-+ * for e2fsck.
-+ */
-+ EXT4_XATTR_INODE_SET_PARENT(ea_inode, inode->i_ino);
-+ unlock_new_inode(ea_inode);
-+ }
-+
-+ return ea_inode;
-+}
-+
-+/*
-+ * Unlink the inode storing the value of the EA.
-+ */
-+int
-+ext4_xattr_inode_unlink(struct inode *inode, unsigned long ea_ino)
-+{
-+ struct inode *ea_inode = NULL;
-+ int err;
-+
-+ ea_inode = ext4_xattr_inode_iget(inode, ea_ino, &err);
-+ if (err)
-+ return err;
-+
-+ ea_inode->i_nlink = 0;
-+ iput(ea_inode);
-+
-+ return 0;
-+}
-+
-+/*
-+ * Add value of the EA in an inode.
-+ */
-+static int
-+ext4_xattr_inode_set(handle_t *handle, struct inode *inode, unsigned long *ea_ino,
-+ const void *value, size_t value_len)
-+{
-+ struct inode *ea_inode = NULL;
-+ int err;
-+
-+ /* Create an inode for the EA value */
-+ ea_inode = ext4_xattr_inode_create(handle, inode);
-+ if (IS_ERR(ea_inode))
-+ return -1;
-+
-+ err = ext4_xattr_inode_write(handle, ea_inode, value, value_len);
-+ if (err)
-+ ea_inode->i_nlink = 0;
-+ else
-+ *ea_ino = ea_inode->i_ino;
-+
-+ iput(ea_inode);
-+
-+ return err;
-+}
-+
- struct ext4_xattr_info {
-- int name_index;
- const char *name;
- const void *value;
- size_t value_len;
-+ int name_index;
-+ int in_inode;
- };
-
- struct ext4_xattr_search {
-@@ -538,15 +802,23 @@ struct ext4_xattr_search {
- };
-
- static int
--ext4_xattr_set_entry(struct ext4_xattr_info *i, struct ext4_xattr_search *s)
-+ext4_xattr_set_entry(struct ext4_xattr_info *i, struct ext4_xattr_search *s,
-+ handle_t *handle, struct inode *inode)
- {
- struct ext4_xattr_entry *last;
- size_t free, min_offs = s->end - s->base, name_len = strlen(i->name);
-+ int in_inode = i->in_inode;
-+
-+ if (EXT4_HAS_INCOMPAT_FEATURE(inode->i_sb,
-+ EXT4_FEATURE_INCOMPAT_EA_INODE) &&
-+ (EXT4_XATTR_SIZE(i->value_len) >
-+ EXT4_XATTR_MIN_LARGE_EA_SIZE(inode->i_sb->s_blocksize)))
-+ in_inode = 1;
-
- /* Compute min_offs and last. */
- last = s->first;
- for (; !IS_LAST_ENTRY(last); last = EXT4_XATTR_NEXT(last)) {
-- if (!last->e_value_block && last->e_value_size) {
-+ if (!last->e_value_inum && last->e_value_size) {
- size_t offs = le16_to_cpu(last->e_value_offs);
- if (offs < min_offs)
- min_offs = offs;
-@@ -554,16 +826,21 @@ ext4_xattr_set_entry(struct ext4_xattr_i
- }
- free = min_offs - ((void *)last - s->base) - sizeof(__u32);
- if (!s->not_found) {
-- if (!s->here->e_value_block && s->here->e_value_size) {
-+ if (!in_inode &&
-+ !s->here->e_value_inum && s->here->e_value_size) {
- size_t size = le32_to_cpu(s->here->e_value_size);
- free += EXT4_XATTR_SIZE(size);
- }
- free += EXT4_XATTR_LEN(name_len);
- }
- if (i->value) {
-- if (free < EXT4_XATTR_SIZE(i->value_len) ||
-- free < EXT4_XATTR_LEN(name_len) +
-- EXT4_XATTR_SIZE(i->value_len))
-+ size_t value_len = EXT4_XATTR_SIZE(i->value_len);
-+
-+ if (in_inode)
-+ value_len = 0;
-+
-+ if (free < value_len ||
-+ free < EXT4_XATTR_LEN(name_len) + value_len)
- return -ENOSPC;
- }
-
-@@ -577,7 +854,8 @@ ext4_xattr_set_entry(struct ext4_xattr_i
- s->here->e_name_len = name_len;
- memcpy(s->here->e_name, i->name, name_len);
- } else {
-- if (!s->here->e_value_block && s->here->e_value_size) {
-+ if (!s->here->e_value_inum && s->here->e_value_size &&
-+ s->here->e_value_offs > 0) {
- void *first_val = s->base + min_offs;
- size_t offs = le16_to_cpu(s->here->e_value_offs);
- void *val = s->base + offs;
-@@ -606,13 +884,18 @@ ext4_xattr_set_entry(struct ext4_xattr_i
- last = s->first;
- while (!IS_LAST_ENTRY(last)) {
- size_t o = le16_to_cpu(last->e_value_offs);
-- if (!last->e_value_block &&
-+ if (!last->e_value_inum &&
- last->e_value_size && o < offs)
- last->e_value_offs =
- cpu_to_le16(o + size);
- last = EXT4_XATTR_NEXT(last);
- }
- }
-+ if (s->here->e_value_inum) {
-+ ext4_xattr_inode_unlink(inode,
-+ le32_to_cpu(s->here->e_value_inum));
-+ s->here->e_value_inum = 0;
-+ }
- if (!i->value) {
- /* Remove the old name. */
- size_t size = EXT4_XATTR_LEN(name_len);
-@@ -626,10 +908,17 @@ ext4_xattr_set_entry(struct ext4_xattr_i
- if (i->value) {
- /* Insert the new value. */
- s->here->e_value_size = cpu_to_le32(i->value_len);
-- if (i->value_len) {
-+ if (in_inode) {
-+ unsigned long ea_ino = le32_to_cpu(s->here->e_value_inum);
-+ ext4_xattr_inode_set(handle, inode, &ea_ino, i->value,
-+ i->value_len);
-+ s->here->e_value_inum = cpu_to_le32(ea_ino);
-+ s->here->e_value_offs = 0;
-+ } else if (i->value_len) {
- size_t size = EXT4_XATTR_SIZE(i->value_len);
- void *val = s->base + min_offs - size;
- s->here->e_value_offs = cpu_to_le16(min_offs - size);
-+ s->here->e_value_inum = 0;
- memset(val + size - EXT4_XATTR_PAD, 0,
- EXT4_XATTR_PAD); /* Clear the pad bytes. */
- memcpy(val, i->value, i->value_len);
-@@ -674,7 +963,7 @@ ext4_xattr_block_find(struct inode *inod
- bs->s.end = bs->bh->b_data + bs->bh->b_size;
- bs->s.here = bs->s.first;
- error = ext4_xattr_find_entry(&bs->s.here, i->name_index,
-- i->name, bs->bh->b_size, 1);
-+ i->name, bs->bh->b_size, 1, inode);
- if (error && error != -ENODATA)
- goto cleanup;
- bs->s.not_found = error;
-@@ -698,8 +987,6 @@ ext4_xattr_block_set(handle_t *handle, s
-
- #define header(x) ((struct ext4_xattr_header *)(x))
-
-- if (i->value && i->value_len > sb->s_blocksize)
-- return -ENOSPC;
- if (s->base) {
- ce = mb_cache_entry_get(ext4_xattr_cache, bs->bh->b_bdev,
- bs->bh->b_blocknr);
-@@ -714,7 +1001,7 @@ ext4_xattr_block_set(handle_t *handle, s
- ce = NULL;
- }
- ea_bdebug(bs->bh, "modifying in-place");
-- error = ext4_xattr_set_entry(i, s);
-+ error = ext4_xattr_set_entry(i, s, handle, inode);
- if (!error) {
- if (!IS_LAST_ENTRY(s->first))
- ext4_xattr_rehash(header(s->base),
-@@ -766,7 +1053,7 @@ ext4_xattr_block_set(handle_t *handle, s
- s->end = s->base + sb->s_blocksize;
- }
-
-- error = ext4_xattr_set_entry(i, s);
-+ error = ext4_xattr_set_entry(i, s, handle, inode);
- if (error == -EIO)
- goto bad_block;
- if (error)
-@@ -917,7 +1204,7 @@ ext4_xattr_ibody_find(struct inode *inod
- /* Find the named attribute. */
- error = ext4_xattr_find_entry(&is->s.here, i->name_index,
- i->name, is->s.end -
-- (void *)is->s.base, 0);
-+ (void *)is->s.base, 0, inode);
- if (error && error != -ENODATA)
- return error;
- is->s.not_found = error;
-@@ -936,7 +1223,7 @@ ext4_xattr_ibody_set(handle_t *handle, s
-
- if (EXT4_I(inode)->i_extra_isize == 0)
- return -ENOSPC;
-- error = ext4_xattr_set_entry(i, s);
-+ error = ext4_xattr_set_entry(i, s, handle, inode);
- if (error)
- return error;
- header = IHDR(inode, ext4_raw_inode(&is->iloc));
-@@ -972,7 +1259,7 @@ ext4_xattr_set_handle(handle_t *handle,
- .name = name,
- .value = value,
- .value_len = value_len,
--
-+ .in_inode = 0,
- };
- struct ext4_xattr_ibody_find is = {
- .s = { .not_found = -ENODATA, },
-@@ -1041,6 +1328,15 @@ ext4_xattr_set_handle(handle_t *handle,
- goto cleanup;
- }
- error = ext4_xattr_block_set(handle, inode, &i, &bs);
-+ if (EXT4_HAS_INCOMPAT_FEATURE(inode->i_sb,
-+ EXT4_FEATURE_INCOMPAT_EA_INODE) &&
-+ error == -ENOSPC) {
-+ /* xattr not fit to block, store at external
-+ * inode */
-+ i.in_inode = 1;
-+ error = ext4_xattr_ibody_set(handle, inode,
-+ &i, &is);
-+ }
- if (error)
- goto cleanup;
- if (!is.s.not_found) {
-@@ -1087,10 +1383,25 @@ ext4_xattr_set(struct inode *inode, int
- const void *value, size_t value_len, int flags)
- {
- handle_t *handle;
-+ struct super_block *sb = inode->i_sb;
-+ int buffer_credits;
- int error, retries = 0;
-
-+ buffer_credits = EXT4_DATA_TRANS_BLOCKS(sb);
-+ if ((value_len >= EXT4_XATTR_MIN_LARGE_EA_SIZE(sb->s_blocksize)) &&
-+ EXT4_HAS_INCOMPAT_FEATURE(sb, EXT4_FEATURE_INCOMPAT_EA_INODE)) {
-+ int nrblocks = (value_len + sb->s_blocksize - 1) >>
-+ sb->s_blocksize_bits;
-+
-+ /* For new inode */
-+ buffer_credits += EXT4_SINGLEDATA_TRANS_BLOCKS(sb) + 3;
-+
-+ /* For data blocks of EA inode */
-+ buffer_credits += ext4_meta_trans_blocks(inode, nrblocks, 0);
-+ }
-+
- retry:
-- handle = ext4_journal_start(inode, EXT4_DATA_TRANS_BLOCKS(inode->i_sb));
-+ handle = ext4_journal_start(inode, buffer_credits);
- if (IS_ERR(handle)) {
- error = PTR_ERR(handle);
- } else {
-@@ -1100,7 +1411,7 @@ retry:
- value, value_len, flags);
- error2 = ext4_journal_stop(handle);
- if (error == -ENOSPC &&
-- ext4_should_retry_alloc(inode->i_sb, &retries))
-+ ext4_should_retry_alloc(sb, &retries))
- goto retry;
- if (error == 0)
- error = error2;
-@@ -1122,7 +1433,7 @@ static void ext4_xattr_shift_entries(str
-
- /* Adjust the value offsets of the entries */
- for (; !IS_LAST_ENTRY(last); last = EXT4_XATTR_NEXT(last)) {
-- if (!last->e_value_block && last->e_value_size) {
-+ if (!last->e_value_inum && last->e_value_size) {
- new_offs = le16_to_cpu(last->e_value_offs) +
- value_offs_shift;
- BUG_ON(new_offs + le32_to_cpu(last->e_value_size)
-@@ -1355,22 +1666,135 @@ cleanup:
- return error;
- }
-
-+#define EIA_INCR 16 /* must be 2^n */
-+#define EIA_MASK (EIA_INCR - 1)
-+/* Add the large xattr @ino into @lea_ino_array for later deletion.
-+ * If @lea_ino_array is new or full it will be grown and the old
-+ * contents copied over.
-+ */
-+static int
-+ext4_expand_ino_array(struct ext4_xattr_ino_array **lea_ino_array, __u32 ino)
-+{
-+ if (*lea_ino_array == NULL) {
-+ /*
-+ * Start with 15 inodes, so it fits into a power-of-two size.
-+ * If *lea_ino_array is NULL, this is essentially offsetof()
-+ */
-+ (*lea_ino_array) =
-+ kmalloc(offsetof(struct ext4_xattr_ino_array,
-+ xia_inodes[EIA_MASK]),
-+ GFP_NOFS);
-+ if (*lea_ino_array == NULL)
-+ return -ENOMEM;
-+ (*lea_ino_array)->xia_count = 0;
-+ } else if (((*lea_ino_array)->xia_count & EIA_MASK) == EIA_MASK) {
-+ /* expand the array once all 15 + n * 16 slots are full */
-+ struct ext4_xattr_ino_array *new_array = NULL;
-+ int count = (*lea_ino_array)->xia_count;
-+
-+ /* if new_array is NULL, this is essentially offsetof() */
-+ new_array = kmalloc(
-+ offsetof(struct ext4_xattr_ino_array,
-+ xia_inodes[count + EIA_INCR]),
-+ GFP_NOFS);
-+ if (new_array == NULL)
-+ return -ENOMEM;
-+ memcpy(new_array, *lea_ino_array,
-+ offsetof(struct ext4_xattr_ino_array,
-+ xia_inodes[count]));
-+ kfree(*lea_ino_array);
-+ *lea_ino_array = new_array;
-+ }
-+ (*lea_ino_array)->xia_inodes[(*lea_ino_array)->xia_count++] = ino;
-+ return 0;
-+}
-
-+/**
-+ * Add xattr inode to orphan list
-+ */
-+static int
-+ext4_xattr_inode_orphan_add(handle_t *handle, struct inode *inode,
-+ int credits, struct ext4_xattr_ino_array *lea_ino_array)
-+{
-+ struct inode *ea_inode = NULL;
-+ int idx = 0, error = 0;
-+
-+ if (lea_ino_array == NULL)
-+ return 0;
-+
-+ for (; idx < lea_ino_array->xia_count; ++idx) {
-+ if (!ext4_handle_has_enough_credits(handle, credits)) {
-+ error = ext4_journal_extend(handle, credits);
-+ if (error > 0)
-+ error = ext4_journal_restart(handle, credits);
-+
-+ if (error != 0) {
-+ ext4_warning(inode->i_sb,
-+ "couldn't extend journal "
-+ "(err %d)", error);
-+ return error;
-+ }
-+ }
-+ ea_inode = ext4_xattr_inode_iget(inode,
-+ lea_ino_array->xia_inodes[idx], &error);
-+ if (error)
-+ continue;
-+ ext4_orphan_add(handle, ea_inode);
-+ /* the inode's i_count will be released by caller */
-+ }
-+
-+ return 0;
-+}
-
- /*
- * ext4_xattr_delete_inode()
- *
-- * Free extended attribute resources associated with this inode. This
-+ * Free extended attribute resources associated with this inode. Traverse
-+ * all entries and unlink any xattr inodes associated with this inode. This
- * is called immediately before an inode is freed. We have exclusive
-- * access to the inode.
-+ * access to the inode. If an orphan inode is deleted it will also delete any
-+ * xattr block and all xattr inodes. They are checked by ext4_xattr_inode_iget()
-+ * to ensure they belong to the parent inode and were not deleted already.
- */
--void
--ext4_xattr_delete_inode(handle_t *handle, struct inode *inode)
-+int
-+ext4_xattr_delete_inode(handle_t *handle, struct inode *inode,
-+ struct ext4_xattr_ino_array **lea_ino_array)
- {
- struct buffer_head *bh = NULL;
-+ struct ext4_xattr_ibody_header *header;
-+ struct ext4_inode *raw_inode;
-+ struct ext4_iloc iloc;
-+ struct ext4_xattr_entry *entry;
-+ int credits = 3, error = 0;
-
-- if (!EXT4_I(inode)->i_file_acl)
-+ if (!ext4_test_inode_state(inode, EXT4_STATE_XATTR))
-+ goto delete_external_ea;
-+
-+ error = ext4_get_inode_loc(inode, &iloc);
-+ if (error)
-+ goto cleanup;
-+ raw_inode = ext4_raw_inode(&iloc);
-+ header = IHDR(inode, raw_inode);
-+ for (entry = IFIRST(header); !IS_LAST_ENTRY(entry);
-+ entry = EXT4_XATTR_NEXT(entry)) {
-+ if (!entry->e_value_inum)
-+ continue;
-+ if (ext4_expand_ino_array(lea_ino_array,
-+ entry->e_value_inum) != 0) {
-+ brelse(iloc.bh);
-+ goto cleanup;
-+ }
-+ entry->e_value_inum = 0;
-+ }
-+ brelse(iloc.bh);
-+
-+delete_external_ea:
-+ if (!EXT4_I(inode)->i_file_acl) {
-+ /* add xattr inode to orphan list */
-+ ext4_xattr_inode_orphan_add(handle, inode, credits,
-+ *lea_ino_array);
- goto cleanup;
-+ }
- bh = sb_bread(inode->i_sb, EXT4_I(inode)->i_file_acl);
- if (!bh) {
- ext4_error(inode->i_sb, "inode %lu: block %llu read error",
-@@ -1383,11 +1807,71 @@ ext4_xattr_delete_inode(handle_t *handle
- inode->i_ino, EXT4_I(inode)->i_file_acl);
- goto cleanup;
- }
-+
-+ for (entry = BFIRST(bh); !IS_LAST_ENTRY(entry);
-+ entry = EXT4_XATTR_NEXT(entry)) {
-+ if (!entry->e_value_inum)
-+ continue;
-+ if (ext4_expand_ino_array(lea_ino_array,
-+ entry->e_value_inum) != 0)
-+ goto cleanup;
-+ entry->e_value_inum = 0;
-+ }
-+
-+ /* add xattr inode to orphan list */
-+ error = ext4_xattr_inode_orphan_add(handle, inode, credits,
-+ *lea_ino_array);
-+ if (error != 0)
-+ goto cleanup;
-+
-+ if (!IS_NOQUOTA(inode))
-+ credits += 2 * EXT4_QUOTA_DEL_BLOCKS(inode->i_sb);
-+
-+ if (!ext4_handle_has_enough_credits(handle, credits)) {
-+ error = ext4_journal_extend(handle, credits);
-+ if (error > 0)
-+ error = ext4_journal_restart(handle, credits);
-+ if (error != 0) {
-+ ext4_warning(inode->i_sb,
-+ "couldn't extend journal (err %d)", error);
-+ goto cleanup;
-+ }
-+ }
-+
- ext4_xattr_release_block(handle, inode, bh);
- EXT4_I(inode)->i_file_acl = 0;
-
- cleanup:
- brelse(bh);
-+
-+ return error;
-+}
-+
-+void
-+ext4_xattr_inode_array_free(struct inode *inode,
-+ struct ext4_xattr_ino_array *lea_ino_array)
-+{
-+ struct inode *ea_inode = NULL;
-+ int idx = 0;
-+ int err;
-+
-+ if (lea_ino_array == NULL)
-+ return;
-+
-+ for (; idx < lea_ino_array->xia_count; ++idx) {
-+ ea_inode = ext4_xattr_inode_iget(inode,
-+ lea_ino_array->xia_inodes[idx], &err);
-+ if (err)
-+ continue;
-+
-+ /* for inode's i_count get from ext4_xattr_delete_inode */
-+ if (!list_empty(&EXT4_I(ea_inode)->i_orphan))
-+ iput(ea_inode);
-+
-+ ea_inode->i_nlink = 0;
-+ iput(ea_inode);
-+ }
-+ kfree(lea_ino_array);
- }
-
- /*
-@@ -1457,10 +1941,9 @@ ext4_xattr_cmp(struct ext4_xattr_header
- entry1->e_name_index != entry2->e_name_index ||
- entry1->e_name_len != entry2->e_name_len ||
- entry1->e_value_size != entry2->e_value_size ||
-+ entry1->e_value_inum != entry2->e_value_inum ||
- memcmp(entry1->e_name, entry2->e_name, entry1->e_name_len))
- return 1;
-- if (entry1->e_value_block != 0 || entry2->e_value_block != 0)
-- return -EIO;
- if (memcmp((char *)header1 + le16_to_cpu(entry1->e_value_offs),
- (char *)header2 + le16_to_cpu(entry2->e_value_offs),
- le32_to_cpu(entry1->e_value_size)))
-@@ -1545,7 +2028,7 @@ static inline void ext4_xattr_hash_entry
- *name++;
- }
-
-- if (entry->e_value_block == 0 && entry->e_value_size != 0) {
-+ if (!entry->e_value_inum && entry->e_value_size) {
- __le32 *value = (__le32 *)((char *)header +
- le16_to_cpu(entry->e_value_offs));
- for (n = (le32_to_cpu(entry->e_value_size) +
-Index: linux-stage/fs/ext4/xattr.h
-===================================================================
---- linux-stage.orig/fs/ext4/xattr.h
-+++ linux-stage/fs/ext4/xattr.h
-@@ -38,7 +38,7 @@ struct ext4_xattr_entry {
- __u8 e_name_len; /* length of name */
- __u8 e_name_index; /* attribute name index */
- __le16 e_value_offs; /* offset in disk block of value */
-- __le32 e_value_block; /* disk block attribute is stored on (n/i) */
-+ __le32 e_value_inum; /* inode in which the value is stored */
- __le32 e_value_size; /* size of attribute value */
- __le32 e_hash; /* hash value of name and value */
- char e_name[0]; /* attribute name */
-@@ -63,6 +63,26 @@ struct ext4_xattr_entry {
- EXT4_I(inode)->i_extra_isize))
- #define IFIRST(hdr) ((struct ext4_xattr_entry *)((hdr)+1))
-
-+/*
-+ * Link EA inode back to parent one using i_mtime field.
-+ * Extra integer type conversion added to ignore higher
-+ * bits in i_mtime.tv_sec which might be set by ext4_get()
-+ */
-+#define EXT4_XATTR_INODE_SET_PARENT(inode, inum) \
-+do { \
-+ (inode)->i_mtime.tv_sec = inum; \
-+} while(0)
-+
-+#define EXT4_XATTR_INODE_GET_PARENT(inode) \
-+((__u32)(inode)->i_mtime.tv_sec)
-+
-+/*
-+ * The minimum size of EA value when you start storing it in an external inode
-+ * size of block - size of header - size of 1 entry - 4 null bytes
-+*/
-+#define EXT4_XATTR_MIN_LARGE_EA_SIZE(b) \
-+ ((b) - EXT4_XATTR_LEN(3) - sizeof(struct ext4_xattr_header) - 4)
-+
- # ifdef CONFIG_EXT4_FS_XATTR
-
- extern struct xattr_handler ext4_xattr_user_handler;
-@@ -77,7 +86,13 @@ extern int ext4_xattr_get(struct inode *
- extern int ext4_xattr_set(struct inode *, int, const char *, const void *, size_t, int);
- extern int ext4_xattr_set_handle(handle_t *, struct inode *, int, const char *, const void *, size_t, int);
-
--extern void ext4_xattr_delete_inode(handle_t *, struct inode *);
-+extern struct inode *ext4_xattr_inode_iget(struct inode *parent, unsigned long ea_ino,
-+ int *err);
-+extern int ext4_xattr_inode_unlink(struct inode *inode, unsigned long ea_ino);
-+extern int ext4_xattr_delete_inode(handle_t *handle, struct inode *inode,
-+ struct ext4_xattr_ino_array **array);
-+extern void ext4_xattr_inode_array_free(struct inode *inode,
-+ struct ext4_xattr_ino_array *array);
- extern void ext4_xattr_put_super(struct super_block *);
-
- extern int ext4_expand_extra_isize_ea(struct inode *inode, int new_extra_isize,
-@@ -111,9 +126,11 @@ ext4_xattr_set_handle(handle_t *handle,
- return -EOPNOTSUPP;
- }
-
--static inline void
--ext4_xattr_delete_inode(handle_t *handle, struct inode *inode)
-+inline int
-+ext4_xattr_delete_inode(handle_t *handle, struct inode *inode,
-+ struct ext4_xattr_ino_array **array)
- {
-+ return -EOPNOTSUPP;
- }
-
- static inline void
-Index: linux-stage/fs/ext4/inode.c
-===================================================================
---- linux-stage.orig/fs/ext4/inode.c
-+++ linux-stage/fs/ext4/inode.c
-@@ -222,6 +222,8 @@ void ext4_delete_inode(struct inode *ino
- {
- handle_t *handle;
- int err;
-+ int extra_credits = 3;
-+ struct ext4_xattr_ino_array *lea_ino_array = NULL;
-
- if (ext4_should_order_data(inode))
- ext4_begin_ordered_truncate(inode, 0);
-@@ -235,7 +237,8 @@ void ext4_delete_inode(struct inode *ino
- * protection against it
- */
- sb_start_intwrite(inode->i_sb);
-- handle = ext4_journal_start(inode, blocks_for_truncate(inode)+3);
-+
-+ handle = ext4_journal_start(inode, extra_credits);
- if (IS_ERR(handle)) {
- ext4_std_error(inode->i_sb, PTR_ERR(handle));
- /*
-@@ -247,9 +250,36 @@ void ext4_delete_inode(struct inode *ino
- sb_end_intwrite(inode->i_sb);
- goto no_delete;
- }
--
- if (IS_SYNC(inode))
- ext4_handle_sync(handle);
-+
-+ /*
-+ * Delete xattr inode before deleting the main inode.
-+ */
-+ err = ext4_xattr_delete_inode(handle, inode, &lea_ino_array);
-+ if (err) {
-+ ext4_warning(inode->i_sb,
-+ "couldn't delete inode's xattr (err %d)", err);
-+ goto stop_handle;
-+ }
-+
-+ if (!IS_NOQUOTA(inode))
-+ extra_credits += 2 * EXT4_QUOTA_DEL_BLOCKS(inode->i_sb);
-+
-+ if (!ext4_handle_has_enough_credits(handle,
-+ blocks_for_truncate(inode) + extra_credits)) {
-+ err = ext4_journal_extend(handle,
-+ blocks_for_truncate(inode) + extra_credits);
-+ if (err > 0)
-+ err = ext4_journal_restart(handle,
-+ blocks_for_truncate(inode) + extra_credits);
-+ if (err != 0) {
-+ ext4_warning(inode->i_sb,
-+ "couldn't extend journal (err %d)", err);
-+ goto stop_handle;
-+ }
-+ }
-+
- inode->i_size = 0;
- err = ext4_mark_inode_dirty(handle, inode);
- if (err) {
-@@ -266,10 +296,10 @@ void ext4_delete_inode(struct inode *ino
- * enough credits left in the handle to remove the inode from
- * the orphan list and set the dtime field.
- */
-- if (!ext4_handle_has_enough_credits(handle, 3)) {
-- err = ext4_journal_extend(handle, 3);
-+ if (!ext4_handle_has_enough_credits(handle, extra_credits)) {
-+ err = ext4_journal_extend(handle, extra_credits);
- if (err > 0)
-- err = ext4_journal_restart(handle, 3);
-+ err = ext4_journal_restart(handle, extra_credits);
- if (err != 0) {
- ext4_warning(inode->i_sb,
- "couldn't extend journal (err %d)", err);
-@@ -303,8 +333,12 @@ void ext4_delete_inode(struct inode *ino
- clear_inode(inode);
- else
- ext4_free_inode(handle, inode);
-+
- ext4_journal_stop(handle);
- sb_end_intwrite(inode->i_sb);
-+
-+ if (lea_ino_array != NULL)
-+ ext4_xattr_inode_array_free(inode, lea_ino_array);
- return;
- no_delete:
- clear_inode(inode); /* We must guarantee clearing of inode... */
-Index: linux-stage/fs/ext4/ialloc.c
-===================================================================
---- linux-stage.orig/fs/ext4/ialloc.c
-+++ linux-stage/fs/ext4/ialloc.c
-@@ -219,7 +219,6 @@ void ext4_free_inode(handle_t *handle, s
- * as writing the quota to disk may need the lock as well.
- */
- vfs_dq_init(inode);
-- ext4_xattr_delete_inode(handle, inode);
- vfs_dq_free_inode(inode);
- vfs_dq_drop(inode);
-
+++ /dev/null
-Index: linux-2.6.18.i386/fs/ext4/namei.c
-===================================================================
---- linux-2.6.18.i386.orig/fs/ext4/namei.c
-+++ linux-2.6.18.i386/fs/ext4/namei.c
-@@ -1067,6 +1067,38 @@ static struct dentry *ext4_lookup(struct
- }
- }
- }
-+ /* ".." shouldn't go into dcache to preserve dcache hierarchy
-+ * otherwise we'll get parent being a child of actual child.
-+ * see bug 10458 for details -bzzz */
-+ if (inode && (dentry->d_name.name[0] == '.' && (dentry->d_name.len == 1 ||
-+ (dentry->d_name.len == 2 && dentry->d_name.name[1] == '.')))) {
-+ struct dentry *tmp, *goal = NULL;
-+ struct list_head *lp;
-+
-+ /* first, look for an existing dentry - any one is good */
-+ spin_lock(&dcache_lock);
-+ list_for_each(lp, &inode->i_dentry) {
-+ tmp = list_entry(lp, struct dentry, d_alias);
-+ goal = tmp;
-+ dget_locked(goal);
-+ break;
-+ }
-+ if (goal == NULL) {
-+ /* there is no alias, we need to make current dentry:
-+ * a) inaccessible for __d_lookup()
-+ * b) inaccessible for iopen */
-+ J_ASSERT(list_empty(&dentry->d_alias));
-+ dentry->d_flags |= DCACHE_NFSFS_RENAMED;
-+ /* this is d_instantiate() ... */
-+ list_add(&dentry->d_alias, &inode->i_dentry);
-+ dentry->d_inode = inode;
-+ }
-+ spin_unlock(&dcache_lock);
-+ if (goal)
-+ iput(inode);
-+ return goal;
-+ }
-+
- return d_splice_alias(inode, dentry);
- }
-
+++ /dev/null
-diff -ur a/fs/ext4/ext4.h b/fs/ext4/ext4.h
---- a/fs/ext4/ext4.h 2013-03-14 12:04:44.105541822 -0400
-+++ b/fs/ext4/ext4.h 2013-03-14 12:09:14.264489405 -0400
-@@ -1661,6 +1661,8 @@
- extern int ext4_block_truncate_page(handle_t *handle,
- struct address_space *mapping, loff_t from);
- extern int ext4_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf);
-+extern int ext4_map_inode_page(struct inode *inode, struct page *page,
-+ sector_t *blocks, int create);
- extern qsize_t *ext4_get_reserved_space(struct inode *inode);
- extern int flush_aio_dio_completed_IO(struct inode *inode);
- extern void ext4_da_update_reserve_space(struct inode *inode,
-diff -ur a/fs/ext4/inode.c b/fs/ext4/inode.c
---- a/fs/ext4/inode.c 2013-03-14 12:04:44.103541330 -0400
-+++ b/fs/ext4/inode.c 2013-03-14 12:11:16.526353498 -0400
-@@ -6131,3 +6131,62 @@
- out:
- return ret;
- }
-+
-+int ext4_map_inode_page(struct inode *inode, struct page *page,
-+ sector_t *blocks, int create)
-+{
-+ unsigned int blocksize, blocks_per_page;
-+ unsigned long iblock;
-+ struct buffer_head dummy;
-+ void *handle;
-+ int i, rc = 0, failed = 0, needed_blocks;
-+
-+ blocksize = inode->i_sb->s_blocksize;
-+ blocks_per_page = PAGE_SIZE >> inode->i_sb->s_blocksize_bits;
-+ iblock = page->index * blocks_per_page;
-+
-+ for (i = 0; i < blocks_per_page; i++, iblock++) {
-+ blocks[i] = ext4_bmap(inode->i_mapping, iblock);
-+ if (blocks[i] == 0) {
-+ failed++;
-+ }
-+ }
-+
-+ if (failed == 0 || create == 0)
-+ return 0;
-+
-+ needed_blocks = ext4_writepage_trans_blocks(inode);
-+ handle = ext4_journal_start(inode, needed_blocks);
-+ if (IS_ERR(handle))
-+ return PTR_ERR(handle);
-+
-+ iblock = page->index * blocks_per_page;
-+ for (i = 0; i < blocks_per_page; i++, iblock++) {
-+ if (blocks[i] != 0)
-+ continue;
-+
-+ rc = ext4_ind_get_blocks(handle, inode, iblock, 1, &dummy,
-+ EXT4_GET_BLOCKS_CREATE);
-+ if (rc < 0) {
-+ printk(KERN_INFO "ext4_map_inode_page: error reading "
-+ "block %ld\n", iblock);
-+ goto out;
-+ } else {
-+ if (rc > 1)
-+ WARN_ON(1);
-+ rc = 0;
-+ }
-+ /* Unmap any metadata buffers from the block mapping, to avoid
-+ * data corruption due to direct-write from Lustre being
-+ * clobbered by a later flush of the blockdev metadata buffer.*/
-+ if (buffer_new(&dummy))
-+ unmap_underlying_metadata(dummy.b_bdev,
-+ dummy.b_blocknr);
-+ blocks[i] = dummy.b_blocknr;
-+ }
-+
-+out:
-+ ext4_journal_stop(handle);
-+ return rc;
-+}
-+EXPORT_SYMBOL(ext4_map_inode_page);
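A minimal sketch (hypothetical caller, not part of this patch) of how an
external module such as a Lustre OSD might use the newly exported
ext4_map_inode_page(); the name osd_map_page_blocks is invented, and the
blocks array must hold one entry per block in the page:

static int osd_map_page_blocks(struct inode *inode, struct page *page,
                               sector_t *blocks)
{
        /* create = 1: allocate (and journal) any unmapped blocks behind the page */
        int rc = ext4_map_inode_page(inode, page, blocks, 1);

        if (rc)
                printk(KERN_WARNING "mapping page %lu of inode %lu failed: %d\n",
                       page->index, inode->i_ino, rc);
        return rc;
}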
+++ /dev/null
-diff -urpN linux-stage.orig/fs/ext4/super.c linux-stage/fs/ext4/super.c
---- linux-stage.orig/fs/ext4/super.c 2013-05-13 10:29:34.125478791 -0400
-+++ linux-stage/fs/ext4/super.c 2013-05-13 10:31:59.800359005 -0400
-@@ -1264,8 +1264,8 @@ enum {
- Opt_mballoc, Opt_bigendian_extents, Opt_force_over_128tb,
- Opt_extents, Opt_noextents,
- Opt_no_mbcache,
-- Opt_discard, Opt_nodiscard,
-- Opt_init_inode_table, Opt_noinit_inode_table,
-+ Opt_discard, Opt_nodiscard, Opt_init_inode_table, Opt_noinit_inode_table,
-+ Opt_max_dir_size_kb,
- };
-
- static const match_table_t tokens = {
-@@ -1346,6 +1346,7 @@ static const match_table_t tokens = {
- {Opt_init_inode_table, "init_itable=%u"},
- {Opt_init_inode_table, "init_itable"},
- {Opt_noinit_inode_table, "noinit_itable"},
-+ {Opt_max_dir_size_kb, "max_dir_size_kb=%u"},
- {Opt_err, NULL},
- };
-
-@@ -1732,6 +1733,13 @@ set_qf_format:
- case Opt_nodelalloc:
- clear_opt(sbi->s_mount_opt, DELALLOC);
- break;
-+ case Opt_max_dir_size_kb:
-+ if (match_int(&args[0], &option))
-+ return 0;
-+ if (option < 0)
-+ return 0;
-+ sbi->s_max_dir_size = option * 1024;
-+ break;
- case Opt_stripe:
- if (match_int(&args[0], &option))
- return 0;
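For reference, the new option is used like any other ext4 mount option, e.g.
"mount -t ext4 -o max_dir_size_kb=512 /dev/sdX /mnt" (device and mount point
are only placeholders); the parsed value is stored internally in bytes as
option * 1024 and is intended to cap how large a single directory may grow.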
+++ /dev/null
-Index: linux-stage/fs/ext4/ext4.h
-===================================================================
---- linux-stage.orig/fs/ext4/ext4.h 2011-03-14 16:18:28.300241437 +0800
-+++ linux-stage/fs/ext4/ext4.h 2011-03-14 16:33:17.056087375 +0800
-@@ -1770,6 +1770,7 @@
- ext4_grpblk_t bb_free; /* total free blocks */
- ext4_grpblk_t bb_fragments; /* nr of freespace fragments */
- struct list_head bb_prealloc_list;
-+ unsigned long bb_prealloc_nr;
- #ifdef DOUBLE_CHECK
- void *bb_bitmap;
- #endif
-Index: linux-stage/fs/ext4/mballoc.c
-===================================================================
---- linux-stage.orig/fs/ext4/mballoc.c 2011-03-14 16:18:28.336242149 +0800
-+++ linux-stage/fs/ext4/mballoc.c 2011-03-14 16:33:27.072292006 +0800
-@@ -337,7 +337,7 @@
- static struct kmem_cache *ext4_pspace_cachep;
- static struct kmem_cache *ext4_ac_cachep;
- static struct kmem_cache *ext4_free_ext_cachep;
--static void ext4_mb_generate_from_pa(struct super_block *sb, void *bitmap,
-+static int ext4_mb_generate_from_pa(struct super_block *sb, void *bitmap,
- ext4_group_t group);
- static void ext4_mb_generate_from_freelist(struct super_block *sb, void *bitmap,
- ext4_group_t group);
-@@ -659,7 +659,7 @@
- }
-
- static noinline_for_stack
--void ext4_mb_generate_buddy(struct super_block *sb,
-+int ext4_mb_generate_buddy(struct super_block *sb,
- void *buddy, void *bitmap, ext4_group_t group)
- {
- struct ext4_group_info *grp = ext4_get_group_info(sb, group);
-@@ -691,14 +691,13 @@
- grp->bb_fragments = fragments;
-
- if (free != grp->bb_free) {
-- ext4_grp_locked_error(sb, group, __func__,
-- "EXT4-fs: group %u: %u blocks in bitmap, %u in gd",
-- group, free, grp->bb_free);
-- /*
-- * If we intent to continue, we consider group descritor
-- * corrupt and update bb_free using bitmap value
-- */
-- grp->bb_free = free;
-+ struct ext4_group_desc *gdp;
-+ gdp = ext4_get_group_desc (sb, group, NULL);
-+ ext4_error(sb, "group %lu: %u blocks in bitmap, %u in bb, "
-+ "%u in gd, %lu pa's\n", (long unsigned int)group,
-+ free, grp->bb_free, ext4_free_blks_count(sb, gdp),
-+ grp->bb_prealloc_nr);
-+ return -EIO;
- }
-
- clear_bit(EXT4_GROUP_INFO_NEED_INIT_BIT, &(grp->bb_state));
-@@ -708,6 +707,8 @@
- EXT4_SB(sb)->s_mb_buddies_generated++;
- EXT4_SB(sb)->s_mb_generation_time += period;
- spin_unlock(&EXT4_SB(sb)->s_bal_lock);
-+
-+ return 0;
- }
-
- /* The buddy information is attached the buddy cache inode
-@@ -839,7 +840,7 @@
-
- err = 0;
- first_block = page->index * blocks_per_page;
-- for (i = 0; i < blocks_per_page; i++) {
-+ for (i = 0; i < blocks_per_page && err == 0; i++) {
- int group;
- struct ext4_group_info *grinfo;
-
-@@ -874,7 +875,7 @@
- ext4_lock_group(sb, group);
- /* init the buddy */
- memset(data, 0xff, blocksize);
-- ext4_mb_generate_buddy(sb, data, incore, group);
-+ err = ext4_mb_generate_buddy(sb, data, incore, group);
- ext4_unlock_group(sb, group);
- incore = NULL;
- } else {
-@@ -888,7 +889,7 @@
- memcpy(data, bitmap, blocksize);
-
- /* mark all preallocated blks used in in-core bitmap */
-- ext4_mb_generate_from_pa(sb, data, group);
-+ err = ext4_mb_generate_from_pa(sb, data, group);
- ext4_mb_generate_from_freelist(sb, data, group);
- ext4_unlock_group(sb, group);
-
-@@ -898,7 +899,8 @@
- incore = data;
- }
- }
-- SetPageUptodate(page);
-+ if (likely(err == 0))
-+ SetPageUptodate(page);
-
- out:
- if (bh) {
-@@ -2142,9 +2144,11 @@
- static int ext4_mb_seq_groups_show(struct seq_file *seq, void *v)
- {
- struct super_block *sb = seq->private;
-+ struct ext4_group_desc *gdp;
- ext4_group_t group = (ext4_group_t) ((unsigned long) v);
- int i;
- int err;
-+ int free = 0;
- struct ext4_buddy e4b;
- struct sg {
- struct ext4_group_info info;
-@@ -2153,10 +2157,10 @@
-
- group--;
- if (group == 0)
-- seq_printf(seq, "#%-5s: %-5s %-5s %-5s "
-+ seq_printf(seq, "#%-5s: %-5s %-5s %-5s %-5s %-5s"
- "[ %-5s %-5s %-5s %-5s %-5s %-5s %-5s "
- "%-5s %-5s %-5s %-5s %-5s %-5s %-5s ]\n",
-- "group", "free", "frags", "first",
-+ "group", "free", "free", "frags", "first", "pa",
- "2^0", "2^1", "2^2", "2^3", "2^4", "2^5", "2^6",
- "2^7", "2^8", "2^9", "2^10", "2^11", "2^12", "2^13");
-
-@@ -2167,13 +2171,20 @@
- seq_printf(seq, "#%-5u: I/O error\n", group);
- return 0;
- }
-+
-+ gdp = ext4_get_group_desc(sb, group, NULL);
-+ if (gdp != NULL)
-+ free = ext4_free_blks_count(sb, gdp);
-+
- ext4_lock_group(sb, group);
- memcpy(&sg, ext4_get_group_info(sb, group), i);
- ext4_unlock_group(sb, group);
- ext4_mb_release_desc(&e4b);
-
-- seq_printf(seq, "#%-5u: %-5u %-5u %-5u [", group, sg.info.bb_free,
-- sg.info.bb_fragments, sg.info.bb_first_free);
-+ seq_printf(seq, "#%-5lu: %-5u %-5u %-5u %-5u %-5lu [",
-+ (long unsigned int)group, sg.info.bb_free, free,
-+ sg.info.bb_fragments, sg.info.bb_first_free,
-+ sg.info.bb_prealloc_nr);
- for (i = 0; i <= 13; i++)
- seq_printf(seq, " %-5u", i <= sb->s_blocksize_bits + 1 ?
- sg.info.bb_counters[i] : 0);
-@@ -3354,23 +3365,72 @@
- }
-
- /*
-+ * check free blocks in bitmap match free block in group descriptor
-+ * do this before taking preallocated blocks into account to be able
-+ * to detect on-disk corruptions. The group lock should be held by the
-+ * caller.
-+ */
-+int ext4_mb_check_ondisk_bitmap(struct super_block *sb, void *bitmap,
-+ struct ext4_group_desc *gdp, int group)
-+{
-+ unsigned short max = EXT4_BLOCKS_PER_GROUP(sb);
-+ unsigned short i, first, free = 0;
-+ unsigned short free_in_gdp = ext4_free_blks_count(sb, gdp);
-+
-+ if (gdp->bg_flags & cpu_to_le16(EXT4_BG_BLOCK_UNINIT))
-+ return 0;
-+
-+ i = mb_find_next_zero_bit(bitmap, max, 0);
-+
-+ while (i < max) {
-+ first = i;
-+ i = mb_find_next_bit(bitmap, max, i);
-+ if (i > max)
-+ i = max;
-+ free += i - first;
-+ if (i < max)
-+ i = mb_find_next_zero_bit(bitmap, max, i);
-+ }
-+
-+ if (free != free_in_gdp) {
-+ ext4_error(sb, "on-disk bitmap for group %d"
-+ "corrupted: %u blocks free in bitmap, %u - in gd\n",
-+ group, free, free_in_gdp);
-+ return -EIO;
-+ }
-+ return 0;
-+}
-+
-+/*
- * the function goes through all preallocation in this group and marks them
- * used in in-core bitmap. buddy must be generated from this bitmap
- * Need to be called with ext4 group lock held
- */
- static noinline_for_stack
--void ext4_mb_generate_from_pa(struct super_block *sb, void *bitmap,
-+int ext4_mb_generate_from_pa(struct super_block *sb, void *bitmap,
- ext4_group_t group)
- {
- struct ext4_group_info *grp = ext4_get_group_info(sb, group);
- struct ext4_prealloc_space *pa;
-+ struct ext4_group_desc *gdp;
- struct list_head *cur;
- ext4_group_t groupnr;
- ext4_grpblk_t start;
- int preallocated = 0;
- int count = 0;
-+ int skip = 0;
-+ int err;
- int len;
-
-+ gdp = ext4_get_group_desc (sb, group, NULL);
-+ if (gdp == NULL)
-+ return -EIO;
-+
-+ /* before applying preallocations, check bitmap consistency */
-+ err = ext4_mb_check_ondisk_bitmap(sb, bitmap, gdp, group);
-+ if (err)
-+ return err;
-+
- /* all form of preallocation discards first load group,
- * so the only competing code is preallocation use.
- * we don't need any locking here
-@@ -3386,14 +3442,23 @@
- &groupnr, &start);
- len = pa->pa_len;
- spin_unlock(&pa->pa_lock);
-- if (unlikely(len == 0))
-+ if (unlikely(len == 0)) {
-+ skip++;
- continue;
-+ }
- BUG_ON(groupnr != group);
- mb_set_bits(bitmap, start, len);
- preallocated += len;
- count++;
- }
-+ if (count + skip != grp->bb_prealloc_nr) {
-+ ext4_error(sb, "lost preallocations: "
-+ "count %d, bb_prealloc_nr %lu, skip %d\n",
-+ count, grp->bb_prealloc_nr, skip);
-+ return -EIO;
-+ }
- mb_debug(1, "prellocated %u for group %u\n", preallocated, group);
-+ return 0;
- }
-
- static void ext4_mb_pa_callback(struct rcu_head *head)
-@@ -3452,6 +3517,7 @@
- */
- ext4_lock_group(sb, grp);
- list_del(&pa->pa_group_list);
-+ ext4_get_group_info(sb, grp)->bb_prealloc_nr--;
- ext4_unlock_group(sb, grp);
-
- spin_lock(pa->pa_obj_lock);
-@@ -3543,6 +3609,7 @@
-
- ext4_lock_group(sb, ac->ac_b_ex.fe_group);
- list_add(&pa->pa_group_list, &grp->bb_prealloc_list);
-+ grp->bb_prealloc_nr++;
- ext4_unlock_group(sb, ac->ac_b_ex.fe_group);
-
- spin_lock(pa->pa_obj_lock);
-@@ -3604,6 +3671,7 @@
-
- ext4_lock_group(sb, ac->ac_b_ex.fe_group);
- list_add(&pa->pa_group_list, &grp->bb_prealloc_list);
-+ grp->bb_prealloc_nr++;
- ext4_unlock_group(sb, ac->ac_b_ex.fe_group);
-
- /*
-@@ -3802,6 +3870,8 @@
-
- spin_unlock(&pa->pa_lock);
-
-+ BUG_ON(grp->bb_prealloc_nr == 0);
-+ grp->bb_prealloc_nr--;
- list_del(&pa->pa_group_list);
- list_add(&pa->u.pa_tmp_list, &list);
- }
-@@ -3942,7 +4012,7 @@
- if (err) {
- ext4_error(sb, "Error loading buddy information for %u",
- group);
-- continue;
-+ return;
- }
-
- bitmap_bh = ext4_read_block_bitmap(sb, group);
-@@ -3954,6 +4024,8 @@
- }
-
- ext4_lock_group(sb, group);
-+ BUG_ON(e4b.bd_info->bb_prealloc_nr == 0);
-+ e4b.bd_info->bb_prealloc_nr--;
- list_del(&pa->pa_group_list);
- ext4_mb_release_inode_pa(&e4b, bitmap_bh, pa, ac);
- ext4_unlock_group(sb, group);
-@@ -4227,6 +4299,7 @@
- }
- ext4_lock_group(sb, group);
- list_del(&pa->pa_group_list);
-+ ext4_get_group_info(sb, group)->bb_prealloc_nr--;
- ext4_mb_release_group_pa(&e4b, pa, ac);
- ext4_unlock_group(sb, group);
-
-Index: linux-stage/fs/ext4/mballoc.h
-===================================================================
---- linux-stage.orig/fs/ext4/mballoc.h 2011-03-14 16:18:26.670209322 +0800
-+++ linux-stage/fs/ext4/mballoc.h 2011-03-14 16:32:50.859552482 +0800
-@@ -88,7 +88,7 @@
- /*
- * for which requests use 2^N search using buddies
- */
--#define MB_DEFAULT_ORDER2_REQS 2
-+#define MB_DEFAULT_ORDER2_REQS 8
-
- /*
- * default group prealloc size 512 blocks
+++ /dev/null
-Index: linux-stage/fs/ext4/mballoc.c
-===================================================================
---- linux-stage.orig/fs/ext4/mballoc.c 2011-03-14 16:34:39.790758415 +0800
-+++ linux-stage/fs/ext4/mballoc.c 2011-03-14 16:38:36.211681104 +0800
-@@ -3593,6 +3593,7 @@
- INIT_LIST_HEAD(&pa->pa_group_list);
- pa->pa_deleted = 0;
- pa->pa_type = MB_INODE_PA;
-+ pa->pa_error = 0;
-
- mb_debug(1, "new inode pa %p: %llu/%u for %u\n", pa,
- pa->pa_pstart, pa->pa_len, pa->pa_lstart);
-@@ -3654,6 +3655,7 @@
- INIT_LIST_HEAD(&pa->pa_group_list);
- pa->pa_deleted = 0;
- pa->pa_type = MB_GROUP_PA;
-+ pa->pa_error = 0;
-
- mb_debug(1, "new group pa %p: %llu/%u for %u\n", pa,
- pa->pa_pstart, pa->pa_len, pa->pa_lstart);
-@@ -3716,7 +3718,9 @@
- int err = 0;
- int free = 0;
-
-+ assert_spin_locked(ext4_group_lock_ptr(sb, e4b->bd_group));
- BUG_ON(pa->pa_deleted == 0);
-+ BUG_ON(pa->pa_inode == NULL);
- ext4_get_group_no_and_offset(sb, pa->pa_pstart, &group, &bit);
- grp_blk_start = pa->pa_pstart - bit;
- BUG_ON(group != e4b->bd_group && pa->pa_len != 0);
-@@ -3752,19 +3756,27 @@
- mb_free_blocks(pa->pa_inode, e4b, bit, next - bit);
- bit = next + 1;
- }
-- if (free != pa->pa_free) {
-- printk(KERN_CRIT "pa %p: logic %lu, phys. %lu, len %lu\n",
-- pa, (unsigned long) pa->pa_lstart,
-- (unsigned long) pa->pa_pstart,
-- (unsigned long) pa->pa_len);
-+
-+ /* "free < pa->pa_free" means we maybe double alloc the same blocks,
-+ * otherwise maybe leave some free blocks unavailable, no need to BUG.*/
-+ if ((free > pa->pa_free && !pa->pa_error) || (free < pa->pa_free)) {
-+ ext4_error(sb, "pa free mismatch: [pa %p] "
-+ "[phy %lu] [logic %lu] [len %u] [free %u] "
-+ "[error %u] [inode %lu] [freed %u]", pa,
-+ (unsigned long)pa->pa_pstart,
-+ (unsigned long)pa->pa_lstart,
-+ (unsigned)pa->pa_len, (unsigned)pa->pa_free,
-+ (unsigned)pa->pa_error, pa->pa_inode->i_ino,
-+ free);
- ext4_grp_locked_error(sb, group,
-- __func__, "free %u, pa_free %u",
-- free, pa->pa_free);
-+ __func__, "free %u, pa_free %u",
-+ free, pa->pa_free);
- /*
- * pa is already deleted so we use the value obtained
- * from the bitmap and continue.
- */
- }
-+ BUG_ON(pa->pa_free != free);
- atomic_add(free, &sbi->s_mb_discarded);
-
- return err;
-@@ -4511,6 +4541,25 @@
- ac->ac_b_ex.fe_len = 0;
- ar->len = 0;
- ext4_mb_show_ac(ac);
-+ if (ac->ac_pa) {
-+ struct ext4_prealloc_space *pa = ac->ac_pa;
-+
-+ /* We cannot be sure whether the bitmap has been updated
-+ * in the failure case, so we cannot revert pa_free;
-+ * just mark pa_error instead. */
-+ pa->pa_error++;
-+ ext4_error(sb,
-+ "Updating bitmap error: [err %d] "
-+ "[pa %p] [phy %lu] [logic %lu] "
-+ "[len %u] [free %u] [error %u] "
-+ "[inode %lu]", *errp, pa,
-+ (unsigned long)pa->pa_pstart,
-+ (unsigned long)pa->pa_lstart,
-+ (unsigned)pa->pa_len,
-+ (unsigned)pa->pa_free,
-+ (unsigned)pa->pa_error,
-+ pa->pa_inode ? pa->pa_inode->i_ino : 0);
-+ }
- } else {
- block = ext4_grp_offs_to_block(sb, &ac->ac_b_ex);
- ar->len = ac->ac_b_ex.fe_len;
-Index: linux-stage/fs/ext4/mballoc.h
-===================================================================
---- linux-stage.orig/fs/ext4/mballoc.h 2011-03-14 16:32:50.859552482 +0800
-+++ linux-stage/fs/ext4/mballoc.h 2011-03-14 16:39:20.928429776 +0800
-@@ -20,6 +20,7 @@
- #include <linux/version.h>
- #include <linux/blkdev.h>
- #include <linux/mutex.h>
-+#include <linux/genhd.h>
- #include "ext4_jbd2.h"
- #include "ext4.h"
-
-@@ -130,6 +131,7 @@
- ext4_grpblk_t pa_free; /* how many blocks are free */
- unsigned short pa_type; /* pa type. inode or group */
- spinlock_t *pa_obj_lock;
-+ unsigned short pa_error;
- struct inode *pa_inode; /* hack, for history only */
- };
-
+++ /dev/null
-Index: linux-stage/fs/ext4/ext4.h
-===================================================================
---- linux-stage.orig/fs/ext4/ext4.h 2011-05-20 10:59:32.000000000 +0300
-+++ linux-stage/fs/ext4/ext4.h 2011-05-20 11:01:06.000000000 +0300
-@@ -1630,6 +1633,9 @@ extern void ext4_mb_put_buddy_cache_lock
- ext4_group_t, int);
- extern int ext4_trim_fs(struct super_block *, struct fstrim_range *);
-
-+extern void ext4_mb_discard_inode_preallocations(struct inode *);
-+
-+
- /* inode.c */
- int ext4_forget(handle_t *handle, int is_metadata, struct inode *inode,
- struct buffer_head *bh, ext4_fsblk_t blocknr);
-Index: linux-stage/fs/ext4/ext4_extents.h
-===================================================================
---- linux-stage.orig/fs/ext4/ext4_extents.h 2011-05-20 10:59:30.000000000 +0300
-+++ linux-stage/fs/ext4/ext4_extents.h 2011-05-20 11:00:01.000000000 +0300
-@@ -58,6 +58,12 @@
- */
- #define EXT_STATS_
-
-+/*
-+ * define EXT4_ALLOC_NEEDED to 0 since block bitmap, group desc. and sb
-+ * are now accounted in ext4_ext_calc_credits_for_insert()
-+ */
-+#define EXT4_ALLOC_NEEDED 0
-+#define HAVE_EXT_PREPARE_CB_EXTENT
-
- /*
- * ext4_inode has i_block array (60 bytes total).
-@@ -239,6 +245,8 @@ extern int ext4_extent_tree_init(handle_
- extern int ext4_ext_calc_credits_for_single_extent(struct inode *inode,
- int num,
- struct ext4_ext_path *path);
-+extern int ext4_ext_calc_credits_for_insert(struct inode *,
-+ struct ext4_ext_path *);
- extern int ext4_can_extents_be_merged(struct inode *inode,
- struct ext4_extent *ex1,
- struct ext4_extent *ex2);
-Index: linux-stage/fs/ext4/ext4_jbd2.c
-===================================================================
---- linux-stage.orig/fs/ext4/ext4_jbd2.c 2011-05-20 10:59:29.000000000 +0300
-+++ linux-stage/fs/ext4/ext4_jbd2.c 2011-05-20 11:00:01.000000000 +0300
-@@ -31,6 +31,7 @@ int __ext4_journal_get_write_access(cons
- }
- return err;
- }
-+EXPORT_SYMBOL(__ext4_journal_get_write_access);
-
- int __ext4_journal_forget(const char *where, handle_t *handle,
- struct buffer_head *bh)
-@@ -107,3 +108,4 @@ int __ext4_handle_dirty_metadata(const c
- }
- return err;
- }
-+EXPORT_SYMBOL(__ext4_handle_dirty_metadata);
-Index: linux-stage/fs/ext4/extents.c
-===================================================================
---- linux-stage.orig/fs/ext4/extents.c
-+++ linux-stage/fs/ext4/extents.c
-@@ -2133,6 +2133,55 @@ int ext4_ext_calc_credits_for_single_ext
- }
-
- /*
-+ * This routine returns the maximum number of credits the extent tree
-+ * can consume. It should be OK for low-performance paths like
-+ * ->writepage(). To allow many writing processes to fit into a single
-+ * transaction, the caller should calculate credits under truncate_mutex
-+ * and pass the actual path.
-+ */
-+int ext4_ext_calc_credits_for_insert(struct inode *inode,
-+ struct ext4_ext_path *path)
-+{
-+ int depth, needed;
-+
-+ if (path) {
-+ /* probably there is space in leaf? */
-+ depth = path->p_depth;
-+ if (le16_to_cpu(path[depth].p_hdr->eh_entries)
-+ < le16_to_cpu(path[depth].p_hdr->eh_max))
-+ return 1;
-+ }
-+
-+ /*
-+ * given 32bit logical block (4294967296 blocks), max. tree
-+ * can be 4 levels in depth -- 4 * 340^4 == 53453440000.
-+ * let's also add one more level for imbalance.
-+ */
-+ depth = 5;
-+
-+ /* allocation of new data block(s) */
-+ needed = 2;
-+
-+ /*
-+ * tree can be full, so it'd need to grow in depth:
-+ * we need one credit to modify old root, credits for
-+ * new root will be added in split accounting
-+ */
-+ needed += 1;
-+ /*
-+ * Index split can happen, we'd need:
-+ * allocate intermediate indexes (bitmap + group)
-+ * + change two blocks at each level, but root (already included)
-+ */
-+ needed += (depth * 2) + (depth * 2);
-+
-+ /* any allocation modifies superblock */
-+ needed += 1;
-+
-+ return needed;
-+}
-+
-+/*
- * How many index/leaf blocks need to change/allocate to modify nrblocks?
- *
- * if nrblocks are fit in a single extent (chunk flag is 1), then
-@@ -4029,3 +4079,14 @@ int ext4_fiemap(struct inode *inode, str
- return error;
- }
-
-+EXPORT_SYMBOL(ext4_ext_store_pblock);
-+EXPORT_SYMBOL(ext4_ext_search_right);
-+EXPORT_SYMBOL(ext4_ext_search_left);
-+EXPORT_SYMBOL(ext_pblock);
-+EXPORT_SYMBOL(ext4_ext_insert_extent);
-+EXPORT_SYMBOL(ext4_mb_new_blocks);
-+EXPORT_SYMBOL(ext4_ext_calc_credits_for_insert);
-+EXPORT_SYMBOL(ext4_mark_inode_dirty);
-+EXPORT_SYMBOL(ext4_ext_walk_space);
-+EXPORT_SYMBOL(ext4_ext_find_extent);
-+EXPORT_SYMBOL(ext4_ext_drop_refs);
-+
-Index: linux-stage/fs/ext4/inode.c
-===================================================================
---- linux-stage.orig/fs/ext4/inode.c 2011-05-20 10:59:31.000000000 +0300
-+++ linux-stage/fs/ext4/inode.c 2011-05-20 11:00:01.000000000 +0300
-@@ -5249,6 +5249,7 @@ bad_inode:
- iget_failed(inode);
- return ERR_PTR(ret);
- }
-+EXPORT_SYMBOL(ext4_iget);
-
- static int ext4_inode_blocks_set(handle_t *handle,
- struct ext4_inode *raw_inode,
-Index: linux-stage/fs/ext4/mballoc.c
-===================================================================
---- linux-stage.orig/fs/ext4/mballoc.c 2011-05-20 10:59:32.000000000 +0300
-+++ linux-stage/fs/ext4/mballoc.c 2011-05-20 11:00:01.000000000 +0300
-@@ -4044,6 +4044,7 @@ repeat:
- if (ac)
- kmem_cache_free(ext4_ac_cachep, ac);
- }
-+EXPORT_SYMBOL(ext4_discard_preallocations);
-
- /*
- * finds all preallocated spaces and return blocks being freed to them
-@@ -5029,3 +5030,6 @@ int ext4_trim_fs(struct super_block *sb,
-
- return ret;
- }
-+
-+EXPORT_SYMBOL(ext4_free_blocks);
-+
-Index: linux-stage/fs/ext4/super.c
-===================================================================
---- linux-stage.orig/fs/ext4/super.c 2011-05-20 10:59:31.000000000 +0300
-+++ linux-stage/fs/ext4/super.c 2011-05-20 11:00:01.000000000 +0300
-@@ -128,6 +128,7 @@ __u32 ext4_itable_unused_count(struct su
- (EXT4_DESC_SIZE(sb) >= EXT4_MIN_DESC_SIZE_64BIT ?
- (__u32)le16_to_cpu(bg->bg_itable_unused_hi) << 16 : 0);
- }
-+EXPORT_SYMBOL(ext4_itable_unused_count);
-
- void ext4_block_bitmap_set(struct super_block *sb,
- struct ext4_group_desc *bg, ext4_fsblk_t blk)
+++ /dev/null
-From fe18d649891d813964d3aaeebad873f281627fbc Mon Sep 17 00:00:00 2001
-From: Li Dongyang <dongyangli@ddn.com>
-Date: Sat, 15 Sep 2018 17:11:25 -0400
-Subject: [PATCH] ext4: don't mark mmp buffer head dirty
-
-Marking the mmp bh dirty before writing it lets writeback
-pick up the mmp block later and submit its own write; we don't want
-that duplicate write, as the kmmpd thread should have full control
-of reading and writing the mmp block.
-Another reason is that we would also get random I/O errors on
-the writeback request when block integrity is enabled, because
-kmmpd can modify the content of the mmp block (e.g. setting a
-new seq and time) while the block is under I/O requested by
-writeback. (The resulting write_mmp_block() is reconstructed
-after this patch for reference.)
-
-Signed-off-by: Li Dongyang <dongyangli@ddn.com>
-Signed-off-by: Theodore Ts'o <tytso@mit.edu>
-Reviewed-by: Andreas Dilger <adilger@dilger.ca>
-Cc: stable@vger.kernel.org
----
- fs/ext4/mmp.c | 1 -
- 1 file changed, 1 deletion(-)
-
-Index: linux-stage/fs/ext4/mmp.c
-===================================================================
---- linux-stage.orig/fs/ext4/mmp.c
-+++ linux-stage/fs/ext4/mmp.c
-@@ -12,7 +12,6 @@
- */
- static int write_mmp_block(struct buffer_head *bh)
- {
-- mark_buffer_dirty(bh);
- lock_buffer(bh);
- bh->b_end_io = end_buffer_write_sync;
- get_bh(bh);
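For reference, a reconstruction (from the hunk context plus the original mmp.c
patch elsewhere in this series) of write_mmp_block() once this patch is
applied; with no mark_buffer_dirty(), only kmmpd ever issues I/O against the
MMP buffer:

static int write_mmp_block(struct buffer_head *bh)
{
        lock_buffer(bh);
        bh->b_end_io = end_buffer_write_sync;
        get_bh(bh);
        submit_bh(WRITE_SYNC, bh);
        wait_on_buffer(bh);
        if (unlikely(!buffer_uptodate(bh)))
                return 1;

        return 0;
}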
+++ /dev/null
-Prevent an ext4 filesystem from being mounted multiple times.
-A sequence number is stored on disk and is periodically updated (every 5
-seconds by default) by a mounted filesystem.
-At mount time, we now wait for s_mmp_update_interval seconds to make sure
-that the MMP sequence does not change.
-In case of failure, the nodename, bdevname and the time at which the MMP
-block was last updated are displayed.
-Move all MMP code to a dedicated file (mmp.c). (A toy userspace model of
-the mount-time handshake follows this patch.)
-
-Signed-off-by: Andreas Dilger <adilger <at> whamcloud.com>
-Signed-off-by: Johann Lombardi <johann <at> whamcloud.com>
----
- fs/ext4/Makefile | 3 +-
- fs/ext4/ext4.h | 76 ++++++++++++-
- fs/ext4/mmp.c | 354 ++++++++++++++++++++++++++++++++++++++++++++++++++++++
- fs/ext4/super.c | 18 +++-
- 4 files changed, 447 insertions(+), 4 deletions(-)
- create mode 100644 fs/ext4/mmp.c
-
-Index: linux-stage/fs/ext4/Makefile
-===================================================================
---- linux-stage.orig/fs/ext4/Makefile
-+++ linux-stage/fs/ext4/Makefile
-@@ -6,7 +6,8 @@ obj-$(CONFIG_EXT4_FS) += ext4.o
-
- ext4-y := balloc.o bitmap.o dir.o file.o fsync.o ialloc.o inode.o \
- ioctl.o namei.o super.o symlink.o hash.o resize.o extents.o \
-- ext4_jbd2.o migrate.o mballoc.o block_validity.o move_extent.o
-+ ext4_jbd2.o migrate.o mballoc.o block_validity.o move_extent.o \
-+ mmp.o
-
- ext4-$(CONFIG_EXT4_FS_XATTR) += xattr.o xattr_user.o xattr_trusted.o
- ext4-$(CONFIG_EXT4_FS_POSIX_ACL) += acl.o
-Index: linux-stage/fs/ext4/ext4.h
-===================================================================
---- linux-stage.orig/fs/ext4/ext4.h
-+++ linux-stage/fs/ext4/ext4.h
-@@ -893,7 +893,7 @@ struct ext4_super_block {
- __le16 s_want_extra_isize; /* New inodes should reserve # bytes */
- __le32 s_flags; /* Miscellaneous flags */
- __le16 s_raid_stride; /* RAID stride */
-- __le16 s_mmp_interval; /* # seconds to wait in MMP checking */
-+ __le16 s_mmp_update_interval; /* # seconds to wait in MMP checking */
- __le64 s_mmp_block; /* Block for multi-mount protection */
- __le32 s_raid_stripe_width; /* blocks on all data disks (N*stride)*/
- __u8 s_log_groups_per_flex; /* FLEX_BG group size */
-@@ -1040,6 +1040,9 @@ struct ext4_sb_info {
-
- /* workqueue for dio unwritten */
- struct workqueue_struct *dio_unwritten_wq;
-+
-+ /* Kernel thread for multiple mount protection */
-+ struct task_struct *s_mmp_tsk;
-
- /* Lazy inode table initialization info */
- struct ext4_li_request *s_li_request;
-@@ -1176,7 +1179,8 @@ static inline void ext4_clear_inode_stat
- EXT4_FEATURE_INCOMPAT_META_BG| \
- EXT4_FEATURE_INCOMPAT_EXTENTS| \
- EXT4_FEATURE_INCOMPAT_64BIT| \
-- EXT4_FEATURE_INCOMPAT_FLEX_BG)
-+ EXT4_FEATURE_INCOMPAT_FLEX_BG| \
-+ EXT4_FEATURE_INCOMPAT_MMP)
- #define EXT4_FEATURE_RO_COMPAT_SUPP (EXT4_FEATURE_RO_COMPAT_SPARSE_SUPER| \
- EXT4_FEATURE_RO_COMPAT_LARGE_FILE| \
- EXT4_FEATURE_RO_COMPAT_GDT_CSUM| \
-@@ -1383,6 +1387,67 @@ void ext4_get_group_no_and_offset(struct
- extern struct proc_dir_entry *ext4_proc_root;
-
- /*
-+ * This structure will be used for multiple mount protection. It will be
-+ * written into the block number saved in the s_mmp_block field in the
-+ * superblock. Programs that check MMP should assume that if
-+ * SEQ_FSCK (or any unknown code above SEQ_MAX) is present then it is NOT safe
-+ * to use the filesystem, regardless of how old the timestamp is.
-+ */
-+#define EXT4_MMP_MAGIC 0x004D4D50U /* ASCII for MMP */
-+#define EXT4_MMP_SEQ_CLEAN 0xFF4D4D50U /* mmp_seq value for clean unmount */
-+#define EXT4_MMP_SEQ_FSCK 0xE24D4D50U /* mmp_seq value when being fscked */
-+#define EXT4_MMP_SEQ_MAX 0xE24D4D4FU /* maximum valid mmp_seq value */
-+
-+struct mmp_struct {
-+ __le32 mmp_magic; /* Magic number for MMP */
-+ __le32 mmp_seq; /* Sequence no. updated periodically */
-+
-+ /*
-+ * mmp_time, mmp_nodename & mmp_bdevname are only used for information
-+ * purposes and do not affect the correctness of the algorithm
-+ */
-+ __le64 mmp_time; /* Time last updated */
-+ char mmp_nodename[64]; /* Node which last updated MMP block */
-+ char mmp_bdevname[32]; /* Bdev which last updated MMP block */
-+
-+ /*
-+ * mmp_check_interval is used to verify if the MMP block has been
-+ * updated on the block device. The value is updated based on the
-+ * maximum time to write the MMP block during an update cycle.
-+ */
-+ __le16 mmp_check_interval;
-+
-+ __le16 mmp_pad1;
-+ __le32 mmp_pad2[227];
-+};
-+
-+/* arguments passed to the mmp thread */
-+struct mmpd_data {
-+ struct buffer_head *bh; /* bh from initial read_mmp_block() */
-+ struct super_block *sb; /* super block of the fs */
-+};
-+
-+/*
-+ * Check interval multiplier
-+ * The MMP block is written every update interval and initially checked every
-+ * update interval x the multiplier (the value is then adapted based on the
-+ * write latency). The reason is that writes can be delayed under load and we
-+ * don't want readers to incorrectly assume that the filesystem is no longer
-+ * in use.
-+ */
-+#define EXT4_MMP_CHECK_MULT 2UL
-+
-+/*
-+ * Minimum interval for MMP checking in seconds.
-+ */
-+#define EXT4_MMP_MIN_CHECK_INTERVAL 5UL
-+
-+/*
-+ * Maximum interval for MMP checking in seconds.
-+ */
-+#define EXT4_MMP_MAX_CHECK_INTERVAL 300UL
-+
-+/*
- * Function prototypes
- */
-
-@@ -1552,6 +1617,10 @@ extern void __ext4_warning(struct super_
- #define ext4_warning(sb, message...) __ext4_warning(sb, __func__, ## message)
- extern void ext4_msg(struct super_block *, const char *, const char *, ...)
- __attribute__ ((format (printf, 3, 4)));
-+extern void __dump_mmp_msg(struct super_block *, struct mmp_struct *mmp,
-+ const char *, const char *);
-+#define dump_mmp_msg(sb, mmp, msg) __dump_mmp_msg(sb, mmp, __func__, \
-+ msg)
- extern void ext4_grp_locked_error(struct super_block *, ext4_group_t,
- const char *, const char *, ...)
- __attribute__ ((format (printf, 4, 5)));
-@@ -1833,6 +1902,8 @@ extern int ext4_move_extents(struct file
- __u64 start_orig, __u64 start_donor,
- __u64 len, __u64 *moved_len);
-
-+/* mmp.c */
-+extern int ext4_multi_mount_protect(struct super_block *, ext4_fsblk_t);
-
- /*
- * Add new method to test wether block and inode bitmaps are properly
-Index: linux-stage/fs/ext4/mmp.c
-===================================================================
---- /dev/null
-+++ linux-stage/fs/ext4/mmp.c
-@@ -0,0 +1,357 @@
-+#include <linux/fs.h>
-+#include <linux/random.h>
-+#include <linux/buffer_head.h>
-+#include <linux/utsname.h>
-+#include <linux/kthread.h>
-+
-+#include "ext4.h"
-+
-+/*
-+ * Write the MMP block using WRITE_SYNC to try to get the block on-disk
-+ * faster.
-+ */
-+static int write_mmp_block(struct buffer_head *bh)
-+{
-+ mark_buffer_dirty(bh);
-+ lock_buffer(bh);
-+ bh->b_end_io = end_buffer_write_sync;
-+ get_bh(bh);
-+ submit_bh(WRITE_SYNC, bh);
-+ wait_on_buffer(bh);
-+ if (unlikely(!buffer_uptodate(bh)))
-+ return 1;
-+
-+ return 0;
-+}
-+
-+/*
-+ * Read the MMP block. It _must_ be read from disk and hence we clear the
-+ * uptodate flag on the buffer.
-+ */
-+static int read_mmp_block(struct super_block *sb, struct buffer_head **bh,
-+ ext4_fsblk_t mmp_block)
-+{
-+ struct mmp_struct *mmp;
-+
-+ if (*bh)
-+ clear_buffer_uptodate(*bh);
-+
-+ /* This would be sb_bread(sb, mmp_block), except we need to be sure
-+ * that the MD RAID device cache has been bypassed, and that the read
-+ * is not blocked in the elevator. */
-+ if (!*bh)
-+ *bh = sb_getblk(sb, mmp_block);
-+ if (*bh) {
-+ get_bh(*bh);
-+ lock_buffer(*bh);
-+ (*bh)->b_end_io = end_buffer_read_sync;
-+ submit_bh(READ_SYNC, *bh);
-+ wait_on_buffer(*bh);
-+ if (!buffer_uptodate(*bh)) {
-+ brelse(*bh);
-+ *bh = NULL;
-+ }
-+ }
-+ if (!*bh) {
-+ ext4_warning(sb, "Error while reading MMP block %llu",
-+ mmp_block);
-+ return -EIO;
-+ }
-+
-+ mmp = (struct mmp_struct *)((*bh)->b_data);
-+ if (le32_to_cpu(mmp->mmp_magic) != EXT4_MMP_MAGIC) {
-+ brelse(*bh);
-+ *bh = NULL;
-+ return -EINVAL;
-+ }
-+
-+ return 0;
-+}
-+
-+/*
-+ * Dump as much information as possible to help the admin.
-+ */
-+void __dump_mmp_msg(struct super_block *sb, struct mmp_struct *mmp,
-+ const char *function, const char *msg)
-+{
-+ __ext4_warning(sb, function, msg);
-+ __ext4_warning(sb, function,
-+ "MMP failure info: last update time: %llu, last update "
-+ "node: %s, last update device: %s\n",
-+ (long long unsigned int) le64_to_cpu(mmp->mmp_time),
-+ mmp->mmp_nodename, mmp->mmp_bdevname);
-+}
-+
-+/*
-+ * kmmpd will update the MMP sequence every s_mmp_update_interval seconds
-+ */
-+static int kmmpd(void *data)
-+{
-+ struct super_block *sb = ((struct mmpd_data *) data)->sb;
-+ struct buffer_head *bh = ((struct mmpd_data *) data)->bh;
-+ struct ext4_super_block *es = EXT4_SB(sb)->s_es;
-+ struct mmp_struct *mmp;
-+ ext4_fsblk_t mmp_block;
-+ u32 seq = 0;
-+ unsigned long failed_writes = 0;
-+ int mmp_update_interval = le16_to_cpu(es->s_mmp_update_interval);
-+ unsigned mmp_check_interval;
-+ unsigned long last_update_time;
-+ unsigned long diff;
-+ int retval;
-+
-+ mmp_block = le64_to_cpu(es->s_mmp_block);
-+ mmp = (struct mmp_struct *)(bh->b_data);
-+ mmp->mmp_time = cpu_to_le64(get_seconds());
-+ /*
-+ * Start with the higher mmp_check_interval and reduce it if
-+ * the MMP block is being updated on time.
-+ */
-+ mmp_check_interval = max(EXT4_MMP_CHECK_MULT * mmp_update_interval,
-+ EXT4_MMP_MIN_CHECK_INTERVAL);
-+ mmp->mmp_check_interval = cpu_to_le16(mmp_check_interval);
-+ bdevname(bh->b_bdev, mmp->mmp_bdevname);
-+
-+ memcpy(mmp->mmp_nodename, init_utsname()->nodename,
-+ sizeof(mmp->mmp_nodename));
-+
-+ while (!kthread_should_stop()) {
-+ if (++seq > EXT4_MMP_SEQ_MAX)
-+ seq = 1;
-+
-+ mmp->mmp_seq = cpu_to_le32(seq);
-+ mmp->mmp_time = cpu_to_le64(get_seconds());
-+ last_update_time = jiffies;
-+
-+ retval = write_mmp_block(bh);
-+ /*
-+ * Don't spew too many error messages. Print one every
-+ * (s_mmp_update_interval * 60) seconds.
-+ */
-+ if (retval) {
-+ if ((failed_writes % 60) == 0)
-+ ext4_error(sb, "Error writing to MMP block");
-+ failed_writes++;
-+ }
-+
-+ if (!(le32_to_cpu(es->s_feature_incompat) &
-+ EXT4_FEATURE_INCOMPAT_MMP)) {
-+ ext4_warning(sb, "kmmpd being stopped since MMP feature"
-+ " has been disabled.");
-+ EXT4_SB(sb)->s_mmp_tsk = NULL;
-+ goto failed;
-+ }
-+
-+ if (sb->s_flags & MS_RDONLY) {
-+ ext4_warning(sb, "kmmpd being stopped since filesystem "
-+ "has been remounted as readonly.");
-+ EXT4_SB(sb)->s_mmp_tsk = NULL;
-+ goto failed;
-+ }
-+
-+ diff = jiffies - last_update_time;
-+ if (diff < mmp_update_interval * msecs_to_jiffies(MSEC_PER_SEC))
-+ schedule_timeout_interruptible(mmp_update_interval *
-+ msecs_to_jiffies(MSEC_PER_SEC) - diff);
-+
-+ /*
-+ * We need to make sure that more than mmp_check_interval
-+ * seconds have not passed since writing. If that has happened
-+ * we need to check if the MMP block is as we left it.
-+ */
-+ diff = jiffies - last_update_time;
-+ if (diff > mmp_check_interval * msecs_to_jiffies(MSEC_PER_SEC)) {
-+ struct buffer_head *bh_check = NULL;
-+ struct mmp_struct *mmp_check;
-+
-+ retval = read_mmp_block(sb, &bh_check, mmp_block);
-+ if (retval) {
-+ ext4_error(sb, "error reading MMP data: %d",
-+ retval);
-+ EXT4_SB(sb)->s_mmp_tsk = NULL;
-+ goto failed;
-+ }
-+
-+ mmp_check = (struct mmp_struct *)(bh_check->b_data);
-+ if (mmp->mmp_seq != mmp_check->mmp_seq ||
-+ memcmp(mmp->mmp_nodename, mmp_check->mmp_nodename,
-+ sizeof(mmp->mmp_nodename))) {
-+ dump_mmp_msg(sb, mmp_check,
-+ "Error while updating MMP info. "
-+ "The filesystem seems to have been"
-+ " multiply mounted.");
-+ ext4_error(sb, "abort");
-+ put_bh(bh_check);
-+ goto failed;
-+ }
-+ put_bh(bh_check);
-+ }
-+
-+ /*
-+ * Adjust the mmp_check_interval depending on how much time
-+ * it took for the MMP block to be written.
-+ */
-+ mmp_check_interval = max(min(EXT4_MMP_CHECK_MULT * diff /
-+ msecs_to_jiffies(MSEC_PER_SEC),
-+ EXT4_MMP_MAX_CHECK_INTERVAL),
-+ EXT4_MMP_MIN_CHECK_INTERVAL);
-+ mmp->mmp_check_interval = cpu_to_le16(mmp_check_interval);
-+ }
-+
-+ /*
-+ * Unmount seems to be clean.
-+ */
-+ mmp->mmp_seq = cpu_to_le32(EXT4_MMP_SEQ_CLEAN);
-+ mmp->mmp_time = cpu_to_le64(get_seconds());
-+
-+ retval = write_mmp_block(bh);
-+
-+failed:
-+ kfree(data);
-+ brelse(bh);
-+ return retval;
-+}
-+
-+/*
-+ * Get a random new sequence number but make sure it is not greater than
-+ * EXT4_MMP_SEQ_MAX.
-+ */
-+static unsigned int mmp_new_seq(void)
-+{
-+ u32 new_seq;
-+
-+ do {
-+ get_random_bytes(&new_seq, sizeof(u32));
-+ } while (new_seq > EXT4_MMP_SEQ_MAX);
-+
-+ return new_seq;
-+}
-+
-+/*
-+ * Protect the filesystem from being mounted more than once.
-+ */
-+int ext4_multi_mount_protect(struct super_block *sb,
-+ ext4_fsblk_t mmp_block)
-+{
-+ struct ext4_super_block *es = EXT4_SB(sb)->s_es;
-+ struct buffer_head *bh = NULL;
-+ struct mmp_struct *mmp = NULL;
-+ struct mmpd_data *mmpd_data;
-+ u32 seq;
-+ unsigned int mmp_check_interval = le16_to_cpu(es->s_mmp_update_interval);
-+ unsigned int wait_time = 0;
-+ int retval;
-+
-+ if (mmp_block < le32_to_cpu(es->s_first_data_block) ||
-+ mmp_block >= ext4_blocks_count(es)) {
-+ ext4_warning(sb, "Invalid MMP block in superblock");
-+ goto failed;
-+ }
-+
-+ retval = read_mmp_block(sb, &bh, mmp_block);
-+ if (retval)
-+ goto failed;
-+
-+ mmp = (struct mmp_struct *)(bh->b_data);
-+
-+ if (mmp_check_interval < EXT4_MMP_MIN_CHECK_INTERVAL)
-+ mmp_check_interval = EXT4_MMP_MIN_CHECK_INTERVAL;
-+
-+ /*
-+ * If check_interval in MMP block is larger, use that instead of
-+ * update_interval from the superblock.
-+ */
-+ if (mmp->mmp_check_interval > mmp_check_interval)
-+ mmp_check_interval = mmp->mmp_check_interval;
-+
-+ seq = le32_to_cpu(mmp->mmp_seq);
-+ if (seq == EXT4_MMP_SEQ_CLEAN)
-+ goto skip;
-+
-+ if (seq == EXT4_MMP_SEQ_FSCK) {
-+ dump_mmp_msg(sb, mmp, "fsck is running on the filesystem");
-+ goto failed;
-+ }
-+
-+ wait_time = min(mmp_check_interval * 2 + 1,
-+ mmp_check_interval + 60);
-+
-+ /* Print MMP interval if more than 20 secs. */
-+ if (wait_time > EXT4_MMP_MIN_CHECK_INTERVAL * 4)
-+ ext4_warning(sb, "MMP interval %u higher than expected, please"
-+ " wait.\n", wait_time * 2);
-+
-+ if (schedule_timeout_interruptible(msecs_to_jiffies(MSEC_PER_SEC) *
-+ wait_time) != 0) {
-+ ext4_warning(sb, "MMP startup interrupted, failing mount\n");
-+ goto failed;
-+ }
-+
-+ retval = read_mmp_block(sb, &bh, mmp_block);
-+ if (retval)
-+ goto failed;
-+ mmp = (struct mmp_struct *)(bh->b_data);
-+ if (seq != le32_to_cpu(mmp->mmp_seq)) {
-+ dump_mmp_msg(sb, mmp,
-+ "Device is already active on another node.");
-+ goto failed;
-+ }
-+
-+skip:
-+ /*
-+ * write a new random sequence number.
-+ */
-+ mmp->mmp_seq = seq = cpu_to_le32(mmp_new_seq());
-+
-+ retval = write_mmp_block(bh);
-+ if (retval)
-+ goto failed;
-+
-+ /*
-+ * wait for MMP interval and check mmp_seq.
-+ */
-+ if (schedule_timeout_interruptible(msecs_to_jiffies(MSEC_PER_SEC) *
-+ wait_time) != 0) {
-+ ext4_warning(sb, "MMP startup interrupted, failing mount\n");
-+ goto failed;
-+ }
-+
-+ retval = read_mmp_block(sb, &bh, mmp_block);
-+ if (retval)
-+ goto failed;
-+ mmp = (struct mmp_struct *)(bh->b_data);
-+ if (seq != le32_to_cpu(mmp->mmp_seq)) {
-+ dump_mmp_msg(sb, mmp,
-+ "Device is already active on another node.");
-+ goto failed;
-+ }
-+
-+ mmpd_data = kmalloc(sizeof(struct mmpd_data), GFP_KERNEL);
-+ if (!mmpd_data) {
-+ ext4_warning(sb, "not enough memory for mmpd_data");
-+ goto failed;
-+ }
-+ mmpd_data->sb = sb;
-+ mmpd_data->bh = bh;
-+
-+ /*
-+ * Start a kernel thread to update the MMP block periodically.
-+ */
-+ EXT4_SB(sb)->s_mmp_tsk = kthread_run(kmmpd, mmpd_data, "kmmpd-%s",
-+ bdevname(bh->b_bdev,
-+ mmp->mmp_bdevname));
-+ if (IS_ERR(EXT4_SB(sb)->s_mmp_tsk)) {
-+ EXT4_SB(sb)->s_mmp_tsk = NULL;
-+ kfree(mmpd_data);
-+ ext4_warning(sb, "Unable to create kmmpd thread for %s.",
-+ sb->s_id);
-+ goto failed;
-+ }
-+
-+ return 0;
-+
-+failed:
-+ brelse(bh);
-+ return 1;
-+}
-+
-+
-Index: linux-stage/fs/ext4/super.c
-===================================================================
---- linux-stage.orig/fs/ext4/super.c
-+++ linux-stage/fs/ext4/super.c
-@@ -40,6 +40,8 @@
- #include <linux/log2.h>
- #include <linux/crc16.h>
- #include <asm/uaccess.h>
-+#include <linux/kthread.h>
-+#include <linux/utsname.h>
-
- #include "ext4.h"
- #include "ext4_jbd2.h"
-@@ -700,6 +702,8 @@ static void ext4_put_super(struct super_
- invalidate_bdev(sbi->journal_bdev);
- ext4_blkdev_remove(sbi);
- }
-+ if (sbi->s_mmp_tsk)
-+ kthread_stop(sbi->s_mmp_tsk);
- sb->s_fs_info = NULL;
- /*
- * Now that we are completely done shutting down the
-@@ -2799,6 +2803,10 @@ static int ext4_fill_super(struct super_
- needs_recovery = (es->s_last_orphan != 0 ||
- EXT4_HAS_INCOMPAT_FEATURE(sb,
- EXT4_FEATURE_INCOMPAT_RECOVER));
-+ if (EXT4_HAS_INCOMPAT_FEATURE(sb, EXT4_FEATURE_INCOMPAT_MMP) &&
-+ !(sb->s_flags & MS_RDONLY))
-+ if (ext4_multi_mount_protect(sb, le64_to_cpu(es->s_mmp_block)))
-+ goto failed_mount3;
-
- /*
- * The first inode we look at is the journal inode. Don't try
-@@ -3036,6 +3044,8 @@ failed_mount3:
- percpu_counter_destroy(&sbi->s_freeinodes_counter);
- percpu_counter_destroy(&sbi->s_dirs_counter);
- percpu_counter_destroy(&sbi->s_dirtyblocks_counter);
-+ if (sbi->s_mmp_tsk)
-+ kthread_stop(sbi->s_mmp_tsk);
- failed_mount2:
- for (i = 0; i < db_count; i++)
- brelse(sbi->s_group_desc[i]);
-@@ -3544,7 +3554,7 @@ static int ext4_remount(struct super_blo
- struct ext4_mount_options old_opts;
- ext4_group_t g;
- unsigned int journal_ioprio = DEFAULT_JOURNAL_IOPRIO;
-- int err;
-+ int err = 0;
- #ifdef CONFIG_QUOTA
- int i;
- #endif
-@@ -3666,6 +3676,13 @@ static int ext4_remount(struct super_blo
- goto restore_opts;
- if (!ext4_setup_super(sb, es, 0))
- sb->s_flags &= ~MS_RDONLY;
-+ if (EXT4_HAS_INCOMPAT_FEATURE(sb,
-+ EXT4_FEATURE_INCOMPAT_MMP))
-+ if (ext4_multi_mount_protect(sb,
-+ le64_to_cpu(es->s_mmp_block))) {
-+ err = -EROFS;
-+ goto restore_opts;
-+ }
- }
- }
- ext4_setup_system_zone(sb);
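A toy userspace model of the mount-time handshake described in this patch
(illustrative only: it uses an ordinary shared file and a fixed 5-second
interval instead of the on-disk mmp_struct, random sequence numbers and
adaptive check interval of the real code):

#include <stdio.h>
#include <unistd.h>

static unsigned int read_seq(const char *path)
{
        unsigned int seq = 0;
        FILE *f = fopen(path, "r");

        if (f) {
                if (fscanf(f, "%u", &seq) != 1)
                        seq = 0;
                fclose(f);
        }
        return seq;
}

static void write_seq(const char *path, unsigned int seq)
{
        FILE *f = fopen(path, "w");

        if (f) {
                fprintf(f, "%u\n", seq);
                fclose(f);
        }
}

int main(int argc, char **argv)
{
        const char *path = argc > 1 ? argv[1] : "mmp.seq";
        unsigned int seq = read_seq(path);

        sleep(5);                       /* wait_time: an active node would bump the seq */
        if (read_seq(path) != seq) {
                fprintf(stderr, "device is active on another node\n");
                return 1;
        }
        write_seq(path, ++seq);         /* stake a claim with a new sequence number */
        sleep(5);                       /* make sure nobody overwrote our claim */
        if (read_seq(path) != seq) {
                fprintf(stderr, "device is active on another node\n");
                return 1;
        }
        for (;;) {                      /* "kmmpd": keep the sequence moving */
                write_seq(path, ++seq);
                sleep(5);
        }
}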
+++ /dev/null
-Index: linux-stage/fs/ext4/extents.c
-===================================================================
---- linux-stage.orig/fs/ext4/extents.c 2015-07-13 22:22:56.000000000 +0300
-+++ linux-stage/fs/ext4/extents.c 2015-07-13 22:24:05.000000000 +0300
-@@ -4318,7 +4318,8 @@ static int ext4_find_delayed_extent(stru
- struct buffer_head *head = NULL;
- unsigned int nr_pages = PAGE_SIZE / sizeof(struct page *);
-
-- pages = kmalloc(PAGE_SIZE, GFP_KERNEL);
-+ /* we are running under i_data_sem, so don't re-enter the FS code:
-+ * a GFP_KERNEL allocation could recurse into the FS via reclaim
-+ * and deadlock on i_data_sem */
-+ pages = kmalloc(PAGE_SIZE, GFP_NOFS);
- if (pages == NULL)
- return -ENOMEM;
-
+++ /dev/null
-diff --git a/fs/ext4/ext4.h b/fs/ext4/ext4.h
-index 1d41eef..87b4ea3 100644
---- a/fs/ext4/ext4.h
-+++ b/fs/ext4/ext4.h
-@@ -1825,6 +1825,14 @@ extern int ext4_orphan_add(handle_t *, struct inode *);
- extern int ext4_orphan_del(handle_t *, struct inode *);
- extern int ext4_htree_fill_tree(struct file *dir_file, __u32 start_hash,
- __u32 start_minor_hash, __u32 *next_hash);
-+extern struct inode *ext4_create_inode(handle_t *handle,
-+ struct inode * dir, int mode,
-+ uid_t *owner);
-+extern int ext4_delete_entry(handle_t *handle, struct inode * dir,
-+ struct ext4_dir_entry_2 * de_del,
-+ struct buffer_head * bh);
-+extern int ext4_add_dot_dotdot(handle_t *handle, struct inode *dir,
-+ struct inode *inode);
-
- /* resize.c */
- extern int ext4_group_add(struct super_block *sb,
-diff --git a/fs/ext4/namei.c b/fs/ext4/namei.c
-index 6433d87..0f3783b 100644
---- a/fs/ext4/namei.c
-+++ b/fs/ext4/namei.c
-@@ -24,6 +24,7 @@
- * Theodore Ts'o, 2002
- */
-
-+#include <linux/module.h>
- #include <linux/fs.h>
- #include <linux/pagemap.h>
- #include <linux/jbd2.h>
-@@ -1691,10 +1692,10 @@ cleanup:
- * ext4_delete_entry deletes a directory entry by merging it with the
- * previous entry
- */
--static int ext4_delete_entry(handle_t *handle,
-- struct inode *dir,
-- struct ext4_dir_entry_2 *de_del,
-- struct buffer_head *bh)
-+int ext4_delete_entry(handle_t *handle,
-+ struct inode *dir,
-+ struct ext4_dir_entry_2 *de_del,
-+ struct buffer_head *bh)
- {
- struct ext4_dir_entry_2 *de, *pde;
- unsigned int blocksize = dir->i_sb->s_blocksize;
-@@ -1729,7 +1730,7 @@ static int ext4_delete_entry(handle_t *handle,
- }
- return -ENOENT;
- }
--
-+EXPORT_SYMBOL(ext4_delete_entry);
- /*
- * DIR_NLINK feature is set if 1) nlinks > EXT4_LINK_MAX or 2) nlinks == 2,
- * since this indicates that nlinks count was previously 1.
-@@ -1776,6 +1777,30 @@ static int ext4_add_nondir(handle_t *handle,
- return err;
- }
-
-+/* Return a locked inode so the caller can modify the inode's state/flags
-+ * before anyone else can find it. The caller must unlock the inode itself. */
-+struct inode *ext4_create_inode(handle_t *handle, struct inode *dir, int mode,
-+ uid_t *owner)
-+{
-+ struct inode *inode;
-+
-+ inode = ext4_new_inode(handle, dir, mode, 0,
-+ EXT4_SB(dir->i_sb)->s_inode_goal);
-+ if (!IS_ERR(inode)) {
-+ if (S_ISCHR(mode) || S_ISBLK(mode) || S_ISFIFO(mode)) {
-+#ifdef CONFIG_EXT4_FS_XATTR
-+ inode->i_op = &ext4_special_inode_operations;
-+#endif
-+ } else {
-+ inode->i_op = &ext4_file_inode_operations;
-+ inode->i_fop = &ext4_file_operations;
-+ ext4_set_aops(inode);
-+ }
-+ }
-+ return inode;
-+}
-+EXPORT_SYMBOL(ext4_create_inode);
-+
- /*
- * By the time this is called, we already have created
- * the directory cache entry for the new file, but it
-@@ -1850,44 +1875,32 @@ retry:
- return err;
- }
-
--static int ext4_mkdir(struct inode *dir, struct dentry *dentry, int mode)
-+/* Initialize @inode as a subdirectory of @dir, and add the
-+ * "." and ".." entries into the first directory block. */
-+int ext4_add_dot_dotdot(handle_t *handle, struct inode * dir,
-+ struct inode *inode)
- {
-- handle_t *handle;
-- struct inode *inode;
-- struct buffer_head *dir_block = NULL;
-- struct ext4_dir_entry_2 *de;
-+ struct buffer_head * dir_block;
-+ struct ext4_dir_entry_2 * de;
- unsigned int blocksize = dir->i_sb->s_blocksize;
-- int err, retries = 0;
--
-- if (EXT4_DIR_LINK_MAX(dir))
-- return -EMLINK;
-+ int err = 0;
-
--retry:
-- handle = ext4_journal_start(dir, EXT4_DATA_TRANS_BLOCKS(dir->i_sb) +
-- EXT4_INDEX_EXTRA_TRANS_BLOCKS + 3 +
-- EXT4_MAXQUOTAS_INIT_BLOCKS(dir->i_sb));
- if (IS_ERR(handle))
- return PTR_ERR(handle);
-
- if (IS_DIRSYNC(dir))
- ext4_handle_sync(handle);
-
-- inode = ext4_new_inode(handle, dir, S_IFDIR | mode,
-- &dentry->d_name, 0);
-- err = PTR_ERR(inode);
-- if (IS_ERR(inode))
-- goto out_stop;
--
- inode->i_op = &ext4_dir_inode_operations;
- inode->i_fop = &ext4_dir_operations;
- inode->i_size = EXT4_I(inode)->i_disksize = inode->i_sb->s_blocksize;
- dir_block = ext4_bread(handle, inode, 0, 1, &err);
- if (!dir_block)
-- goto out_clear_inode;
-+ goto get_out;
- BUFFER_TRACE(dir_block, "get_write_access");
- err = ext4_journal_get_write_access(handle, dir_block);
- if (err)
-- goto out_clear_inode;
-+ goto get_out;
- de = (struct ext4_dir_entry_2 *) dir_block->b_data;
- de->inode = cpu_to_le32(inode->i_ino);
- de->name_len = 1;
-@@ -1906,18 +1919,46 @@ retry:
- BUFFER_TRACE(dir_block, "call ext4_handle_dirty_metadata");
- err = ext4_handle_dirty_metadata(handle, inode, dir_block);
- if (err)
-- goto out_clear_inode;
-+ goto get_out;
- err = ext4_mark_inode_dirty(handle, inode);
-- if (!err)
-- err = ext4_add_entry(handle, dentry, inode);
-- if (err) {
--out_clear_inode:
-- clear_nlink(inode);
-- unlock_new_inode(inode);
-- ext4_mark_inode_dirty(handle, inode);
-- iput(inode);
-+get_out:
-+ brelse(dir_block);
-+ return err;
-+}
-+EXPORT_SYMBOL(ext4_add_dot_dotdot);
-+
-+
-+static int ext4_mkdir(struct inode *dir, struct dentry *dentry, int mode)
-+{
-+ handle_t *handle;
-+ struct inode *inode;
-+ int err, retries = 0;
-+
-+ if (EXT4_DIR_LINK_MAX(dir))
-+ return -EMLINK;
-+
-+retry:
-+ handle = ext4_journal_start(dir, EXT4_DATA_TRANS_BLOCKS(dir->i_sb) +
-+ EXT4_INDEX_EXTRA_TRANS_BLOCKS + 3 +
-+ 2*EXT4_QUOTA_INIT_BLOCKS(dir->i_sb));
-+ if (IS_ERR(handle))
-+ return PTR_ERR(handle);
-+
-+ if (IS_DIRSYNC(dir))
-+ ext4_handle_sync(handle);
-+
-+ inode = ext4_new_inode(handle, dir, S_IFDIR | mode, &dentry->d_name, 0);
-+ err = PTR_ERR(inode);
-+ if (IS_ERR(inode))
- goto out_stop;
-- }
-+
-+ err = ext4_add_dot_dotdot(handle, dir, inode);
-+ if (err)
-+ goto out_clear_inode;
-+
-+ err = ext4_add_entry(handle, dentry, inode);
-+ if (err)
-+ goto out_clear_inode;
- ext4_inc_count(handle, dir);
- ext4_update_dx_flag(dir);
- err = ext4_mark_inode_dirty(handle, dir);
-@@ -1926,11 +1967,16 @@ out_clear_inode:
- d_instantiate(dentry, inode);
- unlock_new_inode(inode);
- out_stop:
-- brelse(dir_block);
- ext4_journal_stop(handle);
- if (err == -ENOSPC && ext4_should_retry_alloc(dir->i_sb, &retries))
- goto retry;
- return err;
-+out_clear_inode:
-+ clear_nlink(inode);
-+ unlock_new_inode(inode);
-+ ext4_mark_inode_dirty(handle, inode);
-+ iput(inode);
-+ goto out_stop;
- }
-
- /*
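A minimal sketch (hypothetical wrapper, not part of this patch) of how an
external module might combine the newly exported helpers; the real
ext4_mkdir() path additionally links the new inode into the parent directory
and updates link counts:

static struct inode *make_subdir(handle_t *handle, struct inode *dir)
{
        struct inode *inode;
        int err;

        /* returned locked; see the comment above ext4_create_inode() */
        inode = ext4_create_inode(handle, dir, S_IFDIR | 0755, NULL);
        if (IS_ERR(inode))
                return inode;

        /* sets the directory ops and writes the "." and ".." entries */
        err = ext4_add_dot_dotdot(handle, dir, inode);
        if (err) {
                clear_nlink(inode);
                unlock_new_inode(inode);
                iput(inode);
                return ERR_PTR(err);
        }
        unlock_new_inode(inode);
        return inode;
}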
+++ /dev/null
-Index: linux-stage/fs/ext4/ext4.h
-===================================================================
---- linux-stage.orig/fs/ext4/ext4.h 2011-03-11 14:17:02.000000000 +0800
-+++ linux-stage/fs/ext4/ext4.h 2011-03-11 14:20:08.269063193 +0800
-@@ -999,11 +999,14 @@
-
- /* tunables */
- unsigned long s_stripe;
-- unsigned int s_mb_stream_request;
-+ unsigned long s_mb_small_req;
-+ unsigned long s_mb_large_req;
- unsigned int s_mb_max_to_scan;
- unsigned int s_mb_min_to_scan;
- unsigned int s_mb_stats;
- unsigned int s_mb_order2_reqs;
-+ unsigned long *s_mb_prealloc_table;
-+ unsigned long s_mb_prealloc_table_size;
- unsigned int s_mb_group_prealloc;
- unsigned int s_max_writeback_mb_bump;
- /* where last allocation was done - for stream allocation */
-Index: linux-stage/fs/ext4/inode.c
-===================================================================
---- linux-stage.orig/fs/ext4/inode.c
-+++ linux-stage/fs/ext4/inode.c
-@@ -3028,6 +3028,11 @@ static int ext4_da_writepages(struct add
- if (unlikely(sbi->s_mount_flags & EXT4_MF_FS_ABORTED))
- return -EROFS;
-
-+ if (wbc->nr_to_write < sbi->s_mb_small_req) {
-+ nr_to_writebump = sbi->s_mb_small_req - wbc->nr_to_write;
-+ wbc->nr_to_write = sbi->s_mb_small_req;
-+ }
-+
- if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX)
- range_whole = 1;
-
-Index: linux-stage/fs/ext4/mballoc.c
-===================================================================
---- linux-stage.orig/fs/ext4/mballoc.c 2011-03-11 14:03:32.000000000 +0800
-+++ linux-stage/fs/ext4/mballoc.c 2011-03-11 14:44:49.106543493 +0800
-@@ -1823,6 +1823,25 @@
-
- }
- }
-+
-+static void ext4_mb_prealloc_table_add(struct ext4_sb_info *sbi, int value)
-+{
-+ int i;
-+
-+ if (value > (sbi->s_blocks_per_group - 1 - 1 - sbi->s_itb_per_group))
-+ return;
-+
-+ for (i = 0; i < sbi->s_mb_prealloc_table_size; i++) {
-+ if (sbi->s_mb_prealloc_table[i] == 0) {
-+ sbi->s_mb_prealloc_table[i] = value;
-+ return;
-+ }
-+
-+ /* values must be added in increasing order */
-+ if (value <= sbi->s_mb_prealloc_table[i])
-+ return;
-+ }
-+}
-
- static int ext4_mb_good_group(struct ext4_allocation_context *ac,
- ext4_group_t group, int cr)
-@@ -2173,6 +2193,80 @@
- .show = ext4_mb_seq_groups_show,
- };
-
-+#define EXT4_MB_PREALLOC_TABLE "prealloc_table"
-+
-+static int ext4_mb_prealloc_table_proc_read(char *page, char **start, off_t off,
-+ int count, int *eof, void *data)
-+{
-+ struct ext4_sb_info *sbi = data;
-+ int len = 0;
-+ int i;
-+
-+ *eof = 1;
-+ if (off != 0)
-+ return 0;
-+
-+ for (i = 0; i < sbi->s_mb_prealloc_table_size; i++)
-+ len += sprintf(page + len, "%ld ",
-+ sbi->s_mb_prealloc_table[i]);
-+ len += sprintf(page + len, "\n");
-+
-+ *start = page;
-+ return len;
-+}
-+
-+static int ext4_mb_prealloc_table_proc_write(struct file *file,
-+ const char __user *buf,
-+ unsigned long cnt, void *data)
-+{
-+ struct ext4_sb_info *sbi = data;
-+ unsigned long value;
-+ unsigned long prev = 0;
-+ char str[128];
-+ char *cur;
-+ char *end;
-+ unsigned long *new_table;
-+ int num = 0;
-+ int i = 0;
-+
-+ if (cnt >= sizeof(str))
-+ return -EINVAL;
-+ if (copy_from_user(str, buf, cnt))
-+ return -EFAULT;
-+
-+ num = 0;
-+ cur = str;
-+ end = str + cnt;
-+ while (cur < end) {
-+ while ((cur < end) && (*cur == ' ')) cur++;
-+ value = simple_strtol(cur, &cur, 0);
-+ if (value == 0)
-+ break;
-+ if (value <= prev)
-+ return -EINVAL;
-+ prev = value;
-+ num++;
-+ }
-+
-+ new_table = kmalloc(num * sizeof(*new_table), GFP_KERNEL);
-+ if (new_table == NULL)
-+ return -ENOMEM;
-+ kfree(sbi->s_mb_prealloc_table);
-+ memset(new_table, 0, num * sizeof(*new_table));
-+ sbi->s_mb_prealloc_table = new_table;
-+ sbi->s_mb_prealloc_table_size = num;
-+ cur = str;
-+ end = str + cnt;
-+ while (cur < end && i < num) {
-+ while ((cur < end) && (*cur == ' ')) cur++;
-+ value = simple_strtol(cur, &cur, 0);
-+ ext4_mb_prealloc_table_add(sbi, value);
-+ i++;
-+ }
-+
-+ return cnt;
-+}
-+
- static int ext4_mb_seq_groups_open(struct inode *inode, struct file *file)
- {
- struct super_block *sb = PDE(inode)->data;
-@@ -2397,14 +2497,6 @@ int ext4_mb_init(struct super_block *sb,
- i++;
- } while (i <= sb->s_blocksize_bits + 1);
-
-- /* init file for buddy data */
-- ret = ext4_mb_init_backend(sb);
-- if (ret != 0) {
-- kfree(sbi->s_mb_offsets);
-- kfree(sbi->s_mb_maxs);
-- return ret;
-- }
--
- spin_lock_init(&sbi->s_md_lock);
- spin_lock_init(&sbi->s_bal_lock);
-
-@@ -2411,12 +2505,56 @@
- sbi->s_mb_max_to_scan = MB_DEFAULT_MAX_TO_SCAN;
- sbi->s_mb_min_to_scan = MB_DEFAULT_MIN_TO_SCAN;
- sbi->s_mb_stats = MB_DEFAULT_STATS;
-- sbi->s_mb_stream_request = MB_DEFAULT_STREAM_THRESHOLD;
- sbi->s_mb_order2_reqs = MB_DEFAULT_ORDER2_REQS;
-- sbi->s_mb_group_prealloc = MB_DEFAULT_GROUP_PREALLOC;
-+
-+ if (sbi->s_stripe == 0) {
-+ sbi->s_mb_prealloc_table_size = 10;
-+ i = sbi->s_mb_prealloc_table_size * sizeof(unsigned long);
-+ sbi->s_mb_prealloc_table = kmalloc(i, GFP_NOFS);
-+ if (sbi->s_mb_prealloc_table == NULL) {
-+ kfree(sbi->s_mb_offsets);
-+ kfree(sbi->s_mb_maxs);
-+ return -ENOMEM;
-+ }
-+ memset(sbi->s_mb_prealloc_table, 0, i);
-+
-+ ext4_mb_prealloc_table_add(sbi, 4);
-+ ext4_mb_prealloc_table_add(sbi, 8);
-+ ext4_mb_prealloc_table_add(sbi, 16);
-+ ext4_mb_prealloc_table_add(sbi, 32);
-+ ext4_mb_prealloc_table_add(sbi, 64);
-+ ext4_mb_prealloc_table_add(sbi, 128);
-+ ext4_mb_prealloc_table_add(sbi, 256);
-+ ext4_mb_prealloc_table_add(sbi, 512);
-+ ext4_mb_prealloc_table_add(sbi, 1024);
-+ ext4_mb_prealloc_table_add(sbi, 2048);
-+
-+ sbi->s_mb_small_req = 256;
-+ sbi->s_mb_large_req = 1024;
-+ sbi->s_mb_group_prealloc = 512;
-+ } else {
-+ sbi->s_mb_prealloc_table_size = 3;
-+ i = sbi->s_mb_prealloc_table_size * sizeof(unsigned long);
-+ sbi->s_mb_prealloc_table = kmalloc(i, GFP_NOFS);
-+ if (sbi->s_mb_prealloc_table == NULL) {
-+ kfree(sbi->s_mb_offsets);
-+ kfree(sbi->s_mb_maxs);
-+ return -ENOMEM;
-+ }
-+ memset(sbi->s_mb_prealloc_table, 0, i);
-+
-+ ext4_mb_prealloc_table_add(sbi, sbi->s_stripe);
-+ ext4_mb_prealloc_table_add(sbi, sbi->s_stripe * 2);
-+ ext4_mb_prealloc_table_add(sbi, sbi->s_stripe * 4);
-+
-+ sbi->s_mb_small_req = sbi->s_stripe;
-+ sbi->s_mb_large_req = sbi->s_stripe * 8;
-+ sbi->s_mb_group_prealloc = sbi->s_stripe * 4;
-+ }
-
- sbi->s_locality_groups = alloc_percpu(struct ext4_locality_group);
- if (sbi->s_locality_groups == NULL) {
-+ kfree(sbi->s_mb_prealloc_table);
- kfree(sbi->s_mb_offsets);
- kfree(sbi->s_mb_maxs);
- return -ENOMEM;
-@@ -2430,9 +2568,27 @@
- spin_lock_init(&lg->lg_prealloc_lock);
- }
-
-+ /* init file for buddy data */
-+ ret = ext4_mb_init_backend(sb);
-+ if (ret != 0) {
-+ kfree(sbi->s_mb_prealloc_table);
-+ kfree(sbi->s_mb_offsets);
-+ kfree(sbi->s_mb_maxs);
-+ return ret;
-+ }
-+
-- if (sbi->s_proc)
-+ if (sbi->s_proc) {
-+ struct proc_dir_entry *p;
- proc_create_data("mb_groups", S_IRUGO, sbi->s_proc,
- &ext4_mb_seq_groups_fops, sb);
-+ p = create_proc_entry(EXT4_MB_PREALLOC_TABLE, S_IFREG |
-+ S_IRUGO | S_IWUSR, sbi->s_proc);
-+ if (p) {
-+ p->data = sbi;
-+ p->read_proc = ext4_mb_prealloc_table_proc_read;
-+ p->write_proc = ext4_mb_prealloc_table_proc_write;
-+ }
-+ }
-
- if (sbi->s_journal)
- sbi->s_journal->j_commit_callback = release_blocks_on_commit;
-@@ -2483,6 +2639,7 @@
- kfree(sbi->s_group_info[i]);
- kfree(sbi->s_group_info);
- }
-+ kfree(sbi->s_mb_prealloc_table);
- kfree(sbi->s_mb_offsets);
- kfree(sbi->s_mb_maxs);
- if (sbi->s_buddy_cache)
-@@ -2512,8 +2668,10 @@
- }
-
- free_percpu(sbi->s_locality_groups);
-- if (sbi->s_proc)
-+ if (sbi->s_proc) {
- remove_proc_entry("mb_groups", sbi->s_proc);
-+ remove_proc_entry(EXT4_MB_PREALLOC_TABLE, sbi->s_proc);
-+ }
-
- return 0;
- }
-@@ -2807,11 +2965,12 @@
- ext4_mb_normalize_request(struct ext4_allocation_context *ac,
- struct ext4_allocation_request *ar)
- {
-- int bsbits, max;
-+ int bsbits, i, wind;
- ext4_lblk_t end;
-- loff_t size, orig_size, start_off;
-+ loff_t size, orig_size;
- ext4_lblk_t start, orig_start;
- struct ext4_inode_info *ei = EXT4_I(ac->ac_inode);
-+ struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb);
- struct ext4_prealloc_space *pa;
-
- /* do normalize only data requests, metadata requests
-@@ -2841,49 +3000,35 @@
- size = size << bsbits;
- if (size < i_size_read(ac->ac_inode))
- size = i_size_read(ac->ac_inode);
-+ size = (size + ac->ac_sb->s_blocksize - 1) >> bsbits;
-
-- /* max size of free chunks */
-- max = 2 << bsbits;
-+ start = wind = 0;
-
--#define NRL_CHECK_SIZE(req, size, max, chunk_size) \
-- (req <= (size) || max <= (chunk_size))
-+ /* let's choose preallocation window depending on file size */
-+ for (i = 0; i < sbi->s_mb_prealloc_table_size; i++) {
-+ if (size <= sbi->s_mb_prealloc_table[i]) {
-+ wind = sbi->s_mb_prealloc_table[i];
-+ break;
-+ }
-+ }
-+ size = wind;
-
-- /* first, try to predict filesize */
-- /* XXX: should this table be tunable? */
-- start_off = 0;
-- if (size <= 16 * 1024) {
-- size = 16 * 1024;
-- } else if (size <= 32 * 1024) {
-- size = 32 * 1024;
-- } else if (size <= 64 * 1024) {
-- size = 64 * 1024;
-- } else if (size <= 128 * 1024) {
-- size = 128 * 1024;
-- } else if (size <= 256 * 1024) {
-- size = 256 * 1024;
-- } else if (size <= 512 * 1024) {
-- size = 512 * 1024;
-- } else if (size <= 1024 * 1024) {
-- size = 1024 * 1024;
-- } else if (NRL_CHECK_SIZE(size, 4 * 1024 * 1024, max, 2 * 1024)) {
-- start_off = ((loff_t)ac->ac_o_ex.fe_logical >>
-- (21 - bsbits)) << 21;
-- size = 2 * 1024 * 1024;
-- } else if (NRL_CHECK_SIZE(size, 8 * 1024 * 1024, max, 4 * 1024)) {
-- start_off = ((loff_t)ac->ac_o_ex.fe_logical >>
-- (22 - bsbits)) << 22;
-- size = 4 * 1024 * 1024;
-- } else if (NRL_CHECK_SIZE(ac->ac_o_ex.fe_len,
-- (8<<20)>>bsbits, max, 8 * 1024)) {
-- start_off = ((loff_t)ac->ac_o_ex.fe_logical >>
-- (23 - bsbits)) << 23;
-- size = 8 * 1024 * 1024;
-- } else {
-- start_off = (loff_t)ac->ac_o_ex.fe_logical << bsbits;
-- size = ac->ac_o_ex.fe_len << bsbits;
-+ if (wind == 0) {
-+ __u64 tstart, tend;
-+ /* file is quite large, we now preallocate with
-+ * the biggest configured window with regard to
-+ * logical offset */
-+ wind = sbi->s_mb_prealloc_table[i - 1];
-+ tstart = ac->ac_o_ex.fe_logical;
-+ do_div(tstart, wind);
-+ start = tstart * wind;
-+ tend = ac->ac_o_ex.fe_logical + ac->ac_o_ex.fe_len - 1;
-+ do_div(tend, wind);
-+ tend = tend * wind + wind;
-+ size = tend - start;
- }
-- orig_size = size = size >> bsbits;
-- orig_start = start = start_off >> bsbits;
-+ orig_size = size;
-+ orig_start = start;
-
- /* don't cover already allocated blocks in selected range */
- if (ar->pleft && start <= ar->lleft) {
-@@ -2955,7 +3100,6 @@
- }
- BUG_ON(start + size <= ac->ac_o_ex.fe_logical &&
- start > ac->ac_o_ex.fe_logical);
-- BUG_ON(size <= 0 || size > EXT4_BLOCKS_PER_GROUP(ac->ac_sb));
-
- /* now prepare goal request */
-
-@@ -3939,11 +4083,19 @@
-
- /* don't use group allocation for large files */
- size = max(size, isize);
-- if (size > sbi->s_mb_stream_request) {
-+ if ((ac->ac_o_ex.fe_len >= sbi->s_mb_small_req) ||
-+ (size >= sbi->s_mb_large_req)) {
- ac->ac_flags |= EXT4_MB_STREAM_ALLOC;
- return;
- }
-
-+ /*
-+ * request is so large that we don't care about
-+ * streaming - it outweighs any possible seek
-+ */
-+ if (ac->ac_o_ex.fe_len >= sbi->s_mb_large_req)
-+ return;
-+
- BUG_ON(ac->ac_lg != NULL);
- /*
- * locality group prealloc space are per cpu. The reason for having
-Index: linux-stage/fs/ext4/super.c
-===================================================================
---- linux-stage.orig/fs/ext4/super.c 2011-03-11 14:16:56.000000000 +0800
-+++ linux-stage/fs/ext4/super.c 2011-03-11 14:19:24.664467626 +0800
-@@ -2632,7 +2632,8 @@
- EXT4_RW_ATTR_SBI_UI(mb_max_to_scan, s_mb_max_to_scan);
- EXT4_RW_ATTR_SBI_UI(mb_min_to_scan, s_mb_min_to_scan);
- EXT4_RW_ATTR_SBI_UI(mb_order2_req, s_mb_order2_reqs);
--EXT4_RW_ATTR_SBI_UI(mb_stream_req, s_mb_stream_request);
-+EXT4_RW_ATTR_SBI_UI(mb_small_req, s_mb_small_req);
-+EXT4_RW_ATTR_SBI_UI(mb_large_req, s_mb_large_req);
- EXT4_RW_ATTR_SBI_UI(mb_group_prealloc, s_mb_group_prealloc);
- EXT4_RW_ATTR_SBI_UI(max_writeback_mb_bump, s_max_writeback_mb_bump);
-
-@@ -2647,7 +2648,8 @@
- ATTR_LIST(mb_max_to_scan),
- ATTR_LIST(mb_min_to_scan),
- ATTR_LIST(mb_order2_req),
-- ATTR_LIST(mb_stream_req),
-+ ATTR_LIST(mb_small_req),
-+ ATTR_LIST(mb_large_req),
- ATTR_LIST(mb_group_prealloc),
- ATTR_LIST(max_writeback_mb_bump),
- NULL,
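
The hunks above replace the fixed power-of-two size ladder in ext4_mb_normalize_request() with the tunable s_mb_prealloc_table. As a rough sketch of the lookup those hunks implement (illustrative only: the function name is made up here, and the fallback for requests larger than every entry mirrors the large-file branch of the patch):

	/*
	 * Pick the preallocation window for a request of `size' blocks from
	 * an ascending table of window sizes; requests larger than every
	 * entry fall back to the largest configured window.
	 */
	static unsigned long pick_prealloc_window(const unsigned long *table,
						  int nr_entries,
						  unsigned long size)
	{
		int i;

		for (i = 0; i < nr_entries; i++)
			if (size <= table[i])
				return table[i];

		return table[nr_entries - 1];
	}

With a stripe-aligned table of (stripe, 2*stripe, 4*stripe) as set up above, for example, a request smaller than one stripe is rounded up to a full stripe-sized window.
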
+++ /dev/null
-commit 21f976975cbecbdaf23ceeacc1cab2b1c05a028e
-Author: Jan Kara <jack@suse.cz>
-Date: Mon Apr 4 15:33:39 2011 -0400
-
- ext4: remove unnecessary [cm]time update of quota file
-
- It is not necessary to update [cm]time of quota file on each quota
- file write and it wastes journal space and IO throughput with inode
- writes. So just remove the updating from ext4_quota_write() and only
- update times when quotas are being turned off. Userspace cannot get
- anything reliable from quota files while they are used by the kernel
- anyway.
-
- Signed-off-by: Jan Kara <jack@suse.cz>
- Signed-off-by: "Theodore Ts'o" <tytso@mit.edu>
-
-Index: linux-stage/fs/ext4/ext4_jbd2.h
-===================================================================
---- linux-stage.orig/fs/ext4/ext4_jbd2.h 2012-06-26 11:26:25.000000000 +0200
-+++ linux-stage/fs/ext4/ext4_jbd2.h 2012-06-26 11:35:31.025105000 +0200
-@@ -88,8 +88,8 @@
-
- #ifdef CONFIG_QUOTA
- /* Amount of blocks needed for quota update - we know that the structure was
-- * allocated so we need to update only inode+data */
--#define EXT4_QUOTA_TRANS_BLOCKS(sb) (test_opt(sb, QUOTA) ? 2 : 0)
-+ * allocated so we need to update only data block */
-+#define EXT4_QUOTA_TRANS_BLOCKS(sb) (test_opt(sb, QUOTA) ? 1 : 0)
- /* Amount of blocks needed for quota insert/delete - we do some block writes
- * but inode, sb and group updates are done only once */
- #define EXT4_QUOTA_INIT_BLOCKS(sb) (test_opt(sb, QUOTA) ? (DQUOT_INIT_ALLOC*\
-Index: linux-stage/fs/ext4/super.c
-===================================================================
---- linux-stage.orig/fs/ext4/super.c 2012-06-26 11:35:09.000000000 +0200
-+++ linux-stage/fs/ext4/super.c 2012-06-26 11:37:30.905374000 +0200
-@@ -4582,6 +4582,7 @@ static int ext4_quota_on(struct super_bl
- static int ext4_quota_off(struct super_block *sb, int type, int remount)
- {
- struct quota_info *dqopt = sb_dqopt(sb);
-+ int cnt;
-
- mutex_lock(&dqopt->dqonoff_mutex);
- if (!sb_any_quota_loaded(sb)) {
-@@ -4598,6 +4599,37 @@ static int ext4_quota_off(struct super_b
- up_read(&sb->s_umount);
- }
-
-+ for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
-+ struct inode *inode;
-+ handle_t *handle;
-+
-+ if (type != -1 && cnt != type)
-+ continue;
-+
-+ mutex_lock(&dqopt->dqonoff_mutex);
-+ inode = dqopt->files[cnt];
-+ if (!sb_has_quota_loaded(sb, cnt) || !inode) {
-+ mutex_unlock(&dqopt->dqonoff_mutex);
-+ continue;
-+ }
-+
-+ inode = igrab(inode);
-+ mutex_unlock(&dqopt->dqonoff_mutex);
-+
-+ if (!inode)
-+ continue;
-+
-+ /* Update modification times of quota files when userspace can
-+ * start looking at them */
-+ handle = ext4_journal_start(inode, 1);
-+ if (!IS_ERR(handle)) {
-+ inode->i_mtime = inode->i_ctime = CURRENT_TIME;
-+ ext4_mark_inode_dirty(handle, inode);
-+ ext4_journal_stop(handle);
-+ }
-+ iput(inode);
-+ }
-+
- return vfs_quota_off(sb, type, remount);
- }
-
-@@ -4696,9 +4728,8 @@ out:
- if (inode->i_size < off + len) {
- i_size_write(inode, off + len);
- EXT4_I(inode)->i_disksize = inode->i_size;
-+ ext4_mark_inode_dirty(handle, inode);
- }
-- inode->i_mtime = inode->i_ctime = CURRENT_TIME;
-- ext4_mark_inode_dirty(handle, inode);
- mutex_unlock(&inode->i_mutex);
- return len;
- }
+++ /dev/null
-From: Aditya Kali <adityakali@google.com>
-
-This patch is an attempt towards supporting quotas as first class
-feature in ext4. It is based on the proposal at:
-https://ext4.wiki.kernel.org/index.php/Design_For_1st_Class_Quota_in_Ext4
-This patch introduces a new feature - EXT4_FEATURE_RO_COMPAT_QUOTA which, when
-turned on, enables quota accounting at mount time itself. Also, the
-quota inodes are stored in two additional superblock fields.
-Some changes introduced by this patch that should be pointed out are:
-1) Two new ext4-superblock fields - s_usr_quota_inum and s_grp_quota_inum
- for storing the quota inodes in use.
-2) If the QUOTA feature and corresponding quota inodes are set in superblock,
- Quotas are turned on at mount time irrespective of the quota mount options.
- Thus the mount options 'quota', 'usrquota' and 'grpquota' are completely
- ignored with the new QUOTA feature flag.
-3) Default quota inodes are: inode#3 for tracking userquota and inode#4 for
- tracking group quota. The superblock fields can be set to use other inodes
- as well.
-4) mke2fs or tune2fs will initialize these inodes when quota feature is
- being set. The default reserved inodes will not be visible to user as
- regular files.
-5) Once quotas are turned on, they cannot be turned off while the FS is
- mounted. This is because we do not want to let the quota get inconsistent.
-6) With the QUOTA feature set, since the quota inodes are hidden, some of the
- utilities from quota-tools will no longer work correctly. Instead, e2fsprogs
- will include support for fixing the quota files.
-7) Support is only for the new V2 quota file format.
-
-Signed-off-by: Aditya Kali <adityakali@google.com>
----
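
For orientation, the two new superblock fields are consumed at mount time roughly as follows (a simplified sketch based on the ext4_fill_super() hunk below; the helper name is hypothetical, and the on-disk fields are little-endian, hence the le32_to_cpu() conversions):

	/* Cache the on-disk quota inode numbers when the RO_COMPAT_QUOTA
	 * feature is present. */
	static void ext4_cache_quota_inums(struct super_block *sb)
	{
		struct ext4_sb_info *sbi = EXT4_SB(sb);
		struct ext4_super_block *es = sbi->s_es;

		if (!EXT4_HAS_RO_COMPAT_FEATURE(sb, EXT4_FEATURE_RO_COMPAT_QUOTA))
			return;

		sbi->s_qf_inums[USRQUOTA] = le32_to_cpu(es->s_usr_quota_inum);
		sbi->s_qf_inums[GRPQUOTA] = le32_to_cpu(es->s_grp_quota_inum);
	}
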
-Index: linux-stage/fs/ext4/ext4.h
-===================================================================
---- linux-stage.orig/fs/ext4/ext4.h
-+++ linux-stage/fs/ext4/ext4.h
-@@ -185,6 +185,8 @@ typedef struct ext4_io_end {
- */
- #define EXT4_BAD_INO 1 /* Bad blocks inode */
- #define EXT4_ROOT_INO 2 /* Root inode */
-+#define EXT4_USR_QUOTA_INO 3 /* User quota inode */
-+#define EXT4_GRP_QUOTA_INO 4 /* Group quota inode */
- #define EXT4_BOOT_LOADER_INO 5 /* Boot loader inode */
- #define EXT4_UNDEL_DIR_INO 6 /* Undelete directory inode */
- #define EXT4_RESIZE_INO 7 /* Reserved group descriptors inode */
-@@ -1042,7 +1044,9 @@ struct ext4_super_block {
- __u8 s_last_error_func[32]; /* function where the error happened */
- #define EXT4_S_ERR_END offsetof(struct ext4_super_block, s_mount_opts)
- __u8 s_mount_opts[64];
-- __le32 s_reserved[112]; /* Padding to the end of the block */
-+ __le32 s_usr_quota_inum; /* inode for tracking user quota */
-+ __le32 s_grp_quota_inum; /* inode for tracking group quota */
-+ __le32 s_reserved[110]; /* Padding to the end of the block */
- };
-
- #ifdef __KERNEL__
-@@ -1116,6 +1120,7 @@ struct ext4_sb_info {
- #ifdef CONFIG_QUOTA
- char *s_qf_names[MAXQUOTAS]; /* Names of quota files with journalled quota */
- int s_jquota_fmt; /* Format of quota to use */
-+ unsigned long s_qf_inums[MAXQUOTAS]; /* Quota file inodes */
- #endif
- unsigned int s_want_extra_isize; /* New inodes should reserve # bytes */
- struct rb_root system_blks;
-@@ -1216,6 +1221,8 @@ static inline struct timespec ext4_curre
- static inline int ext4_valid_inum(struct super_block *sb, unsigned long ino)
- {
- return ino == EXT4_ROOT_INO ||
-+ ino == EXT4_USR_QUOTA_INO ||
-+ ino == EXT4_GRP_QUOTA_INO ||
- ino == EXT4_JOURNAL_INO ||
- ino == EXT4_RESIZE_INO ||
- (ino >= EXT4_FIRST_INO(sb) &&
-@@ -1320,6 +1327,7 @@ EXT4_INODE_BIT_FNS(state, state_flags)
- #define EXT4_FEATURE_RO_COMPAT_GDT_CSUM 0x0010
- #define EXT4_FEATURE_RO_COMPAT_DIR_NLINK 0x0020
- #define EXT4_FEATURE_RO_COMPAT_EXTRA_ISIZE 0x0040
-+#define EXT4_FEATURE_RO_COMPAT_QUOTA 0x0100
-
- #define EXT4_FEATURE_INCOMPAT_COMPRESSION 0x0001
- #define EXT4_FEATURE_INCOMPAT_FILETYPE 0x0002
-@@ -1352,7 +1360,8 @@ EXT4_INODE_BIT_FNS(state, state_flags)
- EXT4_FEATURE_RO_COMPAT_DIR_NLINK | \
- EXT4_FEATURE_RO_COMPAT_EXTRA_ISIZE | \
- EXT4_FEATURE_RO_COMPAT_BTREE_DIR |\
-- EXT4_FEATURE_RO_COMPAT_HUGE_FILE)
-+ EXT4_FEATURE_RO_COMPAT_HUGE_FILE| \
-+ EXT4_FEATURE_RO_COMPAT_QUOTA)
-
- /*
- * Default values for user and/or group using reserved blocks
-Index: linux-stage/fs/ext4/ext4_jbd2.h
-===================================================================
---- linux-stage.orig/fs/ext4/ext4_jbd2.h 2012-06-26 11:35:31.025105000 +0200
-+++ linux-stage/fs/ext4/ext4_jbd2.h 2012-06-26 11:37:38.250631000 +0200
-@@ -89,14 +89,20 @@
- #ifdef CONFIG_QUOTA
- /* Amount of blocks needed for quota update - we know that the structure was
- * allocated so we need to update only data block */
--#define EXT4_QUOTA_TRANS_BLOCKS(sb) (test_opt(sb, QUOTA) ? 1 : 0)
-+#define EXT4_QUOTA_TRANS_BLOCKS(sb) ((test_opt(sb, QUOTA) ||\
-+ EXT4_HAS_RO_COMPAT_FEATURE(sb, EXT4_FEATURE_RO_COMPAT_QUOTA)) ?\
-+ 1 : 0)
- /* Amount of blocks needed for quota insert/delete - we do some block writes
- * but inode, sb and group updates are done only once */
--#define EXT4_QUOTA_INIT_BLOCKS(sb) (test_opt(sb, QUOTA) ? (DQUOT_INIT_ALLOC*\
-- (EXT4_SINGLEDATA_TRANS_BLOCKS(sb)-3)+3+DQUOT_INIT_REWRITE) : 0)
-+#define EXT4_QUOTA_INIT_BLOCKS(sb) ((test_opt(sb, QUOTA) ||\
-+ EXT4_HAS_RO_COMPAT_FEATURE(sb, EXT4_FEATURE_RO_COMPAT_QUOTA)) ?\
-+ (DQUOT_INIT_ALLOC*(EXT4_SINGLEDATA_TRANS_BLOCKS(sb)-3)\
-+ +3+DQUOT_INIT_REWRITE) : 0)
-
--#define EXT4_QUOTA_DEL_BLOCKS(sb) (test_opt(sb, QUOTA) ? (DQUOT_DEL_ALLOC*\
-- (EXT4_SINGLEDATA_TRANS_BLOCKS(sb)-3)+3+DQUOT_DEL_REWRITE) : 0)
-+#define EXT4_QUOTA_DEL_BLOCKS(sb) ((test_opt(sb, QUOTA) ||\
-+ EXT4_HAS_RO_COMPAT_FEATURE(sb, EXT4_FEATURE_RO_COMPAT_QUOTA)) ?\
-+ (DQUOT_DEL_ALLOC*(EXT4_SINGLEDATA_TRANS_BLOCKS(sb)-3)\
-+ +3+DQUOT_DEL_REWRITE) : 0)
- #else
- #define EXT4_QUOTA_TRANS_BLOCKS(sb) 0
- #define EXT4_QUOTA_INIT_BLOCKS(sb) 0
-Index: linux-stage/fs/ext4/super.c
-===================================================================
---- linux-stage.orig/fs/ext4/super.c
-+++ linux-stage/fs/ext4/super.c
-@@ -115,6 +115,11 @@ void ext4_kvfree(void *ptr)
-
- static int bigendian_extents;
-
-+#ifdef CONFIG_QUOTA
-+static int ext4_acct_on(struct super_block *sb);
-+static int ext4_acct_off(struct super_block *sb);
-+#endif
-+
- ext4_fsblk_t ext4_block_bitmap(struct super_block *sb,
- struct ext4_group_desc *bg)
- {
-@@ -703,6 +708,12 @@ static void ext4_put_super(struct super_
-
- ext4_unregister_li_request(sb);
-
-+#ifdef CONFIG_QUOTA
-+ /* disable usage tracking which was enabled at mount time */
-+ if (EXT4_HAS_RO_COMPAT_FEATURE(sb, EXT4_FEATURE_RO_COMPAT_QUOTA))
-+ ext4_acct_off(sb);
-+#endif
-+
- flush_workqueue(sbi->dio_unwritten_wq);
- destroy_workqueue(sbi->dio_unwritten_wq);
-
-@@ -2162,14 +2173,22 @@ static void ext4_orphan_cleanup(struct s
- #ifdef CONFIG_QUOTA
- /* Needed for iput() to work correctly and not trash data */
- sb->s_flags |= MS_ACTIVE;
-- /* Turn on quotas so that they are updated correctly */
-- for (i = 0; i < MAXQUOTAS; i++) {
-- if (EXT4_SB(sb)->s_qf_names[i]) {
-- int ret = ext4_quota_on_mount(sb, i);
-- if (ret < 0)
-- ext4_msg(sb, KERN_ERR,
-- "Cannot turn on journaled "
-- "quota: error %d", ret);
-+ if (EXT4_HAS_RO_COMPAT_FEATURE(sb, EXT4_FEATURE_RO_COMPAT_QUOTA)) {
-+ int ret;
-+ ret = ext4_acct_on(sb);
-+ if (ret)
-+ ext4_msg(sb, KERN_ERR, "Failed to turn on usage "
-+ "tracking for quota: error %d", ret);
-+ } else {
-+ /* Turn on quotas so that they are updated correctly */
-+ for (i = 0; i < MAXQUOTAS; i++) {
-+ if (EXT4_SB(sb)->s_qf_names[i]) {
-+ int ret = ext4_quota_on_mount(sb, i);
-+ if (ret < 0)
-+ ext4_msg(sb, KERN_ERR,
-+ "Cannot turn on journaled "
-+ "quota: error %d", ret);
-+ }
- }
- }
- #endif
-@@ -2213,10 +2232,14 @@ static void ext4_orphan_cleanup(struct s
- ext4_msg(sb, KERN_INFO, "%d truncate%s cleaned up",
- PLURAL(nr_truncates));
- #ifdef CONFIG_QUOTA
-- /* Turn quotas off */
-- for (i = 0; i < MAXQUOTAS; i++) {
-- if (sb_dqopt(sb)->files[i])
-- vfs_quota_off(sb, i, 0);
-+ if (EXT4_HAS_RO_COMPAT_FEATURE(sb, EXT4_FEATURE_RO_COMPAT_QUOTA)) {
-+ ext4_acct_off(sb);
-+ } else {
-+ /* Turn quotas off */
-+ for (i = 0; i < MAXQUOTAS; i++) {
-+ if (sb_dqopt(sb)->files[i])
-+ vfs_quota_off(sb, i, 0);
-+ }
- }
- #endif
- sb->s_flags = s_flags; /* Restore MS_RDONLY status */
-@@ -3408,6 +3431,15 @@ static int ext4_fill_super(struct super_
- #ifdef CONFIG_QUOTA
- sb->s_qcop = &ext4_qctl_operations;
- sb->dq_op = &ext4_quota_operations;
-+
-+ if (EXT4_HAS_RO_COMPAT_FEATURE(sb, EXT4_FEATURE_RO_COMPAT_QUOTA)) {
-+ /* Use new qctl operations with quota on function that does not
-+ * require user specified quota file path. */
-+ sb->s_qcop = &ext4_qctl_operations;
-+
-+ sbi->s_qf_inums[USRQUOTA] = es->s_usr_quota_inum;
-+ sbi->s_qf_inums[GRPQUOTA] = es->s_grp_quota_inum;
-+ }
- #endif
- INIT_LIST_HEAD(&sbi->s_orphan); /* unlinked but open files */
- mutex_init(&sbi->s_orphan_lock);
-@@ -3633,13 +3665,40 @@ no_journal:
- } else
- descr = "out journal";
-
-- ext4_msg(sb, KERN_INFO, "mounted filesystem with%s. "
-- "Opts: %s%s", descr, sbi->s_es->s_mount_opts,
-+#ifdef CONFIG_QUOTA
-+ /* Enable space tracking during mount; enforcement can be enabled/disabled
-+ * later with quota_on/off */
-+ if (EXT4_HAS_RO_COMPAT_FEATURE(sb, EXT4_FEATURE_RO_COMPAT_QUOTA) &&
-+ !(sb->s_flags & MS_RDONLY)) {
-+ ret = ext4_acct_on(sb);
-+ if (ret) {
-+ ext4_msg(sb, KERN_ERR, "Can't enable usage tracking on "
-+ "a filesystem with the QUOTA feature set");
-+ goto failed_mount8;
-+ }
-+ }
-+#else
-+ if (EXT4_HAS_RO_COMPAT_FEATURE(sb, EXT4_FEATURE_RO_COMPAT_QUOTA) &&
-+ !(sb->s_flags & MS_RDONLY))
-+ ext4_msg(sb, KERN_WARNING, "Mounting a filesystem with the "
-+ "QUOTA feature set whereas the kernel does not "
-+ "support quota, e2fsck will be required to fix usage "
-+ "information");
-+
-+#endif /* CONFIG_QUOTA */
-+
-+ ext4_msg(sb, KERN_INFO, "mounted filesystem with%s. quota=%s. "
-+ "Opts: %s%s", descr, sb_any_quota_loaded(sb) ? "on" : "off",
-+ sbi->s_es->s_mount_opts,
- *sbi->s_es->s_mount_opts ? "; " : "");
-
- lock_kernel();
- return 0;
-
-+#ifdef CONFIG_QUOTA
-+failed_mount8:
-+ kobject_del(&sbi->s_kobj);
-+#endif
- cantfind_ext4:
- if (!silent)
- ext4_msg(sb, KERN_ERR, "VFS: Can't find ext4 filesystem");
-@@ -3991,6 +4050,12 @@ static int ext4_commit_super(struct supe
- &EXT4_SB(sb)->s_freeblocks_counter));
- es->s_free_inodes_count = cpu_to_le32(percpu_counter_sum_positive(
- &EXT4_SB(sb)->s_freeinodes_counter));
-+#ifdef CONFIG_QUOTA
-+ if (EXT4_HAS_RO_COMPAT_FEATURE(sb, EXT4_FEATURE_RO_COMPAT_QUOTA)) {
-+ es->s_usr_quota_inum = EXT4_SB(sb)->s_qf_inums[USRQUOTA];
-+ es->s_grp_quota_inum = EXT4_SB(sb)->s_qf_inums[GRPQUOTA];
-+ }
-+#endif
- sb->s_dirt = 0;
- BUFFER_TRACE(sbh, "marking dirty");
- mark_buffer_dirty(sbh);
-@@ -4552,6 +4617,22 @@ static int ext4_quota_on(struct super_bl
- int err;
- struct path path;
-
-+ /* When QUOTA feature is set, quota on enables enforcement, accounting
-+ * being already enabled at mount time */
-+ if (EXT4_HAS_RO_COMPAT_FEATURE(sb, EXT4_FEATURE_RO_COMPAT_QUOTA)) {
-+ struct inode *qf_inode;
-+
-+ if (!EXT4_SB(sb)->s_qf_inums[type])
-+ return -EINVAL;
-+ qf_inode = ext4_iget(sb, EXT4_SB(sb)->s_qf_inums[type]);
-+ if (IS_ERR(qf_inode))
-+ return PTR_ERR(qf_inode);
-+ err = vfs_quota_enable(qf_inode, type, QFMT_VFS_V1,
-+ DQUOT_LIMITS_ENABLED);
-+ iput(qf_inode);
-+ return err;
-+ }
-+
- if (!test_opt(sb, QUOTA))
- return -EINVAL;
- /* When remounting, no checks are needed and in fact, name is NULL */
-@@ -4651,9 +4732,114 @@ static int ext4_quota_off(struct super_b
- iput(inode);
- }
-
-+ /* When QUOTA feature is set, quota off just disables enforcement but
-+ * leaves accounting on */
-+ if (EXT4_HAS_RO_COMPAT_FEATURE(sb, EXT4_FEATURE_RO_COMPAT_QUOTA))
-+ return vfs_quota_disable(sb, type, DQUOT_LIMITS_ENABLED);
-+
- return vfs_quota_off(sb, type, remount);
- }
-
-+/*
-+ * New quota_on function that is used to turn accounting on when QUOTA
-+ * feature is set.
-+ */
-+static int ext4_acct_on(struct super_block *sb)
-+{
-+ struct inode *qf_inode[MAXQUOTAS];
-+ int rc;
-+
-+ if (!EXT4_HAS_RO_COMPAT_FEATURE(sb, EXT4_FEATURE_RO_COMPAT_QUOTA) ||
-+ !EXT4_SB(sb)->s_qf_inums[USRQUOTA] ||
-+ !EXT4_SB(sb)->s_qf_inums[GRPQUOTA])
-+ return -EINVAL;
-+
-+ qf_inode[USRQUOTA] = ext4_iget(sb, EXT4_SB(sb)->s_qf_inums[USRQUOTA]);
-+ if (IS_ERR(qf_inode[USRQUOTA])) {
-+ EXT4_SB(sb)->s_qf_inums[USRQUOTA] = 0;
-+ return PTR_ERR(qf_inode[USRQUOTA]);
-+ }
-+ qf_inode[GRPQUOTA] = ext4_iget(sb, EXT4_SB(sb)->s_qf_inums[GRPQUOTA]);
-+ if (IS_ERR(qf_inode[GRPQUOTA])) {
-+ iput(qf_inode[USRQUOTA]);
-+ EXT4_SB(sb)->s_qf_inums[GRPQUOTA] = 0;
-+ return PTR_ERR(qf_inode[GRPQUOTA]);
-+ }
-+
-+ /*
-+ * When we journal data on quota file, we have to flush journal to see
-+ * all updates to the file when we bypass pagecache...
-+ */
-+ if (EXT4_SB(sb)->s_journal) {
-+ /*
-+ * We don't need to lock updates but journal_flush() could
-+ * otherwise be livelocked...
-+ */
-+ jbd2_journal_lock_updates(EXT4_SB(sb)->s_journal);
-+ rc = jbd2_journal_flush(EXT4_SB(sb)->s_journal);
-+ jbd2_journal_unlock_updates(EXT4_SB(sb)->s_journal);
-+ if (rc) {
-+ iput(qf_inode[USRQUOTA]);
-+ iput(qf_inode[GRPQUOTA]);
-+ return rc;
-+ }
-+ }
-+
-+ /* only enable quota accounting by default */
-+ rc = vfs_quota_enable(qf_inode[USRQUOTA], USRQUOTA, QFMT_VFS_V1,
-+ DQUOT_USAGE_ENABLED);
-+ iput(qf_inode[USRQUOTA]);
-+ if (rc) {
-+ iput(qf_inode[GRPQUOTA]);
-+ return rc;
-+ }
-+ rc = vfs_quota_enable(qf_inode[GRPQUOTA], GRPQUOTA, QFMT_VFS_V1,
-+ DQUOT_USAGE_ENABLED);
-+ iput(qf_inode[GRPQUOTA]);
-+ return rc;
-+}
-+
-+/*
-+ * New quota_off function that is used to turn off accounting when QUOTA feature
-+ * is set.
-+ */
-+static int ext4_acct_off(struct super_block *sb)
-+{
-+ int type, rc = 0;
-+
-+ if (!EXT4_HAS_RO_COMPAT_FEATURE(sb, EXT4_FEATURE_RO_COMPAT_QUOTA))
-+ return -EINVAL;
-+
-+ for (type = 0; type < MAXQUOTAS; type++) {
-+ struct inode *inode = sb_dqopt(sb)->files[type];
-+ handle_t *handle;
-+
-+ if (!inode)
-+ continue;
-+ /* Update modification times of quota files when userspace can
-+ * start looking at them */
-+ handle = ext4_journal_start(inode, 1);
-+ if (IS_ERR(handle))
-+ goto out;
-+
-+ inode->i_mtime = inode->i_ctime = CURRENT_TIME;
-+ ext4_mark_inode_dirty(handle, inode);
-+ ext4_journal_stop(handle);
-+ }
-+
-+out:
-+ for (type = 0; type < MAXQUOTAS; type++) {
-+ int ret;
-+ ret = vfs_quota_disable(sb, type,
-+ DQUOT_USAGE_ENABLED | DQUOT_LIMITS_ENABLED);
-+ if (!rc && ret)
-+ rc = ret;
-+ }
-+ return rc;
-+}
-+
-+
-+
- /* Read data from quotafile - avoid pagecache and such because we cannot afford
- * acquiring the locks... As quota files are never truncated and quota code
- * itself serializes the operations (and noone else should touch the files)
+++ /dev/null
-commit ca0e05e4b15193aeba72b995e90de990db7f8304
-Author: Dmitry Monakhov <dmonakhov@openvz.org>
-Date: Sun Aug 1 17:48:36 2010 -0400
-
- ext4: force block allocation on quota_off
-
- Perform full sync procedure so that any delayed allocation blocks are
- allocated so quota will be consistent.
-
- Signed-off-by: Dmitry Monakhov <dmonakhov@openvz.org>
- Signed-off-by: "Theodore Ts'o" <tytso@mit.edu>
-
-Index: linux-stage/fs/ext4/super.c
-===================================================================
---- linux-stage.orig/fs/ext4/super.c 2012-06-26 09:37:06.039508000 +0200
-+++ linux-stage/fs/ext4/super.c 2012-06-26 11:35:09.824099000 +0200
-@@ -1104,6 +1104,7 @@ static int ext4_mark_dquot_dirty(struct
- static int ext4_write_info(struct super_block *sb, int type);
- static int ext4_quota_on(struct super_block *sb, int type, int format_id,
- char *path, int remount);
-+static int ext4_quota_off(struct super_block *sb, int type, int remount);
- static int ext4_quota_on_mount(struct super_block *sb, int type);
- static ssize_t ext4_quota_read(struct super_block *sb, int type, char *data,
- size_t len, loff_t off);
-@@ -1173,7 +1174,7 @@ static const struct dquot_operations ext
-
- static const struct quotactl_ops ext4_qctl_operations = {
- .quota_on = ext4_quota_on,
-- .quota_off = vfs_quota_off,
-+ .quota_off = ext4_quota_off,
- .quota_sync = vfs_quota_sync,
- .get_info = vfs_get_dqinfo,
- .set_info = vfs_set_dqinfo,
-@@ -4578,6 +4579,28 @@ static int ext4_quota_on(struct super_bl
- return err;
- }
-
-+static int ext4_quota_off(struct super_block *sb, int type, int remount)
-+{
-+ struct quota_info *dqopt = sb_dqopt(sb);
-+
-+ mutex_lock(&dqopt->dqonoff_mutex);
-+ if (!sb_any_quota_loaded(sb)) {
-+ /* nothing to do */
-+ mutex_unlock(&dqopt->dqonoff_mutex);
-+ return 0;
-+ }
-+ mutex_unlock(&dqopt->dqonoff_mutex);
-+
-+ /* Force all delayed allocation blocks to be allocated. */
-+ if (test_opt(sb, DELALLOC)) {
-+ down_read(&sb->s_umount);
-+ sync_filesystem(sb);
-+ up_read(&sb->s_umount);
-+ }
-+
-+ return vfs_quota_off(sb, type, remount);
-+}
-+
- /* Read data from quotafile - avoid pagecache and such because we cannot afford
- * acquiring the locks... As quota files are never truncated and quota code
- * itself serializes the operations (and noone else should touch the files)
+++ /dev/null
---- linux-stage.orig/fs/ext4/super.c
-+++ linux-stage/fs/ext4/super.c
-@@ -3613,6 +3613,18 @@ static int ext4_fill_super(struct super_
-
- sbi->s_journal->j_commit_callback = ext4_journal_commit_callback;
-
-+ /*
-+ * The journal may have updated the bg summary counts, so we
-+ * need to update the global counters.
-+ */
-+ percpu_counter_set(&sbi->s_freeblocks_counter,
-+ ext4_count_free_blocks(sb));
-+ percpu_counter_set(&sbi->s_freeinodes_counter,
-+ ext4_count_free_inodes(sb));
-+ percpu_counter_set(&sbi->s_dirs_counter,
-+ ext4_count_dirs(sb));
-+ percpu_counter_set(&sbi->s_dirtyblocks_counter, 0);
-+
- no_journal:
-
- if (test_opt(sb, NOBH)) {
+++ /dev/null
-Index: linux-stage/fs/ext4/ext4_extents.h
-===================================================================
---- linux-stage.orig/fs/ext4/ext4_extents.h
-+++ linux-stage/fs/ext4/ext4_extents.h
-@@ -113,6 +113,7 @@ struct ext4_extent_header {
- * Truncate uses it to simulate recursive walking.
- */
- struct ext4_ext_path {
-+ unsigned long p_generation;
- ext4_fsblk_t p_block;
- __u16 p_depth;
- struct ext4_extent *p_ext;
-Index: linux-stage/fs/ext4/extents.c
-===================================================================
---- linux-stage.orig/fs/ext4/extents.c
-+++ linux-stage/fs/ext4/extents.c
-@@ -1855,7 +1855,7 @@ int ext4_ext_walk_space(struct inode *in
- {
- struct ext4_ext_path *path = NULL;
- struct ext4_ext_cache cbex;
-- struct ext4_extent *ex;
-+ struct ext4_extent _ex, *ex;
- ext4_lblk_t next, start = 0, end = 0;
- ext4_lblk_t last = block + num;
- int depth, exists, err = 0;
-@@ -1868,21 +1868,29 @@ int ext4_ext_walk_space(struct inode *in
- /* find extent for this block */
- down_read(&EXT4_I(inode)->i_data_sem);
- path = ext4_ext_find_extent(inode, block, path);
-- up_read(&EXT4_I(inode)->i_data_sem);
- if (IS_ERR(path)) {
-+ up_read(&EXT4_I(inode)->i_data_sem);
- err = PTR_ERR(path);
- path = NULL;
- break;
- }
-
-+ path[0].p_generation = EXT4_I(inode)->i_ext_generation;
-+
- depth = ext_depth(inode);
- if (unlikely(path[depth].p_hdr == NULL)) {
-+ up_read(&EXT4_I(inode)->i_data_sem);
- EXT4_ERROR_INODE(inode, "path[%d].p_hdr == NULL", depth);
- err = -EIO;
- break;
- }
-- ex = path[depth].p_ext;
-+ ex = NULL;
-+ if (path[depth].p_ext) {
-+ _ex = *path[depth].p_ext;
-+ ex = &_ex;
-+ }
- next = ext4_ext_next_allocated_block(path);
-+ up_read(&EXT4_I(inode)->i_data_sem);
-
- exists = 0;
- if (!ex) {
-@@ -1936,7 +1944,7 @@ int ext4_ext_walk_space(struct inode *in
- err = -EIO;
- break;
- }
-- err = func(inode, path, &cbex, ex, cbdata);
-+ err = func(inode, path, &cbex, NULL, cbdata);
- ext4_ext_drop_refs(path);
-
- if (err < 0)
+++ /dev/null
-From 5930ea643805feb50a2f8383ae12eb6f10935e49 Mon Sep 17 00:00:00 2001
-From: Theodore Ts'o <tytso@mit.edu>
-Date: Wed, 31 Aug 2011 12:02:51 -0400
-Subject: [PATCH] ext4: call ext4_handle_dirty_metadata with correct inode in
- ext4_dx_add_entry
-
-ext4_dx_add_entry manipulates bh2 and frames[0].bh, which are two buffer_heads
-that point to directory blocks assigned to the directory inode. However, the
-function calls ext4_handle_dirty_metadata with the inode of the file that's
-being added to the directory, not the directory inode itself. Therefore,
-correct the code to dirty the directory buffers with the directory inode, not
-the file inode.
-
-Signed-off-by: Darrick J. Wong <djwong@us.ibm.com>
-Signed-off-by: "Theodore Ts'o" <tytso@mit.edu>
-Cc: stable@kernel.org
----
- fs/ext4/namei.c | 4 ++--
- 1 file changed, 2 insertions(+), 2 deletions(-)
-
-diff --git a/fs/ext4/namei.c b/fs/ext4/namei.c
-index f0abe43..a067835 100644
---- a/fs/ext4/namei.c
-+++ b/fs/ext4/namei.c
-@@ -1585,7 +1585,7 @@ static int ext4_dx_add_entry(handle_t *handle, struct dentry *dentry,
- dxtrace(dx_show_index("node", frames[1].entries));
- dxtrace(dx_show_index("node",
- ((struct dx_node *) bh2->b_data)->entries));
-- err = ext4_handle_dirty_metadata(handle, inode, bh2);
-+ err = ext4_handle_dirty_metadata(handle, dir, bh2);
- if (err)
- goto journal_error;
- brelse (bh2);
-@@ -1611,7 +1611,11 @@ static int ext4_dx_add_entry(handle_t *handle, struct dentry *dentry,
- if (err)
- goto journal_error;
- }
-- ext4_handle_dirty_metadata(handle, inode, frames[0].bh);
-+ err = ext4_handle_dirty_metadata(handle, dir, frames[0].bh);
-+ if (err) {
-+ ext4_std_error(inode->i_sb, err);
-+ goto cleanup;
-+ }
- }
- de = do_split(handle, dir, &bh, frame, &hinfo, &err);
- if (!de)
---
-2.1.0
-
+++ /dev/null
-From f18a5f21c25707b4fe64b326e2b4d150565e7300 Mon Sep 17 00:00:00 2001
-From: Theodore Ts'o <tytso@mit.edu>
-Date: Mon, 1 Aug 2011 08:45:38 -0400
-Subject: ext4: use ext4_kvzalloc()/ext4_kvmalloc() for s_group_desc and s_group_info
-Git-commit: f18a5f21
-Patch-mainline: v3.1-rc1
-
-Upstream-Signed-off-by: "Theodore Ts'o" <tytso@mit.edu>
-Signed-off-by: Jeff Mahoney <jeffm@suse.com>
----
- fs/ext4/mballoc.c | 6 +++---
- fs/ext4/resize.c | 13 +++++++------
- fs/ext4/super.c | 9 +++++----
- 3 files changed, 15 insertions(+), 13 deletions(-)
-
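
The ext4_kvmalloc()/ext4_kvzalloc()/ext4_kvfree() helpers themselves are not part of these hunks; conceptually they try a regular kmalloc() first and fall back to vmalloc() for large or fragmented allocations. A minimal sketch of that pattern (an assumption about their behaviour, not the mainline implementation):

	#include <linux/slab.h>
	#include <linux/vmalloc.h>
	#include <linux/mm.h>

	/* Try kmalloc first; fall back to vmalloc if the allocation is too
	 * large or memory is too fragmented for a contiguous buffer. */
	static void *kv_alloc_sketch(size_t size, gfp_t flags)
	{
		void *ptr = kmalloc(size, flags | __GFP_NOWARN);

		if (!ptr)
			ptr = vmalloc(size);
		return ptr;
	}

	/* Free with the allocator that actually provided the memory. */
	static void kv_free_sketch(void *ptr)
	{
		if (is_vmalloc_addr(ptr))
			vfree(ptr);
		else
			kfree(ptr);
	}
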
---- a/fs/ext4/mballoc.c
-+++ b/fs/ext4/mballoc.c
-@@ -2307,7 +2307,7 @@ static int ext4_mb_init_backend(struct s
- /* An 8TB filesystem with 64-bit pointers requires a 4096 byte
- * kmalloc. A 128kb malloc should suffice for a 256TB filesystem.
- * So a two level scheme suffices for now. */
-- sbi->s_group_info = kmalloc(array_size, GFP_KERNEL);
-+ sbi->s_group_info = ext4_kvzalloc(array_size, GFP_KERNEL);
- if (sbi->s_group_info == NULL) {
- printk(KERN_ERR "EXT4-fs: can't allocate buddy meta group\n");
- return -ENOMEM;
-@@ -2339,7 +2339,7 @@ err_freebuddy:
- kfree(sbi->s_group_info[i]);
- iput(sbi->s_buddy_cache);
- err_freesgi:
-- kfree(sbi->s_group_info);
-+ ext4_kvfree(sbi->s_group_info);
- return -ENOMEM;
- }
-
-@@ -2464,7 +2464,7 @@ int ext4_mb_release(struct super_block *
- EXT4_DESC_PER_BLOCK_BITS(sb);
- for (i = 0; i < num_meta_group_infos; i++)
- kfree(sbi->s_group_info[i]);
-- kfree(sbi->s_group_info);
-+ ext4_kvfree(sbi->s_group_info);
- }
- kfree(sbi->s_mb_offsets);
- kfree(sbi->s_mb_maxs);
---- a/fs/ext4/resize.c
-+++ b/fs/ext4/resize.c
-@@ -435,12 +435,13 @@ static int add_new_gdb(handle_t *handle,
- if ((err = ext4_reserve_inode_write(handle, inode, &iloc)))
- goto exit_dindj;
-
-- n_group_desc = kmalloc((gdb_num + 1) * sizeof(struct buffer_head *),
-- GFP_NOFS);
-+ n_group_desc = ext4_kvmalloc((gdb_num + 1) *
-+ sizeof(struct buffer_head *),
-+ GFP_NOFS);
- if (!n_group_desc) {
- err = -ENOMEM;
-- ext4_warning(sb,
-- "not enough memory for %lu groups", gdb_num + 1);
-+ ext4_warning(sb, "not enough memory for %lu groups",
-+ gdb_num + 1);
- goto exit_inode;
- }
-
-@@ -467,7 +468,7 @@ static int add_new_gdb(handle_t *handle,
- n_group_desc[gdb_num] = *primary;
- EXT4_SB(sb)->s_group_desc = n_group_desc;
- EXT4_SB(sb)->s_gdb_count++;
-- kfree(o_group_desc);
-+ ext4_kvfree(o_group_desc);
-
- le16_add_cpu(&es->s_reserved_gdt_blocks, -1);
- ext4_handle_dirty_metadata(handle, NULL, EXT4_SB(sb)->s_sbh);
-@@ -475,7 +476,7 @@ static int add_new_gdb(handle_t *handle,
- return 0;
-
- exit_inode:
-- kfree(n_group_desc);
-+ ext4_kvfree(n_group_desc);
- /* ext4_journal_release_buffer(handle, iloc.bh); */
- brelse(iloc.bh);
- exit_dindj:
---- a/fs/ext4/super.c
-+++ b/fs/ext4/super.c
-@@ -705,7 +705,7 @@ static void ext4_put_super(struct super_
-
- for (i = 0; i < sbi->s_gdb_count; i++)
- brelse(sbi->s_group_desc[i]);
-- kfree(sbi->s_group_desc);
-+ ext4_kvfree(sbi->s_group_desc);
- ext4_kvfree(sbi->s_flex_groups);
- percpu_counter_destroy(&sbi->s_freeblocks_counter);
- percpu_counter_destroy(&sbi->s_freeinodes_counter);
-@@ -3169,8 +3169,9 @@ static int ext4_fill_super(struct super_
- (EXT4_MAX_BLOCK_FILE_PHYS / EXT4_BLOCKS_PER_GROUP(sb)));
- db_count = (sbi->s_groups_count + EXT4_DESC_PER_BLOCK(sb) - 1) /
- EXT4_DESC_PER_BLOCK(sb);
-- sbi->s_group_desc = kmalloc(db_count * sizeof(struct buffer_head *),
-- GFP_KERNEL);
-+ sbi->s_group_desc = ext4_kvmalloc(db_count *
-+ sizeof(struct buffer_head *),
-+ GFP_KERNEL);
- if (sbi->s_group_desc == NULL) {
- ext4_msg(sb, KERN_ERR, "not enough memory");
- goto failed_mount;
-@@ -3495,7 +3496,7 @@ failed_mount3:
- failed_mount2:
- for (i = 0; i < db_count; i++)
- brelse(sbi->s_group_desc[i]);
-- kfree(sbi->s_group_desc);
-+ ext4_kvfree(sbi->s_group_desc);
- failed_mount:
- if (sbi->s_proc) {
- remove_proc_entry(sb->s_id, ext4_proc_root);
+++ /dev/null
-From 94de56ab2062be59d80e2efb7c0dc60ecf616075 Mon Sep 17 00:00:00 2001
-From: Joe Perches <joe@perches.com>
-Date: Sun, 19 Dec 2010 22:21:02 -0500
-Subject: ext4: Use vzalloc in ext4_fill_flex_info()
-Git-commit: 94de56ab
-Patch-mainline: v2.6.38-rc1
-
-Signed-off-by: Joe Perches <joe@perches.com>
-Signed-off-by: "Theodore Ts'o" <tytso@mit.edu>
-Acked-by: Jeff Mahoney <jeffm@suse.com>
----
- fs/ext4/super.c | 15 +++++++--------
- 1 file changed, 7 insertions(+), 8 deletions(-)
-
---- a/fs/ext4/super.c
-+++ b/fs/ext4/super.c
-@@ -1817,14 +1817,13 @@ static int ext4_fill_flex_info(struct su
- size = flex_group_count * sizeof(struct flex_groups);
- sbi->s_flex_groups = kzalloc(size, GFP_KERNEL);
- if (sbi->s_flex_groups == NULL) {
-- sbi->s_flex_groups = vmalloc(size);
-- if (sbi->s_flex_groups)
-- memset(sbi->s_flex_groups, 0, size);
-- }
-- if (sbi->s_flex_groups == NULL) {
-- ext4_msg(sb, KERN_ERR, "not enough memory for "
-- "%u flex groups", flex_group_count);
-- goto failed;
-+ sbi->s_flex_groups = vzalloc(size);
-+ if (sbi->s_flex_groups == NULL) {
-+ ext4_msg(sb, KERN_ERR,
-+ "not enough memory for %u flex groups",
-+ flex_group_count);
-+ goto failed;
-+ }
- }
-
- for (i = 0; i < sbi->s_groups_count; i++) {
+++ /dev/null
-Index: linux-stage/fs/ext4/super.c
-===================================================================
---- linux-stage.orig/fs/ext4/super.c
-+++ linux-stage/fs/ext4/super.c
-@@ -1117,9 +1117,53 @@ static ssize_t ext4_quota_read(struct su
- static ssize_t ext4_quota_write(struct super_block *sb, int type,
- const char *data, size_t len, loff_t off);
-
-+static int ext4_dquot_initialize(struct inode *inode, int type)
-+{
-+ handle_t *handle;
-+ int ret, err;
-+
-+ if (IS_NOQUOTA(inode))
-+ return 0;
-+
-+ /* We may create quota structure so we need to reserve enough blocks */
-+ handle = ext4_journal_start(inode, 2*EXT4_QUOTA_INIT_BLOCKS(inode->i_sb));
-+ if (IS_ERR(handle))
-+ return PTR_ERR(handle);
-+ ret = dquot_initialize(inode, type);
-+ err = ext4_journal_stop(handle);
-+ if (!ret)
-+ ret = err;
-+ return ret;
-+}
-+
-+static int ext4_dquot_drop(struct inode *inode)
-+{
-+ handle_t *handle;
-+ int ret, err;
-+
-+ if (IS_NOQUOTA(inode))
-+ return 0;
-+
-+ /* We may delete quota structure so we need to reserve enough blocks */
-+ handle = ext4_journal_start(inode, 2*EXT4_QUOTA_DEL_BLOCKS(inode->i_sb));
-+ if (IS_ERR(handle)) {
-+ /*
-+ * We call dquot_drop() anyway to at least release references
-+ * to quota structures so that umount does not hang.
-+ */
-+ dquot_drop(inode);
-+ return PTR_ERR(handle);
-+ }
-+ ret = dquot_drop(inode);
-+ err = ext4_journal_stop(handle);
-+ if (!ret)
-+ ret = err;
-+ return ret;
-+}
-+
- static const struct dquot_operations ext4_quota_operations = {
-- .initialize = dquot_initialize,
-- .drop = dquot_drop,
-+ .initialize = ext4_dquot_initialize,
-+ .drop = ext4_dquot_drop,
- .alloc_space = dquot_alloc_space,
- .reserve_space = dquot_reserve_space,
- .claim_space = dquot_claim_space,
+++ /dev/null
-Index: linux-stage/fs/ext4/inode.c
-===================================================================
---- linux-stage.orig/fs/ext4/inode.c
-+++ linux-stage/fs/ext4/inode.c
-@@ -5654,7 +5654,7 @@ static int ext4_do_update_inode(handle_t
- raw_inode->i_file_acl_high =
- cpu_to_le16(ei->i_file_acl >> 32);
- raw_inode->i_file_acl_lo = cpu_to_le32(ei->i_file_acl);
-- if (ei->i_disksize != ext4_isize(raw_inode)) {
-+ if (ei->i_disksize != ext4_isize(inode->i_sb, raw_inode)) {
- ext4_isize_set(raw_inode, ei->i_disksize);
- need_datasync = 1;
- }
+++ /dev/null
-Index: linux-stage/fs/ext4/mballoc.c
-===================================================================
---- linux-stage.orig/fs/ext4/mballoc.c
-+++ linux-stage/fs/ext4/mballoc.c
-@@ -4825,6 +4825,11 @@ do_more:
- * be used until this transaction is committed
- */
- new_entry = kmem_cache_alloc(ext4_free_data_cachep, GFP_NOFS);
-+ if (!new_entry) {
-+ ext4_mb_release_desc(&e4b);
-+ err = -ENOMEM;
-+ goto error_return;
-+ }
- new_entry->efd_start_blk = bit;
- new_entry->efd_group = block_group;
- new_entry->efd_count = count;
+++ /dev/null
-diff -urpN linux-stage.orig/fs/ext4/super.c linux-stage/fs/ext4/super.c
---- linux-stage.orig/fs/ext4/super.c 2013-05-13 09:35:17.628478645 -0400
-+++ linux-stage/fs/ext4/super.c 2013-05-13 09:46:08.062358974 -0400
-@@ -1268,6 +1268,7 @@ enum {
- Opt_extents, Opt_noextents,
- Opt_no_mbcache,
- Opt_discard, Opt_nodiscard, Opt_init_itable, Opt_noinit_itable,
-+ Opt_max_dir_size_kb,
- };
-
- static const match_table_t tokens = {
-@@ -1350,6 +1350,7 @@ static const match_table_t tokens = {
- {Opt_init_itable, "init_itable=%u"},
- {Opt_init_itable, "init_itable"},
- {Opt_noinit_itable, "noinit_itable"},
-+ {Opt_max_dir_size_kb, "max_dir_size_kb=%u"},
- {Opt_err, NULL},
- };
-
-@@ -1736,6 +1737,13 @@ set_qf_format:
- case Opt_nodelalloc:
- clear_opt(sbi->s_mount_opt, DELALLOC);
- break;
-+ case Opt_max_dir_size_kb:
-+ if (match_int(&args[0], &option))
-+ return 0;
-+ if (option < 0)
-+ return 0;
-+ sbi->s_max_dir_size = option * 1024;
-+ break;
- case Opt_stripe:
- if (match_int(&args[0], &option))
- return 0;
+++ /dev/null
-Index: linux-stage/fs/ext4/mballoc.c
-===================================================================
---- linux-stage.orig/fs/ext4/mballoc.c
-+++ linux-stage/fs/ext4/mballoc.c
-@@ -3585,6 +3585,7 @@ ext4_mb_new_inode_pa(struct ext4_allocat
- INIT_LIST_HEAD(&pa->pa_group_list);
- pa->pa_deleted = 0;
- pa->pa_type = MB_INODE_PA;
-+ pa->pa_error = 0;
-
- mb_debug(1, "new inode pa %p: %llu/%u for %u\n", pa,
- pa->pa_pstart, pa->pa_len, pa->pa_lstart);
-@@ -3646,6 +3647,7 @@ ext4_mb_new_group_pa(struct ext4_allocat
- INIT_LIST_HEAD(&pa->pa_group_list);
- pa->pa_deleted = 0;
- pa->pa_type = MB_GROUP_PA;
-+ pa->pa_error = 0;
-
- mb_debug(1, "new group pa %p: %llu/%u for %u\n", pa,
- pa->pa_pstart, pa->pa_len, pa->pa_lstart);
-@@ -3708,7 +3710,9 @@ ext4_mb_release_inode_pa(struct ext4_bud
- int err = 0;
- int free = 0;
-
-+ assert_spin_locked(ext4_group_lock_ptr(sb, e4b->bd_group));
- BUG_ON(pa->pa_deleted == 0);
-+ BUG_ON(pa->pa_inode == NULL);
- ext4_get_group_no_and_offset(sb, pa->pa_pstart, &group, &bit);
- grp_blk_start = pa->pa_pstart - bit;
- BUG_ON(group != e4b->bd_group && pa->pa_len != 0);
-@@ -3744,19 +3748,27 @@ ext4_mb_release_inode_pa(struct ext4_bud
- mb_free_blocks(pa->pa_inode, e4b, bit, next - bit);
- bit = next + 1;
- }
-- if (free != pa->pa_free) {
-- printk(KERN_CRIT "pa %p: logic %lu, phys. %lu, len %lu\n",
-- pa, (unsigned long) pa->pa_lstart,
-- (unsigned long) pa->pa_pstart,
-- (unsigned long) pa->pa_len);
-+
-+ /* "free < pa->pa_free" means we may have double-allocated the same blocks;
-+ * otherwise we may just leave some free blocks unavailable, so no need to BUG. */
-+ if ((free > pa->pa_free && !pa->pa_error) || (free < pa->pa_free)) {
-+ ext4_error(sb, "pa free mismatch: [pa %p] "
-+ "[phy %lu] [logic %lu] [len %u] [free %u] "
-+ "[error %u] [inode %lu] [freed %u]", pa,
-+ (unsigned long)pa->pa_pstart,
-+ (unsigned long)pa->pa_lstart,
-+ (unsigned)pa->pa_len, (unsigned)pa->pa_free,
-+ (unsigned)pa->pa_error, pa->pa_inode->i_ino,
-+ free);
- ext4_grp_locked_error(sb, group,
-- __func__, "free %u, pa_free %u",
-- free, pa->pa_free);
-+ __func__, "free %u, pa_free %u",
-+ free, pa->pa_free);
- /*
- * pa is already deleted so we use the value obtained
- * from the bitmap and continue.
- */
- }
-+ BUG_ON(pa->pa_free != free);
- atomic_add(free, &sbi->s_mb_discarded);
-
- return err;
-@@ -4541,6 +4553,24 @@ repeat:
- ac->ac_b_ex.fe_len = 0;
- ar->len = 0;
- ext4_mb_show_ac(ac);
-+ if (ac->ac_pa) {
-+ struct ext4_prealloc_space *pa = ac->ac_pa;
-+ /* We cannot tell whether the bitmap has
-+ * been updated or not in the failure case, so we
-+ * cannot revert pa_free; just mark pa_error. */
-+ pa->pa_error++;
-+ ext4_error(sb,
-+ "Updating bitmap error: [err %d] "
-+ "[pa %p] [phy %lu] [logic %lu] "
-+ "[len %u] [free %u] [error %u] "
-+ "[inode %lu]", *errp, pa,
-+ (unsigned long)pa->pa_pstart,
-+ (unsigned long)pa->pa_lstart,
-+ (unsigned)pa->pa_len,
-+ (unsigned)pa->pa_free,
-+ (unsigned)pa->pa_error,
-+ pa->pa_inode ? pa->pa_inode->i_ino : 0);
-+ }
- }
- ext4_mb_release_context(ac);
- out:
-Index: linux-stage/fs/ext4/mballoc.h
-===================================================================
---- linux-stage.orig/fs/ext4/mballoc.h
-+++ linux-stage/fs/ext4/mballoc.h
-@@ -20,6 +20,7 @@
- #include <linux/version.h>
- #include <linux/blkdev.h>
- #include <linux/mutex.h>
-+#include <linux/genhd.h>
- #include "ext4_jbd2.h"
- #include "ext4.h"
-
-@@ -130,6 +131,7 @@ struct ext4_prealloc_space {
- ext4_grpblk_t pa_free; /* how many blocks are free */
- unsigned short pa_type; /* pa type. inode or group */
- spinlock_t *pa_obj_lock;
-+ unsigned short pa_error;
- struct inode *pa_inode; /* hack, for history only */
- };
-
+++ /dev/null
-Index: linux-stage/fs/ext4/ext4.h
-===================================================================
---- linux-stage.orig/fs/ext4/ext4.h
-+++ linux-stage/fs/ext4/ext4.h
-@@ -1757,6 +1760,9 @@ extern void ext4_add_groupblocks(handle_
- ext4_fsblk_t block, unsigned long count);
- extern int ext4_trim_fs(struct super_block *, struct fstrim_range *);
-
-+extern void ext4_mb_discard_inode_preallocations(struct inode *);
-+
-+
- /* inode.c */
- int ext4_forget(handle_t *handle, int is_metadata, struct inode *inode,
- struct buffer_head *bh, ext4_fsblk_t blocknr);
-Index: linux-stage/fs/ext4/ext4_extents.h
-===================================================================
---- linux-stage.orig/fs/ext4/ext4_extents.h
-+++ linux-stage/fs/ext4/ext4_extents.h
-@@ -58,6 +58,12 @@
- */
- #define EXT_STATS_
-
-+/*
-+ * define EXT4_ALLOC_NEEDED to 0 since block bitmap, group desc. and sb
-+ * are now accounted in ext4_ext_calc_credits_for_insert()
-+ */
-+#define EXT4_ALLOC_NEEDED 0
-+#define HAVE_EXT_PREPARE_CB_EXTENT
-
- /*
- * ext4_inode has i_block array (60 bytes total).
-@@ -291,6 +297,8 @@ extern int ext4_extent_tree_init(handle_
- extern int ext4_ext_calc_credits_for_single_extent(struct inode *inode,
- int num,
- struct ext4_ext_path *path);
-+extern int ext4_ext_calc_credits_for_insert(struct inode *,
-+ struct ext4_ext_path *);
- extern int ext4_can_extents_be_merged(struct inode *inode,
- struct ext4_extent *ex1,
- struct ext4_extent *ex2);
-Index: linux-stage/fs/ext4/ext4_jbd2.c
-===================================================================
---- linux-stage.orig/fs/ext4/ext4_jbd2.c
-+++ linux-stage/fs/ext4/ext4_jbd2.c
-@@ -31,6 +31,7 @@ int __ext4_journal_get_write_access(cons
- }
- return err;
- }
-+EXPORT_SYMBOL(__ext4_journal_get_write_access);
-
- int __ext4_journal_forget(const char *where, handle_t *handle,
- struct buffer_head *bh)
-@@ -107,3 +108,4 @@ int __ext4_handle_dirty_metadata(const c
- }
- return err;
- }
-+EXPORT_SYMBOL(__ext4_handle_dirty_metadata);
-Index: linux-stage/fs/ext4/extents.c
-===================================================================
---- linux-stage.orig/fs/ext4/extents.c
-+++ linux-stage/fs/ext4/extents.c
-@@ -2200,6 +2200,55 @@ int ext4_ext_calc_credits_for_single_ext
- }
-
- /*
-+ * This routine returns the maximum credits the extent tree can consume.
-+ * It should be OK for low-performance paths like ->writepage().
-+ * To allow many writing processes to fit into a single transaction,
-+ * the caller should calculate credits under truncate_mutex and
-+ * pass the actual path.
-+ */
-+int ext4_ext_calc_credits_for_insert(struct inode *inode,
-+ struct ext4_ext_path *path)
-+{
-+ int depth, needed;
-+
-+ if (path) {
-+ /* probably there is space in leaf? */
-+ depth = path->p_depth;
-+ if (le16_to_cpu(path[depth].p_hdr->eh_entries)
-+ < le16_to_cpu(path[depth].p_hdr->eh_max))
-+ return 1;
-+ }
-+
-+ /*
-+ * given 32bit logical block (4294967296 blocks), max. tree
-+ * can be 4 levels in depth -- 4 * 340^4 == 53453440000.
-+ * let's also add one more level for imbalance.
-+ */
-+ depth = 5;
-+
-+ /* allocation of new data block(s) */
-+ needed = 2;
-+
-+ /*
-+ * tree can be full, so it'd need to grow in depth:
-+ * we need one credit to modify old root, credits for
-+ * new root will be added in split accounting
-+ */
-+ needed += 1;
-+ /*
-+ * Index split can happen, we'd need:
-+ * allocate intermediate indexes (bitmap + group)
-+ * + change two blocks at each level, but root (already included)
-+ */
-+ needed += (depth * 2) + (depth * 2);
-+
-+ /* any allocation modifies superblock */
-+ needed += 1;
-+
-+ return needed;
-+}
-+
-+/*
- * How many index/leaf blocks need to change/allocate to modify nrblocks?
- *
- * if nrblocks are fit in a single extent (chunk flag is 1), then
-@@ -4488,3 +4537,12 @@ int ext4_fiemap(struct inode *inode, str
- return error;
- }
-
-+EXPORT_SYMBOL(ext4_ext_search_right);
-+EXPORT_SYMBOL(ext4_ext_search_left);
-+EXPORT_SYMBOL(ext4_ext_insert_extent);
-+EXPORT_SYMBOL(ext4_mb_new_blocks);
-+EXPORT_SYMBOL(ext4_ext_calc_credits_for_insert);
-+EXPORT_SYMBOL(ext4_mark_inode_dirty);
-+EXPORT_SYMBOL(ext4_ext_walk_space);
-+EXPORT_SYMBOL(ext4_ext_find_extent);
-+EXPORT_SYMBOL(ext4_ext_drop_refs);
-Index: linux-stage/fs/ext4/inode.c
-===================================================================
---- linux-stage.orig/fs/ext4/inode.c
-+++ linux-stage/fs/ext4/inode.c
-@@ -5549,6 +5549,7 @@ bad_inode:
- iget_failed(inode);
- return ERR_PTR(ret);
- }
-+EXPORT_SYMBOL(ext4_iget);
-
- static int ext4_inode_blocks_set(handle_t *handle,
- struct ext4_inode *raw_inode,
-Index: linux-stage/fs/ext4/mballoc.c
-===================================================================
---- linux-stage.orig/fs/ext4/mballoc.c
-+++ linux-stage/fs/ext4/mballoc.c
-@@ -4031,6 +4031,7 @@ repeat:
- if (ac)
- kmem_cache_free(ext4_ac_cachep, ac);
- }
-+EXPORT_SYMBOL(ext4_discard_preallocations);
-
- /*
- * finds all preallocated spaces and return blocks being freed to them
-@@ -5189,3 +5190,6 @@ out:
- range->len = trimmed * sb->s_blocksize;
- return ret;
- }
-+
-+EXPORT_SYMBOL(ext4_free_blocks);
-+
-Index: linux-stage/fs/ext4/super.c
-===================================================================
---- linux-stage.orig/fs/ext4/super.c
-+++ linux-stage/fs/ext4/super.c
-@@ -137,6 +137,7 @@ __u32 ext4_itable_unused_count(struct su
- (EXT4_DESC_SIZE(sb) >= EXT4_MIN_DESC_SIZE_64BIT ?
- (__u32)le16_to_cpu(bg->bg_itable_unused_hi) << 16 : 0);
- }
-+EXPORT_SYMBOL(ext4_itable_unused_count);
-
- void ext4_block_bitmap_set(struct super_block *sb,
- struct ext4_group_desc *bg, ext4_fsblk_t blk)
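
For the common case where no path is handed in, the estimate from ext4_ext_calc_credits_for_insert() added above works out to 2 (new data blocks) + 1 (old root) + (5 * 2 + 5 * 2) (index splits across a depth-5 tree) + 1 (superblock) = 24 credits.
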
+++ /dev/null
-Prevent an ext4 filesystem from being mounted multiple times.
-A sequence number is stored on disk and is periodically updated (every 5
-seconds by default) by a mounted filesystem.
-At mount time, we now wait for s_mmp_update_interval seconds to make sure
-that the MMP sequence does not change.
-In case of failure, the nodename, bdevname and the time at which the MMP
-block was last updated are displayed.
-Move all mmp code to a dedicated file (mmp.c).
-
-Signed-off-by: Andreas Dilger <adilger <at> whamcloud.com>
-Signed-off-by: Johann Lombardi <johann <at> whamcloud.com>
----
- fs/ext4/Makefile | 3 +-
- fs/ext4/ext4.h | 76 ++++++++++++-
- fs/ext4/mmp.c | 354 ++++++++++++++++++++++++++++++++++++++++++++++++++++++
- fs/ext4/super.c | 18 +++-
- 4 files changed, 447 insertions(+), 4 deletions(-)
- create mode 100644 fs/ext4/mmp.c
-
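
The check described above ("wait for s_mmp_update_interval seconds to make sure that the MMP sequence does not change") amounts to reading the sequence number twice with a delay in between. A condensed sketch of that handshake (simplified; the full version with interval adaptation and logging is ext4_multi_mount_protect() in mmp.c below, and read_mmp_block() is the helper introduced by this patch):

	/* Returns 1 if another node appears to have the filesystem mounted,
	 * 0 if it looks free, or a negative errno on I/O problems. */
	static int mmp_fs_in_use(struct super_block *sb, ext4_fsblk_t mmp_block,
				 unsigned int wait_secs)
	{
		struct buffer_head *bh = NULL;
		u32 seq1, seq2;
		int err;

		err = read_mmp_block(sb, &bh, mmp_block);
		if (err)
			return err;
		seq1 = le32_to_cpu(((struct mmp_struct *)bh->b_data)->mmp_seq);

		if (seq1 == EXT4_MMP_SEQ_CLEAN) {
			brelse(bh);
			return 0;		/* last mount unmounted cleanly */
		}
		if (seq1 == EXT4_MMP_SEQ_FSCK) {
			brelse(bh);
			return 1;		/* fsck currently owns the device */
		}

		/* give a live mount time to advance the sequence number */
		schedule_timeout_interruptible(wait_secs * HZ);

		err = read_mmp_block(sb, &bh, mmp_block);
		if (err)
			return err;
		seq2 = le32_to_cpu(((struct mmp_struct *)bh->b_data)->mmp_seq);
		brelse(bh);

		return seq1 != seq2;		/* changed => another node is alive */
	}
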
-Index: linux-stage/fs/ext4/Makefile
-===================================================================
---- linux-stage.orig/fs/ext4/Makefile
-+++ linux-stage/fs/ext4/Makefile
-@@ -6,7 +6,8 @@ obj-$(CONFIG_EXT4_FS) += ext4.o
-
- ext4-y := balloc.o bitmap.o dir.o file.o fsync.o ialloc.o inode.o \
- ioctl.o namei.o super.o symlink.o hash.o resize.o extents.o \
-- ext4_jbd2.o migrate.o mballoc.o block_validity.o move_extent.o
-+ ext4_jbd2.o migrate.o mballoc.o block_validity.o move_extent.o \
-+ mmp.o
-
- ext4-$(CONFIG_EXT4_FS_XATTR) += xattr.o xattr_user.o xattr_trusted.o
- ext4-$(CONFIG_EXT4_FS_POSIX_ACL) += acl.o
-Index: linux-stage/fs/ext4/ext4.h
-===================================================================
---- linux-stage.orig/fs/ext4/ext4.h
-+++ linux-stage/fs/ext4/ext4.h
-@@ -1009,7 +1009,7 @@ struct ext4_super_block {
- __le16 s_want_extra_isize; /* New inodes should reserve # bytes */
- __le32 s_flags; /* Miscellaneous flags */
- __le16 s_raid_stride; /* RAID stride */
-- __le16 s_mmp_interval; /* # seconds to wait in MMP checking */
-+ __le16 s_mmp_update_interval; /* # seconds to wait in MMP checking */
- __le64 s_mmp_block; /* Block for multi-mount protection */
- __le32 s_raid_stripe_width; /* blocks on all data disks (N*stride)*/
- __u8 s_log_groups_per_flex; /* FLEX_BG group size */
-@@ -1177,6 +1177,9 @@ struct ext4_sb_info {
- /* workqueue for dio unwritten */
- struct workqueue_struct *dio_unwritten_wq;
-
-+ /* Kernel thread for multiple mount protection */
-+ struct task_struct *s_mmp_tsk;
-+
- /* Lazy inode table initialization info */
- struct ext4_li_request *s_li_request;
- /* Wait multiplier for lazy initialization thread */
-@@ -1322,7 +1325,8 @@ EXT4_INODE_BIT_FNS(state, state_flags)
- EXT4_FEATURE_INCOMPAT_META_BG| \
- EXT4_FEATURE_INCOMPAT_EXTENTS| \
- EXT4_FEATURE_INCOMPAT_64BIT| \
-- EXT4_FEATURE_INCOMPAT_FLEX_BG)
-+ EXT4_FEATURE_INCOMPAT_FLEX_BG| \
-+ EXT4_FEATURE_INCOMPAT_MMP)
- #define EXT4_FEATURE_RO_COMPAT_SUPP (EXT4_FEATURE_RO_COMPAT_SPARSE_SUPER| \
- EXT4_FEATURE_RO_COMPAT_LARGE_FILE| \
- EXT4_FEATURE_RO_COMPAT_GDT_CSUM| \
-@@ -1576,6 +1580,67 @@ struct ext4_features {
- };
-
- /*
-+ * This structure will be used for multiple mount protection. It will be
-+ * written into the block number saved in the s_mmp_block field in the
-+ * superblock. Programs that check MMP should assume that if
-+ * SEQ_FSCK (or any unknown code above SEQ_MAX) is present then it is NOT safe
-+ * to use the filesystem, regardless of how old the timestamp is.
-+ */
-+#define EXT4_MMP_MAGIC 0x004D4D50U /* ASCII for MMP */
-+#define EXT4_MMP_SEQ_CLEAN 0xFF4D4D50U /* mmp_seq value for clean unmount */
-+#define EXT4_MMP_SEQ_FSCK 0xE24D4D50U /* mmp_seq value when being fscked */
-+#define EXT4_MMP_SEQ_MAX 0xE24D4D4FU /* maximum valid mmp_seq value */
-+
-+struct mmp_struct {
-+ __le32 mmp_magic; /* Magic number for MMP */
-+ __le32 mmp_seq; /* Sequence no. updated periodically */
-+
-+ /*
-+ * mmp_time, mmp_nodename & mmp_bdevname are only used for information
-+ * purposes and do not affect the correctness of the algorithm
-+ */
-+ __le64 mmp_time; /* Time last updated */
-+ char mmp_nodename[64]; /* Node which last updated MMP block */
-+ char mmp_bdevname[32]; /* Bdev which last updated MMP block */
-+
-+ /*
-+ * mmp_check_interval is used to verify if the MMP block has been
-+ * updated on the block device. The value is updated based on the
-+ * maximum time to write the MMP block during an update cycle.
-+ */
-+ __le16 mmp_check_interval;
-+
-+ __le16 mmp_pad1;
-+ __le32 mmp_pad2[227];
-+};
-+
-+/* arguments passed to the mmp thread */
-+struct mmpd_data {
-+ struct buffer_head *bh; /* bh from initial read_mmp_block() */
-+ struct super_block *sb; /* super block of the fs */
-+};
-+
-+/*
-+ * Check interval multiplier
-+ * The MMP block is written every update interval and initially checked every
-+ * update interval x the multiplier (the value is then adapted based on the
-+ * write latency). The reason is that writes can be delayed under load and we
-+ * don't want readers to incorrectly assume that the filesystem is no longer
-+ * in use.
-+ */
-+#define EXT4_MMP_CHECK_MULT 2UL
-+
-+/*
-+ * Minimum interval for MMP checking in seconds.
-+ */
-+#define EXT4_MMP_MIN_CHECK_INTERVAL 5UL
-+
-+/*
-+ * Maximum interval for MMP checking in seconds.
-+ */
-+#define EXT4_MMP_MAX_CHECK_INTERVAL 300UL
-+
-+/*
- * Function prototypes
- */
-
-@@ -1757,6 +1822,10 @@ extern void __ext4_warning(struct super_
- #define ext4_warning(sb, message...) __ext4_warning(sb, __func__, ## message)
- extern void ext4_msg(struct super_block *, const char *, const char *, ...)
- __attribute__ ((format (printf, 3, 4)));
-+extern void __dump_mmp_msg(struct super_block *, struct mmp_struct *mmp,
-+ const char *, const char *);
-+#define dump_mmp_msg(sb, mmp, msg) __dump_mmp_msg(sb, mmp, __func__, \
-+ msg)
- extern void ext4_grp_locked_error(struct super_block *, ext4_group_t,
- const char *, const char *, ...)
- __attribute__ ((format (printf, 4, 5)));
-@@ -2050,6 +2119,8 @@ extern int ext4_move_extents(struct file
- __u64 start_orig, __u64 start_donor,
- __u64 len, __u64 *moved_len);
-
-+/* mmp.c */
-+extern int ext4_multi_mount_protect(struct super_block *, ext4_fsblk_t);
-
- /*
- * Add new method to test whether block and inode bitmaps are properly
-Index: linux-stage/fs/ext4/mmp.c
-===================================================================
---- /dev/null
-+++ linux-stage/fs/ext4/mmp.c
-@@ -0,0 +1,357 @@
-+#include <linux/fs.h>
-+#include <linux/random.h>
-+#include <linux/buffer_head.h>
-+#include <linux/utsname.h>
-+#include <linux/kthread.h>
-+
-+#include "ext4.h"
-+
-+/*
-+ * Write the MMP block using WRITE_SYNC to try to get the block on-disk
-+ * faster.
-+ */
-+static int write_mmp_block(struct buffer_head *bh)
-+{
-+ mark_buffer_dirty(bh);
-+ lock_buffer(bh);
-+ bh->b_end_io = end_buffer_write_sync;
-+ get_bh(bh);
-+ submit_bh(WRITE_SYNC, bh);
-+ wait_on_buffer(bh);
-+ if (unlikely(!buffer_uptodate(bh)))
-+ return 1;
-+
-+ return 0;
-+}
-+
-+/*
-+ * Read the MMP block. It _must_ be read from disk and hence we clear the
-+ * uptodate flag on the buffer.
-+ */
-+static int read_mmp_block(struct super_block *sb, struct buffer_head **bh,
-+ ext4_fsblk_t mmp_block)
-+{
-+ struct mmp_struct *mmp;
-+
-+ if (*bh)
-+ clear_buffer_uptodate(*bh);
-+
-+ /* This would be sb_bread(sb, mmp_block), except we need to be sure
-+ * that the MD RAID device cache has been bypassed, and that the read
-+ * is not blocked in the elevator. */
-+ if (!*bh)
-+ *bh = sb_getblk(sb, mmp_block);
-+ if (*bh) {
-+ get_bh(*bh);
-+ lock_buffer(*bh);
-+ (*bh)->b_end_io = end_buffer_read_sync;
-+ submit_bh(READ_SYNC, *bh);
-+ wait_on_buffer(*bh);
-+ if (!buffer_uptodate(*bh)) {
-+ brelse(*bh);
-+ *bh = NULL;
-+ }
-+ }
-+ if (!*bh) {
-+ ext4_warning(sb, "Error while reading MMP block %llu",
-+ mmp_block);
-+ return -EIO;
-+ }
-+
-+ mmp = (struct mmp_struct *)((*bh)->b_data);
-+ if (le32_to_cpu(mmp->mmp_magic) != EXT4_MMP_MAGIC) {
-+ brelse(*bh);
-+ *bh = NULL;
-+ return -EINVAL;
-+ }
-+
-+ return 0;
-+}
-+
-+/*
-+ * Dump as much information as possible to help the admin.
-+ */
-+void __dump_mmp_msg(struct super_block *sb, struct mmp_struct *mmp,
-+ const char *function, const char *msg)
-+{
-+ __ext4_warning(sb, function, msg);
-+ __ext4_warning(sb, function,
-+ "MMP failure info: last update time: %llu, last update "
-+ "node: %s, last update device: %s\n",
-+ (long long unsigned int) le64_to_cpu(mmp->mmp_time),
-+ mmp->mmp_nodename, mmp->mmp_bdevname);
-+}
-+
-+/*
-+ * kmmpd will update the MMP sequence every s_mmp_update_interval seconds
-+ */
-+static int kmmpd(void *data)
-+{
-+ struct super_block *sb = ((struct mmpd_data *) data)->sb;
-+ struct buffer_head *bh = ((struct mmpd_data *) data)->bh;
-+ struct ext4_super_block *es = EXT4_SB(sb)->s_es;
-+ struct mmp_struct *mmp;
-+ ext4_fsblk_t mmp_block;
-+ u32 seq = 0;
-+ unsigned long failed_writes = 0;
-+ int mmp_update_interval = le16_to_cpu(es->s_mmp_update_interval);
-+ unsigned mmp_check_interval;
-+ unsigned long last_update_time;
-+ unsigned long diff;
-+ int retval;
-+
-+ mmp_block = le64_to_cpu(es->s_mmp_block);
-+ mmp = (struct mmp_struct *)(bh->b_data);
-+ mmp->mmp_time = cpu_to_le64(get_seconds());
-+ /*
-+ * Start with the higher mmp_check_interval and reduce it if
-+ * the MMP block is being updated on time.
-+ */
-+ mmp_check_interval = max(EXT4_MMP_CHECK_MULT * mmp_update_interval,
-+ EXT4_MMP_MIN_CHECK_INTERVAL);
-+ mmp->mmp_check_interval = cpu_to_le16(mmp_check_interval);
-+ bdevname(bh->b_bdev, mmp->mmp_bdevname);
-+
-+ memcpy(mmp->mmp_nodename, init_utsname()->nodename,
-+ sizeof(mmp->mmp_nodename));
-+
-+ while (!kthread_should_stop()) {
-+ if (++seq > EXT4_MMP_SEQ_MAX)
-+ seq = 1;
-+
-+ mmp->mmp_seq = cpu_to_le32(seq);
-+ mmp->mmp_time = cpu_to_le64(get_seconds());
-+ last_update_time = jiffies;
-+
-+ retval = write_mmp_block(bh);
-+ /*
-+ * Don't spew too many error messages. Print one every
-+ * (s_mmp_update_interval * 60) seconds.
-+ */
-+ if (retval) {
-+ if ((failed_writes % 60) == 0)
-+ ext4_error(sb, "Error writing to MMP block");
-+ failed_writes++;
-+ }
-+
-+ if (!(le32_to_cpu(es->s_feature_incompat) &
-+ EXT4_FEATURE_INCOMPAT_MMP)) {
-+ ext4_warning(sb, "kmmpd being stopped since MMP feature"
-+ " has been disabled.");
-+ EXT4_SB(sb)->s_mmp_tsk = NULL;
-+ goto failed;
-+ }
-+
-+ if (sb->s_flags & MS_RDONLY) {
-+ ext4_warning(sb, "kmmpd being stopped since filesystem "
-+ "has been remounted as readonly.");
-+ EXT4_SB(sb)->s_mmp_tsk = NULL;
-+ goto failed;
-+ }
-+
-+ diff = jiffies - last_update_time;
-+ if (diff < mmp_update_interval * msecs_to_jiffies(MSEC_PER_SEC))
-+ schedule_timeout_interruptible(mmp_update_interval *
-+ msecs_to_jiffies(MSEC_PER_SEC) - diff);
-+
-+ /*
-+ * We need to make sure that more than mmp_check_interval
-+ * seconds have not passed since writing. If that has happened
-+ * we need to check if the MMP block is as we left it.
-+ */
-+ diff = jiffies - last_update_time;
-+ if (diff > mmp_check_interval * msecs_to_jiffies(MSEC_PER_SEC)) {
-+ struct buffer_head *bh_check = NULL;
-+ struct mmp_struct *mmp_check;
-+
-+ retval = read_mmp_block(sb, &bh_check, mmp_block);
-+ if (retval) {
-+ ext4_error(sb, "error reading MMP data: %d",
-+ retval);
-+ EXT4_SB(sb)->s_mmp_tsk = NULL;
-+ goto failed;
-+ }
-+
-+ mmp_check = (struct mmp_struct *)(bh_check->b_data);
-+ if (mmp->mmp_seq != mmp_check->mmp_seq ||
-+ memcmp(mmp->mmp_nodename, mmp_check->mmp_nodename,
-+ sizeof(mmp->mmp_nodename))) {
-+ dump_mmp_msg(sb, mmp_check,
-+ "Error while updating MMP info. "
-+ "The filesystem seems to have been"
-+ " multiply mounted.");
-+ ext4_error(sb, "abort");
-+ put_bh(bh_check);
-+ goto failed;
-+ }
-+ put_bh(bh_check);
-+ }
-+
-+ /*
-+ * Adjust the mmp_check_interval depending on how much time
-+ * it took for the MMP block to be written.
-+ */
-+ mmp_check_interval = max(min(EXT4_MMP_CHECK_MULT * diff /
-+ msecs_to_jiffies(MSEC_PER_SEC),
-+ EXT4_MMP_MAX_CHECK_INTERVAL),
-+ EXT4_MMP_MIN_CHECK_INTERVAL);
-+ mmp->mmp_check_interval = cpu_to_le16(mmp_check_interval);
-+ }
-+
-+ /*
-+ * Unmount seems to be clean.
-+ */
-+ mmp->mmp_seq = cpu_to_le32(EXT4_MMP_SEQ_CLEAN);
-+ mmp->mmp_time = cpu_to_le64(get_seconds());
-+
-+ retval = write_mmp_block(bh);
-+
-+failed:
-+ kfree(data);
-+ brelse(bh);
-+ return retval;
-+}
-+
-+/*
-+ * Get a random new sequence number but make sure it is not greater than
-+ * EXT4_MMP_SEQ_MAX.
-+ */
-+static unsigned int mmp_new_seq(void)
-+{
-+ u32 new_seq;
-+
-+ do {
-+ get_random_bytes(&new_seq, sizeof(u32));
-+ } while (new_seq > EXT4_MMP_SEQ_MAX);
-+
-+ return new_seq;
-+}
-+
-+/*
-+ * Protect the filesystem from being mounted more than once.
-+ */
-+int ext4_multi_mount_protect(struct super_block *sb,
-+ ext4_fsblk_t mmp_block)
-+{
-+ struct ext4_super_block *es = EXT4_SB(sb)->s_es;
-+ struct buffer_head *bh = NULL;
-+ struct mmp_struct *mmp = NULL;
-+ struct mmpd_data *mmpd_data;
-+ u32 seq;
-+ unsigned int mmp_check_interval = le16_to_cpu(es->s_mmp_update_interval);
-+ unsigned int wait_time = 0;
-+ int retval;
-+
-+ if (mmp_block < le32_to_cpu(es->s_first_data_block) ||
-+ mmp_block >= ext4_blocks_count(es)) {
-+ ext4_warning(sb, "Invalid MMP block in superblock");
-+ goto failed;
-+ }
-+
-+ retval = read_mmp_block(sb, &bh, mmp_block);
-+ if (retval)
-+ goto failed;
-+
-+ mmp = (struct mmp_struct *)(bh->b_data);
-+
-+ if (mmp_check_interval < EXT4_MMP_MIN_CHECK_INTERVAL)
-+ mmp_check_interval = EXT4_MMP_MIN_CHECK_INTERVAL;
-+
-+ /*
-+ * If check_interval in MMP block is larger, use that instead of
-+ * update_interval from the superblock.
-+ */
-+ if (mmp->mmp_check_interval > mmp_check_interval)
-+ mmp_check_interval = mmp->mmp_check_interval;
-+
-+ seq = le32_to_cpu(mmp->mmp_seq);
-+ if (seq == EXT4_MMP_SEQ_CLEAN)
-+ goto skip;
-+
-+ if (seq == EXT4_MMP_SEQ_FSCK) {
-+ dump_mmp_msg(sb, mmp, "fsck is running on the filesystem");
-+ goto failed;
-+ }
-+
-+ wait_time = min(mmp_check_interval * 2 + 1,
-+ mmp_check_interval + 60);
-+
-+ /* Print MMP interval if more than 20 secs. */
-+ if (wait_time > EXT4_MMP_MIN_CHECK_INTERVAL * 4)
-+ ext4_warning(sb, "MMP interval %u higher than expected, please"
-+ " wait.\n", wait_time * 2);
-+
-+ if (schedule_timeout_interruptible(msecs_to_jiffies(MSEC_PER_SEC) *
-+ wait_time) != 0) {
-+ ext4_warning(sb, "MMP startup interrupted, failing mount\n");
-+ goto failed;
-+ }
-+
-+ retval = read_mmp_block(sb, &bh, mmp_block);
-+ if (retval)
-+ goto failed;
-+ mmp = (struct mmp_struct *)(bh->b_data);
-+ if (seq != le32_to_cpu(mmp->mmp_seq)) {
-+ dump_mmp_msg(sb, mmp,
-+ "Device is already active on another node.");
-+ goto failed;
-+ }
-+
-+skip:
-+ /*
-+ * write a new random sequence number.
-+ */
-+ mmp->mmp_seq = seq = cpu_to_le32(mmp_new_seq());
-+
-+ retval = write_mmp_block(bh);
-+ if (retval)
-+ goto failed;
-+
-+ /*
-+ * wait for MMP interval and check mmp_seq.
-+ */
-+ if (schedule_timeout_interruptible(msecs_to_jiffies(MSEC_PER_SEC) *
-+ wait_time) != 0) {
-+ ext4_warning(sb, "MMP startup interrupted, failing mount\n");
-+ goto failed;
-+ }
-+
-+ retval = read_mmp_block(sb, &bh, mmp_block);
-+ if (retval)
-+ goto failed;
-+ mmp = (struct mmp_struct *)(bh->b_data);
-+ if (seq != le32_to_cpu(mmp->mmp_seq)) {
-+ dump_mmp_msg(sb, mmp,
-+ "Device is already active on another node.");
-+ goto failed;
-+ }
-+
-+ mmpd_data = kmalloc(sizeof(struct mmpd_data), GFP_KERNEL);
-+ if (!mmpd_data) {
-+ ext4_warning(sb, "not enough memory for mmpd_data");
-+ goto failed;
-+ }
-+ mmpd_data->sb = sb;
-+ mmpd_data->bh = bh;
-+
-+ /*
-+ * Start a kernel thread to update the MMP block periodically.
-+ */
-+ EXT4_SB(sb)->s_mmp_tsk = kthread_run(kmmpd, mmpd_data, "kmmpd-%s",
-+ bdevname(bh->b_bdev,
-+ mmp->mmp_bdevname));
-+ if (IS_ERR(EXT4_SB(sb)->s_mmp_tsk)) {
-+ EXT4_SB(sb)->s_mmp_tsk = NULL;
-+ kfree(mmpd_data);
-+ ext4_warning(sb, "Unable to create kmmpd thread for %s.",
-+ sb->s_id);
-+ goto failed;
-+ }
-+
-+ return 0;
-+
-+failed:
-+ brelse(bh);
-+ return 1;
-+}
-+
-Index: linux-stage/fs/ext4/super.c
-===================================================================
---- linux-stage.orig/fs/ext4/super.c
-+++ linux-stage/fs/ext4/super.c
-@@ -40,6 +40,8 @@
- #include <linux/log2.h>
- #include <linux/crc16.h>
- #include <asm/uaccess.h>
-+#include <linux/kthread.h>
-+#include <linux/utsname.h>
-
- #include <linux/kthread.h>
- #include <linux/freezer.h>
-@@ -716,6 +718,8 @@ static void ext4_put_super(struct super_
- invalidate_bdev(sbi->journal_bdev);
- ext4_blkdev_remove(sbi);
- }
-+ if (sbi->s_mmp_tsk)
-+ kthread_stop(sbi->s_mmp_tsk);
- sb->s_fs_info = NULL;
- /*
- * Now that we are completely done shutting down the
-@@ -3241,6 +3245,10 @@ static int ext4_fill_super(struct super_
- needs_recovery = (es->s_last_orphan != 0 ||
- EXT4_HAS_INCOMPAT_FEATURE(sb,
- EXT4_FEATURE_INCOMPAT_RECOVER));
-+ if (EXT4_HAS_INCOMPAT_FEATURE(sb, EXT4_FEATURE_INCOMPAT_MMP) &&
-+ !(sb->s_flags & MS_RDONLY))
-+ if (ext4_multi_mount_protect(sb, le64_to_cpu(es->s_mmp_block)))
-+ goto failed_mount3;
-
- /*
- * The first inode we look at is the journal inode. Don't try
-@@ -3491,6 +3499,8 @@ failed_mount3:
- percpu_counter_destroy(&sbi->s_freeinodes_counter);
- percpu_counter_destroy(&sbi->s_dirs_counter);
- percpu_counter_destroy(&sbi->s_dirtyblocks_counter);
-+ if (sbi->s_mmp_tsk)
-+ kthread_stop(sbi->s_mmp_tsk);
- failed_mount2:
- for (i = 0; i < db_count; i++)
- brelse(sbi->s_group_desc[i]);
-@@ -4001,7 +4011,7 @@ static int ext4_remount(struct super_blo
- int enable_quota = 0;
- ext4_group_t g;
- unsigned int journal_ioprio = DEFAULT_JOURNAL_IOPRIO;
-- int err;
-+ int err = 0;
- #ifdef CONFIG_QUOTA
- int i;
- #endif
-@@ -4129,6 +4139,13 @@ static int ext4_remount(struct super_blo
- goto restore_opts;
- if (!ext4_setup_super(sb, es, 0))
- sb->s_flags &= ~MS_RDONLY;
-+ if (EXT4_HAS_INCOMPAT_FEATURE(sb,
-+ EXT4_FEATURE_INCOMPAT_MMP))
-+ if (ext4_multi_mount_protect(sb,
-+ le64_to_cpu(es->s_mmp_block))) {
-+ err = -EROFS;
-+ goto restore_opts;
-+ }
- enable_quota = 1;
- }
- }
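
Taken as a whole, the multi-mount protection added above is a sequence
handshake: remember the on-disk MMP sequence, wait longer than the other
node's update interval, and re-read; if the value moved, another node's
kmmpd is alive and the mount must fail. A minimal stand-alone C sketch of
that core check follows (read_seq(), mmp_mount_check() and wait_secs are
illustrative names, not part of the patch; the real ext4_multi_mount_protect()
additionally short-circuits on the clean/fsck sentinel sequences and repeats
the check after writing a fresh random sequence).

    #include <stdint.h>
    #include <unistd.h>

    /*
     * Model of the mount-time MMP check. read_seq() is a hypothetical
     * callback standing in for read_mmp_block(); wait_secs should exceed
     * the updater's check interval. Returns 0 when mounting looks safe,
     * -1 when the sequence moved (the device is active on another node).
     */
    static int mmp_mount_check(uint32_t (*read_seq)(void), unsigned int wait_secs)
    {
            uint32_t seq = read_seq();      /* remember what is on disk now */

            sleep(wait_secs);               /* give a live kmmpd time to bump it */
            if (read_seq() != seq)
                    return -1;              /* device is in use elsewhere */
            return 0;                       /* quiet: safe to claim the device */
    }
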
+++ /dev/null
-Index: linux-stage/fs/ext4/ext4.h
-===================================================================
---- linux-stage.orig/fs/ext4/ext4.h
-+++ linux-stage/fs/ext4/ext4.h
-@@ -1136,11 +1136,14 @@ struct ext4_sb_info {
-
- /* tunables */
- unsigned long s_stripe;
-- unsigned int s_mb_stream_request;
-+ unsigned long s_mb_small_req;
-+ unsigned long s_mb_large_req;
- unsigned int s_mb_max_to_scan;
- unsigned int s_mb_min_to_scan;
- unsigned int s_mb_stats;
- unsigned int s_mb_order2_reqs;
-+ unsigned long *s_mb_prealloc_table;
-+ unsigned long s_mb_prealloc_table_size;
- unsigned int s_mb_group_prealloc;
- unsigned int s_max_writeback_mb_bump;
- /* where last allocation was done - for stream allocation */
-Index: linux-stage/fs/ext4/mballoc.c
-===================================================================
---- linux-stage.orig/fs/ext4/mballoc.c
-+++ linux-stage/fs/ext4/mballoc.c
-@@ -1838,6 +1838,26 @@ void ext4_mb_complex_scan_group(struct e
- ext4_mb_check_limits(ac, e4b, 1);
- }
-
-+static int ext4_mb_prealloc_table_add(struct ext4_sb_info *sbi, int value)
-+{
-+ int i;
-+
-+ if (value > (sbi->s_blocks_per_group - 1 - 1 - sbi->s_itb_per_group))
-+ return -1;
-+
-+ for (i = 0; i < sbi->s_mb_prealloc_table_size; i++) {
-+ if (sbi->s_mb_prealloc_table[i] == 0) {
-+ sbi->s_mb_prealloc_table[i] = value;
-+ return 0;
-+ }
-+
-+ /* they should add values in order */
-+ if (value <= sbi->s_mb_prealloc_table[i])
-+ return -1;
-+ }
-+ return -1;
-+}
-+
- /*
- * This is a special case for storages like raid5
- * we try to find stripe-aligned chunks for stripe-size requests
-@@ -2155,6 +2175,82 @@ static const struct seq_operations ext4_
- .show = ext4_mb_seq_groups_show,
- };
-
-+#define EXT4_MB_PREALLOC_TABLE "prealloc_table"
-+
-+static int ext4_mb_prealloc_table_proc_read(char *page, char **start, off_t off,
-+ int count, int *eof, void *data)
-+{
-+ struct ext4_sb_info *sbi = data;
-+ int len = 0;
-+ int i;
-+
-+ *eof = 1;
-+ if (off != 0)
-+ return 0;
-+
-+ for (i = 0; i < sbi->s_mb_prealloc_table_size; i++)
-+ len += sprintf(page + len, "%ld ",
-+ sbi->s_mb_prealloc_table[i]);
-+ len += sprintf(page + len, "\n");
-+
-+ *start = page;
-+ return len;
-+}
-+
-+static int ext4_mb_prealloc_table_proc_write(struct file *file,
-+ const char __user *buf,
-+ unsigned long cnt, void *data)
-+{
-+ struct ext4_sb_info *sbi = data;
-+ unsigned long value;
-+ unsigned long prev = 0;
-+ char str[128];
-+ char *cur;
-+ char *end;
-+ unsigned long *new_table;
-+ int num = 0;
-+ int i = 0;
-+
-+ if (cnt >= sizeof(str))
-+ return -EINVAL;
-+ if (copy_from_user(str, buf, cnt))
-+ return -EFAULT;
-+
-+ num = 0;
-+ cur = str;
-+ end = str + cnt;
-+ while (cur < end) {
-+ while ((cur < end) && (*cur == ' ')) cur++;
-+ value = simple_strtol(cur, &cur, 0);
-+ if (value == 0)
-+ break;
-+ if (value <= prev)
-+ return -EINVAL;
-+ prev = value;
-+ num++;
-+ }
-+
-+ new_table = kmalloc(num * sizeof(*new_table), GFP_KERNEL);
-+ if (new_table == NULL)
-+ return -ENOMEM;
-+ kfree(sbi->s_mb_prealloc_table);
-+ memset(new_table, 0, num * sizeof(*new_table));
-+ sbi->s_mb_prealloc_table = new_table;
-+ sbi->s_mb_prealloc_table_size = num;
-+ cur = str;
-+ end = str + cnt;
-+ while (cur < end && i < num) {
-+ while ((cur < end) && (*cur == ' ')) cur++;
-+ value = simple_strtol(cur, &cur, 0);
-+ if (ext4_mb_prealloc_table_add(sbi, value) == 0)
-+ ++i;
-+ }
-+ if (i != num)
-+ sbi->s_mb_prealloc_table_size = i;
-+
-+ return cnt;
-+}
-+
- static int ext4_mb_seq_groups_open(struct inode *inode, struct file *file)
- {
- struct super_block *sb = PDE(inode)->data;
-@@ -2346,7 +2442,7 @@ err_freesgi:
- int ext4_mb_init(struct super_block *sb, int needs_recovery)
- {
- struct ext4_sb_info *sbi = EXT4_SB(sb);
-- unsigned i, j;
-+ unsigned i, j, k, l;
- unsigned offset;
- unsigned max;
- int ret;
-@@ -2380,26 +2476,61 @@ int ext4_mb_init(struct super_block *sb,
- i++;
- } while (i <= sb->s_blocksize_bits + 1);
-
-- /* init file for buddy data */
-- ret = ext4_mb_init_backend(sb);
-- if (ret != 0) {
-- kfree(sbi->s_mb_offsets);
-- kfree(sbi->s_mb_maxs);
-- return ret;
-- }
--
- spin_lock_init(&sbi->s_md_lock);
- spin_lock_init(&sbi->s_bal_lock);
-
- sbi->s_mb_max_to_scan = MB_DEFAULT_MAX_TO_SCAN;
- sbi->s_mb_min_to_scan = MB_DEFAULT_MIN_TO_SCAN;
- sbi->s_mb_stats = MB_DEFAULT_STATS;
-- sbi->s_mb_stream_request = MB_DEFAULT_STREAM_THRESHOLD;
- sbi->s_mb_order2_reqs = MB_DEFAULT_ORDER2_REQS;
-- sbi->s_mb_group_prealloc = MB_DEFAULT_GROUP_PREALLOC;
-+
-+ if (sbi->s_stripe == 0) {
-+ sbi->s_mb_prealloc_table_size = 10;
-+ i = sbi->s_mb_prealloc_table_size * sizeof(unsigned long);
-+ sbi->s_mb_prealloc_table = kmalloc(i, GFP_NOFS);
-+ if (sbi->s_mb_prealloc_table == NULL) {
-+ kfree(sbi->s_mb_offsets);
-+ kfree(sbi->s_mb_maxs);
-+ return -ENOMEM;
-+ }
-+ memset(sbi->s_mb_prealloc_table, 0, i);
-+
-+ for (k = 0, l = 4; k <= 9; ++k, l *= 2) {
-+ if (ext4_mb_prealloc_table_add(sbi, l) < 0) {
-+ sbi->s_mb_prealloc_table_size = k;
-+ break;
-+ }
-+ }
-+
-+ sbi->s_mb_small_req = 256;
-+ sbi->s_mb_large_req = 1024;
-+ sbi->s_mb_group_prealloc = 512;
-+ } else {
-+ sbi->s_mb_prealloc_table_size = 3;
-+ i = sbi->s_mb_prealloc_table_size * sizeof(unsigned long);
-+ sbi->s_mb_prealloc_table = kmalloc(i, GFP_NOFS);
-+ if (sbi->s_mb_prealloc_table == NULL) {
-+ kfree(sbi->s_mb_offsets);
-+ kfree(sbi->s_mb_maxs);
-+ return -ENOMEM;
-+ }
-+ memset(sbi->s_mb_prealloc_table, 0, i);
-+
-+ for (k = 0, l = sbi->s_stripe; k <= 2; ++k, l *= 2) {
-+ if (ext4_mb_prealloc_table_add(sbi, l) < 0) {
-+ sbi->s_mb_prealloc_table_size = k;
-+ break;
-+ }
-+ }
-+
-+ sbi->s_mb_small_req = sbi->s_stripe;
-+ sbi->s_mb_large_req = sbi->s_stripe * 8;
-+ sbi->s_mb_group_prealloc = sbi->s_stripe * 4;
-+ }
-
- sbi->s_locality_groups = alloc_percpu(struct ext4_locality_group);
- if (sbi->s_locality_groups == NULL) {
-+ kfree(sbi->s_mb_prealloc_table);
- kfree(sbi->s_mb_offsets);
- kfree(sbi->s_mb_maxs);
- return -ENOMEM;
-@@ -2413,9 +2544,27 @@ int ext4_mb_init(struct super_block *sb,
- spin_lock_init(&lg->lg_prealloc_lock);
- }
-
-- if (sbi->s_proc)
-+ /* init file for buddy data */
-+ ret = ext4_mb_init_backend(sb);
-+ if (ret != 0) {
-+ kfree(sbi->s_mb_prealloc_table);
-+ kfree(sbi->s_mb_offsets);
-+ kfree(sbi->s_mb_maxs);
-+ return ret;
-+ }
-+
-+ if (sbi->s_proc) {
-+ struct proc_dir_entry *p;
- proc_create_data("mb_groups", S_IRUGO, sbi->s_proc,
- &ext4_mb_seq_groups_fops, sb);
-+ p = create_proc_entry(EXT4_MB_PREALLOC_TABLE, S_IFREG |
-+ S_IRUGO | S_IWUSR, sbi->s_proc);
-+ if (p) {
-+ p->data = sbi;
-+ p->read_proc = ext4_mb_prealloc_table_proc_read;
-+ p->write_proc = ext4_mb_prealloc_table_proc_write;
-+ }
-+ }
-
- if (sbi->s_journal)
- sbi->s_journal->j_commit_callback = release_blocks_on_commit;
-@@ -2448,8 +2597,10 @@ int ext4_mb_release(struct super_block *
- struct ext4_group_info *grinfo;
- struct ext4_sb_info *sbi = EXT4_SB(sb);
-
-- if (sbi->s_proc)
-+ if (sbi->s_proc) {
- remove_proc_entry("mb_groups", sbi->s_proc);
-+ remove_proc_entry(EXT4_MB_PREALLOC_TABLE, sbi->s_proc);
-+ }
-
- if (sbi->s_group_info) {
- for (i = 0; i < ngroups; i++) {
-@@ -2469,6 +2620,7 @@ int ext4_mb_release(struct super_block *
- kfree(sbi->s_group_info[i]);
- ext4_kvfree(sbi->s_group_info);
- }
-+ kfree(sbi->s_mb_prealloc_table);
- kfree(sbi->s_mb_offsets);
- kfree(sbi->s_mb_maxs);
- if (sbi->s_buddy_cache)
-@@ -2798,11 +2950,12 @@ static noinline_for_stack void
- ext4_mb_normalize_request(struct ext4_allocation_context *ac,
- struct ext4_allocation_request *ar)
- {
-- int bsbits, max;
-+ int bsbits, i, wind;
- ext4_lblk_t end;
-- loff_t size, orig_size, start_off;
-+ loff_t size, orig_size;
- ext4_lblk_t start, orig_start;
- struct ext4_inode_info *ei = EXT4_I(ac->ac_inode);
-+ struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb);
- struct ext4_prealloc_space *pa;
-
- /* do normalize only data requests, metadata requests
-@@ -2832,49 +2985,35 @@ ext4_mb_normalize_request(struct ext4_al
- size = size << bsbits;
- if (size < i_size_read(ac->ac_inode))
- size = i_size_read(ac->ac_inode);
-+ size = (size + ac->ac_sb->s_blocksize - 1) >> bsbits;
-
-- /* max size of free chunks */
-- max = 2 << bsbits;
-+ start = wind = 0;
-
--#define NRL_CHECK_SIZE(req, size, max, chunk_size) \
-- (req <= (size) || max <= (chunk_size))
-+ /* let's choose preallocation window depending on file size */
-+ for (i = 0; i < sbi->s_mb_prealloc_table_size; i++) {
-+ if (size <= sbi->s_mb_prealloc_table[i]) {
-+ wind = sbi->s_mb_prealloc_table[i];
-+ break;
-+ }
-+ }
-+ size = wind;
-
-- /* first, try to predict filesize */
-- /* XXX: should this table be tunable? */
-- start_off = 0;
-- if (size <= 16 * 1024) {
-- size = 16 * 1024;
-- } else if (size <= 32 * 1024) {
-- size = 32 * 1024;
-- } else if (size <= 64 * 1024) {
-- size = 64 * 1024;
-- } else if (size <= 128 * 1024) {
-- size = 128 * 1024;
-- } else if (size <= 256 * 1024) {
-- size = 256 * 1024;
-- } else if (size <= 512 * 1024) {
-- size = 512 * 1024;
-- } else if (size <= 1024 * 1024) {
-- size = 1024 * 1024;
-- } else if (NRL_CHECK_SIZE(size, 4 * 1024 * 1024, max, 2 * 1024)) {
-- start_off = ((loff_t)ac->ac_o_ex.fe_logical >>
-- (21 - bsbits)) << 21;
-- size = 2 * 1024 * 1024;
-- } else if (NRL_CHECK_SIZE(size, 8 * 1024 * 1024, max, 4 * 1024)) {
-- start_off = ((loff_t)ac->ac_o_ex.fe_logical >>
-- (22 - bsbits)) << 22;
-- size = 4 * 1024 * 1024;
-- } else if (NRL_CHECK_SIZE(ac->ac_o_ex.fe_len,
-- (8<<20)>>bsbits, max, 8 * 1024)) {
-- start_off = ((loff_t)ac->ac_o_ex.fe_logical >>
-- (23 - bsbits)) << 23;
-- size = 8 * 1024 * 1024;
-- } else {
-- start_off = (loff_t)ac->ac_o_ex.fe_logical << bsbits;
-- size = ac->ac_o_ex.fe_len << bsbits;
-+ if (wind == 0) {
-+ __u64 tstart, tend;
-+ /* file is quite large, we now preallocate with
-+ * the biggest configured window with regard to
-+ * logical offset */
-+ wind = sbi->s_mb_prealloc_table[i - 1];
-+ tstart = ac->ac_o_ex.fe_logical;
-+ do_div(tstart, wind);
-+ start = tstart * wind;
-+ tend = ac->ac_o_ex.fe_logical + ac->ac_o_ex.fe_len - 1;
-+ do_div(tend, wind);
-+ tend = tend * wind + wind;
-+ size = tend - start;
- }
-- orig_size = size = size >> bsbits;
-- orig_start = start = start_off >> bsbits;
-+ orig_size = size;
-+ orig_start = start;
-
- /* don't cover already allocated blocks in selected range */
- if (ar->pleft && start <= ar->lleft) {
-@@ -2946,7 +3085,6 @@ ext4_mb_normalize_request(struct ext4_al
- }
- BUG_ON(start + size <= ac->ac_o_ex.fe_logical &&
- start > ac->ac_o_ex.fe_logical);
-- BUG_ON(size <= 0 || size > EXT4_BLOCKS_PER_GROUP(ac->ac_sb));
-
- /* now prepare goal request */
-
-@@ -3930,11 +4068,19 @@ static void ext4_mb_group_or_file(struct
-
- /* don't use group allocation for large files */
- size = max(size, isize);
-- if (size > sbi->s_mb_stream_request) {
-+ if ((ac->ac_o_ex.fe_len >= sbi->s_mb_small_req) ||
-+ (size >= sbi->s_mb_large_req)) {
- ac->ac_flags |= EXT4_MB_STREAM_ALLOC;
- return;
- }
-
-+ /*
-+ * request is so large that we don't care about
-+ * streaming - it outweighs any possible seek
-+ */
-+ if (ac->ac_o_ex.fe_len >= sbi->s_mb_large_req)
-+ return;
-+
- BUG_ON(ac->ac_lg != NULL);
- /*
- * locality group prealloc space are per cpu. The reason for having
-Index: linux-stage/fs/ext4/super.c
-===================================================================
---- linux-stage.orig/fs/ext4/super.c
-+++ linux-stage/fs/ext4/super.c
-@@ -2377,7 +2377,8 @@ EXT4_RW_ATTR_SBI_UI(mb_stats, s_mb_stats
- EXT4_RW_ATTR_SBI_UI(mb_max_to_scan, s_mb_max_to_scan);
- EXT4_RW_ATTR_SBI_UI(mb_min_to_scan, s_mb_min_to_scan);
- EXT4_RW_ATTR_SBI_UI(mb_order2_req, s_mb_order2_reqs);
--EXT4_RW_ATTR_SBI_UI(mb_stream_req, s_mb_stream_request);
-+EXT4_RW_ATTR_SBI_UI(mb_small_req, s_mb_small_req);
-+EXT4_RW_ATTR_SBI_UI(mb_large_req, s_mb_large_req);
- EXT4_RW_ATTR_SBI_UI(mb_group_prealloc, s_mb_group_prealloc);
- EXT4_RW_ATTR_SBI_UI(max_writeback_mb_bump, s_max_writeback_mb_bump);
-
-@@ -2391,7 +2392,8 @@ static struct attribute *ext4_attrs[] =
- ATTR_LIST(mb_max_to_scan),
- ATTR_LIST(mb_min_to_scan),
- ATTR_LIST(mb_order2_req),
-- ATTR_LIST(mb_stream_req),
-+ ATTR_LIST(mb_small_req),
-+ ATTR_LIST(mb_large_req),
- ATTR_LIST(mb_group_prealloc),
- ATTR_LIST(max_writeback_mb_bump),
- NULL,
-Index: linux-stage/fs/ext4/inode.c
-===================================================================
---- linux-stage.orig/fs/ext4/inode.c
-+++ linux-stage/fs/ext4/inode.c
-@@ -3070,6 +3070,11 @@ static int ext4_da_writepages(struct add
- if (unlikely(sbi->s_mount_flags & EXT4_MF_FS_ABORTED))
- return -EROFS;
-
-+ if (wbc->nr_to_write < sbi->s_mb_small_req) {
-+ nr_to_writebump = sbi->s_mb_small_req - wbc->nr_to_write;
-+ wbc->nr_to_write = sbi->s_mb_small_req;
-+ }
-+
- if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX)
- range_whole = 1;
-
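
The mballoc changes above replace the hard-coded size ladder in
ext4_mb_normalize_request() with a tunable table: a request (in blocks) is
rounded up to the first s_mb_prealloc_table entry that is at least as large,
and anything bigger than every entry falls back to the largest window,
aligned on the logical offset. A small stand-alone sketch of that lookup,
seeded with the default table the patch installs for non-striped filesystems,
is shown below; choose_window() is an illustrative name, not a function in
the patch.

    #include <stdio.h>

    /* Default table installed by ext4_mb_init() above when s_stripe == 0. */
    static const unsigned long prealloc_table[] =
            { 4, 8, 16, 32, 64, 128, 256, 512, 1024, 2048 };
    static const int table_size =
            sizeof(prealloc_table) / sizeof(prealloc_table[0]);

    /* Round a request of 'blocks' up to a preallocation window; 0 means no
     * entry is large enough and the caller uses the biggest window, aligned
     * to the request's logical offset. */
    static unsigned long choose_window(unsigned long blocks)
    {
            int i;

            for (i = 0; i < table_size; i++)
                    if (blocks <= prealloc_table[i])
                            return prealloc_table[i];
            return 0;
    }

    int main(void)
    {
            unsigned long sizes[] = { 1, 7, 100, 5000 };
            int i;

            for (i = 0; i < 4; i++)
                    printf("%lu blocks -> window %lu\n",
                           sizes[i], choose_window(sizes[i]));
            return 0;
    }
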
+++ /dev/null
-Index: linux-stage/fs/ext4/super.c
-===================================================================
---- linux-stage.orig/fs/ext4/super.c
-+++ linux-stage/fs/ext4/super.c
-@@ -690,7 +690,12 @@ static void ext4_put_super(struct super_
-
- for (i = 0; i < sbi->s_gdb_count; i++)
- brelse(sbi->s_group_desc[i]);
-- kfree(sbi->s_group_desc);
-+
-+ if (is_vmalloc_addr(sbi->s_group_desc))
-+ vfree(sbi->s_group_desc);
-+ else
-+ kfree(sbi->s_group_desc);
-+
- if (is_vmalloc_addr(sbi->s_flex_groups))
- vfree(sbi->s_flex_groups);
- else
-@@ -2938,12 +2943,13 @@ static int ext4_fill_super(struct super_
- unsigned long offset = 0;
- unsigned long journal_devnum = 0;
- unsigned long def_mount_opts;
-- struct inode *root;
-+ struct inode *root = NULL;
- char *cp;
- const char *descr;
- int ret = -EINVAL;
- int blocksize;
- unsigned int db_count;
-+ size_t size;
- unsigned int i;
- int needs_recovery, has_huge_files;
- __u64 blocks_count;
-@@ -3286,11 +3292,18 @@ static int ext4_fill_super(struct super_
- (EXT4_MAX_BLOCK_FILE_PHYS / EXT4_BLOCKS_PER_GROUP(sb)));
- db_count = (sbi->s_groups_count + EXT4_DESC_PER_BLOCK(sb) - 1) /
- EXT4_DESC_PER_BLOCK(sb);
-- sbi->s_group_desc = kmalloc(db_count * sizeof(struct buffer_head *),
-- GFP_KERNEL);
-+ size = (size_t)db_count * sizeof(struct buffer_head *);
-+ sbi->s_group_desc = kzalloc(size, GFP_KERNEL);
- if (sbi->s_group_desc == NULL) {
-- ext4_msg(sb, KERN_ERR, "not enough memory");
-- goto failed_mount;
-+ sbi->s_group_desc = vmalloc(size);
-+ if (sbi->s_group_desc != NULL) {
-+ memset(sbi->s_group_desc, 0, size);
-+ } else {
-+ ext4_msg(sb, KERN_ERR, "no memory for %u groups (%u)\n",
-+ sbi->s_groups_count, (unsigned int)size);
-+ ret = -ENOMEM;
-+ goto failed_mount;
-+ }
- }
-
- #ifdef __BIG_ENDIAN
-@@ -3505,12 +3518,10 @@ no_journal:
- }
- if (!S_ISDIR(root->i_mode) || !root->i_blocks || !root->i_size) {
- ext4_msg(sb, KERN_ERR, "corrupt root inode, run e2fsck");
-- iput(root);
- goto failed_mount4;
- }
- sb->s_root = d_alloc_root(root);
- if (!sb->s_root) {
-- iput(root);
- ext4_msg(sb, KERN_ERR, "get root dentry failed");
- ret = -ENOMEM;
- goto failed_mount4;
-@@ -3562,6 +3573,7 @@ no_journal:
- if (err) {
- ext4_msg(sb, KERN_ERR, "failed to initalize mballoc (%d)",
- err);
-+ ret = err;
- goto failed_mount5;
- }
-
-@@ -3616,6 +3628,8 @@ failed_mount4a:
- dput(sb->s_root);
- sb->s_root = NULL;
- failed_mount4:
-+ iput(root);
-+ sb->s_root = NULL;
- ext4_msg(sb, KERN_ERR, "mount failed");
- destroy_workqueue(EXT4_SB(sb)->dio_unwritten_wq);
- failed_mount_wq:
-@@ -3639,7 +3653,11 @@ failed_mount3:
- failed_mount2:
- for (i = 0; i < db_count; i++)
- brelse(sbi->s_group_desc[i]);
-- kfree(sbi->s_group_desc);
-+
-+ if (is_vmalloc_addr(sbi->s_group_desc))
-+ vfree(sbi->s_group_desc);
-+ else
-+ kfree(sbi->s_group_desc);
- failed_mount:
- if (sbi->s_proc) {
- remove_proc_entry(sb->s_id, ext4_proc_root);
-Index: linux-stage/fs/ext4/mballoc.c
-===================================================================
---- linux-stage.orig/fs/ext4/mballoc.c
-+++ linux-stage/fs/ext4/mballoc.c
-@@ -23,6 +23,7 @@
-
- #include "mballoc.h"
- #include <linux/debugfs.h>
-+#include <linux/vmalloc.h>
- #include <trace/events/ext4.h>
-
- /*
-@@ -2408,24 +2409,37 @@ static int ext4_mb_init_backend(struct s
- while (array_size < sizeof(*sbi->s_group_info) *
- num_meta_group_infos_max)
- array_size = array_size << 1;
-- /* An 8TB filesystem with 64-bit pointers requires a 4096 byte
-- * kmalloc. A 128kb malloc should suffice for a 256TB filesystem.
-- * So a two level scheme suffices for now. */
-- sbi->s_group_info = kmalloc(array_size, GFP_KERNEL);
-+ /* A 16TB filesystem with 64-bit pointers requires an 8192 byte
-+ * kmalloc(). Filesystems larger than 2^32 blocks (16TB normally)
-+ * have group descriptors at least twice as large (64 bytes or
-+ * more vs. 32 bytes for traditional ext3 filesystems), so a 128TB
-+ * filesystem needs a 128kB allocation, which may need vmalloc(). */
-+ sbi->s_group_info = kzalloc(array_size, GFP_KERNEL);
- if (sbi->s_group_info == NULL) {
-- printk(KERN_ERR "EXT4-fs: can't allocate buddy meta group\n");
-- return -ENOMEM;
-+ sbi->s_group_info = vmalloc(array_size);
-+ if (sbi->s_group_info != NULL) {
-+ memset(sbi->s_group_info, 0, array_size);
-+ } else {
-+ ext4_msg(sb, KERN_ERR, "no memory for groupinfo (%u)\n",
-+ array_size);
-+ return -ENOMEM;
-+ }
- }
- sbi->s_buddy_cache = new_inode(sb);
- if (sbi->s_buddy_cache == NULL) {
-- printk(KERN_ERR "EXT4-fs: can't get new inode\n");
-+ ext4_msg(sb, KERN_ERR, "can't get new inode\n");
- goto err_freesgi;
- }
-+ /* To avoid potentially colliding with a valid on-disk inode number,
-+ * use EXT4_BAD_INO for the buddy cache inode number. This inode is
-+ * not in the inode hash, so it should never be found by iget(), but
-+ * this will avoid confusion if it ever shows up during debugging. */
-+ sbi->s_buddy_cache->i_ino = EXT4_BAD_INO;
- EXT4_I(sbi->s_buddy_cache)->i_disksize = 0;
- for (i = 0; i < ngroups; i++) {
- desc = ext4_get_group_desc(sb, i, NULL);
- if (desc == NULL) {
-- printk(KERN_ERR
-+ ext4_msg(sb, KERN_ERR,
- "EXT4-fs: can't read descriptor %u\n", i);
- goto err_freebuddy;
- }
-@@ -2443,7 +2457,10 @@ err_freebuddy:
- kfree(sbi->s_group_info[i]);
- iput(sbi->s_buddy_cache);
- err_freesgi:
-- kfree(sbi->s_group_info);
-+ if (is_vmalloc_addr(sbi->s_group_info))
-+ vfree(sbi->s_group_info);
-+ else
-+ kfree(sbi->s_group_info);
- return -ENOMEM;
- }
-
-@@ -2627,7 +2644,10 @@ int ext4_mb_release(struct super_block *
- EXT4_DESC_PER_BLOCK_BITS(sb);
- for (i = 0; i < num_meta_group_infos; i++)
- kfree(sbi->s_group_info[i]);
-- kfree(sbi->s_group_info);
-+ if (is_vmalloc_addr(sbi->s_group_info))
-+ vfree(sbi->s_group_info);
-+ else
-+ kfree(sbi->s_group_info);
- }
- kfree(sbi->s_mb_prealloc_table);
- kfree(sbi->s_mb_offsets);
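
The allocation pattern above (try kzalloc(), fall back to vmalloc() plus
memset(), and choose vfree() or kfree() at release time via
is_vmalloc_addr()) is applied to both s_group_desc and s_group_info; mainline
ext4 later wrapped the same idea in ext4_kvmalloc()/ext4_kvfree(). A
kernel-context sketch of such a helper pair is given below; the *_sketch
names are illustrative, and it assumes only <linux/slab.h>, <linux/string.h>
and <linux/vmalloc.h>.

    #include <linux/slab.h>
    #include <linux/string.h>
    #include <linux/vmalloc.h>

    /* Try a physically contiguous, zeroed allocation first; fall back to
     * vmalloc space for very large tables. */
    static void *kvzalloc_sketch(size_t size, gfp_t flags)
    {
            void *ptr = kzalloc(size, flags | __GFP_NOWARN);

            if (!ptr) {
                    ptr = vmalloc(size);
                    if (ptr)
                            memset(ptr, 0, size);
            }
            return ptr;
    }

    /* Free with whichever allocator actually provided the memory. */
    static void kvfree_sketch(void *ptr)
    {
            if (is_vmalloc_addr(ptr))
                    vfree(ptr);
            else
                    kfree(ptr);
    }
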
+++ /dev/null
-From: Theodore Ts'o <tytso@mit.edu>
-
-From e35fd6609b2fee54484d520deccb8f18bf7d38f3 Mon Sep 17 00:00:00 2001
-
-
-Subject: [PATCH] ext4: Add new abstraction ext4_map_blocks() underneath
- ext4_get_blocks()
-
-Jack up ext4_get_blocks() and add a new function, ext4_map_blocks(),
-which uses a much smaller structure, struct ext4_map_blocks, which is
-20 bytes, as opposed to a struct buffer_head, which is nearly 5 times
-bigger on an x86_64 machine. By switching callers to
-ext4_map_blocks(), we can save stack space, since we avoid allocating
-a struct buffer_head on the stack.
-
-Signed-off-by: "Theodore Ts'o" <tytso@mit.edu>
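
To make the stack-size argument concrete: struct ext4_map_blocks carries only
the logical block, the requested length, the returned physical block and a
flags word, so a caller fills in two fields and reads the result back out of
the same structure. A hedged sketch of the calling convention this patch
introduces follows; map_one_range() is an illustrative wrapper, not a
function from the patch, and error handling is trimmed.

    /* Sketch of the ext4_map_blocks() calling convention introduced below. */
    static int map_one_range(handle_t *handle, struct inode *inode,
                             ext4_lblk_t lblk, unsigned int len, int flags)
    {
            struct ext4_map_blocks map = {
                    .m_lblk = lblk,         /* first logical block to look up */
                    .m_len  = len,          /* number of blocks wanted */
            };
            int ret = ext4_map_blocks(handle, inode, &map, flags);

            if (ret > 0) {
                    /* 'ret' blocks are mapped starting at map.m_pblk;
                     * EXT4_MAP_NEW in map.m_flags marks fresh allocations. */
                    return ret;
            }
            return ret;     /* 0: hole, nothing allocated; < 0: error */
    }
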
-Index: linux-stage/fs/ext4/ext4.h
-===================================================================
---- linux-stage.orig/fs/ext4/ext4.h 2016-07-15 09:52:28.000000000 +0300
-+++ linux-stage/fs/ext4/ext4.h 2016-07-15 09:52:29.000000000 +0300
-@@ -142,10 +142,8 @@ struct ext4_allocation_request {
- #define EXT4_MAP_MAPPED (1 << BH_Mapped)
- #define EXT4_MAP_UNWRITTEN (1 << BH_Unwritten)
- #define EXT4_MAP_BOUNDARY (1 << BH_Boundary)
--#define EXT4_MAP_UNINIT (1 << BH_Uninit)
- #define EXT4_MAP_FLAGS (EXT4_MAP_NEW | EXT4_MAP_MAPPED |\
-- EXT4_MAP_UNWRITTEN | EXT4_MAP_BOUNDARY |\
-- EXT4_MAP_UNINIT)
-+ EXT4_MAP_UNWRITTEN | EXT4_MAP_BOUNDARY)
-
- struct ext4_map_blocks {
- ext4_fsblk_t m_pblk;
-@@ -2184,9 +2182,9 @@ extern int ext4_ext_tree_init(handle_t *
- extern int ext4_ext_writepage_trans_blocks(struct inode *, int);
- extern int ext4_ext_index_trans_blocks(struct inode *inode, int nrblocks,
- int chunk);
--extern int ext4_ext_get_blocks(handle_t *handle, struct inode *inode,
-- ext4_lblk_t iblock, unsigned int max_blocks,
-- struct buffer_head *bh_result, int flags);
-+#define HAVE_EXT4_MAP_BLOCKS
-+extern int ext4_ext_map_blocks(handle_t *handle, struct inode *inode,
-+ struct ext4_map_blocks *map, int flags);
- extern void ext4_ext_truncate(struct inode *);
- extern int ext4_ext_punch_hole(struct inode *inode, loff_t offset,
- loff_t length);
-@@ -2196,6 +2194,8 @@ extern long ext4_fallocate(struct inode
- loff_t len);
- extern int ext4_convert_unwritten_extents(struct inode *inode, loff_t offset,
- ssize_t len);
-+extern int ext4_map_blocks(handle_t *handle, struct inode *inode,
-+ struct ext4_map_blocks *map, int flags);
- extern int ext4_get_blocks(handle_t *handle, struct inode *inode,
- sector_t block, unsigned int max_blocks,
- struct buffer_head *bh, int flags);
-Index: linux-stage/fs/ext4/extents.c
-===================================================================
---- linux-stage.orig/fs/ext4/extents.c 2016-07-15 09:52:28.000000000 +0300
-+++ linux-stage/fs/ext4/extents.c 2016-07-15 09:53:10.000000000 +0300
-@@ -2960,7 +2960,7 @@ fix_extent_len:
-
- #define EXT4_EXT_ZERO_LEN 7
- /*
-- * This function is called by ext4_ext_get_blocks() if someone tries to write
-+ * This function is called by ext4_ext_map_blocks() if someone tries to write
- * to an uninitialized extent. It may result in splitting the uninitialized
- * extent into multiple extents (upto three - one initialized and two
- * uninitialized).
-@@ -2970,11 +2970,10 @@ fix_extent_len:
- * c> Splits in three extents: Somone is writing in middle of the extent
- */
- static int ext4_ext_convert_to_initialized(handle_t *handle,
-- struct inode *inode,
-- struct ext4_ext_path *path,
-- ext4_lblk_t iblock,
-- unsigned int max_blocks,
-- int flags)
-+ struct inode *inode,
-+ struct ext4_map_blocks *map,
-+ struct ext4_ext_path *path,
-+ int flags)
- {
- struct ext4_extent *ex, newex, orig_ex;
- struct ext4_extent *ex1 = NULL;
-@@ -2990,20 +2989,20 @@ static int ext4_ext_convert_to_initializ
-
- ext_debug("ext4_ext_convert_to_initialized: inode %lu, logical"
- "block %llu, max_blocks %u\n", inode->i_ino,
-- (unsigned long long)iblock, max_blocks);
-+ (unsigned long long)map->m_lblk, map->m_len);
-
- eof_block = (inode->i_size + inode->i_sb->s_blocksize - 1) >>
- inode->i_sb->s_blocksize_bits;
-- if (eof_block < iblock + max_blocks)
-- eof_block = iblock + max_blocks;
-+ if (eof_block < map->m_lblk + map->m_len)
-+ eof_block = map->m_lblk + map->m_len;
-
- depth = ext_depth(inode);
- eh = path[depth].p_hdr;
- ex = path[depth].p_ext;
- ee_block = le32_to_cpu(ex->ee_block);
- ee_len = ext4_ext_get_actual_len(ex);
-- allocated = ee_len - (iblock - ee_block);
-- newblock = iblock - ee_block + ext4_ext_pblock(ex);
-+ allocated = ee_len - (map->m_lblk - ee_block);
-+ newblock = map->m_lblk - ee_block + ext4_ext_pblock(ex);
-
- ex2 = ex;
- orig_ex.ee_block = ex->ee_block;
-@@ -3033,10 +3032,10 @@ static int ext4_ext_convert_to_initializ
- return allocated;
- }
-
-- /* ex1: ee_block to iblock - 1 : uninitialized */
-- if (iblock > ee_block) {
-+ /* ex1: ee_block to map->m_lblk - 1 : uninitialized */
-+ if (map->m_lblk > ee_block) {
- ex1 = ex;
-- ex1->ee_len = cpu_to_le16(iblock - ee_block);
-+ ex1->ee_len = cpu_to_le16(map->m_lblk - ee_block);
- ext4_ext_mark_uninitialized(ex1);
- ext4_ext_dirty(handle, inode, path + depth);
- ex2 = &newex;
-@@ -3046,15 +3045,15 @@ static int ext4_ext_convert_to_initializ
- * we insert ex3, if ex1 is NULL. This is to avoid temporary
- * overlap of blocks.
- */
-- if (!ex1 && allocated > max_blocks)
-- ex2->ee_len = cpu_to_le16(max_blocks);
-+ if (!ex1 && allocated > map->m_len)
-+ ex2->ee_len = cpu_to_le16(map->m_len);
- /* ex3: to ee_block + ee_len : uninitialised */
-- if (allocated > max_blocks) {
-+ if (allocated > map->m_len) {
- unsigned int newdepth;
- /* If extent has less than EXT4_EXT_ZERO_LEN zerout directly */
- if (allocated <= EXT4_EXT_ZERO_LEN && may_zeroout) {
- /*
-- * iblock == ee_block is handled by the zerouout
-+ * map->m_lblk == ee_block is handled by the zeroout
- * at the beginning.
- * Mark first half uninitialized.
- * Mark second half initialized and zero out the
-@@ -3067,7 +3066,7 @@ static int ext4_ext_convert_to_initializ
- ext4_ext_dirty(handle, inode, path + depth);
-
- ex3 = &newex;
-- ex3->ee_block = cpu_to_le32(iblock);
-+ ex3->ee_block = cpu_to_le32(map->m_lblk);
- ext4_ext_store_pblock(ex3, newblock);
- ex3->ee_len = cpu_to_le16(allocated);
- err = ext4_ext_insert_extent(handle, inode, path,
-@@ -3081,7 +3080,7 @@ static int ext4_ext_convert_to_initializ
- ext4_ext_store_pblock(ex,
- ext4_ext_pblock(&orig_ex));
- ext4_ext_dirty(handle, inode, path + depth);
-- /* blocks available from iblock */
-+ /* blocks available from map->m_lblk */
- return allocated;
-
- } else if (err)
-@@ -3103,8 +3102,8 @@ static int ext4_ext_convert_to_initializ
- */
- depth = ext_depth(inode);
- ext4_ext_drop_refs(path);
-- path = ext4_ext_find_extent(inode,
-- iblock, path);
-+ path = ext4_ext_find_extent(inode, map->m_lblk,
-+ path);
- if (IS_ERR(path)) {
- err = PTR_ERR(path);
- return err;
-@@ -3124,9 +3123,9 @@ static int ext4_ext_convert_to_initializ
- return allocated;
- }
- ex3 = &newex;
-- ex3->ee_block = cpu_to_le32(iblock + max_blocks);
-- ext4_ext_store_pblock(ex3, newblock + max_blocks);
-- ex3->ee_len = cpu_to_le16(allocated - max_blocks);
-+ ex3->ee_block = cpu_to_le32(map->m_lblk + map->m_len);
-+ ext4_ext_store_pblock(ex3, newblock + map->m_len);
-+ ex3->ee_len = cpu_to_le16(allocated - map->m_len);
- ext4_ext_mark_uninitialized(ex3);
- err = ext4_ext_insert_extent(handle, inode, path, ex3, flags);
- if (err == -ENOSPC && may_zeroout) {
-@@ -3139,7 +3138,7 @@ static int ext4_ext_convert_to_initializ
- ext4_ext_store_pblock(ex, ext4_ext_pblock(&orig_ex));
- ext4_ext_dirty(handle, inode, path + depth);
- /* zeroed the full extent */
-- /* blocks available from iblock */
-+ /* blocks available from map->m_lblk */
- return allocated;
-
- } else if (err)
-@@ -3159,7 +3158,7 @@ static int ext4_ext_convert_to_initializ
-
- depth = newdepth;
- ext4_ext_drop_refs(path);
-- path = ext4_ext_find_extent(inode, iblock, path);
-+ path = ext4_ext_find_extent(inode, map->m_lblk, path);
- if (IS_ERR(path)) {
- err = PTR_ERR(path);
- goto out;
-@@ -3173,14 +3172,14 @@ static int ext4_ext_convert_to_initializ
- if (err)
- goto out;
-
-- allocated = max_blocks;
-+ allocated = map->m_len;
-
- /* If extent has less than EXT4_EXT_ZERO_LEN and we are trying
- * to insert a extent in the middle zerout directly
- * otherwise give the extent a chance to merge to left
- */
- if (le16_to_cpu(orig_ex.ee_len) <= EXT4_EXT_ZERO_LEN &&
-- iblock != ee_block && may_zeroout) {
-+ map->m_lblk != ee_block && may_zeroout) {
- err = ext4_ext_zeroout(inode, &orig_ex);
- if (err)
- goto fix_extent_len;
-@@ -3190,7 +3189,7 @@ static int ext4_ext_convert_to_initializ
- ext4_ext_store_pblock(ex, ext4_ext_pblock(&orig_ex));
- ext4_ext_dirty(handle, inode, path + depth);
- /* zero out the first half */
-- /* blocks available from iblock */
-+ /* blocks available from map->m_lblk */
- return allocated;
- }
- }
-@@ -3201,13 +3200,13 @@ static int ext4_ext_convert_to_initializ
- */
- if (ex1 && ex1 != ex) {
- ex1 = ex;
-- ex1->ee_len = cpu_to_le16(iblock - ee_block);
-+ ex1->ee_len = cpu_to_le16(map->m_lblk - ee_block);
- ext4_ext_mark_uninitialized(ex1);
- ext4_ext_dirty(handle, inode, path + depth);
- ex2 = &newex;
- }
-- /* ex2: iblock to iblock + maxblocks-1 : initialised */
-- ex2->ee_block = cpu_to_le32(iblock);
-+ /* ex2: map->m_lblk to map->m_lblk + maxblocks-1 : initialised */
-+ ex2->ee_block = cpu_to_le32(map->m_lblk);
- ext4_ext_store_pblock(ex2, newblock);
- ex2->ee_len = cpu_to_le16(allocated);
- if (ex2 != ex)
-@@ -3277,7 +3276,7 @@ fix_extent_len:
- }
-
- /*
-- * This function is called by ext4_ext_get_blocks() from
-+ * This function is called by ext4_ext_map_blocks() from
- * ext4_get_blocks_dio_write() when DIO to write
- * to an uninitialized extent.
- *
-@@ -3300,9 +3299,8 @@ fix_extent_len:
- */
- static int ext4_split_unwritten_extents(handle_t *handle,
- struct inode *inode,
-+ struct ext4_map_blocks *map,
- struct ext4_ext_path *path,
-- ext4_lblk_t iblock,
-- unsigned int max_blocks,
- int flags)
- {
- struct ext4_extent *ex, newex, orig_ex;
-@@ -3318,20 +3316,20 @@ static int ext4_split_unwritten_extents(
-
- ext_debug("ext4_split_unwritten_extents: inode %lu, logical"
- "block %llu, max_blocks %u\n", inode->i_ino,
-- (unsigned long long)iblock, max_blocks);
-+ (unsigned long long)map->m_lblk, map->m_len);
-
- eof_block = (inode->i_size + inode->i_sb->s_blocksize - 1) >>
- inode->i_sb->s_blocksize_bits;
-- if (eof_block < iblock + max_blocks)
-- eof_block = iblock + max_blocks;
-+ if (eof_block < map->m_lblk + map->m_len)
-+ eof_block = map->m_lblk + map->m_len;
-
- depth = ext_depth(inode);
- eh = path[depth].p_hdr;
- ex = path[depth].p_ext;
- ee_block = le32_to_cpu(ex->ee_block);
- ee_len = ext4_ext_get_actual_len(ex);
-- allocated = ee_len - (iblock - ee_block);
-- newblock = iblock - ee_block + ext4_ext_pblock(ex);
-+ allocated = ee_len - (map->m_lblk - ee_block);
-+ newblock = map->m_lblk - ee_block + ext4_ext_pblock(ex);
-
- ex2 = ex;
- orig_ex.ee_block = ex->ee_block;
-@@ -3349,16 +3347,16 @@ static int ext4_split_unwritten_extents(
- * block where the write begins, and the write completely
- * covers the extent, then we don't need to split it.
- */
-- if ((iblock == ee_block) && (allocated <= max_blocks))
-+ if ((map->m_lblk == ee_block) && (allocated <= map->m_len))
- return allocated;
-
- err = ext4_ext_get_access(handle, inode, path + depth);
- if (err)
- goto out;
-- /* ex1: ee_block to iblock - 1 : uninitialized */
-- if (iblock > ee_block) {
-+ /* ex1: ee_block to map->m_lblk - 1 : uninitialized */
-+ if (map->m_lblk > ee_block) {
- ex1 = ex;
-- ex1->ee_len = cpu_to_le16(iblock - ee_block);
-+ ex1->ee_len = cpu_to_le16(map->m_lblk - ee_block);
- ext4_ext_mark_uninitialized(ex1);
- ext4_ext_dirty(handle, inode, path + depth);
- ex2 = &newex;
-@@ -3368,15 +3366,15 @@ static int ext4_split_unwritten_extents(
- * we insert ex3, if ex1 is NULL. This is to avoid temporary
- * overlap of blocks.
- */
-- if (!ex1 && allocated > max_blocks)
-- ex2->ee_len = cpu_to_le16(max_blocks);
-+ if (!ex1 && allocated > map->m_len)
-+ ex2->ee_len = cpu_to_le16(map->m_len);
- /* ex3: to ee_block + ee_len : uninitialised */
-- if (allocated > max_blocks) {
-+ if (allocated > map->m_len) {
- unsigned int newdepth;
- ex3 = &newex;
-- ex3->ee_block = cpu_to_le32(iblock + max_blocks);
-- ext4_ext_store_pblock(ex3, newblock + max_blocks);
-- ex3->ee_len = cpu_to_le16(allocated - max_blocks);
-+ ex3->ee_block = cpu_to_le32(map->m_lblk + map->m_len);
-+ ext4_ext_store_pblock(ex3, newblock + map->m_len);
-+ ex3->ee_len = cpu_to_le16(allocated - map->m_len);
- ext4_ext_mark_uninitialized(ex3);
- err = ext4_ext_insert_extent(handle, inode, path, ex3, flags);
- if (err == -ENOSPC && may_zeroout) {
-@@ -3400,8 +3398,8 @@ static int ext4_split_unwritten_extents(
- err = ext4_ext_zeroout(inode, ex3);
- if (err)
- goto fix_extent_len;
-- max_blocks = allocated;
-- ex2->ee_len = cpu_to_le16(max_blocks);
-+ map->m_len = allocated;
-+ ex2->ee_len = cpu_to_le16(map->m_len);
- goto skip;
- }
- err = ext4_ext_zeroout(inode, &orig_ex);
-@@ -3413,7 +3411,7 @@ static int ext4_split_unwritten_extents(
- ext4_ext_store_pblock(ex, ext4_ext_pblock(&orig_ex));
- ext4_ext_dirty(handle, inode, path + depth);
- /* zeroed the full extent */
-- /* blocks available from iblock */
-+ /* blocks available from map->m_lblk */
- return allocated;
-
- } else if (err)
-@@ -3433,7 +3431,7 @@ static int ext4_split_unwritten_extents(
-
- depth = newdepth;
- ext4_ext_drop_refs(path);
-- path = ext4_ext_find_extent(inode, iblock, path);
-+ path = ext4_ext_find_extent(inode, map->m_lblk, path);
- if (IS_ERR(path)) {
- err = PTR_ERR(path);
- goto out;
-@@ -3446,8 +3444,7 @@ static int ext4_split_unwritten_extents(
- err = ext4_ext_get_access(handle, inode, path + depth);
- if (err)
- goto out;
--
-- allocated = max_blocks;
-+ allocated = map->m_len;
- }
- skip:
- /*
-@@ -3457,16 +3454,16 @@ skip:
- */
- if (ex1 && ex1 != ex) {
- ex1 = ex;
-- ex1->ee_len = cpu_to_le16(iblock - ee_block);
-+ ex1->ee_len = cpu_to_le16(map->m_lblk - ee_block);
- ext4_ext_mark_uninitialized(ex1);
- ext4_ext_dirty(handle, inode, path + depth);
- ex2 = &newex;
- }
- /*
-- * ex2: iblock to iblock + maxblocks-1 : to be direct IO written,
-- * uninitialised still.
-+ * ex2: map->m_lblk to map->m_lblk + map->m_len-1 : to be written
-+ * using direct I/O, uninitialised still.
- */
-- ex2->ee_block = cpu_to_le32(iblock);
-+ ex2->ee_block = cpu_to_le32(map->m_lblk);
- ext4_ext_store_pblock(ex2, newblock);
- ex2->ee_len = cpu_to_le16(allocated);
- ext4_ext_mark_uninitialized(ex2);
-@@ -3506,8 +3503,7 @@ fix_extent_len:
-
- static int ext4_convert_unwritten_extents_dio(handle_t *handle,
- struct inode *inode,
-- ext4_lblk_t iblock,
-- unsigned int max_blocks,
-+ struct ext4_map_blocks *map,
- struct ext4_ext_path *path)
- {
- struct ext4_extent *ex;
-@@ -3529,14 +3525,13 @@ static int ext4_convert_unwritten_extent
-
- /* If extent is larger than requested then split is required */
-
-- if (ee_block != iblock || ee_len > max_blocks) {
-- err = ext4_split_unwritten_extents(handle, inode, path,
-- iblock, max_blocks,
-+ if (ee_block != map->m_lblk || ee_len > map->m_len) {
-+ err = ext4_split_unwritten_extents(handle, inode, map, path,
- EXT4_EXT_DATA_VALID);
- if (err < 0)
- goto out;
- ext4_ext_drop_refs(path);
-- path = ext4_ext_find_extent(inode, iblock, path);
-+ path = ext4_ext_find_extent(inode, map->m_lblk, path);
- if (IS_ERR(path)) {
- err = PTR_ERR(path);
- goto out;
-@@ -3627,10 +3622,9 @@ out:
-
- static int
- ext4_ext_handle_uninitialized_extents(handle_t *handle, struct inode *inode,
-- ext4_lblk_t iblock, unsigned int max_blocks,
-+ struct ext4_map_blocks *map,
- struct ext4_ext_path *path, int flags,
-- unsigned int allocated, struct buffer_head *bh_result,
-- ext4_fsblk_t newblock)
-+ unsigned int allocated, ext4_fsblk_t newblock)
- {
- int ret = 0;
- int err = 0;
-@@ -3638,7 +3632,7 @@ ext4_ext_handle_uninitialized_extents(ha
-
- ext_debug("ext4_ext_handle_uninitialized_extents: inode %lu, logical"
- "block %llu, max_blocks %u, flags %d, allocated %u",
-- inode->i_ino, (unsigned long long)iblock, max_blocks,
-+ inode->i_ino, (unsigned long long)map->m_lblk, map->m_len,
- flags, allocated);
- ext4_ext_show_leaf(inode, path);
-
-@@ -3651,9 +3645,8 @@ ext4_ext_handle_uninitialized_extents(ha
- /* DIO get_block() before submit the IO, split the extent */
- if ((flags & ~EXT4_GET_BLOCKS_METADATA_NOFAIL) ==
- EXT4_GET_BLOCKS_DIO_CREATE_EXT) {
-- ret = ext4_split_unwritten_extents(handle,
-- inode, path, iblock,
-- max_blocks, flags);
-+ ret = ext4_split_unwritten_extents(handle, inode, map,
-+ path, flags);
- /*
- * Flag the inode(non aio case) or end_io struct (aio case)
- * that this IO needs to convertion to written when IO is
-@@ -3670,12 +3663,11 @@ ext4_ext_handle_uninitialized_extents(ha
- if ((flags & ~EXT4_GET_BLOCKS_METADATA_NOFAIL) ==
- EXT4_GET_BLOCKS_DIO_CONVERT_EXT) {
- ret = ext4_convert_unwritten_extents_dio(handle, inode,
-- iblock, max_blocks,
-- path);
-+ map, path);
- if (ret >= 0) {
- ext4_update_inode_fsync_trans(handle, inode, 1);
-- err = check_eofblocks_fl(handle, inode, iblock, path,
-- max_blocks);
-+ err = check_eofblocks_fl(handle, inode, map->m_lblk, path,
-+ map->m_len);
- } else
- err = ret;
- goto out2;
-@@ -3697,18 +3689,15 @@ ext4_ext_handle_uninitialized_extents(ha
- * the buffer head will be unmapped so that
- * a read from the block returns 0s.
- */
-- set_buffer_unwritten(bh_result);
-+ map->m_flags |= EXT4_MAP_UNWRITTEN;
- goto out1;
- }
-
- /* buffered write, writepage time, convert*/
-- ret = ext4_ext_convert_to_initialized(handle, inode,
-- path, iblock,
-- max_blocks,
-- flags);
-+ ret = ext4_ext_convert_to_initialized(handle, inode, map, path, flags);
- if (ret >= 0) {
- ext4_update_inode_fsync_trans(handle, inode, 1);
-- err = check_eofblocks_fl(handle, inode, iblock, path, max_blocks);
-+ err = check_eofblocks_fl(handle, inode, map->m_lblk, path, map->m_len);
- if (err < 0)
- goto out2;
- }
-@@ -3718,7 +3707,7 @@ out:
- goto out2;
- } else
- allocated = ret;
-- set_buffer_new(bh_result);
-+ map->m_flags |= EXT4_MAP_NEW;
- /*
- * if we allocated more blocks than requested
- * we need to make sure we unmap the extra block
-@@ -3726,11 +3715,11 @@ out:
- * unmapped later when we find the buffer_head marked
- * new.
- */
-- if (allocated > max_blocks) {
-+ if (allocated > map->m_len) {
- unmap_underlying_metadata_blocks(inode->i_sb->s_bdev,
-- newblock + max_blocks,
-- allocated - max_blocks);
-- allocated = max_blocks;
-+ newblock + map->m_len,
-+ allocated - map->m_len);
-+ allocated = map->m_len;
- }
-
- /*
-@@ -3744,13 +3733,13 @@ out:
- ext4_da_update_reserve_space(inode, allocated, 0);
-
- map_out:
-- set_buffer_mapped(bh_result);
-+ map->m_flags |= EXT4_MAP_MAPPED;
- out1:
-- if (allocated > max_blocks)
-- allocated = max_blocks;
-+ if (allocated > map->m_len)
-+ allocated = map->m_len;
- ext4_ext_show_leaf(inode, path);
-- bh_result->b_bdev = inode->i_sb->s_bdev;
-- bh_result->b_blocknr = newblock;
-+ map->m_pblk = newblock;
-+ map->m_len = allocated;
- out2:
- if (path) {
- ext4_ext_drop_refs(path);
-@@ -3777,10 +3766,8 @@ out2:
- *
- * return < 0, error case.
- */
--int ext4_ext_get_blocks(handle_t *handle, struct inode *inode,
-- ext4_lblk_t iblock,
-- unsigned int max_blocks, struct buffer_head *bh_result,
-- int flags)
-+int ext4_ext_map_blocks(handle_t *handle, struct inode *inode,
-+ struct ext4_map_blocks *map, int flags)
- {
- struct ext4_ext_path *path = NULL;
- struct ext4_extent_header *eh;
-@@ -3791,12 +3778,11 @@ int ext4_ext_get_blocks(handle_t *handle
- struct ext4_allocation_request ar;
- ext4_io_end_t *io = EXT4_I(inode)->cur_aio_dio;
-
-- __clear_bit(BH_New, &bh_result->b_state);
- ext_debug("blocks %u/%u requested for inode %lu\n",
-- iblock, max_blocks, inode->i_ino);
-+ map->m_lblk, map->m_len, inode->i_ino);
-
- /* check in cache */
-- if (ext4_ext_in_cache(inode, iblock, &newex)) {
-+ if (ext4_ext_in_cache(inode, map->m_lblk, &newex)) {
- if (!newex.ee_start_lo && !newex.ee_start_hi) {
- if ((flags & EXT4_GET_BLOCKS_CREATE) == 0) {
- /*
-@@ -3808,18 +3794,18 @@ int ext4_ext_get_blocks(handle_t *handle
- /* we should allocate requested block */
- } else {
- /* block is already allocated */
-- newblock = iblock
-+ newblock = map->m_lblk
- - le32_to_cpu(newex.ee_block)
- + ext4_ext_pblock(&newex);
- /* number of remaining blocks in the extent */
- allocated = ext4_ext_get_actual_len(&newex) -
-- (iblock - le32_to_cpu(newex.ee_block));
-+ (map->m_lblk - le32_to_cpu(newex.ee_block));
- goto out;
- }
- }
-
- /* find extent for this block */
-- path = ext4_ext_find_extent(inode, iblock, NULL);
-+ path = ext4_ext_find_extent(inode, map->m_lblk, NULL);
- if (IS_ERR(path)) {
- err = PTR_ERR(path);
- path = NULL;
-@@ -3836,7 +3822,7 @@ int ext4_ext_get_blocks(handle_t *handle
- if (unlikely(path[depth].p_ext == NULL && depth != 0)) {
- EXT4_ERROR_INODE(inode, "bad extent address "
- "iblock: %d, depth: %d pblock %lld",
-- iblock, depth, path[depth].p_block);
-+ map->m_lblk, depth, path[depth].p_block);
- err = -EIO;
- goto out2;
- }
-@@ -3854,11 +3840,11 @@ int ext4_ext_get_blocks(handle_t *handle
- */
- ee_len = ext4_ext_get_actual_len(ex);
- /* if found extent covers block, simply return it */
-- if (in_range(iblock, ee_block, ee_len)) {
-- newblock = iblock - ee_block + ee_start;
-+ if (in_range(map->m_lblk, ee_block, ee_len)) {
-+ newblock = map->m_lblk - ee_block + ee_start;
- /* number of remaining blocks in the extent */
-- allocated = ee_len - (iblock - ee_block);
-- ext_debug("%u fit into %u:%d -> %llu\n", iblock,
-+ allocated = ee_len - (map->m_lblk - ee_block);
-+ ext_debug("%u fit into %u:%d -> %llu\n", map->m_lblk,
- ee_block, ee_len, newblock);
-
- /*
-@@ -3870,9 +3856,9 @@ int ext4_ext_get_blocks(handle_t *handle
- ee_len, ee_start);
- goto out;
- }
-- ret = ext4_ext_handle_uninitialized_extents(
-- handle, inode, iblock, max_blocks, path,
-- flags, allocated, bh_result, newblock);
-+ ret = ext4_ext_handle_uninitialized_extents(handle,
-+ inode, map, path, flags, allocated,
-+ newblock);
- return ret;
- }
- }
-@@ -3886,7 +3872,7 @@ int ext4_ext_get_blocks(handle_t *handle
- * put just found gap into cache to speed up
- * subsequent requests
- */
-- ext4_ext_put_gap_in_cache(inode, path, iblock);
-+ ext4_ext_put_gap_in_cache(inode, path, map->m_lblk);
- goto out2;
- }
- /*
-@@ -3894,11 +3880,11 @@ int ext4_ext_get_blocks(handle_t *handle
- */
-
- /* find neighbour allocated blocks */
-- ar.lleft = iblock;
-+ ar.lleft = map->m_lblk;
- err = ext4_ext_search_left(inode, path, &ar.lleft, &ar.pleft);
- if (err)
- goto out2;
-- ar.lright = iblock;
-+ ar.lright = map->m_lblk;
- err = ext4_ext_search_right(inode, path, &ar.lright, &ar.pright);
- if (err)
- goto out2;
-@@ -3909,26 +3895,26 @@ int ext4_ext_get_blocks(handle_t *handle
- * EXT_INIT_MAX_LEN and for an uninitialized extent this limit is
- * EXT_UNINIT_MAX_LEN.
- */
-- if (max_blocks > EXT_INIT_MAX_LEN &&
-+ if (map->m_len > EXT_INIT_MAX_LEN &&
- !(flags & EXT4_GET_BLOCKS_UNINIT_EXT))
-- max_blocks = EXT_INIT_MAX_LEN;
-- else if (max_blocks > EXT_UNINIT_MAX_LEN &&
-+ map->m_len = EXT_INIT_MAX_LEN;
-+ else if (map->m_len > EXT_UNINIT_MAX_LEN &&
- (flags & EXT4_GET_BLOCKS_UNINIT_EXT))
-- max_blocks = EXT_UNINIT_MAX_LEN;
-+ map->m_len = EXT_UNINIT_MAX_LEN;
-
-- /* Check if we can really insert (iblock)::(iblock+max_blocks) extent */
-- newex.ee_block = cpu_to_le32(iblock);
-- newex.ee_len = cpu_to_le16(max_blocks);
-+ /* Check if we can really insert (m_lblk)::(m_lblk + m_len) extent */
-+ newex.ee_block = cpu_to_le32(map->m_lblk);
-+ newex.ee_len = cpu_to_le16(map->m_len);
- err = ext4_ext_check_overlap(inode, &newex, path);
- if (err)
- allocated = ext4_ext_get_actual_len(&newex);
- else
-- allocated = max_blocks;
-+ allocated = map->m_len;
-
- /* allocate new block */
- ar.inode = inode;
-- ar.goal = ext4_ext_find_goal(inode, path, iblock);
-- ar.logical = iblock;
-+ ar.goal = ext4_ext_find_goal(inode, path, map->m_lblk);
-+ ar.logical = map->m_lblk;
- ar.len = allocated;
- if (S_ISREG(inode->i_mode))
- ar.flags = EXT4_MB_HINT_DATA;
-@@ -3967,7 +3953,7 @@ int ext4_ext_get_blocks(handle_t *handle
- }
- }
-
-- err = check_eofblocks_fl(handle, inode, iblock, path, ar.len);
-+ err = check_eofblocks_fl(handle, inode, map->m_lblk, path, ar.len);
- if (err)
- goto out2;
-
-@@ -3987,9 +3973,9 @@ int ext4_ext_get_blocks(handle_t *handle
- /* previous routine could use block we allocated */
- newblock = ext4_ext_pblock(&newex);
- allocated = ext4_ext_get_actual_len(&newex);
-- if (allocated > max_blocks)
-- allocated = max_blocks;
-- set_buffer_new(bh_result);
-+ if (allocated > map->m_len)
-+ allocated = map->m_len;
-+ map->m_flags |= EXT4_MAP_NEW;
-
- /*
- * Update reserved blocks/metadata blocks after successful
-@@ -4003,17 +3989,17 @@ int ext4_ext_get_blocks(handle_t *handle
- * when it is _not_ an uninitialized extent.
- */
- if ((flags & EXT4_GET_BLOCKS_UNINIT_EXT) == 0) {
-- ext4_ext_put_in_cache(inode, iblock, allocated, newblock);
-+ ext4_ext_put_in_cache(inode, map->m_lblk, allocated, newblock);
- ext4_update_inode_fsync_trans(handle, inode, 1);
- } else
- ext4_update_inode_fsync_trans(handle, inode, 0);
- out:
-- if (allocated > max_blocks)
-- allocated = max_blocks;
-+ if (allocated > map->m_len)
-+ allocated = map->m_len;
- ext4_ext_show_leaf(inode, path);
-- set_buffer_mapped(bh_result);
-- bh_result->b_bdev = inode->i_sb->s_bdev;
-- bh_result->b_blocknr = newblock;
-+ map->m_flags |= EXT4_MAP_MAPPED;
-+ map->m_pblk = newblock;
-+ map->m_len = allocated;
- out2:
- if (path) {
- ext4_ext_drop_refs(path);
-@@ -4196,7 +4182,7 @@ retry:
- if (ret <= 0) {
- #ifdef EXT4FS_DEBUG
- WARN_ON(ret <= 0);
-- printk(KERN_ERR "%s: ext4_ext_get_blocks "
-+ printk(KERN_ERR "%s: ext4_ext_map_blocks "
- "returned error inode#%lu, block=%u, "
- "max_blocks=%u", __func__,
- inode->i_ino, block, max_blocks);
-@@ -4709,6 +4695,5 @@ EXPORT_SYMBOL(ext4_ext_insert_extent);
- EXPORT_SYMBOL(ext4_mb_new_blocks);
- EXPORT_SYMBOL(ext4_ext_calc_credits_for_insert);
- EXPORT_SYMBOL(ext4_mark_inode_dirty);
--EXPORT_SYMBOL(ext4_ext_walk_space);
- EXPORT_SYMBOL(ext4_ext_find_extent);
- EXPORT_SYMBOL(ext4_ext_drop_refs);
-Index: linux-stage/fs/ext4/inode.c
-===================================================================
---- linux-stage.orig/fs/ext4/inode.c 2016-07-15 09:52:28.000000000 +0300
-+++ linux-stage/fs/ext4/inode.c 2016-07-15 09:52:29.000000000 +0300
-@@ -200,7 +200,7 @@ int ext4_truncate_restart_trans(handle_t
- int ret;
-
- /*
-- * Drop i_data_sem to avoid deadlock with ext4_get_blocks At this
-+ * Drop i_data_sem to avoid deadlock with ext4_map_blocks. At this
- * moment, get_block can be called only for blocks inside i_size since
- * page cache has been already dropped and writes are blocked by
- * i_mutex. So we can safely drop the i_data_sem here.
-@@ -970,9 +970,9 @@ err_out:
- }
-
- /*
-- * The ext4_ind_get_blocks() function handles non-extents inodes
-+ * The ext4_ind_map_blocks() function handles non-extents inodes
- * (i.e., using the traditional indirect/double-indirect i_blocks
-- * scheme) for ext4_get_blocks().
-+ * scheme) for ext4_map_blocks().
- *
- * Allocation strategy is simple: if we have to allocate something, we will
- * have to go the whole way to leaf. So let's do it before attaching anything
-@@ -991,15 +991,14 @@ err_out:
- * return = 0, if plain lookup failed.
- * return < 0, error case.
- *
-- * The ext4_ind_get_blocks() function should be called with
-+ * The ext4_ind_map_blocks() function should be called with
- * down_write(&EXT4_I(inode)->i_data_sem) if allocating filesystem
- * blocks (i.e., flags has EXT4_GET_BLOCKS_CREATE set) or
- * down_read(&EXT4_I(inode)->i_data_sem) if not allocating file system
- * blocks.
- */
--static int ext4_ind_get_blocks(handle_t *handle, struct inode *inode,
-- ext4_lblk_t iblock, unsigned int maxblocks,
-- struct buffer_head *bh_result,
-+static int ext4_ind_map_blocks(handle_t *handle, struct inode *inode,
-+ struct ext4_map_blocks *map,
- int flags)
- {
- int err = -EIO;
-@@ -1015,7 +1014,7 @@ static int ext4_ind_get_blocks(handle_t
-
- J_ASSERT(!(ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)));
- J_ASSERT(handle != NULL || (flags & EXT4_GET_BLOCKS_CREATE) == 0);
-- depth = ext4_block_to_path(inode, iblock, offsets,
-+ depth = ext4_block_to_path(inode, map->m_lblk, offsets,
- &blocks_to_boundary);
-
- if (depth == 0)
-@@ -1026,10 +1025,9 @@ static int ext4_ind_get_blocks(handle_t
- /* Simplest case - block found, no allocation needed */
- if (!partial) {
- first_block = le32_to_cpu(chain[depth - 1].key);
-- clear_buffer_new(bh_result);
- count++;
- /*map more blocks*/
-- while (count < maxblocks && count <= blocks_to_boundary) {
-+ while (count < map->m_len && count <= blocks_to_boundary) {
- ext4_fsblk_t blk;
-
- blk = le32_to_cpu(*(chain[depth-1].p + count));
-@@ -1049,7 +1047,7 @@ static int ext4_ind_get_blocks(handle_t
- /*
- * Okay, we need to do block allocation.
- */
-- goal = ext4_find_goal(inode, iblock, partial);
-+ goal = ext4_find_goal(inode, map->m_lblk, partial);
-
- /* the number of blocks need to allocate for [d,t]indirect blocks */
- indirect_blks = (chain + depth) - partial - 1;
-@@ -1059,11 +1057,11 @@ static int ext4_ind_get_blocks(handle_t
- * direct blocks to allocate for this branch.
- */
- count = ext4_blks_to_allocate(partial, indirect_blks,
-- maxblocks, blocks_to_boundary);
-+ map->m_len, blocks_to_boundary);
- /*
- * Block out ext4_truncate while we alter the tree
- */
-- err = ext4_alloc_branch(handle, inode, iblock, indirect_blks,
-+ err = ext4_alloc_branch(handle, inode, map->m_lblk, indirect_blks,
- &count, goal,
- offsets + (partial - chain), partial);
-
-@@ -1075,18 +1073,20 @@ static int ext4_ind_get_blocks(handle_t
- * may need to return -EAGAIN upwards in the worst case. --sct
- */
- if (!err)
-- err = ext4_splice_branch(handle, inode, iblock,
-+ err = ext4_splice_branch(handle, inode, map->m_lblk,
- partial, indirect_blks, count);
- if (err)
- goto cleanup;
-
-- set_buffer_new(bh_result);
-+ map->m_flags |= EXT4_MAP_NEW;
-
- ext4_update_inode_fsync_trans(handle, inode, 1);
- got_it:
-- map_bh(bh_result, inode->i_sb, le32_to_cpu(chain[depth-1].key));
-+ map->m_flags |= EXT4_MAP_MAPPED;
-+ map->m_pblk = le32_to_cpu(chain[depth-1].key);
-+ map->m_len = count;
- if (count > blocks_to_boundary)
-- set_buffer_boundary(bh_result);
-+ map->m_flags |= EXT4_MAP_BOUNDARY;
- err = count;
- /* Clean up and exit */
- partial = chain + depth - 1; /* the whole chain */
-@@ -1096,7 +1096,6 @@ cleanup:
- brelse(partial->bh);
- partial--;
- }
-- BUFFER_TRACE(bh_result, "returned");
- out:
- return err;
- }
-@@ -1291,15 +1290,15 @@ static pgoff_t ext4_num_dirty_pages(stru
- }
-
- /*
-- * The ext4_get_blocks() function tries to look up the requested blocks,
-+ * The ext4_map_blocks() function tries to look up the requested blocks,
- * and returns if the blocks are already mapped.
- *
- * Otherwise it takes the write lock of the i_data_sem and allocate blocks
- * and store the allocated blocks in the result buffer head and mark it
- * mapped.
- *
-- * If file type is extents based, it will call ext4_ext_get_blocks(),
-- * Otherwise, call with ext4_ind_get_blocks() to handle indirect mapping
-+ * If file type is extents based, it will call ext4_ext_map_blocks(),
-+ * Otherwise, call with ext4_ind_map_blocks() to handle indirect mapping
- * based files
- *
- * On success, it returns the number of blocks being mapped or allocate.
-@@ -1312,35 +1311,31 @@ static pgoff_t ext4_num_dirty_pages(stru
- *
- * It returns the error in case of allocation failure.
- */
--int ext4_get_blocks(handle_t *handle, struct inode *inode, sector_t block,
-- unsigned int max_blocks, struct buffer_head *bh,
-- int flags)
-+int ext4_map_blocks(handle_t *handle, struct inode *inode,
-+ struct ext4_map_blocks *map, int flags)
- {
- int retval;
-
-- clear_buffer_mapped(bh);
-- clear_buffer_unwritten(bh);
-+ map->m_flags = 0;
-+ ext_debug("ext4_map_blocks(): inode %lu, flag %d, max_blocks %u,"
-+ "logical block %lu\n", inode->i_ino, flags, map->m_len,
-+ (unsigned long) map->m_lblk);
-
-- ext_debug("ext4_get_blocks(): inode %lu, flag %d, max_blocks %u,"
-- "logical block %lu\n", inode->i_ino, flags, max_blocks,
-- (unsigned long)block);
- /*
- * Try to see if we can get the block without requesting a new
- * file system block.
- */
- down_read((&EXT4_I(inode)->i_data_sem));
- if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)) {
-- retval = ext4_ext_get_blocks(handle, inode, block, max_blocks,
-- bh, 0);
-+ retval = ext4_ext_map_blocks(handle, inode, map, 0);
- } else {
-- retval = ext4_ind_get_blocks(handle, inode, block, max_blocks,
-- bh, 0);
-+ retval = ext4_ind_map_blocks(handle, inode, map, 0);
- }
- up_read((&EXT4_I(inode)->i_data_sem));
-
-- if (retval > 0 && buffer_mapped(bh)) {
-+ if (retval > 0 && map->m_flags & EXT4_MAP_MAPPED) {
- int ret = check_block_validity(inode, "file system corruption",
-- block, bh->b_blocknr, retval);
-+ map->m_lblk, map->m_pblk, retval);
- if (ret != 0)
- return ret;
- }
-@@ -1356,7 +1351,7 @@ int ext4_get_blocks(handle_t *handle, st
- * ext4_ext_get_block() returns th create = 0
- * with buffer head unmapped.
- */
-- if (retval > 0 && buffer_mapped(bh))
-+ if (retval > 0 && map->m_flags & EXT4_MAP_MAPPED)
- return retval;
-
- /*
-@@ -1369,7 +1364,7 @@ int ext4_get_blocks(handle_t *handle, st
- * of BH_Unwritten and BH_Mapped flags being simultaneously
- * set on the buffer_head.
- */
-- clear_buffer_unwritten(bh);
-+ map->m_flags &= ~EXT4_MAP_UNWRITTEN;
-
- /*
- * New blocks allocate and/or writing to uninitialized extent
-@@ -1392,13 +1387,11 @@ int ext4_get_blocks(handle_t *handle, st
- * could have changed the inode type in between
- */
- if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)) {
-- retval = ext4_ext_get_blocks(handle, inode, block, max_blocks,
-- bh, flags);
-+ retval = ext4_ext_map_blocks(handle, inode, map, flags);
- } else {
-- retval = ext4_ind_get_blocks(handle, inode, block,
-- max_blocks, bh, flags);
-+ retval = ext4_ind_map_blocks(handle, inode, map, flags);
-
-- if (retval > 0 && buffer_new(bh)) {
-+ if (retval > 0 && map->m_flags & EXT4_MAP_NEW) {
- /*
- * We allocated new blocks which will result in
- * i_data's format changing. Force the migrate
-@@ -1421,15 +1414,38 @@ int ext4_get_blocks(handle_t *handle, st
- EXT4_I(inode)->i_delalloc_reserved_flag = 0;
-
- up_write((&EXT4_I(inode)->i_data_sem));
-- if (retval > 0 && buffer_mapped(bh)) {
-+ if (retval > 0 && map->m_flags & EXT4_MAP_MAPPED) {
- int ret = check_block_validity(inode, "file system "
- "corruption after allocation",
-- block, bh->b_blocknr, retval);
-+ map->m_lblk, map->m_pblk,
-+ retval);
- if (ret != 0)
- return ret;
- }
- return retval;
- }
-+EXPORT_SYMBOL(ext4_map_blocks);
-+
-+int ext4_get_blocks(handle_t *handle, struct inode *inode, sector_t block,
-+ unsigned int max_blocks, struct buffer_head *bh,
-+ int flags)
-+{
-+ struct ext4_map_blocks map;
-+ int ret;
-+
-+ map.m_lblk = block;
-+ map.m_len = max_blocks;
-+
-+ ret = ext4_map_blocks(handle, inode, &map, flags);
-+ if (ret < 0)
-+ return ret;
-+
-+ bh->b_blocknr = map.m_pblk;
-+ bh->b_size = inode->i_sb->s_blocksize * map.m_len;
-+ bh->b_bdev = inode->i_sb->s_bdev;
-+ bh->b_state = (bh->b_state & ~EXT4_MAP_FLAGS) | map.m_flags;
-+ return ret;
-+}
-
- /* Maximum number of blocks we map for direct IO at once. */
- #define DIO_MAX_BLOCKS 4096
+++ /dev/null
-diff --git a/fs/ext4/extents.c b/fs/ext4/extents.c
-index 1ed737f..77e2fb3 100644
---- a/fs/ext4/extents.c
-+++ b/fs/ext4/extents.c
-@@ -2276,9 +2276,10 @@ static int ext4_remove_blocks(handle_t *handle,
- unsigned short ee_len = ext4_ext_get_actual_len(ex);
- int i, metadata = 0, flags =0;
-
-- if (S_ISDIR(inode->i_mode) || S_ISLNK(inode->i_mode))
-+ if (S_ISDIR(inode->i_mode) || S_ISLNK(inode->i_mode)) {
- metadata = 1;
- flags = EXT4_FREE_BLOCKS_METADATA;
-+ }
- #ifdef EXTENTS_STATS
- {
- struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
+++ /dev/null
-From: Aditya Kali <adityakali@google.com>
-
-This patch is an attempt towards supporting quotas as a first-class
-feature in ext4. It is based on the proposal at:
-https://ext4.wiki.kernel.org/index.php/Design_For_1st_Class_Quota_in_Ext4
-This patch introduces a new feature, EXT4_FEATURE_RO_COMPAT_QUOTA, which, when
-turned on, enables quota accounting at mount time itself. Also, the
-quota inodes are stored in two additional superblock fields.
-Notable changes introduced by this patch:
-1) Two new ext4 superblock fields, s_usr_quota_inum and s_grp_quota_inum,
- for storing the quota inodes in use.
-2) If the QUOTA feature and the corresponding quota inodes are set in the
- superblock, quotas are turned on at mount time irrespective of the quota
- mount options. Thus the mount options 'quota', 'usrquota' and 'grpquota'
- are completely ignored when the new QUOTA feature flag is set.
-3) The default quota inodes are inode #3 for tracking user quota and inode #4
- for tracking group quota. The superblock fields can be set to use other
- inodes as well.
-4) mke2fs or tune2fs will initialize these inodes when the quota feature is
- being set. The default reserved inodes will not be visible to the user as
- regular files.
-5) Once quotas are turned on, they cannot be turned off while the FS is
- mounted, because we do not want to let the quota information become
- inconsistent.
-6) With the QUOTA feature set, since the quota inodes are hidden, some of the
- utilities from quota-tools will no longer work correctly. Instead, e2fsprogs
- will include support for fixing the quota files.
-7) Support is only for the new V2 quota file format.
-
-Signed-off-by: Aditya Kali <adityakali@google.com>
----
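
As a rough, editorial illustration of the mount-time behaviour described above,
the sketch below mirrors the precondition that ext4_acct_on() checks in this
patch: usage tracking is enabled automatically only when the RO_COMPAT QUOTA
feature bit is present and both quota inode numbers are recorded in the
superblock. The feature, field and inode-number names are taken from the patch;
the userspace scaffolding (struct sb_quota_info, quota_accounting_at_mount())
is invented purely for this example.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define EXT4_FEATURE_RO_COMPAT_QUOTA 0x0100
#define EXT4_USR_QUOTA_INO 3	/* default user quota inode */
#define EXT4_GRP_QUOTA_INO 4	/* default group quota inode */

struct sb_quota_info {
	uint32_t s_feature_ro_compat;	/* RO_COMPAT feature bitmap */
	uint32_t s_usr_quota_inum;	/* inode tracking user quota */
	uint32_t s_grp_quota_inum;	/* inode tracking group quota */
};

/* Mirrors the check at the top of ext4_acct_on() in this patch. */
static bool quota_accounting_at_mount(const struct sb_quota_info *sb)
{
	return (sb->s_feature_ro_compat & EXT4_FEATURE_RO_COMPAT_QUOTA) &&
	       sb->s_usr_quota_inum && sb->s_grp_quota_inum;
}

int main(void)
{
	struct sb_quota_info sb = {
		.s_feature_ro_compat = EXT4_FEATURE_RO_COMPAT_QUOTA,
		.s_usr_quota_inum = EXT4_USR_QUOTA_INO,
		.s_grp_quota_inum = EXT4_GRP_QUOTA_INO,
	};

	printf("usage tracking at mount: %s\n",
	       quota_accounting_at_mount(&sb) ? "yes" : "no");
	return 0;
}
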
-Index: linux-2.6.32-431.17.1.el6.x86_64/fs/ext4/ext4.h
-===================================================================
---- linux-2.6.32-431.17.1.el6.x86_64.orig/fs/ext4/ext4.h
-+++ linux-2.6.32-431.17.1.el6.x86_64/fs/ext4/ext4.h
-@@ -187,6 +187,8 @@ typedef struct ext4_io_end {
- */
- #define EXT4_BAD_INO 1 /* Bad blocks inode */
- #define EXT4_ROOT_INO 2 /* Root inode */
-+#define EXT4_USR_QUOTA_INO 3 /* User quota inode */
-+#define EXT4_GRP_QUOTA_INO 4 /* Group quota inode */
- #define EXT4_BOOT_LOADER_INO 5 /* Boot loader inode */
- #define EXT4_UNDEL_DIR_INO 6 /* Undelete directory inode */
- #define EXT4_RESIZE_INO 7 /* Reserved group descriptors inode */
-@@ -1046,7 +1048,9 @@ struct ext4_super_block {
- __u8 s_last_error_func[32]; /* function where the error happened */
- #define EXT4_S_ERR_END offsetof(struct ext4_super_block, s_mount_opts)
- __u8 s_mount_opts[64];
-- __le32 s_reserved[112]; /* Padding to the end of the block */
-+ __le32 s_usr_quota_inum; /* inode for tracking user quota */
-+ __le32 s_grp_quota_inum; /* inode for tracking group quota */
-+ __le32 s_reserved[110]; /* Padding to the end of the block */
- };
-
- #ifdef __KERNEL__
-@@ -1121,6 +1125,7 @@ struct ext4_sb_info {
- #ifdef CONFIG_QUOTA
- char *s_qf_names[MAXQUOTAS]; /* Names of quota files with journalled quota */
- int s_jquota_fmt; /* Format of quota to use */
-+ unsigned long s_qf_inums[MAXQUOTAS]; /* Quota file inodes */
- #endif
- unsigned int s_want_extra_isize; /* New inodes should reserve # bytes */
- struct rb_root system_blks;
-@@ -1221,6 +1226,8 @@ static inline struct timespec ext4_curre
- static inline int ext4_valid_inum(struct super_block *sb, unsigned long ino)
- {
- return ino == EXT4_ROOT_INO ||
-+ ino == EXT4_USR_QUOTA_INO ||
-+ ino == EXT4_GRP_QUOTA_INO ||
- ino == EXT4_JOURNAL_INO ||
- ino == EXT4_RESIZE_INO ||
- (ino >= EXT4_FIRST_INO(sb) &&
-@@ -1325,6 +1332,7 @@ EXT4_INODE_BIT_FNS(state, state_flags)
- #define EXT4_FEATURE_RO_COMPAT_GDT_CSUM 0x0010
- #define EXT4_FEATURE_RO_COMPAT_DIR_NLINK 0x0020
- #define EXT4_FEATURE_RO_COMPAT_EXTRA_ISIZE 0x0040
-+#define EXT4_FEATURE_RO_COMPAT_QUOTA 0x0100
-
- #define EXT4_FEATURE_INCOMPAT_COMPRESSION 0x0001
- #define EXT4_FEATURE_INCOMPAT_FILETYPE 0x0002
-@@ -1357,7 +1365,8 @@ EXT4_INODE_BIT_FNS(state, state_flags)
- EXT4_FEATURE_RO_COMPAT_DIR_NLINK | \
- EXT4_FEATURE_RO_COMPAT_EXTRA_ISIZE | \
- EXT4_FEATURE_RO_COMPAT_BTREE_DIR |\
-- EXT4_FEATURE_RO_COMPAT_HUGE_FILE)
-+ EXT4_FEATURE_RO_COMPAT_HUGE_FILE| \
-+ EXT4_FEATURE_RO_COMPAT_QUOTA)
-
- /*
- * Default values for user and/or group using reserved blocks
-Index: linux-2.6.32-431.17.1.el6.x86_64/fs/ext4/ext4_jbd2.h
-===================================================================
---- linux-2.6.32-431.17.1.el6.x86_64.orig/fs/ext4/ext4_jbd2.h
-+++ linux-2.6.32-431.17.1.el6.x86_64/fs/ext4/ext4_jbd2.h
-@@ -89,14 +89,20 @@
- #ifdef CONFIG_QUOTA
- /* Amount of blocks needed for quota update - we know that the structure was
- * allocated so we need to update only data block */
--#define EXT4_QUOTA_TRANS_BLOCKS(sb) (test_opt(sb, QUOTA) ? 1 : 0)
-+#define EXT4_QUOTA_TRANS_BLOCKS(sb) ((test_opt(sb, QUOTA) ||\
-+ EXT4_HAS_RO_COMPAT_FEATURE(sb, EXT4_FEATURE_RO_COMPAT_QUOTA)) ?\
-+ 1 : 0)
- /* Amount of blocks needed for quota insert/delete - we do some block writes
- * but inode, sb and group updates are done only once */
--#define EXT4_QUOTA_INIT_BLOCKS(sb) (test_opt(sb, QUOTA) ? (DQUOT_INIT_ALLOC*\
-- (EXT4_SINGLEDATA_TRANS_BLOCKS(sb)-3)+3+DQUOT_INIT_REWRITE) : 0)
--
--#define EXT4_QUOTA_DEL_BLOCKS(sb) (test_opt(sb, QUOTA) ? (DQUOT_DEL_ALLOC*\
-- (EXT4_SINGLEDATA_TRANS_BLOCKS(sb)-3)+3+DQUOT_DEL_REWRITE) : 0)
-+#define EXT4_QUOTA_INIT_BLOCKS(sb) ((test_opt(sb, QUOTA) ||\
-+ EXT4_HAS_RO_COMPAT_FEATURE(sb, EXT4_FEATURE_RO_COMPAT_QUOTA)) ?\
-+ (DQUOT_INIT_ALLOC*(EXT4_SINGLEDATA_TRANS_BLOCKS(sb)-3)\
-+ +3+DQUOT_INIT_REWRITE) : 0)
-+
-+#define EXT4_QUOTA_DEL_BLOCKS(sb) ((test_opt(sb, QUOTA) ||\
-+ EXT4_HAS_RO_COMPAT_FEATURE(sb, EXT4_FEATURE_RO_COMPAT_QUOTA)) ?\
-+ (DQUOT_DEL_ALLOC*(EXT4_SINGLEDATA_TRANS_BLOCKS(sb)-3)\
-+ +3+DQUOT_DEL_REWRITE) : 0)
- #else
- #define EXT4_QUOTA_TRANS_BLOCKS(sb) 0
- #define EXT4_QUOTA_INIT_BLOCKS(sb) 0
-Index: linux-2.6.32-431.17.1.el6.x86_64/fs/ext4/super.c
-===================================================================
---- linux-2.6.32-431.17.1.el6.x86_64.orig/fs/ext4/super.c
-+++ linux-2.6.32-431.17.1.el6.x86_64/fs/ext4/super.c
-@@ -116,6 +116,11 @@ void ext4_kvfree(void *ptr)
-
- static int bigendian_extents;
-
-+#ifdef CONFIG_QUOTA
-+static int ext4_acct_on(struct super_block *sb);
-+static int ext4_acct_off(struct super_block *sb);
-+#endif
-+
- ext4_fsblk_t ext4_block_bitmap(struct super_block *sb,
- struct ext4_group_desc *bg)
- {
-@@ -704,6 +709,12 @@ static void ext4_put_super(struct super_
-
- ext4_unregister_li_request(sb);
-
-+#ifdef CONFIG_QUOTA
-+ /* disable usage tracking which was enabled at mount time */
-+ if (EXT4_HAS_RO_COMPAT_FEATURE(sb, EXT4_FEATURE_RO_COMPAT_QUOTA))
-+ ext4_acct_off(sb);
-+#endif
-+
- flush_workqueue(sbi->dio_unwritten_wq);
- destroy_workqueue(sbi->dio_unwritten_wq);
-
-@@ -2173,14 +2184,22 @@ static void ext4_orphan_cleanup(struct s
- #ifdef CONFIG_QUOTA
- /* Needed for iput() to work correctly and not trash data */
- sb->s_flags |= MS_ACTIVE;
-- /* Turn on quotas so that they are updated correctly */
-- for (i = 0; i < MAXQUOTAS; i++) {
-- if (EXT4_SB(sb)->s_qf_names[i]) {
-- int ret = ext4_quota_on_mount(sb, i);
-- if (ret < 0)
-- ext4_msg(sb, KERN_ERR,
-- "Cannot turn on journaled "
-- "quota: error %d", ret);
-+ if (EXT4_HAS_RO_COMPAT_FEATURE(sb, EXT4_FEATURE_RO_COMPAT_QUOTA)) {
-+ int ret;
-+ ret = ext4_acct_on(sb);
-+ if (ret)
-+ ext4_msg(sb, KERN_ERR, "Failed to turn on usage "
-+ "tracking for quota: error %d", ret);
-+ } else {
-+ /* Turn on quotas so that they are updated correctly */
-+ for (i = 0; i < MAXQUOTAS; i++) {
-+ if (EXT4_SB(sb)->s_qf_names[i]) {
-+ int ret = ext4_quota_on_mount(sb, i);
-+ if (ret < 0)
-+ ext4_msg(sb, KERN_ERR,
-+ "Cannot turn on journaled "
-+ "quota: error %d", ret);
-+ }
- }
- }
- #endif
-@@ -2224,10 +2243,14 @@ static void ext4_orphan_cleanup(struct s
- ext4_msg(sb, KERN_INFO, "%d truncate%s cleaned up",
- PLURAL(nr_truncates));
- #ifdef CONFIG_QUOTA
-- /* Turn quotas off */
-- for (i = 0; i < MAXQUOTAS; i++) {
-- if (sb_dqopt(sb)->files[i])
-- vfs_quota_off(sb, i, 0);
-+ if (EXT4_HAS_RO_COMPAT_FEATURE(sb, EXT4_FEATURE_RO_COMPAT_QUOTA)) {
-+ ext4_acct_off(sb);
-+ } else {
-+ /* Turn quotas off */
-+ for (i = 0; i < MAXQUOTAS; i++) {
-+ if (sb_dqopt(sb)->files[i])
-+ vfs_quota_off(sb, i, 0);
-+ }
- }
- #endif
- sb->s_flags = s_flags; /* Restore MS_RDONLY status */
-@@ -3486,6 +3509,15 @@ static int ext4_fill_super(struct super_
- #ifdef CONFIG_QUOTA
- sb->s_qcop = &ext4_qctl_operations;
- sb->dq_op = &ext4_quota_operations;
-+
-+ if (EXT4_HAS_RO_COMPAT_FEATURE(sb, EXT4_FEATURE_RO_COMPAT_QUOTA)) {
-+ /* Use new qctl operations with quota on function that does not
-+ * require user specified quota file path. */
-+ sb->s_qcop = &ext4_qctl_operations;
-+
-+ sbi->s_qf_inums[USRQUOTA] = es->s_usr_quota_inum;
-+ sbi->s_qf_inums[GRPQUOTA] = es->s_grp_quota_inum;
-+ }
- #endif
- INIT_LIST_HEAD(&sbi->s_orphan); /* unlinked but open files */
- mutex_init(&sbi->s_orphan_lock);
-@@ -3729,8 +3761,31 @@ no_journal:
- } else
- descr = "out journal";
-
-- ext4_msg(sb, KERN_INFO, "mounted filesystem with%s. "
-- "Opts: %s%s", descr, sbi->s_es->s_mount_opts,
-+#ifdef CONFIG_QUOTA
-+ /* Enable space tracking during mount, enforcement can be enabled/disable
-+ * later with quota_on/off */
-+ if (EXT4_HAS_RO_COMPAT_FEATURE(sb, EXT4_FEATURE_RO_COMPAT_QUOTA) &&
-+ !(sb->s_flags & MS_RDONLY)) {
-+ ret = ext4_acct_on(sb);
-+ if (ret) {
-+ ext4_msg(sb, KERN_ERR, "Can't enable usage tracking on "
-+ "a filesystem with the QUOTA feature set");
-+ goto failed_mount8;
-+ }
-+ }
-+#else
-+ if (EXT4_HAS_RO_COMPAT_FEATURE(sb, EXT4_FEATURE_RO_COMPAT_QUOTA) &&
-+ !(sb->s_flags & MS_RDONLY))
-+ ext4_msg(sb, KERN_WARNING, "Mounting a filesystem with the "
-+ "QUOTA feature set whereas the kernel does not "
-+ "support quota, e2fsck will be required to fix usage "
-+ "information");
-+
-+#endif /* CONFIG_QUOTA */
-+
-+ ext4_msg(sb, KERN_INFO, "mounted filesystem with%s. quota=%s. "
-+ "Opts: %s%s", descr, sb_any_quota_loaded(sb) ? "on" : "off",
-+ sbi->s_es->s_mount_opts,
- *sbi->s_es->s_mount_opts ? "; " : "");
-
- lock_kernel();
-@@ -3741,6 +3796,10 @@ cantfind_ext4:
- ext4_msg(sb, KERN_ERR, "VFS: Can't find ext4 filesystem");
- goto failed_mount;
-
-+#ifdef CONFIG_QUOTA
-+failed_mount8:
-+ kobject_del(&sbi->s_kobj);
-+#endif
- failed_mount7:
- ext4_unregister_li_request(sb);
- failed_mount6:
-@@ -4088,6 +4147,12 @@ static int ext4_commit_super(struct supe
- es->s_free_inodes_count =
- cpu_to_le32(percpu_counter_sum_positive(
- &EXT4_SB(sb)->s_freeinodes_counter));
-+#ifdef CONFIG_QUOTA
-+ if (EXT4_HAS_RO_COMPAT_FEATURE(sb, EXT4_FEATURE_RO_COMPAT_QUOTA)) {
-+ es->s_usr_quota_inum = EXT4_SB(sb)->s_qf_inums[USRQUOTA];
-+ es->s_grp_quota_inum = EXT4_SB(sb)->s_qf_inums[GRPQUOTA];
-+ }
-+#endif
- sb->s_dirt = 0;
- BUFFER_TRACE(sbh, "marking dirty");
- mark_buffer_dirty(sbh);
-@@ -4650,6 +4715,22 @@ static int ext4_quota_on(struct super_bl
- int err;
- struct path path;
-
-+ /* When QUOTA feature is set, quota on enables enforcement, accounting
-+ * being already enabled at mount time */
-+ if (EXT4_HAS_RO_COMPAT_FEATURE(sb, EXT4_FEATURE_RO_COMPAT_QUOTA)) {
-+ struct inode *qf_inode;
-+
-+ if (!EXT4_SB(sb)->s_qf_inums[type])
-+ return -EINVAL;
-+ qf_inode = ext4_iget(sb, EXT4_SB(sb)->s_qf_inums[type]);
-+ if (IS_ERR(qf_inode))
-+ return PTR_ERR(qf_inode);
-+ err = vfs_quota_enable(qf_inode, type, QFMT_VFS_V1,
-+ DQUOT_LIMITS_ENABLED);
-+ iput(qf_inode);
-+ return err;
-+ }
-+
- if (!test_opt(sb, QUOTA))
- return -EINVAL;
- /* When remounting, no checks are needed and in fact, name is NULL */
-@@ -4749,9 +4830,114 @@ static int ext4_quota_off(struct super_b
- iput(inode);
- }
-
-+ /* When QUOTA feature is set, quota off just disables enforcement but
-+ * leaves accounting on */
-+ if (EXT4_HAS_RO_COMPAT_FEATURE(sb, EXT4_FEATURE_RO_COMPAT_QUOTA))
-+ return vfs_quota_disable(sb, type, DQUOT_LIMITS_ENABLED);
-+
- return vfs_quota_off(sb, type, remount);
- }
-
-+/*
-+ * New quota_on function that is used to turn accounting on when QUOTA
-+ * feature is set.
-+ */
-+static int ext4_acct_on(struct super_block *sb)
-+{
-+ struct inode *qf_inode[MAXQUOTAS];
-+ int rc;
-+
-+ if (!EXT4_HAS_RO_COMPAT_FEATURE(sb, EXT4_FEATURE_RO_COMPAT_QUOTA) ||
-+ !EXT4_SB(sb)->s_qf_inums[USRQUOTA] ||
-+ !EXT4_SB(sb)->s_qf_inums[GRPQUOTA])
-+ return -EINVAL;
-+
-+ qf_inode[USRQUOTA] = ext4_iget(sb, EXT4_SB(sb)->s_qf_inums[USRQUOTA]);
-+ if (IS_ERR(qf_inode[USRQUOTA])) {
-+ EXT4_SB(sb)->s_qf_inums[USRQUOTA] = 0;
-+ return PTR_ERR(qf_inode[USRQUOTA]);
-+ }
-+ qf_inode[GRPQUOTA] = ext4_iget(sb, EXT4_SB(sb)->s_qf_inums[GRPQUOTA]);
-+ if (IS_ERR(qf_inode[GRPQUOTA])) {
-+ iput(qf_inode[USRQUOTA]);
-+ EXT4_SB(sb)->s_qf_inums[GRPQUOTA] = 0;
-+ return PTR_ERR(qf_inode[GRPQUOTA]);
-+ }
-+
-+ /*
-+ * When we journal data on quota file, we have to flush journal to see
-+ * all updates to the file when we bypass pagecache...
-+ */
-+ if (EXT4_SB(sb)->s_journal) {
-+ /*
-+ * We don't need to lock updates but journal_flush() could
-+ * otherwise be livelocked...
-+ */
-+ jbd2_journal_lock_updates(EXT4_SB(sb)->s_journal);
-+ rc = jbd2_journal_flush(EXT4_SB(sb)->s_journal);
-+ jbd2_journal_unlock_updates(EXT4_SB(sb)->s_journal);
-+ if (rc) {
-+ iput(qf_inode[USRQUOTA]);
-+ iput(qf_inode[GRPQUOTA]);
-+ return rc;
-+ }
-+ }
-+
-+ /* only enable quota accounting by default */
-+ rc = vfs_quota_enable(qf_inode[USRQUOTA], USRQUOTA, QFMT_VFS_V1,
-+ DQUOT_USAGE_ENABLED);
-+ iput(qf_inode[USRQUOTA]);
-+ if (rc) {
-+ iput(qf_inode[GRPQUOTA]);
-+ return rc;
-+ }
-+ rc = vfs_quota_enable(qf_inode[GRPQUOTA], GRPQUOTA, QFMT_VFS_V1,
-+ DQUOT_USAGE_ENABLED);
-+ iput(qf_inode[GRPQUOTA]);
-+ return rc;
-+}
-+
-+/*
-+ * New quota_on function that is used to turn off accounting when QUOTA feature
-+ * is set.
-+ */
-+static int ext4_acct_off(struct super_block *sb)
-+{
-+ int type, rc = 0;
-+
-+ if (!EXT4_HAS_RO_COMPAT_FEATURE(sb, EXT4_FEATURE_RO_COMPAT_QUOTA))
-+ return -EINVAL;
-+
-+ for (type = 0; type < MAXQUOTAS; type++) {
-+ struct inode *inode = sb_dqopt(sb)->files[type];
-+ handle_t *handle;
-+
-+ if (!inode)
-+ continue;
-+ /* Update modification times of quota files when userspace can
-+ * start looking at them */
-+ handle = ext4_journal_start(inode, 1);
-+ if (IS_ERR(handle))
-+ goto out;
-+
-+ inode->i_mtime = inode->i_ctime = CURRENT_TIME;
-+ ext4_mark_inode_dirty(handle, inode);
-+ ext4_journal_stop(handle);
-+ }
-+
-+out:
-+ for (type = 0; type < MAXQUOTAS; type++) {
-+ int ret;
-+ ret = vfs_quota_disable(sb, type,
-+ DQUOT_USAGE_ENABLED | DQUOT_LIMITS_ENABLED);
-+ if (!rc && ret)
-+ rc = ret;
-+ }
-+ return rc;
-+}
-+
-+
-+
- /* Read data from quotafile - avoid pagecache and such because we cannot afford
- * acquiring the locks... As quota files are never truncated and quota code
- * itself serializes the operations (and noone else should touch the files)
+++ /dev/null
-From: Theodore Ts'o <tytso@mit.edu>
-
-From e35fd6609b2fee54484d520deccb8f18bf7d38f3 Mon Sep 17 00:00:00 2001
-
-Subject: [PATCH] ext4: Add new abstraction ext4_map_blocks() underneath
- ext4_get_blocks()
-
-Jack up ext4_get_blocks() and add a new function, ext4_map_blocks(),
-which uses a much smaller structure, struct ext4_map_blocks, which is
-20 bytes, as opposed to a struct buffer_head, which is nearly 5 times
-bigger on an x86_64 machine. By switching callers to
-ext4_map_blocks(), we can save stack space, since we can avoid
-allocating a struct buffer_head on the stack.
-
-Signed-off-by: "Theodore Ts'o" <tytso@mit.edu>
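
To make the new calling convention concrete, here is a small, self-contained
editorial sketch of the caller-side pattern this patch introduces (it is the
same pattern the compatibility wrapper ext4_get_blocks() uses below): fill in
m_lblk and m_len, call the mapping function, then read m_pblk, m_len and the
EXT4_MAP_* flags back out. Only the structure, field and flag names are taken
from the patch; dummy_map_blocks() and its pretend 1:1 mapping are invented for
illustration, and the flag values here are placeholders rather than the real
BH_*-derived bits.

#include <stdint.h>
#include <stdio.h>

#define EXT4_MAP_NEW	(1 << 0)	/* placeholder value */
#define EXT4_MAP_MAPPED	(1 << 1)	/* placeholder value */

typedef uint64_t ext4_fsblk_t;
typedef uint32_t ext4_lblk_t;

struct ext4_map_blocks {
	ext4_fsblk_t m_pblk;	/* first physical block of the mapping */
	ext4_lblk_t m_lblk;	/* first logical block requested */
	unsigned int m_len;	/* in: blocks wanted, out: blocks mapped */
	unsigned int m_flags;	/* EXT4_MAP_* result flags */
};

/* Stand-in for ext4_map_blocks(): pretend every block is already mapped
 * 1:1 at physical block m_lblk + 1000 and report how many were mapped. */
static int dummy_map_blocks(struct ext4_map_blocks *map)
{
	map->m_pblk = map->m_lblk + 1000;
	map->m_flags = EXT4_MAP_MAPPED;
	return map->m_len;
}

int main(void)
{
	/* A small struct on the stack instead of a full struct buffer_head. */
	struct ext4_map_blocks map = { .m_lblk = 42, .m_len = 8 };
	int ret = dummy_map_blocks(&map);

	if (ret > 0 && (map.m_flags & EXT4_MAP_MAPPED))
		printf("mapped %d blocks: logical %u -> physical %llu\n",
		       ret, map.m_lblk, (unsigned long long)map.m_pblk);
	return 0;
}
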
-Index: linux-stage/fs/ext4/ext4.h
-===================================================================
---- linux-stage.orig/fs/ext4/ext4.h 2016-07-15 12:13:05.000000000 +0300
-+++ linux-stage/fs/ext4/ext4.h 2016-07-15 12:13:05.000000000 +0300
-@@ -142,10 +142,8 @@ struct ext4_allocation_request {
- #define EXT4_MAP_MAPPED (1 << BH_Mapped)
- #define EXT4_MAP_UNWRITTEN (1 << BH_Unwritten)
- #define EXT4_MAP_BOUNDARY (1 << BH_Boundary)
--#define EXT4_MAP_UNINIT (1 << BH_Uninit)
- #define EXT4_MAP_FLAGS (EXT4_MAP_NEW | EXT4_MAP_MAPPED |\
-- EXT4_MAP_UNWRITTEN | EXT4_MAP_BOUNDARY |\
-- EXT4_MAP_UNINIT)
-+ EXT4_MAP_UNWRITTEN | EXT4_MAP_BOUNDARY)
-
- struct ext4_map_blocks {
- ext4_fsblk_t m_pblk;
-@@ -2194,9 +2192,9 @@ extern int ext4_ext_tree_init(handle_t *
- extern int ext4_ext_writepage_trans_blocks(struct inode *, int);
- extern int ext4_ext_index_trans_blocks(struct inode *inode, int nrblocks,
- int chunk);
--extern int ext4_ext_get_blocks(handle_t *handle, struct inode *inode,
-- ext4_lblk_t iblock, unsigned int max_blocks,
-- struct buffer_head *bh_result, int flags);
-+#define HAVE_EXT4_MAP_BLOCKS
-+extern int ext4_ext_map_blocks(handle_t *handle, struct inode *inode,
-+ struct ext4_map_blocks *map, int flags);
- extern void ext4_ext_truncate(struct inode *);
- extern int ext4_ext_punch_hole(struct inode *inode, loff_t offset,
- loff_t length);
-@@ -2206,6 +2204,8 @@ extern long ext4_fallocate(struct inode
- loff_t len);
- extern int ext4_convert_unwritten_extents(struct inode *inode, loff_t offset,
- ssize_t len);
-+extern int ext4_map_blocks(handle_t *handle, struct inode *inode,
-+ struct ext4_map_blocks *map, int flags);
- extern int ext4_get_blocks(handle_t *handle, struct inode *inode,
- sector_t block, unsigned int max_blocks,
- struct buffer_head *bh, int flags);
-Index: linux-stage/fs/ext4/extents.c
-===================================================================
---- linux-stage.orig/fs/ext4/extents.c 2016-07-15 12:13:04.000000000 +0300
-+++ linux-stage/fs/ext4/extents.c 2016-07-15 12:13:05.000000000 +0300
-@@ -2960,7 +2960,7 @@ fix_extent_len:
-
- #define EXT4_EXT_ZERO_LEN 7
- /*
-- * This function is called by ext4_ext_get_blocks() if someone tries to write
-+ * This function is called by ext4_ext_map_blocks() if someone tries to write
- * to an uninitialized extent. It may result in splitting the uninitialized
- * extent into multiple extents (upto three - one initialized and two
- * uninitialized).
-@@ -2970,11 +2970,10 @@ fix_extent_len:
- * c> Splits in three extents: Somone is writing in middle of the extent
- */
- static int ext4_ext_convert_to_initialized(handle_t *handle,
-- struct inode *inode,
-- struct ext4_ext_path *path,
-- ext4_lblk_t iblock,
-- unsigned int max_blocks,
-- int flags)
-+ struct inode *inode,
-+ struct ext4_map_blocks *map,
-+ struct ext4_ext_path *path,
-+ int flags)
- {
- struct ext4_extent *ex, newex, orig_ex;
- struct ext4_extent *ex1 = NULL;
-@@ -2990,20 +2989,20 @@ static int ext4_ext_convert_to_initializ
-
- ext_debug("ext4_ext_convert_to_initialized: inode %lu, logical"
- "block %llu, max_blocks %u\n", inode->i_ino,
-- (unsigned long long)iblock, max_blocks);
-+ (unsigned long long)map->m_lblk, map->m_len);
-
- eof_block = (inode->i_size + inode->i_sb->s_blocksize - 1) >>
- inode->i_sb->s_blocksize_bits;
-- if (eof_block < iblock + max_blocks)
-- eof_block = iblock + max_blocks;
-+ if (eof_block < map->m_lblk + map->m_len)
-+ eof_block = map->m_lblk + map->m_len;
-
- depth = ext_depth(inode);
- eh = path[depth].p_hdr;
- ex = path[depth].p_ext;
- ee_block = le32_to_cpu(ex->ee_block);
- ee_len = ext4_ext_get_actual_len(ex);
-- allocated = ee_len - (iblock - ee_block);
-- newblock = iblock - ee_block + ext4_ext_pblock(ex);
-+ allocated = ee_len - (map->m_lblk - ee_block);
-+ newblock = map->m_lblk - ee_block + ext4_ext_pblock(ex);
-
- ex2 = ex;
- orig_ex.ee_block = ex->ee_block;
-@@ -3033,10 +3032,10 @@ static int ext4_ext_convert_to_initializ
- return allocated;
- }
-
-- /* ex1: ee_block to iblock - 1 : uninitialized */
-- if (iblock > ee_block) {
-+ /* ex1: ee_block to map->m_lblk - 1 : uninitialized */
-+ if (map->m_lblk > ee_block) {
- ex1 = ex;
-- ex1->ee_len = cpu_to_le16(iblock - ee_block);
-+ ex1->ee_len = cpu_to_le16(map->m_lblk - ee_block);
- ext4_ext_mark_uninitialized(ex1);
- ext4_ext_dirty(handle, inode, path + depth);
- ex2 = &newex;
-@@ -3046,15 +3045,15 @@ static int ext4_ext_convert_to_initializ
- * we insert ex3, if ex1 is NULL. This is to avoid temporary
- * overlap of blocks.
- */
-- if (!ex1 && allocated > max_blocks)
-- ex2->ee_len = cpu_to_le16(max_blocks);
-+ if (!ex1 && allocated > map->m_len)
-+ ex2->ee_len = cpu_to_le16(map->m_len);
- /* ex3: to ee_block + ee_len : uninitialised */
-- if (allocated > max_blocks) {
-+ if (allocated > map->m_len) {
- unsigned int newdepth;
- /* If extent has less than EXT4_EXT_ZERO_LEN zerout directly */
- if (allocated <= EXT4_EXT_ZERO_LEN && may_zeroout) {
- /*
-- * iblock == ee_block is handled by the zerouout
-+ * map->m_lblk == ee_block is handled by the zerouout
- * at the beginning.
- * Mark first half uninitialized.
- * Mark second half initialized and zero out the
-@@ -3067,7 +3066,7 @@ static int ext4_ext_convert_to_initializ
- ext4_ext_dirty(handle, inode, path + depth);
-
- ex3 = &newex;
-- ex3->ee_block = cpu_to_le32(iblock);
-+ ex3->ee_block = cpu_to_le32(map->m_lblk);
- ext4_ext_store_pblock(ex3, newblock);
- ex3->ee_len = cpu_to_le16(allocated);
- err = ext4_ext_insert_extent(handle, inode, path,
-@@ -3081,7 +3080,7 @@ static int ext4_ext_convert_to_initializ
- ext4_ext_store_pblock(ex,
- ext4_ext_pblock(&orig_ex));
- ext4_ext_dirty(handle, inode, path + depth);
-- /* blocks available from iblock */
-+ /* blocks available from map->m_lblk */
- return allocated;
-
- } else if (err)
-@@ -3103,8 +3102,8 @@ static int ext4_ext_convert_to_initializ
- */
- depth = ext_depth(inode);
- ext4_ext_drop_refs(path);
-- path = ext4_ext_find_extent(inode,
-- iblock, path);
-+ path = ext4_ext_find_extent(inode, map->m_lblk,
-+ path);
- if (IS_ERR(path)) {
- err = PTR_ERR(path);
- return err;
-@@ -3124,9 +3123,9 @@ static int ext4_ext_convert_to_initializ
- return allocated;
- }
- ex3 = &newex;
-- ex3->ee_block = cpu_to_le32(iblock + max_blocks);
-- ext4_ext_store_pblock(ex3, newblock + max_blocks);
-- ex3->ee_len = cpu_to_le16(allocated - max_blocks);
-+ ex3->ee_block = cpu_to_le32(map->m_lblk + map->m_len);
-+ ext4_ext_store_pblock(ex3, newblock + map->m_len);
-+ ex3->ee_len = cpu_to_le16(allocated - map->m_len);
- ext4_ext_mark_uninitialized(ex3);
- err = ext4_ext_insert_extent(handle, inode, path, ex3, flags);
- if (err == -ENOSPC && may_zeroout) {
-@@ -3139,7 +3138,7 @@ static int ext4_ext_convert_to_initializ
- ext4_ext_store_pblock(ex, ext4_ext_pblock(&orig_ex));
- ext4_ext_dirty(handle, inode, path + depth);
- /* zeroed the full extent */
-- /* blocks available from iblock */
-+ /* blocks available from map->m_lblk */
- return allocated;
-
- } else if (err)
-@@ -3159,7 +3158,7 @@ static int ext4_ext_convert_to_initializ
-
- depth = newdepth;
- ext4_ext_drop_refs(path);
-- path = ext4_ext_find_extent(inode, iblock, path);
-+ path = ext4_ext_find_extent(inode, map->m_lblk, path);
- if (IS_ERR(path)) {
- err = PTR_ERR(path);
- goto out;
-@@ -3173,14 +3172,14 @@ static int ext4_ext_convert_to_initializ
- if (err)
- goto out;
-
-- allocated = max_blocks;
-+ allocated = map->m_len;
-
- /* If extent has less than EXT4_EXT_ZERO_LEN and we are trying
- * to insert a extent in the middle zerout directly
- * otherwise give the extent a chance to merge to left
- */
- if (le16_to_cpu(orig_ex.ee_len) <= EXT4_EXT_ZERO_LEN &&
-- iblock != ee_block && may_zeroout) {
-+ map->m_lblk != ee_block && may_zeroout) {
- err = ext4_ext_zeroout(inode, &orig_ex);
- if (err)
- goto fix_extent_len;
-@@ -3190,7 +3189,7 @@ static int ext4_ext_convert_to_initializ
- ext4_ext_store_pblock(ex, ext4_ext_pblock(&orig_ex));
- ext4_ext_dirty(handle, inode, path + depth);
- /* zero out the first half */
-- /* blocks available from iblock */
-+ /* blocks available from map->m_lblk */
- return allocated;
- }
- }
-@@ -3201,13 +3200,13 @@ static int ext4_ext_convert_to_initializ
- */
- if (ex1 && ex1 != ex) {
- ex1 = ex;
-- ex1->ee_len = cpu_to_le16(iblock - ee_block);
-+ ex1->ee_len = cpu_to_le16(map->m_lblk - ee_block);
- ext4_ext_mark_uninitialized(ex1);
- ext4_ext_dirty(handle, inode, path + depth);
- ex2 = &newex;
- }
-- /* ex2: iblock to iblock + maxblocks-1 : initialised */
-- ex2->ee_block = cpu_to_le32(iblock);
-+ /* ex2: map->m_lblk to map->m_lblk + maxblocks-1 : initialised */
-+ ex2->ee_block = cpu_to_le32(map->m_lblk);
- ext4_ext_store_pblock(ex2, newblock);
- ex2->ee_len = cpu_to_le16(allocated);
- if (ex2 != ex)
-@@ -3277,7 +3276,7 @@ fix_extent_len:
- }
-
- /*
-- * This function is called by ext4_ext_get_blocks() from
-+ * This function is called by ext4_ext_map_blocks() from
- * ext4_get_blocks_dio_write() when DIO to write
- * to an uninitialized extent.
- *
-@@ -3300,9 +3299,8 @@ fix_extent_len:
- */
- static int ext4_split_unwritten_extents(handle_t *handle,
- struct inode *inode,
-+ struct ext4_map_blocks *map,
- struct ext4_ext_path *path,
-- ext4_lblk_t iblock,
-- unsigned int max_blocks,
- int flags)
- {
- struct ext4_extent *ex, newex, orig_ex;
-@@ -3318,20 +3316,20 @@ static int ext4_split_unwritten_extents(
-
- ext_debug("ext4_split_unwritten_extents: inode %lu, logical"
- "block %llu, max_blocks %u\n", inode->i_ino,
-- (unsigned long long)iblock, max_blocks);
-+ (unsigned long long)map->m_lblk, map->m_len);
-
- eof_block = (inode->i_size + inode->i_sb->s_blocksize - 1) >>
- inode->i_sb->s_blocksize_bits;
-- if (eof_block < iblock + max_blocks)
-- eof_block = iblock + max_blocks;
-+ if (eof_block < map->m_lblk + map->m_len)
-+ eof_block = map->m_lblk + map->m_len;
-
- depth = ext_depth(inode);
- eh = path[depth].p_hdr;
- ex = path[depth].p_ext;
- ee_block = le32_to_cpu(ex->ee_block);
- ee_len = ext4_ext_get_actual_len(ex);
-- allocated = ee_len - (iblock - ee_block);
-- newblock = iblock - ee_block + ext4_ext_pblock(ex);
-+ allocated = ee_len - (map->m_lblk - ee_block);
-+ newblock = map->m_lblk - ee_block + ext4_ext_pblock(ex);
-
- ex2 = ex;
- orig_ex.ee_block = ex->ee_block;
-@@ -3349,16 +3347,16 @@ static int ext4_split_unwritten_extents(
- * block where the write begins, and the write completely
- * covers the extent, then we don't need to split it.
- */
-- if ((iblock == ee_block) && (allocated <= max_blocks))
-+ if ((map->m_lblk == ee_block) && (allocated <= map->m_len))
- return allocated;
-
- err = ext4_ext_get_access(handle, inode, path + depth);
- if (err)
- goto out;
-- /* ex1: ee_block to iblock - 1 : uninitialized */
-- if (iblock > ee_block) {
-+ /* ex1: ee_block to map->m_lblk - 1 : uninitialized */
-+ if (map->m_lblk > ee_block) {
- ex1 = ex;
-- ex1->ee_len = cpu_to_le16(iblock - ee_block);
-+ ex1->ee_len = cpu_to_le16(map->m_lblk - ee_block);
- ext4_ext_mark_uninitialized(ex1);
- ext4_ext_dirty(handle, inode, path + depth);
- ex2 = &newex;
-@@ -3368,15 +3366,15 @@ static int ext4_split_unwritten_extents(
- * we insert ex3, if ex1 is NULL. This is to avoid temporary
- * overlap of blocks.
- */
-- if (!ex1 && allocated > max_blocks)
-- ex2->ee_len = cpu_to_le16(max_blocks);
-+ if (!ex1 && allocated > map->m_len)
-+ ex2->ee_len = cpu_to_le16(map->m_len);
- /* ex3: to ee_block + ee_len : uninitialised */
-- if (allocated > max_blocks) {
-+ if (allocated > map->m_len) {
- unsigned int newdepth;
- ex3 = &newex;
-- ex3->ee_block = cpu_to_le32(iblock + max_blocks);
-- ext4_ext_store_pblock(ex3, newblock + max_blocks);
-- ex3->ee_len = cpu_to_le16(allocated - max_blocks);
-+ ex3->ee_block = cpu_to_le32(map->m_lblk + map->m_len);
-+ ext4_ext_store_pblock(ex3, newblock + map->m_len);
-+ ex3->ee_len = cpu_to_le16(allocated - map->m_len);
- ext4_ext_mark_uninitialized(ex3);
- err = ext4_ext_insert_extent(handle, inode, path, ex3, flags);
- if (err == -ENOSPC && may_zeroout) {
-@@ -3400,8 +3398,8 @@ static int ext4_split_unwritten_extents(
- err = ext4_ext_zeroout(inode, ex3);
- if (err)
- goto fix_extent_len;
-- max_blocks = allocated;
-- ex2->ee_len = cpu_to_le16(max_blocks);
-+ map->m_len = allocated;
-+ ex2->ee_len = cpu_to_le16(map->m_len);
- goto skip;
- }
- err = ext4_ext_zeroout(inode, &orig_ex);
-@@ -3413,7 +3411,7 @@ static int ext4_split_unwritten_extents(
- ext4_ext_store_pblock(ex, ext4_ext_pblock(&orig_ex));
- ext4_ext_dirty(handle, inode, path + depth);
- /* zeroed the full extent */
-- /* blocks available from iblock */
-+ /* blocks available from map->m_lblk */
- return allocated;
-
- } else if (err)
-@@ -3433,7 +3431,7 @@ static int ext4_split_unwritten_extents(
-
- depth = newdepth;
- ext4_ext_drop_refs(path);
-- path = ext4_ext_find_extent(inode, iblock, path);
-+ path = ext4_ext_find_extent(inode, map->m_lblk, path);
- if (IS_ERR(path)) {
- err = PTR_ERR(path);
- goto out;
-@@ -3446,8 +3444,7 @@ static int ext4_split_unwritten_extents(
- err = ext4_ext_get_access(handle, inode, path + depth);
- if (err)
- goto out;
--
-- allocated = max_blocks;
-+ allocated = map->m_len;
- }
- skip:
- /*
-@@ -3457,16 +3454,16 @@ skip:
- */
- if (ex1 && ex1 != ex) {
- ex1 = ex;
-- ex1->ee_len = cpu_to_le16(iblock - ee_block);
-+ ex1->ee_len = cpu_to_le16(map->m_lblk - ee_block);
- ext4_ext_mark_uninitialized(ex1);
- ext4_ext_dirty(handle, inode, path + depth);
- ex2 = &newex;
- }
- /*
-- * ex2: iblock to iblock + maxblocks-1 : to be direct IO written,
-- * uninitialised still.
-+ * ex2: map->m_lblk to map->m_lblk + map->m_len-1 : to be written
-+ * using direct I/O, uninitialised still.
- */
-- ex2->ee_block = cpu_to_le32(iblock);
-+ ex2->ee_block = cpu_to_le32(map->m_lblk);
- ext4_ext_store_pblock(ex2, newblock);
- ex2->ee_len = cpu_to_le16(allocated);
- ext4_ext_mark_uninitialized(ex2);
-@@ -3506,8 +3503,7 @@ fix_extent_len:
-
- static int ext4_convert_unwritten_extents_dio(handle_t *handle,
- struct inode *inode,
-- ext4_lblk_t iblock,
-- unsigned int max_blocks,
-+ struct ext4_map_blocks *map,
- struct ext4_ext_path *path)
- {
- struct ext4_extent *ex;
-@@ -3529,14 +3525,13 @@ static int ext4_convert_unwritten_extent
-
- /* If extent is larger than requested then split is required */
-
-- if (ee_block != iblock || ee_len > max_blocks) {
-- err = ext4_split_unwritten_extents(handle, inode, path,
-- iblock, max_blocks,
-+ if (ee_block != map->m_lblk || ee_len > map->m_len) {
-+ err = ext4_split_unwritten_extents(handle, inode, map, path,
- EXT4_EXT_DATA_VALID);
- if (err < 0)
- goto out;
- ext4_ext_drop_refs(path);
-- path = ext4_ext_find_extent(inode, iblock, path);
-+ path = ext4_ext_find_extent(inode, map->m_lblk, path);
- if (IS_ERR(path)) {
- err = PTR_ERR(path);
- goto out;
-@@ -3627,10 +3622,9 @@ out:
-
- static int
- ext4_ext_handle_uninitialized_extents(handle_t *handle, struct inode *inode,
-- ext4_lblk_t iblock, unsigned int max_blocks,
-+ struct ext4_map_blocks *map,
- struct ext4_ext_path *path, int flags,
-- unsigned int allocated, struct buffer_head *bh_result,
-- ext4_fsblk_t newblock)
-+ unsigned int allocated, ext4_fsblk_t newblock)
- {
- int ret = 0;
- int err = 0;
-@@ -3638,7 +3632,7 @@ ext4_ext_handle_uninitialized_extents(ha
-
- ext_debug("ext4_ext_handle_uninitialized_extents: inode %lu, logical"
- "block %llu, max_blocks %u, flags %d, allocated %u",
-- inode->i_ino, (unsigned long long)iblock, max_blocks,
-+ inode->i_ino, (unsigned long long)map->m_lblk, map->m_len,
- flags, allocated);
- ext4_ext_show_leaf(inode, path);
-
-@@ -3651,9 +3645,8 @@ ext4_ext_handle_uninitialized_extents(ha
- /* DIO get_block() before submit the IO, split the extent */
- if ((flags & ~EXT4_GET_BLOCKS_METADATA_NOFAIL) ==
- EXT4_GET_BLOCKS_DIO_CREATE_EXT) {
-- ret = ext4_split_unwritten_extents(handle,
-- inode, path, iblock,
-- max_blocks, flags);
-+ ret = ext4_split_unwritten_extents(handle, inode, map,
-+ path, flags);
- if (ret <= 0)
- goto out;
- /*
-@@ -3674,12 +3667,11 @@ ext4_ext_handle_uninitialized_extents(ha
- if ((flags & ~EXT4_GET_BLOCKS_METADATA_NOFAIL) ==
- EXT4_GET_BLOCKS_DIO_CONVERT_EXT) {
- ret = ext4_convert_unwritten_extents_dio(handle, inode,
-- iblock, max_blocks,
-- path);
-+ map, path);
- if (ret >= 0) {
- ext4_update_inode_fsync_trans(handle, inode, 1);
-- err = check_eofblocks_fl(handle, inode, iblock, path,
-- max_blocks);
-+ err = check_eofblocks_fl(handle, inode, map->m_lblk, path,
-+ map->m_len);
- } else
- err = ret;
- goto out2;
-@@ -3701,18 +3693,15 @@ ext4_ext_handle_uninitialized_extents(ha
- * the buffer head will be unmapped so that
- * a read from the block returns 0s.
- */
-- set_buffer_unwritten(bh_result);
-+ map->m_flags |= EXT4_MAP_UNWRITTEN;
- goto out1;
- }
-
- /* buffered write, writepage time, convert*/
-- ret = ext4_ext_convert_to_initialized(handle, inode,
-- path, iblock,
-- max_blocks,
-- flags);
-+ ret = ext4_ext_convert_to_initialized(handle, inode, map, path, flags);
- if (ret >= 0) {
- ext4_update_inode_fsync_trans(handle, inode, 1);
-- err = check_eofblocks_fl(handle, inode, iblock, path, max_blocks);
-+ err = check_eofblocks_fl(handle, inode, map->m_lblk, path, map->m_len);
- if (err < 0)
- goto out2;
- }
-@@ -3722,7 +3711,7 @@ out:
- goto out2;
- } else
- allocated = ret;
-- set_buffer_new(bh_result);
-+ map->m_flags |= EXT4_MAP_NEW;
- /*
- * if we allocated more blocks than requested
- * we need to make sure we unmap the extra block
-@@ -3730,11 +3719,11 @@ out:
- * unmapped later when we find the buffer_head marked
- * new.
- */
-- if (allocated > max_blocks) {
-+ if (allocated > map->m_len) {
- unmap_underlying_metadata_blocks(inode->i_sb->s_bdev,
-- newblock + max_blocks,
-- allocated - max_blocks);
-- allocated = max_blocks;
-+ newblock + map->m_len,
-+ allocated - map->m_len);
-+ allocated = map->m_len;
- }
-
- /*
-@@ -3748,13 +3737,13 @@ out:
- ext4_da_update_reserve_space(inode, allocated, 0);
-
- map_out:
-- set_buffer_mapped(bh_result);
-+ map->m_flags |= EXT4_MAP_MAPPED;
- out1:
-- if (allocated > max_blocks)
-- allocated = max_blocks;
-+ if (allocated > map->m_len)
-+ allocated = map->m_len;
- ext4_ext_show_leaf(inode, path);
-- bh_result->b_bdev = inode->i_sb->s_bdev;
-- bh_result->b_blocknr = newblock;
-+ map->m_pblk = newblock;
-+ map->m_len = allocated;
- out2:
- if (path) {
- ext4_ext_drop_refs(path);
-@@ -3781,10 +3770,8 @@ out2:
- *
- * return < 0, error case.
- */
--int ext4_ext_get_blocks(handle_t *handle, struct inode *inode,
-- ext4_lblk_t iblock,
-- unsigned int max_blocks, struct buffer_head *bh_result,
-- int flags)
-+int ext4_ext_map_blocks(handle_t *handle, struct inode *inode,
-+ struct ext4_map_blocks *map, int flags)
- {
- struct ext4_ext_path *path = NULL;
- struct ext4_extent_header *eh;
-@@ -3796,12 +3783,11 @@ int ext4_ext_get_blocks(handle_t *handle
- ext4_io_end_t *io = ext4_inode_aio(inode);
- int set_unwritten = 0;
-
-- __clear_bit(BH_New, &bh_result->b_state);
- ext_debug("blocks %u/%u requested for inode %lu\n",
-- iblock, max_blocks, inode->i_ino);
-+ map->m_lblk, map->m_len, inode->i_ino);
-
- /* check in cache */
-- if (ext4_ext_in_cache(inode, iblock, &newex)) {
-+ if (ext4_ext_in_cache(inode, map->m_lblk, &newex)) {
- if (!newex.ee_start_lo && !newex.ee_start_hi) {
- if ((flags & EXT4_GET_BLOCKS_CREATE) == 0) {
- /*
-@@ -3813,18 +3799,18 @@ int ext4_ext_get_blocks(handle_t *handle
- /* we should allocate requested block */
- } else {
- /* block is already allocated */
-- newblock = iblock
-+ newblock = map->m_lblk
- - le32_to_cpu(newex.ee_block)
- + ext4_ext_pblock(&newex);
- /* number of remaining blocks in the extent */
- allocated = ext4_ext_get_actual_len(&newex) -
-- (iblock - le32_to_cpu(newex.ee_block));
-+ (map->m_lblk - le32_to_cpu(newex.ee_block));
- goto out;
- }
- }
-
- /* find extent for this block */
-- path = ext4_ext_find_extent(inode, iblock, NULL);
-+ path = ext4_ext_find_extent(inode, map->m_lblk, NULL);
- if (IS_ERR(path)) {
- err = PTR_ERR(path);
- path = NULL;
-@@ -3841,7 +3827,7 @@ int ext4_ext_get_blocks(handle_t *handle
- if (unlikely(path[depth].p_ext == NULL && depth != 0)) {
- EXT4_ERROR_INODE(inode, "bad extent address "
- "iblock: %d, depth: %d pblock %lld",
-- iblock, depth, path[depth].p_block);
-+ map->m_lblk, depth, path[depth].p_block);
- err = -EIO;
- goto out2;
- }
-@@ -3859,11 +3845,11 @@ int ext4_ext_get_blocks(handle_t *handle
- */
- ee_len = ext4_ext_get_actual_len(ex);
- /* if found extent covers block, simply return it */
-- if (in_range(iblock, ee_block, ee_len)) {
-- newblock = iblock - ee_block + ee_start;
-+ if (in_range(map->m_lblk, ee_block, ee_len)) {
-+ newblock = map->m_lblk - ee_block + ee_start;
- /* number of remaining blocks in the extent */
-- allocated = ee_len - (iblock - ee_block);
-- ext_debug("%u fit into %u:%d -> %llu\n", iblock,
-+ allocated = ee_len - (map->m_lblk - ee_block);
-+ ext_debug("%u fit into %u:%d -> %llu\n", map->m_lblk,
- ee_block, ee_len, newblock);
-
- /*
-@@ -3875,9 +3861,9 @@ int ext4_ext_get_blocks(handle_t *handle
- ee_len, ee_start);
- goto out;
- }
-- ret = ext4_ext_handle_uninitialized_extents(
-- handle, inode, iblock, max_blocks, path,
-- flags, allocated, bh_result, newblock);
-+ ret = ext4_ext_handle_uninitialized_extents(handle,
-+ inode, map, path, flags, allocated,
-+ newblock);
- return ret;
- }
- }
-@@ -3891,7 +3877,7 @@ int ext4_ext_get_blocks(handle_t *handle
- * put just found gap into cache to speed up
- * subsequent requests
- */
-- ext4_ext_put_gap_in_cache(inode, path, iblock);
-+ ext4_ext_put_gap_in_cache(inode, path, map->m_lblk);
- goto out2;
- }
- /*
-@@ -3899,11 +3885,11 @@ int ext4_ext_get_blocks(handle_t *handle
- */
-
- /* find neighbour allocated blocks */
-- ar.lleft = iblock;
-+ ar.lleft = map->m_lblk;
- err = ext4_ext_search_left(inode, path, &ar.lleft, &ar.pleft);
- if (err)
- goto out2;
-- ar.lright = iblock;
-+ ar.lright = map->m_lblk;
- err = ext4_ext_search_right(inode, path, &ar.lright, &ar.pright);
- if (err)
- goto out2;
-@@ -3914,26 +3900,26 @@ int ext4_ext_get_blocks(handle_t *handle
- * EXT_INIT_MAX_LEN and for an uninitialized extent this limit is
- * EXT_UNINIT_MAX_LEN.
- */
-- if (max_blocks > EXT_INIT_MAX_LEN &&
-+ if (map->m_len > EXT_INIT_MAX_LEN &&
- !(flags & EXT4_GET_BLOCKS_UNINIT_EXT))
-- max_blocks = EXT_INIT_MAX_LEN;
-- else if (max_blocks > EXT_UNINIT_MAX_LEN &&
-+ map->m_len = EXT_INIT_MAX_LEN;
-+ else if (map->m_len > EXT_UNINIT_MAX_LEN &&
- (flags & EXT4_GET_BLOCKS_UNINIT_EXT))
-- max_blocks = EXT_UNINIT_MAX_LEN;
-+ map->m_len = EXT_UNINIT_MAX_LEN;
-
-- /* Check if we can really insert (iblock)::(iblock+max_blocks) extent */
-- newex.ee_block = cpu_to_le32(iblock);
-- newex.ee_len = cpu_to_le16(max_blocks);
-+ /* Check if we can really insert (m_lblk)::(m_lblk + m_len) extent */
-+ newex.ee_block = cpu_to_le32(map->m_lblk);
-+ newex.ee_len = cpu_to_le16(map->m_len);
- err = ext4_ext_check_overlap(inode, &newex, path);
- if (err)
- allocated = ext4_ext_get_actual_len(&newex);
- else
-- allocated = max_blocks;
-+ allocated = map->m_len;
-
- /* allocate new block */
- ar.inode = inode;
-- ar.goal = ext4_ext_find_goal(inode, path, iblock);
-- ar.logical = iblock;
-+ ar.goal = ext4_ext_find_goal(inode, path, map->m_lblk);
-+ ar.logical = map->m_lblk;
- ar.len = allocated;
- if (S_ISREG(inode->i_mode))
- ar.flags = EXT4_MB_HINT_DATA;
-@@ -3966,7 +3952,7 @@ int ext4_ext_get_blocks(handle_t *handle
- set_unwritten = 1;
- }
-
-- err = check_eofblocks_fl(handle, inode, iblock, path, ar.len);
-+ err = check_eofblocks_fl(handle, inode, map->m_lblk, path, ar.len);
- if (err)
- goto out2;
-
-@@ -3997,9 +3983,9 @@ int ext4_ext_get_blocks(handle_t *handle
- /* previous routine could use block we allocated */
- newblock = ext4_ext_pblock(&newex);
- allocated = ext4_ext_get_actual_len(&newex);
-- if (allocated > max_blocks)
-- allocated = max_blocks;
-- set_buffer_new(bh_result);
-+ if (allocated > map->m_len)
-+ allocated = map->m_len;
-+ map->m_flags |= EXT4_MAP_NEW;
-
- /*
- * Update reserved blocks/metadata blocks after successful
-@@ -4013,17 +3999,17 @@ int ext4_ext_get_blocks(handle_t *handle
- * when it is _not_ an uninitialized extent.
- */
- if ((flags & EXT4_GET_BLOCKS_UNINIT_EXT) == 0) {
-- ext4_ext_put_in_cache(inode, iblock, allocated, newblock);
-+ ext4_ext_put_in_cache(inode, map->m_lblk, allocated, newblock);
- ext4_update_inode_fsync_trans(handle, inode, 1);
- } else
- ext4_update_inode_fsync_trans(handle, inode, 0);
- out:
-- if (allocated > max_blocks)
-- allocated = max_blocks;
-+ if (allocated > map->m_len)
-+ allocated = map->m_len;
- ext4_ext_show_leaf(inode, path);
-- set_buffer_mapped(bh_result);
-- bh_result->b_bdev = inode->i_sb->s_bdev;
-- bh_result->b_blocknr = newblock;
-+ map->m_flags |= EXT4_MAP_MAPPED;
-+ map->m_pblk = newblock;
-+ map->m_len = allocated;
- out2:
- if (path) {
- ext4_ext_drop_refs(path);
-@@ -4206,7 +4192,7 @@ retry:
- if (ret <= 0) {
- #ifdef EXT4FS_DEBUG
- WARN_ON(ret <= 0);
-- printk(KERN_ERR "%s: ext4_ext_get_blocks "
-+ printk(KERN_ERR "%s: ext4_ext_map_blocks "
- "returned error inode#%lu, block=%u, "
- "max_blocks=%u", __func__,
- inode->i_ino, block, max_blocks);
-@@ -4720,6 +4706,5 @@ EXPORT_SYMBOL(ext4_ext_insert_extent);
- EXPORT_SYMBOL(ext4_mb_new_blocks);
- EXPORT_SYMBOL(ext4_ext_calc_credits_for_insert);
- EXPORT_SYMBOL(ext4_mark_inode_dirty);
--EXPORT_SYMBOL(ext4_ext_walk_space);
- EXPORT_SYMBOL(ext4_ext_find_extent);
- EXPORT_SYMBOL(ext4_ext_drop_refs);
-Index: linux-stage/fs/ext4/inode.c
-===================================================================
---- linux-stage.orig/fs/ext4/inode.c 2016-07-15 12:13:05.000000000 +0300
-+++ linux-stage/fs/ext4/inode.c 2016-07-15 12:15:36.000000000 +0300
-@@ -200,7 +200,7 @@ int ext4_truncate_restart_trans(handle_t
- int ret;
-
- /*
-- * Drop i_data_sem to avoid deadlock with ext4_get_blocks At this
-+ * Drop i_data_sem to avoid deadlock with ext4_map_blocks. At this
- * moment, get_block can be called only for blocks inside i_size since
- * page cache has been already dropped and writes are blocked by
- * i_mutex. So we can safely drop the i_data_sem here.
-@@ -970,9 +970,9 @@ err_out:
- }
-
- /*
-- * The ext4_ind_get_blocks() function handles non-extents inodes
-+ * The ext4_ind_map_blocks() function handles non-extents inodes
- * (i.e., using the traditional indirect/double-indirect i_blocks
-- * scheme) for ext4_get_blocks().
-+ * scheme) for ext4_map_blocks().
- *
- * Allocation strategy is simple: if we have to allocate something, we will
- * have to go the whole way to leaf. So let's do it before attaching anything
-@@ -991,15 +991,14 @@ err_out:
- * return = 0, if plain lookup failed.
- * return < 0, error case.
- *
-- * The ext4_ind_get_blocks() function should be called with
-+ * The ext4_ind_map_blocks() function should be called with
- * down_write(&EXT4_I(inode)->i_data_sem) if allocating filesystem
- * blocks (i.e., flags has EXT4_GET_BLOCKS_CREATE set) or
- * down_read(&EXT4_I(inode)->i_data_sem) if not allocating file system
- * blocks.
- */
--static int ext4_ind_get_blocks(handle_t *handle, struct inode *inode,
-- ext4_lblk_t iblock, unsigned int maxblocks,
-- struct buffer_head *bh_result,
-+static int ext4_ind_map_blocks(handle_t *handle, struct inode *inode,
-+ struct ext4_map_blocks *map,
- int flags)
- {
- int err = -EIO;
-@@ -1015,7 +1014,7 @@ static int ext4_ind_get_blocks(handle_t
-
- J_ASSERT(!(ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)));
- J_ASSERT(handle != NULL || (flags & EXT4_GET_BLOCKS_CREATE) == 0);
-- depth = ext4_block_to_path(inode, iblock, offsets,
-+ depth = ext4_block_to_path(inode, map->m_lblk, offsets,
- &blocks_to_boundary);
-
- if (depth == 0)
-@@ -1026,10 +1025,9 @@ static int ext4_ind_get_blocks(handle_t
- /* Simplest case - block found, no allocation needed */
- if (!partial) {
- first_block = le32_to_cpu(chain[depth - 1].key);
-- clear_buffer_new(bh_result);
- count++;
- /*map more blocks*/
-- while (count < maxblocks && count <= blocks_to_boundary) {
-+ while (count < map->m_len && count <= blocks_to_boundary) {
- ext4_fsblk_t blk;
-
- blk = le32_to_cpu(*(chain[depth-1].p + count));
-@@ -1049,7 +1047,7 @@ static int ext4_ind_get_blocks(handle_t
- /*
- * Okay, we need to do block allocation.
- */
-- goal = ext4_find_goal(inode, iblock, partial);
-+ goal = ext4_find_goal(inode, map->m_lblk, partial);
-
- /* the number of blocks need to allocate for [d,t]indirect blocks */
- indirect_blks = (chain + depth) - partial - 1;
-@@ -1059,11 +1057,11 @@ static int ext4_ind_get_blocks(handle_t
- * direct blocks to allocate for this branch.
- */
- count = ext4_blks_to_allocate(partial, indirect_blks,
-- maxblocks, blocks_to_boundary);
-+ map->m_len, blocks_to_boundary);
- /*
- * Block out ext4_truncate while we alter the tree
- */
-- err = ext4_alloc_branch(handle, inode, iblock, indirect_blks,
-+ err = ext4_alloc_branch(handle, inode, map->m_lblk, indirect_blks,
- &count, goal,
- offsets + (partial - chain), partial);
-
-@@ -1075,18 +1073,20 @@ static int ext4_ind_get_blocks(handle_t
- * may need to return -EAGAIN upwards in the worst case. --sct
- */
- if (!err)
-- err = ext4_splice_branch(handle, inode, iblock,
-+ err = ext4_splice_branch(handle, inode, map->m_lblk,
- partial, indirect_blks, count);
- if (err)
- goto cleanup;
-
-- set_buffer_new(bh_result);
-+ map->m_flags |= EXT4_MAP_NEW;
-
- ext4_update_inode_fsync_trans(handle, inode, 1);
- got_it:
-- map_bh(bh_result, inode->i_sb, le32_to_cpu(chain[depth-1].key));
-+ map->m_flags |= EXT4_MAP_MAPPED;
-+ map->m_pblk = le32_to_cpu(chain[depth-1].key);
-+ map->m_len = count;
- if (count > blocks_to_boundary)
-- set_buffer_boundary(bh_result);
-+ map->m_flags |= EXT4_MAP_BOUNDARY;
- err = count;
- /* Clean up and exit */
- partial = chain + depth - 1; /* the whole chain */
-@@ -1096,7 +1096,6 @@ cleanup:
- brelse(partial->bh);
- partial--;
- }
-- BUFFER_TRACE(bh_result, "returned");
- out:
- return err;
- }
-@@ -1291,15 +1290,15 @@ static pgoff_t ext4_num_dirty_pages(stru
- }
-
- /*
-- * The ext4_get_blocks() function tries to look up the requested blocks,
-+ * The ext4_map_blocks() function tries to look up the requested blocks,
- * and returns if the blocks are already mapped.
- *
- * Otherwise it takes the write lock of the i_data_sem and allocate blocks
- * and store the allocated blocks in the result buffer head and mark it
- * mapped.
- *
-- * If file type is extents based, it will call ext4_ext_get_blocks(),
-- * Otherwise, call with ext4_ind_get_blocks() to handle indirect mapping
-+ * If file type is extents based, it will call ext4_ext_map_blocks(),
-+ * Otherwise, call with ext4_ind_map_blocks() to handle indirect mapping
- * based files
- *
- * On success, it returns the number of blocks being mapped or allocate.
-@@ -1312,39 +1311,33 @@ static pgoff_t ext4_num_dirty_pages(stru
- *
- * It returns the error in case of allocation failure.
- */
--int ext4_get_blocks(handle_t *handle, struct inode *inode, sector_t block,
-- unsigned int max_blocks, struct buffer_head *bh,
-- int flags)
-+int ext4_map_blocks(handle_t *handle, struct inode *inode,
-+ struct ext4_map_blocks *map, int flags)
- {
- int retval;
-+
-+ map->m_flags = 0;
-+ ext_debug("ext4_map_blocks(): inode %lu, flag %d, max_blocks %u,"
-+ "logical block %lu\n", inode->i_ino, flags, map->m_len,
-+ (unsigned long) map->m_lblk);
-
-- clear_buffer_mapped(bh);
-- clear_buffer_unwritten(bh);
--
-- ext_debug("ext4_get_blocks(): inode %lu, flag %d, max_blocks %u,"
-- "logical block %lu\n", inode->i_ino, flags, max_blocks,
-- (unsigned long)block);
- /*
- * Try to see if we can get the block without requesting a new
- * file system block.
- */
- down_read((&EXT4_I(inode)->i_data_sem));
- if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)) {
-- retval = ext4_ext_get_blocks(handle, inode, block, max_blocks,
-- bh, 0);
-+ retval = ext4_ext_map_blocks(handle, inode, map, 0);
- } else {
-- retval = ext4_ind_get_blocks(handle, inode, block, max_blocks,
-- bh, 0);
-+ retval = ext4_ind_map_blocks(handle, inode, map, 0);
- }
- up_read((&EXT4_I(inode)->i_data_sem));
-
-- if (retval > 0 && buffer_mapped(bh)) {
-+ if (retval > 0 && map->m_flags & EXT4_MAP_MAPPED) {
- int ret = check_block_validity(inode, "file system corruption",
-- block, bh->b_blocknr, retval);
-- if (ret != 0) {
-- bh->b_blocknr = 0;
-+ map->m_lblk, map->m_pblk, retval);
-+ if (ret != 0)
- return ret;
-- }
- }
-
- /* If it is only a block(s) look up */
-@@ -1358,7 +1351,7 @@ int ext4_get_blocks(handle_t *handle, st
- * ext4_ext_get_block() returns th create = 0
- * with buffer head unmapped.
- */
-- if (retval > 0 && buffer_mapped(bh))
-+ if (retval > 0 && map->m_flags & EXT4_MAP_MAPPED)
- return retval;
-
- /*
-@@ -1371,7 +1364,7 @@ int ext4_get_blocks(handle_t *handle, st
- * of BH_Unwritten and BH_Mapped flags being simultaneously
- * set on the buffer_head.
- */
-- clear_buffer_unwritten(bh);
-+ map->m_flags &= ~EXT4_MAP_UNWRITTEN;
-
- /*
- * New blocks allocate and/or writing to uninitialized extent
-@@ -1394,13 +1387,11 @@ int ext4_get_blocks(handle_t *handle, st
- * could have changed the inode type in between
- */
- if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)) {
-- retval = ext4_ext_get_blocks(handle, inode, block, max_blocks,
-- bh, flags);
-+ retval = ext4_ext_map_blocks(handle, inode, map, flags);
- } else {
-- retval = ext4_ind_get_blocks(handle, inode, block,
-- max_blocks, bh, flags);
-+ retval = ext4_ind_map_blocks(handle, inode, map, flags);
-
-- if (retval > 0 && buffer_new(bh)) {
-+ if (retval > 0 && map->m_flags & EXT4_MAP_NEW) {
- /*
- * We allocated new blocks which will result in
- * i_data's format changing. Force the migrate
-@@ -1423,17 +1414,39 @@ int ext4_get_blocks(handle_t *handle, st
- EXT4_I(inode)->i_delalloc_reserved_flag = 0;
-
- up_write((&EXT4_I(inode)->i_data_sem));
-- if (retval > 0 && buffer_mapped(bh)) {
-+ if (retval > 0 && map->m_flags & EXT4_MAP_MAPPED) {
- int ret = check_block_validity(inode, "file system "
- "corruption after allocation",
-- block, bh->b_blocknr, retval);
-+ map->m_lblk, map->m_pblk,
-+ retval);
- if (ret != 0) {
-- bh->b_blocknr = 0;
- return ret;
- }
- }
- return retval;
- }
-+EXPORT_SYMBOL(ext4_map_blocks);
-+
-+int ext4_get_blocks(handle_t *handle, struct inode *inode, sector_t block,
-+ unsigned int max_blocks, struct buffer_head *bh,
-+ int flags)
-+{
-+ struct ext4_map_blocks map;
-+ int ret;
-+
-+ map.m_lblk = block;
-+ map.m_len = max_blocks;
-+
-+ ret = ext4_map_blocks(handle, inode, &map, flags);
-+ if (ret < 0)
-+ return ret;
-+
-+ bh->b_blocknr = map.m_pblk;
-+ bh->b_size = inode->i_sb->s_blocksize * map.m_len;
-+ bh->b_bdev = inode->i_sb->s_bdev;
-+ bh->b_state = (bh->b_state & ~EXT4_MAP_FLAGS) | map.m_flags;
-+ return ret;
-+}
-
- /* Maximum number of blocks we map for direct IO at once. */
- #define DIO_MAX_BLOCKS 4096
+++ /dev/null
-diff --git a/fs/ext4/balloc.c b/fs/ext4/balloc.c
-index 61aeacb..026c89f 100644
---- a/fs/ext4/balloc.c
-+++ b/fs/ext4/balloc.c
-@@ -97,12 +97,11 @@ unsigned ext4_init_block_bitmap(struct super_block *sb, struct buffer_head *bh,
- /* If checksum is bad mark all blocks used to prevent allocation
- * essentially implementing a per-group read-only flag. */
- if (!ext4_group_desc_csum_verify(sbi, block_group, gdp)) {
-- ext4_error(sb, "Checksum bad for group %u",
-+ ext4_corrupted_block_group(sb, block_group,
-+ EXT4_GROUP_INFO_BBITMAP_CORRUPT |
-+ EXT4_GROUP_INFO_IBITMAP_CORRUPT,
-+ "Checksum bad for group %u",
- block_group);
-- ext4_free_blks_set(sb, gdp, 0);
-- ext4_free_inodes_set(sb, gdp, 0);
-- ext4_itable_unused_set(sb, gdp, 0);
-- memset(bh->b_data, 0xff, sb->s_blocksize);
- return 0;
- }
- memset(bh->b_data, 0, sb->s_blocksize);
-@@ -277,7 +276,9 @@ static int ext4_valid_block_bitmap(struct super_block *sb,
- return 1;
-
- err_out:
-- ext4_error(sb, "Invalid block bitmap - block_group = %d, block = %llu",
-+ ext4_corrupted_block_group(sb, block_group,
-+ EXT4_GROUP_INFO_BBITMAP_CORRUPT,
-+ "Invalid block bitmap - block_group = %d, block = %llu",
- block_group, bitmap_blk);
- return 0;
- }
-diff --git a/fs/ext4/ext4.h b/fs/ext4/ext4.h
-index 4ed330c..938487a 100644
---- a/fs/ext4/ext4.h
-+++ b/fs/ext4/ext4.h
-@@ -75,8 +75,17 @@ typedef __u32 ext4_lblk_t;
- /* data type for block group number */
- typedef unsigned int ext4_group_t;
-
-+void __ext4_corrupted_block_group(struct super_block *sb,
-+ ext4_group_t group, unsigned int flags);
-+
-+#define ext4_corrupted_block_group(sb, group, flags, fmt...) \
-+ do { \
-+ __ext4_warning(sb, __func__, ## fmt); \
-+ __ext4_corrupted_block_group(sb, group, flags); \
-+ } while (0)
-+
- /*
-- * Flags used in mballoc's allocation_context flags field.
-+ * Flags used in mballoc's allocation_context flags field.
- *
- * Also used to show what's going on for debugging purposes when the
- * flag field is exported via the traceport interface
-@@ -2203,9 +2212,19 @@ struct ext4_group_info {
-
- #define EXT4_GROUP_INFO_NEED_INIT_BIT 0
- #define EXT4_GROUP_INFO_WAS_TRIMMED_BIT 1
-+#define EXT4_GROUP_INFO_BBITMAP_CORRUPT_BIT 2
-+#define EXT4_GROUP_INFO_BBITMAP_CORRUPT \
-+ (1 << EXT4_GROUP_INFO_BBITMAP_CORRUPT_BIT)
-+#define EXT4_GROUP_INFO_IBITMAP_CORRUPT_BIT 3
-+#define EXT4_GROUP_INFO_IBITMAP_CORRUPT \
-+ (1 << EXT4_GROUP_INFO_IBITMAP_CORRUPT_BIT)
-
- #define EXT4_MB_GRP_NEED_INIT(grp) \
- (test_bit(EXT4_GROUP_INFO_NEED_INIT_BIT, &((grp)->bb_state)))
-+#define EXT4_MB_GRP_BBITMAP_CORRUPT(grp) \
-+ (test_bit(EXT4_GROUP_INFO_BBITMAP_CORRUPT_BIT, &((grp)->bb_state)))
-+#define EXT4_MB_GRP_IBITMAP_CORRUPT(grp) \
-+ (test_bit(EXT4_GROUP_INFO_IBITMAP_CORRUPT_BIT, &((grp)->bb_state)))
-
- #define EXT4_MB_GRP_WAS_TRIMMED(grp) \
- (test_bit(EXT4_GROUP_INFO_WAS_TRIMMED_BIT, &((grp)->bb_state)))
-diff --git a/fs/ext4/ialloc.c b/fs/ext4/ialloc.c
-index f3509ba..8894963 100644
---- a/fs/ext4/ialloc.c
-+++ b/fs/ext4/ialloc.c
-@@ -76,11 +76,10 @@ unsigned ext4_init_inode_bitmap(struct super_block *sb, struct buffer_head *bh,
- /* If checksum is bad mark all blocks and inodes use to prevent
- * allocation, essentially implementing a per-group read-only flag. */
- if (!ext4_group_desc_csum_verify(sbi, block_group, gdp)) {
-- ext4_error(sb, "Checksum bad for group %u", block_group);
-- ext4_free_blks_set(sb, gdp, 0);
-- ext4_free_inodes_set(sb, gdp, 0);
-- ext4_itable_unused_set(sb, gdp, 0);
-- memset(bh->b_data, 0xff, sb->s_blocksize);
-+ ext4_corrupted_block_group(sb, block_group,
-+ EXT4_GROUP_INFO_BBITMAP_CORRUPT |
-+ EXT4_GROUP_INFO_IBITMAP_CORRUPT,
-+ "Checksum bad for group %u", block_group);
- return 0;
- }
-
-@@ -192,6 +191,7 @@ void ext4_free_inode(handle_t *handle, struct inode *inode)
- struct ext4_super_block *es;
- struct ext4_sb_info *sbi;
- int fatal = 0, err, count, cleared;
-+ struct ext4_group_info *grp;
-
- if (atomic_read(&inode->i_count) > 1) {
- printk(KERN_ERR "ext4_free_inode: inode has count=%d\n",
-@@ -235,7 +235,9 @@ void ext4_free_inode(handle_t *handle, struct inode *inode)
- block_group = (ino - 1) / EXT4_INODES_PER_GROUP(sb);
- bit = (ino - 1) % EXT4_INODES_PER_GROUP(sb);
- bitmap_bh = ext4_read_inode_bitmap(sb, block_group);
-- if (!bitmap_bh)
-+ /* Don't bother if the inode bitmap is corrupt. */
-+ grp = ext4_get_group_info(sb, block_group);
-+ if (unlikely(EXT4_MB_GRP_IBITMAP_CORRUPT(grp)) || !bitmap_bh)
- goto error_return;
-
- BUFFER_TRACE(bitmap_bh, "get_write_access");
-@@ -247,9 +249,12 @@ void ext4_free_inode(handle_t *handle, struct inode *inode)
- ext4_lock_group(sb, block_group);
- cleared = ext4_clear_bit(bit, bitmap_bh->b_data);
- ext4_unlock_group(sb, block_group);
-- if (!cleared)
-- ext4_error(sb, "bit already cleared for inode %lu", ino);
-- else {
-+ if (!cleared) {
-+ ext4_corrupted_block_group(sb, block_group,
-+ EXT4_GROUP_INFO_IBITMAP_CORRUPT,
-+ "bit already cleared for inode %lu",
-+ ino);
-+ } else {
- gdp = ext4_get_group_desc(sb, block_group, &bh2);
-
- BUFFER_TRACE(bh2, "get_write_access");
-@@ -825,6 +830,7 @@ struct inode *ext4_new_inode(handle_t *handle, struct inode *dir, int mode,
- int free = 0;
- static int once = 1;
- ext4_group_t flex_group;
-+ struct ext4_group_info *grp;
-
- /* Cannot create files in a deleted directory */
- if (!dir || !dir->i_nlink)
-@@ -884,10 +890,21 @@ got_group:
- if (!gdp)
- goto fail;
-
-+ grp = ext4_get_group_info(sb, group);
-+ /* Skip groups with already-known suspicious inode tables */
-+ if (EXT4_MB_GRP_IBITMAP_CORRUPT(grp)) {
-+ if (++group == ngroups)
-+ group = 0;
-+ continue;
-+ }
- brelse(inode_bitmap_bh);
- inode_bitmap_bh = ext4_read_inode_bitmap(sb, group);
-- if (!inode_bitmap_bh)
-- goto fail;
-+ /* Skip groups with suspicious inode tables */
-+ if (EXT4_MB_GRP_IBITMAP_CORRUPT(grp) || !inode_bitmap_bh) {
-+ if (++group == ngroups)
-+ group = 0;
-+ continue;
-+ }
-
- repeat_in_this_group:
- ino = ext4_find_next_zero_bit((unsigned long *)
-diff --git a/fs/ext4/mballoc.c b/fs/ext4/mballoc.c
-index efcf909..06cd929 100644
---- a/fs/ext4/mballoc.c
-+++ b/fs/ext4/mballoc.c
-@@ -715,10 +715,12 @@ int ext4_mb_generate_buddy(struct super_block *sb,
- if (free != grp->bb_free) {
- struct ext4_group_desc *gdp;
- gdp = ext4_get_group_desc (sb, group, NULL);
-- ext4_error(sb, "group %lu: %u blocks in bitmap, %u in bb, "
-- "%u in gd, %lu pa's\n", (long unsigned int)group,
-- free, grp->bb_free, ext4_free_blks_count(sb, gdp),
-- grp->bb_prealloc_nr);
-+ ext4_corrupted_block_group(sb, group,
-+ EXT4_GROUP_INFO_BBITMAP_CORRUPT,
-+ "group %lu: %u blocks in bitmap, %u in bb, %u in gd, %lu pa's\n",
-+ (long unsigned int)group, free, grp->bb_free,
-+ ext4_free_blks_count(sb, gdp),
-+ grp->bb_prealloc_nr);
- return -EIO;
- }
- mb_set_largest_free_order(sb, grp);
-@@ -1120,7 +1122,7 @@ ext4_mb_load_buddy(struct super_block *sb, ext4_group_t group,
- */
- ret = ext4_mb_init_group(sb, group);
- if (ret)
-- return ret;
-+ goto err;
- }
-
- /*
-@@ -1204,6 +1206,8 @@ ext4_mb_load_buddy(struct super_block *sb, ext4_group_t group,
- return 0;
-
- err:
-+ ext4_warning(sb, "Error in loading buddy information for %u",
-+ group);
- if (e4b->bd_bitmap_page)
- page_cache_release(e4b->bd_bitmap_page);
- if (e4b->bd_buddy_page)
-@@ -1291,6 +1295,10 @@ static void mb_free_blocks(struct inode *inode, struct ext4_buddy *e4b,
-
- BUG_ON(first + count > (sb->s_blocksize << 3));
- assert_spin_locked(ext4_group_lock_ptr(sb, e4b->bd_group));
-+ /* Don't bother if the block group is corrupt. */
-+ if (unlikely(EXT4_MB_GRP_BBITMAP_CORRUPT(e4b->bd_info)))
-+ return;
-+
- mb_check_buddy(e4b);
- mb_free_blocks_double(inode, e4b, first, count);
-
-@@ -1321,9 +1329,12 @@ static void mb_free_blocks(struct inode *inode, struct ext4_buddy *e4b,
- le32_to_cpu(EXT4_SB(sb)->s_es->s_first_data_block);
- ext4_grp_locked_error(sb, e4b->bd_group,
- __func__, "double-free of inode"
-- " %lu's block %llu(bit %u in group %u)",
-+ " %lu's block %llu(bit %u in group %u) block bitmap corrupt",
- inode ? inode->i_ino : 0, blocknr, block,
- e4b->bd_group);
-+ /* Mark the block group as corrupt. */
-+ set_bit(EXT4_GROUP_INFO_BBITMAP_CORRUPT_BIT,
-+ &e4b->bd_info->bb_state);
- }
- mb_clear_bit(block, EXT4_MB_BITMAP(e4b));
- e4b->bd_info->bb_counters[order]++;
-@@ -1700,6 +1711,11 @@ int ext4_mb_find_by_goal(struct ext4_allocation_context *ac,
- if (err)
- return err;
-
-+ if (unlikely(EXT4_MB_GRP_BBITMAP_CORRUPT(e4b->bd_info))) {
-+ ext4_mb_release_desc(e4b);
-+ return 0;
-+ }
-+
- ext4_lock_group(ac->ac_sb, group);
- max = mb_find_extent(e4b, 0, ac->ac_g_ex.fe_start,
- ac->ac_g_ex.fe_len, &ex);
-@@ -1912,6 +1928,9 @@ static int ext4_mb_good_group(struct ext4_allocation_context *ac,
-
- BUG_ON(cr < 0 || cr >= 4);
-
-+ if (unlikely(EXT4_MB_GRP_BBITMAP_CORRUPT(grp)))
-+ return 0;
-+
- /* We only do this if the grp has never been initialized */
- if (unlikely(EXT4_MB_GRP_NEED_INIT(grp))) {
- int ret = ext4_mb_init_group(ac->ac_sb, group);
-@@ -3382,9 +3401,10 @@ int ext4_mb_check_ondisk_bitmap(struct super_block *sb, void *bitmap,
- }
-
- if (free != free_in_gdp) {
-- ext4_error(sb, "on-disk bitmap for group %d"
-- "corrupted: %u blocks free in bitmap, %u - in gd\n",
-- group, free, free_in_gdp);
-+ ext4_corrupted_block_group(sb, group,
-+ EXT4_GROUP_INFO_BBITMAP_CORRUPT,
-+ "on-disk bitmap for group %d corrupted: %u blocks free in bitmap, %u - in gd\n",
-+ group, free, free_in_gdp);
- return -EIO;
- }
- return 0;
-@@ -3753,14 +3773,6 @@ ext4_mb_release_inode_pa(struct ext4_buddy *e4b, struct buffer_head *bitmap_bh,
- /* "free < pa->pa_free" means we maybe double alloc the same blocks,
- * otherwise maybe leave some free blocks unavailable, no need to BUG.*/
- if ((free > pa->pa_free && !pa->pa_error) || (free < pa->pa_free)) {
-- ext4_error(sb, "pa free mismatch: [pa %p] "
-- "[phy %lu] [logic %lu] [len %u] [free %u] "
-- "[error %u] [inode %lu] [freed %u]", pa,
-- (unsigned long)pa->pa_pstart,
-- (unsigned long)pa->pa_lstart,
-- (unsigned)pa->pa_len, (unsigned)pa->pa_free,
-- (unsigned)pa->pa_error, pa->pa_inode->i_ino,
-- free);
- ext4_grp_locked_error(sb, group,
- __func__, "free %u, pa_free %u",
- free, pa->pa_free);
-@@ -3834,14 +3846,11 @@ ext4_mb_discard_group_preallocations(struct super_block *sb,
- return 0;
-
- bitmap_bh = ext4_read_block_bitmap(sb, group);
-- if (bitmap_bh == NULL) {
-- ext4_error(sb, "Error reading block bitmap for %u", group);
-+ if (bitmap_bh == NULL)
- return 0;
-- }
-
- err = ext4_mb_load_buddy(sb, group, &e4b);
- if (err) {
-- ext4_error(sb, "Error loading buddy information for %u", group);
- put_bh(bitmap_bh);
- return 0;
- }
-@@ -4015,16 +4024,11 @@ repeat:
- ext4_get_group_no_and_offset(sb, pa->pa_pstart, &group, NULL);
-
- err = ext4_mb_load_buddy(sb, group, &e4b);
-- if (err) {
-- ext4_error(sb, "Error loading buddy information for %u",
-- group);
-+ if (err)
- return;
-- }
-
- bitmap_bh = ext4_read_block_bitmap(sb, group);
- if (bitmap_bh == NULL) {
-- ext4_error(sb, "Error reading block bitmap for %u",
-- group);
- ext4_mb_release_desc(&e4b);
- continue;
- }
-@@ -4299,11 +4303,8 @@ ext4_mb_discard_lg_preallocations(struct super_block *sb,
- list_for_each_entry_safe(pa, tmp, &discard_list, u.pa_tmp_list) {
-
- ext4_get_group_no_and_offset(sb, pa->pa_pstart, &group, NULL);
-- if (ext4_mb_load_buddy(sb, group, &e4b)) {
-- ext4_error(sb, "Error loading buddy information for %u",
-- group);
-+ if (ext4_mb_load_buddy(sb, group, &e4b))
- continue;
-- }
- ext4_lock_group(sb, group);
- list_del(&pa->pa_group_list);
- ext4_get_group_info(sb, group)->bb_prealloc_nr--;
-@@ -4565,7 +4566,7 @@ repeat:
- * been updated or not when fail case. So can
- * not revert pa_free back, just mark pa_error*/
- pa->pa_error++;
-- ext4_error(sb,
-+ ext4_corrupted_block_group(sb, 0, 0,
- "Updating bitmap error: [err %d] "
- "[pa %p] [phy %lu] [logic %lu] "
- "[len %u] [free %u] [error %u] "
-@@ -4710,6 +4711,7 @@ void ext4_free_blocks(handle_t *handle, struct inode *inode,
- struct ext4_sb_info *sbi;
- struct ext4_buddy e4b;
- int err = 0;
-+ int skip_error = 0;
- int ret;
-
- /*
-@@ -4746,6 +4748,10 @@ do_more:
- overflow = 0;
- ext4_get_group_no_and_offset(sb, block, &block_group, &bit);
-
-+ if (unlikely(EXT4_MB_GRP_BBITMAP_CORRUPT(
-+ ext4_get_group_info(sb, block_group))))
-+ return;
-+
- /*
- * Check to see if we are freeing blocks across a group
- * boundary.
-@@ -4807,8 +4813,10 @@ do_more:
- }
-
- err = ext4_mb_load_buddy(sb, block_group, &e4b);
-- if (err)
-+ if (err) {
-+ skip_error = 1;
- goto error_return;
-+ }
- if ((flags & EXT4_FREE_BLOCKS_METADATA) && ext4_handle_valid(handle)) {
- struct ext4_free_data *new_entry;
- /*
-@@ -4876,10 +4884,10 @@ error_return:
- if (freed && !(flags & EXT4_FREE_BLOCKS_NO_QUOT_UPDATE))
- vfs_dq_free_block(inode, freed);
- brelse(bitmap_bh);
-- ext4_std_error(sb, err);
-+ if (!skip_error)
-+ ext4_std_error(sb, err);
- if (ac)
- kmem_cache_free(ext4_ac_cachep, ac);
-- return;
- }
-
- /**
-@@ -4970,7 +4978,7 @@ void ext4_add_groupblocks(handle_t *handle, struct super_block *sb,
-
- err = ext4_mb_load_buddy(sb, block_group, &e4b);
- if (err)
-- goto error_return;
-+ goto error_brelse;
-
- /*
- * need to update group_info->bb_free and bitmap
-@@ -5006,9 +5014,9 @@ void ext4_add_groupblocks(handle_t *handle, struct super_block *sb,
- sb->s_dirt = 1;
-
- error_return:
-- brelse(bitmap_bh);
- ext4_std_error(sb, err);
-- return;
-+error_brelse:
-+ brelse(bitmap_bh);
- }
-
- /**
-@@ -5078,11 +5086,8 @@ ext4_trim_all_free(struct super_block *sb, ext4_group_t group,
- trace_ext4_trim_all_free(sb, group, start, max);
-
- ret = ext4_mb_load_buddy(sb, group, &e4b);
-- if (ret) {
-- ext4_error(sb, "Error in loading buddy "
-- "information for %u", group);
-+ if (ret)
- return ret;
-- }
- bitmap = e4b.bd_bitmap;
-
- ext4_lock_group(sb, group);
-diff --git a/fs/ext4/super.c b/fs/ext4/super.c
-index 31ee33b..f02a632 100644
---- a/fs/ext4/super.c
-+++ b/fs/ext4/super.c
-@@ -562,6 +562,34 @@ void __ext4_warning(struct super_block *sb, const char *function,
- va_end(args);
- }
-
-+void __ext4_corrupted_block_group(struct super_block *sb, ext4_group_t group,
-+ unsigned int flags)
-+{
-+ struct ext4_sb_info *sbi = EXT4_SB(sb);
-+ struct ext4_group_info *grp = ext4_get_group_info(sb, group);
-+ struct ext4_group_desc *gdp = ext4_get_group_desc(sb, group, NULL);
-+
-+ if (flags & EXT4_GROUP_INFO_BBITMAP_CORRUPT &&
-+ !EXT4_MB_GRP_BBITMAP_CORRUPT(grp)) {
-+ ext4_free_blks_set(sb, gdp, 0);
-+ set_bit(EXT4_GROUP_INFO_BBITMAP_CORRUPT_BIT,
-+ &grp->bb_state);
-+ }
-+
-+ if (flags & EXT4_GROUP_INFO_IBITMAP_CORRUPT &&
-+ !EXT4_MB_GRP_IBITMAP_CORRUPT(grp)) {
-+ if (gdp) {
-+ ext4_free_inodes_set(sb, gdp, 0);
-+ ext4_itable_unused_set(sb, gdp, 0);
-+ }
-+ set_bit(EXT4_GROUP_INFO_IBITMAP_CORRUPT_BIT,
-+ &grp->bb_state);
-+ }
-+ sbi->s_mount_state |= EXT4_ERROR_FS;
-+ sbi->s_es->s_state |= cpu_to_le16(EXT4_ERROR_FS);
-+ ext4_commit_super(sb, 1);
-+}
-+
- void ext4_grp_locked_error(struct super_block *sb, ext4_group_t grp,
- const char *function, const char *fmt, ...)
- __releases(bitlock)
+++ /dev/null
-Index: linux-stage/fs/ext4/ext4.h
-===================================================================
---- linux-stage.orig/fs/ext4/ext4.h 2012-08-07 14:16:06.331203480 -0700
-+++ linux-stage/fs/ext4/ext4.h 2012-08-10 10:08:47.854206335 -0700
-@@ -713,6 +713,7 @@
- /* following fields for parallel directory operations -bzzz */
- struct semaphore i_append_sem;
-
-+ ext4_lblk_t i_dir_start_lookup;
- /*
- * i_block_group is the number of the block group which contains
- * this file's inode. Constant across the lifetime of the inode,
-@@ -724,7 +725,6 @@
- unsigned long i_state_flags; /* Dynamic state flags */
- unsigned long i_flags;
-
-- ext4_lblk_t i_dir_start_lookup;
- #ifdef CONFIG_EXT4_FS_XATTR
- /*
- * Extended attributes can be read independently of the main file
-@@ -788,10 +788,12 @@
- unsigned int i_reserved_data_blocks;
- unsigned int i_reserved_meta_blocks;
- unsigned int i_allocated_meta_blocks;
-- unsigned short i_delalloc_reserved_flag;
- sector_t i_da_metadata_calc_last_lblock;
- int i_da_metadata_calc_len;
-
-+ /* allocation reservation info for delalloc */
-+ unsigned short i_delalloc_reserved_flag;
-+
- /* on-disk additional length */
- __u16 i_extra_isize;
-
-@@ -807,16 +809,22 @@
- struct list_head i_aio_dio_complete_list;
- spinlock_t i_completed_io_lock;
- atomic_t i_unwritten; /* Number of inflight conversions pending */
-- struct mutex i_aio_mutex; /* big hammer for unaligned AIO */
-
- /*
- * Transactions that contain inode's metadata needed to complete
- * fsync and fdatasync, respectively.
- */
-+
- tid_t i_sync_tid;
-- tid_t i_datasync_tid;
-+
-+ struct mutex i_aio_mutex; /* big hammer for unaligned AIO */
-
- __u64 i_fs_version;
-+ /*
-+ * Transactions that contain inode's metadata needed to complete
-+ * fsync and fdatasync, respectively.
-+ */
-+ tid_t i_datasync_tid;
- };
-
- #define HAVE_DISK_INODE_VERSION
+++ /dev/null
-From fe18d649891d813964d3aaeebad873f281627fbc Mon Sep 17 00:00:00 2001
-From: Li Dongyang <dongyangli@ddn.com>
-Date: Sat, 15 Sep 2018 17:11:25 -0400
-Subject: [PATCH] ext4: don't mark mmp buffer head dirty
-
-Marking the mmp bh dirty before writing it will make writeback
-pick up the mmp block later and submit a duplicate write; we don't
-want that, as the kmmpd thread should have full control of
-reading and writing the mmp block.
-Another reason is that we can also get random I/O errors on
-the writeback request when blk integrity is enabled, because
-kmmpd could modify the content of the mmp block (e.g. setting a
-new seq and time) while the mmp block is under I/O requested
-by writeback.
-
-Signed-off-by: Li Dongyang <dongyangli@ddn.com>
-Signed-off-by: Theodore Ts'o <tytso@mit.edu>
-Reviewed-by: Andreas Dilger <adilger@dilger.ca>
-Cc: stable@vger.kernel.org
----
- fs/ext4/mmp.c | 1 -
- 1 file changed, 1 deletion(-)
-
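For orientation, here is a minimal sketch of the synchronous write path that kmmpd keeps to itself after this change. It is reconstructed from the patch's context lines; submit_bh()/wait_on_buffer() and the WRITE_SYNC flag are assumptions about this kernel branch, not something the patch adds.

static int write_mmp_block_sketch(struct super_block *sb, struct buffer_head *bh)
{
	/* kmmpd owns the MMP block: write it synchronously and do NOT
	 * mark the bh dirty, so writeback never submits a second copy
	 * of a block that kmmpd may be modifying. */
	sb_start_write(sb);
	lock_buffer(bh);
	bh->b_end_io = end_buffer_write_sync;
	get_bh(bh);
	submit_bh(WRITE_SYNC, bh);		/* assumed request flags */
	wait_on_buffer(bh);
	sb_end_write(sb);

	return buffer_uptodate(bh) ? 0 : -EIO;
}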
-Index: linux-stage/fs/ext4/mmp.c
-===================================================================
---- linux-stage.orig/fs/ext4/mmp.c
-+++ linux-stage/fs/ext4/mmp.c
-@@ -18,7 +18,6 @@ static int write_mmp_block(struct super_
- * on frozen filesystem.
- */
- sb_start_write(sb);
-- mark_buffer_dirty(bh);
- lock_buffer(bh);
- bh->b_end_io = end_buffer_write_sync;
- get_bh(bh);
+++ /dev/null
-Invoking ext4_truncate with i_mutex locked will cause a deadlock
-in Lustre. Since Lustre has its own lock to provide protection we
-don't need this check at all.
-
-Index: linux-2.6.32-504.el6.x86_64/fs/ext4/inode.c
-===================================================================
---- linux-2.6.32-504.el6.x86_64.orig/fs/ext4/inode.c
-+++ linux-3.10.0-504.el6.x86_64/fs/ext4/inode.c
-@@ -3934,8 +3934,6 @@ void ext4_end_io_work(struct work_struct
- int ext4_flush_unwritten_io(struct inode *inode)
- {
- int ret;
-- WARN_ON_ONCE(!mutex_is_locked(&inode->i_mutex) &&
-- !(inode->i_state & I_FREEING));
- ret = ext4_do_flush_completed_IO(inode, NULL);
- ext4_unwritten_wait(inode);
- return ret;
+++ /dev/null
-Fix ext4_ext_find_extent() to pre-allocate the ext4_ext_path[] array
-at the maximum possible tree depth instead of the current depth.
-This avoids races with concurrent ext_depth() growth in the current,
-unsafe implementation that re-sizes/re-allocates the ext4_ext_path[]
-array, and it does so even with the more recent, related patches that
-will be integrated into newer kernels.
-
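A quick worked illustration of the depth computation added below (assuming a 4 KiB block size, the 60-byte i_data root area, and 12-byte extent/index entries; the exact figure depends on s_maxbytes for the mount): the root holds (60 - 12) / 12 = 4 entries, and every other tree block holds (4096 - 12) / 12 = 340 entries. A maximum-size extent file spans roughly 2^32 blocks, so the code first computes 2^32 / 4 = 2^30, then the loop divides by 340 four times (about 3.2M, 9.3K, 27, then 0), ending with s_max_ext_tree_depth = 1 + 4 = 5.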
-Index: linux-stage/fs/ext4/ext4.h
-===================================================================
---- linux-stage.orig/fs/ext4/ext4.h 2016-07-15 10:55:51.000000000 +0300
-+++ linux-stage/fs/ext4/ext4.h 2016-07-15 10:56:19.000000000 +0300
-@@ -1153,6 +1153,9 @@ struct ext4_sb_info {
- unsigned long s_ext_extents;
- #endif
-
-+ /* maximum possible extents tree depth, to be computed at mount time */
-+ unsigned int s_max_ext_tree_depth;
-+
- /* for buddy allocator */
- struct ext4_group_info ***s_group_info;
- struct inode *s_buddy_cache;
-Index: linux-stage/fs/ext4/extents.c
-===================================================================
---- linux-stage.orig/fs/ext4/extents.c 2016-07-15 10:55:51.000000000 +0300
-+++ linux-stage/fs/ext4/extents.c 2016-07-15 10:56:19.000000000 +0300
-@@ -698,8 +698,9 @@ ext4_ext_find_extent(struct inode *inode
-
- /* account possible depth increase */
- if (!path) {
-- path = kzalloc(sizeof(struct ext4_ext_path) * (depth + 2),
-- GFP_NOFS);
-+ path = kzalloc(sizeof(struct ext4_ext_path) *
-+ (EXT4_SB(inode->i_sb)->s_max_ext_tree_depth + 1),
-+ GFP_NOFS);
- if (!path)
- return ERR_PTR(-ENOMEM);
- alloc = 1;
-@@ -1907,11 +1908,8 @@ static int ext4_fill_fiemap_extents(stru
- /* find extent for this block */
- down_read(&EXT4_I(inode)->i_data_sem);
-
-- if (path && ext_depth(inode) != depth) {
-- /* depth was changed. we have to realloc path */
-- kfree(path);
-- path = NULL;
-- }
-+ /* path of max possible depth will be allocated during
-+ * first pass, so its space can be re-used for each loop */
-
- path = ext4_ext_find_extent(inode, block, path);
- if (IS_ERR(path)) {
-@@ -2656,7 +2654,8 @@ again:
- path[k].p_block =
- le16_to_cpu(path[k].p_hdr->eh_entries)+1;
- } else {
-- path = kzalloc(sizeof(struct ext4_ext_path) * (depth + 1),
-+ path = kzalloc(sizeof(struct ext4_ext_path) *
-+ (EXT4_SB(inode->i_sb)->s_max_ext_tree_depth + 1),
- GFP_NOFS);
- if (path == NULL) {
- ext4_journal_stop(handle);
-@@ -2781,13 +2780,15 @@ out:
- */
- void ext4_ext_init(struct super_block *sb)
- {
-+ ext4_fsblk_t maxblocks;
-+
- /*
- * possible initialization would be here
- */
-
- if (EXT4_HAS_INCOMPAT_FEATURE(sb, EXT4_FEATURE_INCOMPAT_EXTENTS)) {
--#if defined(AGGRESSIVE_TEST) || defined(CHECK_BINSEARCH) || defined(EXTENTS_STATS)
-- printk(KERN_INFO "EXT4-fs: file extents enabled");
-+ printk(KERN_INFO "EXT4-fs (%s): file extents enabled",
-+ sb->s_id);
- #ifdef AGGRESSIVE_TEST
- printk(", aggressive tests");
- #endif
-@@ -2796,14 +2797,35 @@ void ext4_ext_init(struct super_block *s
- #endif
- #ifdef EXTENTS_STATS
- printk(", stats");
--#endif
-- printk("\n");
--#endif
--#ifdef EXTENTS_STATS
- spin_lock_init(&EXT4_SB(sb)->s_ext_stats_lock);
- EXT4_SB(sb)->s_ext_min = 1 << 30;
- EXT4_SB(sb)->s_ext_max = 0;
- #endif
-+ EXT4_SB(sb)->s_max_ext_tree_depth = 1;
-+
-+ maxblocks = sb->s_maxbytes / sb->s_blocksize;
-+
-+ /* 1st/root level/node of extents tree stands in i_data and
-+ * entries stored in tree nodes can be of type ext4_extent
-+ * (leaf node) or ext4_extent_idx (internal node) */
-+ maxblocks /= (sizeof(((struct ext4_inode_info *)0x0)->i_data) -
-+ sizeof(struct ext4_extent_header)) /
-+ max(sizeof(struct ext4_extent),
-+ sizeof(struct ext4_extent_idx));
-+
-+ /* compute maximum extents tree depth for a fully populated
-+ * file of max size made of only minimal/1-block extents */
-+ while (maxblocks > 0) {
-+ maxblocks /= (sb->s_blocksize -
-+ sizeof(struct ext4_extent_header)) /
-+ max(sizeof(struct ext4_extent),
-+ sizeof(struct ext4_extent_idx));
-+ EXT4_SB(sb)->s_max_ext_tree_depth++;
-+ }
-+
-+ printk(", maximum tree depth=%u",
-+ EXT4_SB(sb)->s_max_ext_tree_depth);
-+ printk("\n");
- }
- }
-
-Index: linux-stage/fs/ext4/super.c
-===================================================================
---- linux-stage.orig/fs/ext4/super.c 2016-07-15 10:55:51.000000000 +0300
-+++ linux-stage/fs/ext4/super.c 2016-07-15 10:56:19.000000000 +0300
-@@ -3529,6 +3529,8 @@ static int ext4_fill_super(struct super_
- if (ext4_multi_mount_protect(sb, le64_to_cpu(es->s_mmp_block)))
- goto failed_mount3;
-
-+ ext4_ext_init(sb); /* needed before using extent-mapped journal */
-+
- /*
- * The first inode we look at is the journal inode. Don't try
- * root first: it may be modified in the journal!
-@@ -3722,7 +3724,6 @@ no_journal:
- goto failed_mount4a;
- }
-
-- ext4_ext_init(sb);
- err = ext4_mb_init(sb, needs_recovery);
- if (err) {
- ext4_msg(sb, KERN_ERR, "failed to initalize mballoc (%d)",
+++ /dev/null
-commit 18aadd47f88464928b5ce57791c2e8f9f2aaece0 (v3.3-rc2-7-g18aadd4)
-Author: Bobi Jam <bobijam@whamcloud.com>
-Date: Mon Feb 20 17:53:02 2012 -0500
-
-ext4: expand commit callback and use it for mballoc
-
-The per-commit callback was used by mballoc code to manage free space
-bitmaps after deleted blocks have been released. This patch expands
-it to support multiple different callbacks, to allow other things to
-be done after the commit has been completed.
-
-Signed-off-by: Bobi Jam <bobijam@whamcloud.com>
-Signed-off-by: Andreas Dilger <adilger@whamcloud.com>
-Signed-off-by: "Theodore Ts'o" <tytso@mit.edu>
-
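As a usage illustration only (my_commit_data, my_commit_callback and my_register_commit_work below are hypothetical names; only struct ext4_journal_cb_entry and ext4_journal_callback_add() come from this patch), a caller embeds the callback entry as the first member of its own structure and registers it on a running handle:

struct my_commit_data {
	struct ext4_journal_cb_entry	mcd_jce;	/* must be the first member */
	unsigned long			mcd_ino;	/* caller-private payload */
};

/* Runs in the journal thread once the transaction commits;
 * error != 0 means the journal was aborted. */
static void my_commit_callback(struct super_block *sb,
			       struct ext4_journal_cb_entry *jce, int error)
{
	struct my_commit_data *mcd = (struct my_commit_data *)jce;

	pr_debug("ext4: inode %lu committed, rc %d\n", mcd->mcd_ino, error);
	kfree(mcd);
}

static void my_register_commit_work(handle_t *handle, struct inode *inode)
{
	struct my_commit_data *mcd = kmalloc(sizeof(*mcd), GFP_NOFS);

	if (!mcd)
		return;		/* best effort in this sketch */
	mcd->mcd_ino = inode->i_ino;
	ext4_journal_callback_add(handle, my_commit_callback, &mcd->mcd_jce);
}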
-Index: linux-stage/fs/ext4/ext4_jbd2.h
-===================================================================
---- linux-stage.orig/fs/ext4/ext4_jbd2.h
-+++ linux-stage/fs/ext4/ext4_jbd2.h
-@@ -104,6 +104,80 @@
- #define EXT4_MAXQUOTAS_INIT_BLOCKS(sb) (MAXQUOTAS*EXT4_QUOTA_INIT_BLOCKS(sb))
- #define EXT4_MAXQUOTAS_DEL_BLOCKS(sb) (MAXQUOTAS*EXT4_QUOTA_DEL_BLOCKS(sb))
-
-+/**
-+ * struct ext4_journal_cb_entry - Base structure for callback information.
-+ *
-+ * This struct is a 'seed' structure for use with your own callback
-+ * structs. If you are using callbacks you must allocate one of these
-+ * or another struct of your own definition which has this struct
-+ * as its first element and pass it to ext4_journal_callback_add().
-+ */
-+struct ext4_journal_cb_entry {
-+ /* list information for other callbacks attached to the same handle */
-+ struct list_head jce_list;
-+
-+ /* Function to call with this callback structure */
-+ void (*jce_func)(struct super_block *sb,
-+ struct ext4_journal_cb_entry *jce, int error);
-+
-+ /* user data goes here */
-+};
-+
-+/**
-+ * ext4_journal_callback_add: add a function to call after transaction commit
-+ * @handle: active journal transaction handle to register callback on
-+ * @func: callback function to call after the transaction has committed;
-+ *        it is invoked as @func(@sb, @jce, @rc), where @sb is the
-+ *        transaction's superblock, @jce is the callback data below and
-+ *        @rc is the journal state at commit (0 = committed properly)
-+ * @jce: journal callback data (internal and function private data struct)
-+ *
-+ * The registered function will be called in the context of the journal thread
-+ * after the transaction for which the handle was created has completed.
-+ *
-+ * No locks are held when the callback function is called, so it is safe to
-+ * call blocking functions from within the callback, but the callback should
-+ * not block or run for too long, or the filesystem will be blocked waiting for
-+ * the next transaction to commit. No journaling functions can be used, or
-+ * there is a risk of deadlock.
-+ *
-+ * There is no guaranteed calling order of multiple registered callbacks on
-+ * the same transaction.
-+ */
-+static inline void ext4_journal_callback_add(handle_t *handle,
-+ void (*func)(struct super_block *sb,
-+ struct ext4_journal_cb_entry *jce,
-+ int rc),
-+ struct ext4_journal_cb_entry *jce)
-+{
-+ struct ext4_sb_info *sbi =
-+ EXT4_SB(handle->h_transaction->t_journal->j_private);
-+
-+ /* Add the jce to transaction's private list */
-+ jce->jce_func = func;
-+ spin_lock(&sbi->s_md_lock);
-+ list_add(&jce->jce_list, &handle->h_transaction->t_private_list);
-+ spin_unlock(&sbi->s_md_lock);
-+}
-+
-+/**
-+ * ext4_journal_callback_del: delete a registered callback
-+ * @handle: active journal transaction handle on which callback was registered
-+ * @jce: registered journal callback entry to unregister
-+ */
-+static inline void ext4_journal_callback_del(handle_t *handle,
-+ struct ext4_journal_cb_entry *jce)
-+{
-+ struct ext4_sb_info *sbi =
-+ EXT4_SB(handle->h_transaction->t_journal->j_private);
-+
-+ spin_lock(&sbi->s_md_lock);
-+ list_del_init(&jce->jce_list);
-+ spin_unlock(&sbi->s_md_lock);
-+}
-+
-+#define HAVE_EXT4_JOURNAL_CALLBACK_ADD
-+
- int
- ext4_mark_iloc_dirty(handle_t *handle,
- struct inode *inode,
-Index: linux-stage/fs/ext4/mballoc.h
-===================================================================
---- linux-stage.orig/fs/ext4/mballoc.h
-+++ linux-stage/fs/ext4/mballoc.h
-@@ -96,23 +96,24 @@ extern u8 mb_enable_debug;
- */
- #define MB_DEFAULT_GROUP_PREALLOC 512
-
--
- struct ext4_free_data {
-- /* this links the free block information from group_info */
-- struct rb_node node;
-+ /* MUST be the first member */
-+ struct ext4_journal_cb_entry efd_jce;
-
-- /* this links the free block information from ext4_sb_info */
-- struct list_head list;
-+ /* ext4_free_data private data starts from here */
-+
-+ /* this links the free block information from group_info */
-+ struct rb_node efd_node;
-
- /* group which free block extent belongs */
-- ext4_group_t group;
-+ ext4_group_t efd_group;
-
- /* free block extent */
-- ext4_grpblk_t start_blk;
-- ext4_grpblk_t count;
-+ ext4_grpblk_t efd_start_blk;
-+ ext4_grpblk_t efd_count;
-
- /* transaction which freed this extent */
-- tid_t t_tid;
-+ tid_t efd_tid;
- };
-
- struct ext4_prealloc_space {
-Index: linux-stage/fs/ext4/mballoc.c
-===================================================================
---- linux-stage.orig/fs/ext4/mballoc.c
-+++ linux-stage/fs/ext4/mballoc.c
-@@ -21,6 +21,7 @@
- * mballoc.c contains the multiblocks allocation routines
- */
-
-+#include "ext4_jbd2.h"
- #include "mballoc.h"
- #include <linux/debugfs.h>
- #include <trace/events/ext4.h>
-@@ -336,12 +337,12 @@
- */
- static struct kmem_cache *ext4_pspace_cachep;
- static struct kmem_cache *ext4_ac_cachep;
--static struct kmem_cache *ext4_free_ext_cachep;
-+static struct kmem_cache *ext4_free_data_cachep;
- static int ext4_mb_generate_from_pa(struct super_block *sb, void *bitmap,
- ext4_group_t group);
- static void ext4_mb_generate_from_freelist(struct super_block *sb, void *bitmap,
- ext4_group_t group);
--static void release_blocks_on_commit(journal_t *journal, transaction_t *txn);
-+static void ext4_free_data_callback(struct super_block *sb, struct ext4_journal_cb_entry *jce, int error);
-
- static inline void *mb_correct_addr_and_bit(int *bit, void *addr)
- {
-@@ -2583,8 +2584,6 @@ int ext4_mb_init(struct super_block *sb,
- }
- }
-
-- if (sbi->s_journal)
-- sbi->s_journal->j_commit_callback = release_blocks_on_commit;
- return 0;
- }
-
-@@ -2686,58 +2685,54 @@ static inline int ext4_issue_discard(str
- * This function is called by the jbd2 layer once the commit has finished,
- * so we know we can free the blocks that were released with that commit.
- */
--static void release_blocks_on_commit(journal_t *journal, transaction_t *txn)
-+static void ext4_free_data_callback(struct super_block *sb,
-+ struct ext4_journal_cb_entry *jce,
-+ int rc)
- {
-- struct super_block *sb = journal->j_private;
-+ struct ext4_free_data *entry = (struct ext4_free_data *)jce;
- struct ext4_buddy e4b;
- struct ext4_group_info *db;
- int err, count = 0, count2 = 0;
-- struct ext4_free_data *entry;
-- struct list_head *l, *ltmp;
-
-- list_for_each_safe(l, ltmp, &txn->t_private_list) {
-- entry = list_entry(l, struct ext4_free_data, list);
-+ mb_debug(1, "gonna free %u blocks in group %u (0x%p):",
-+ entry->efd_count, entry->efd_group, entry);
-
-- mb_debug(1, "gonna free %u blocks in group %u (0x%p):",
-- entry->count, entry->group, entry);
-+ if (test_opt(sb, DISCARD))
-+ ext4_issue_discard(sb, entry->efd_group,
-+ entry->efd_start_blk, entry->efd_count);
-+
-+ err = ext4_mb_load_buddy(sb, entry->efd_group, &e4b);
-+ /* we expect to find existing buddy because it's pinned */
-+ BUG_ON(err != 0);
-+
-+ db = e4b.bd_info;
-+ /* there are blocks to put in buddy to make them really free */
-+ count += entry->efd_count;
-+ count2++;
-+ ext4_lock_group(sb, entry->efd_group);
-+ /* Take it out of per group rb tree */
-+ rb_erase(&entry->efd_node, &(db->bb_free_root));
-+ mb_free_blocks(NULL, &e4b, entry->efd_start_blk, entry->efd_count);
-
-- if (test_opt(sb, DISCARD))
-- ext4_issue_discard(sb, entry->group,
-- entry->start_blk, entry->count);
--
-- err = ext4_mb_load_buddy(sb, entry->group, &e4b);
-- /* we expect to find existing buddy because it's pinned */
-- BUG_ON(err != 0);
--
-- db = e4b.bd_info;
-- /* there are blocks to put in buddy to make them really free */
-- count += entry->count;
-- count2++;
-- ext4_lock_group(sb, entry->group);
-- /* Take it out of per group rb tree */
-- rb_erase(&entry->node, &(db->bb_free_root));
-- mb_free_blocks(NULL, &e4b, entry->start_blk, entry->count);
-+ /*
-+ * Clear the trimmed flag for the group so that the next
-+ * ext4_trim_fs can trim it.
-+ * If the volume is mounted with -o discard, online discard
-+ * is supported and the free blocks will be trimmed online.
-+ */
-+ if (!test_opt(sb, DISCARD))
-+ EXT4_MB_GRP_CLEAR_TRIMMED(db);
-
-- /*
-- * Clear the trimmed flag for the group so that the next
-- * ext4_trim_fs can trim it.
-- * If the volume is mounted with -o discard, online discard
-- * is supported and the free blocks will be trimmed online.
-+ if (!db->bb_free_root.rb_node) {
-+ /* No more items in the per group rb tree
-+ * balance refcounts from ext4_mb_free_metadata()
- */
-- if (!test_opt(sb, DISCARD))
-- EXT4_MB_GRP_CLEAR_TRIMMED(db);
--
-- if (!db->bb_free_root.rb_node) {
-- /* No more items in the per group rb tree
-- * balance refcounts from ext4_mb_free_metadata()
-- */
-- page_cache_release(e4b.bd_buddy_page);
-- page_cache_release(e4b.bd_bitmap_page);
-- }
-- ext4_unlock_group(sb, entry->group);
-- kmem_cache_free(ext4_free_ext_cachep, entry);
-- ext4_mb_release_desc(&e4b);
-+ page_cache_release(e4b.bd_buddy_page);
-+ page_cache_release(e4b.bd_bitmap_page);
- }
-+ ext4_unlock_group(sb, entry->efd_group);
-+ kmem_cache_free(ext4_free_data_cachep, entry);
-+ ext4_mb_release_desc(&e4b);
-
- mb_debug(1, "freed %u blocks in %u structures\n", count, count2);
- }
-@@ -2789,22 +2784,22 @@ int __init init_ext4_mballoc(void)
- kmem_cache_create("ext4_alloc_context",
- sizeof(struct ext4_allocation_context),
- 0, SLAB_RECLAIM_ACCOUNT, NULL);
-- if (ext4_ac_cachep == NULL) {
-- kmem_cache_destroy(ext4_pspace_cachep);
-- return -ENOMEM;
-- }
-+ if (ext4_ac_cachep == NULL)
-+ goto out_err;
-+
-+ ext4_free_data_cachep =
-+ KMEM_CACHE(ext4_free_data, SLAB_RECLAIM_ACCOUNT);
-+ if (ext4_free_data_cachep == NULL)
-+ goto out1_err;
-
-- ext4_free_ext_cachep =
-- kmem_cache_create("ext4_free_block_extents",
-- sizeof(struct ext4_free_data),
-- 0, SLAB_RECLAIM_ACCOUNT, NULL);
-- if (ext4_free_ext_cachep == NULL) {
-- kmem_cache_destroy(ext4_pspace_cachep);
-- kmem_cache_destroy(ext4_ac_cachep);
-- return -ENOMEM;
-- }
- ext4_create_debugfs_entry();
- return 0;
-+
-+out1_err:
-+ kmem_cache_destroy(ext4_ac_cachep);
-+out_err:
-+ kmem_cache_destroy(ext4_pspace_cachep);
-+ return -ENOMEM;
- }
-
- void exit_ext4_mballoc(void)
-@@ -2816,7 +2811,7 @@ void exit_ext4_mballoc(void)
- rcu_barrier();
- kmem_cache_destroy(ext4_pspace_cachep);
- kmem_cache_destroy(ext4_ac_cachep);
-- kmem_cache_destroy(ext4_free_ext_cachep);
-+ kmem_cache_destroy(ext4_free_data_cachep);
- ext4_remove_debugfs_entry();
- }
-
-@@ -3375,8 +3370,8 @@ static void ext4_mb_generate_from_freeli
- n = rb_first(&(grp->bb_free_root));
-
- while (n) {
-- entry = rb_entry(n, struct ext4_free_data, node);
-- mb_set_bits(bitmap, entry->start_blk, entry->count);
-+ entry = rb_entry(n, struct ext4_free_data, efd_node);
-+ mb_set_bits(bitmap, entry->efd_start_blk, entry->efd_count);
- n = rb_next(n);
- }
- return;
-@@ -4631,11 +4626,11 @@ out:
- * AND the blocks are associated with the same group.
- */
- static int can_merge(struct ext4_free_data *entry1,
-- struct ext4_free_data *entry2)
-+ struct ext4_free_data *entry2)
- {
-- if ((entry1->t_tid == entry2->t_tid) &&
-- (entry1->group == entry2->group) &&
-- ((entry1->start_blk + entry1->count) == entry2->start_blk))
-+ if ((entry1->efd_tid == entry2->efd_tid) &&
-+ (entry1->efd_group == entry2->efd_group) &&
-+ ((entry1->efd_start_blk + entry1->efd_count) == entry2->efd_start_blk))
- return 1;
- return 0;
- }
-@@ -4648,7 +4643,6 @@ ext4_mb_free_metadata(handle_t *handle,
- struct ext4_free_data *entry;
- struct ext4_group_info *db = e4b->bd_info;
- struct super_block *sb = e4b->bd_sb;
-- struct ext4_sb_info *sbi = EXT4_SB(sb);
- struct rb_node **n = &db->bb_free_root.rb_node, *node;
- struct rb_node *parent = NULL, *new_node;
-
-@@ -4656,8 +4650,8 @@ ext4_mb_free_metadata(handle_t *handle,
- BUG_ON(e4b->bd_bitmap_page == NULL);
- BUG_ON(e4b->bd_buddy_page == NULL);
-
-- new_node = &new_entry->node;
-- block = new_entry->start_blk;
-+ new_node = &new_entry->efd_node;
-+ block = new_entry->efd_start_blk;
-
- if (!*n) {
- /* first free block exent. We need to
-@@ -4670,15 +4664,15 @@ ext4_mb_free_metadata(handle_t *handle,
- }
- while (*n) {
- parent = *n;
-- entry = rb_entry(parent, struct ext4_free_data, node);
-- if (block < entry->start_blk)
-+ entry = rb_entry(parent, struct ext4_free_data, efd_node);
-+ if (block < entry->efd_start_blk)
- n = &(*n)->rb_left;
-- else if (block >= (entry->start_blk + entry->count))
-+ else if (block >= (entry->efd_start_blk + entry->efd_count))
- n = &(*n)->rb_right;
- else {
- ext4_grp_locked_error(sb, e4b->bd_group, __func__,
- "Double free of blocks %d (%d %d)",
-- block, entry->start_blk, entry->count);
-+ block, entry->efd_start_blk, entry->efd_count);
- return 0;
- }
- }
-@@ -4689,34 +4683,29 @@ ext4_mb_free_metadata(handle_t *handle,
- /* Now try to see the extent can be merged to left and right */
- node = rb_prev(new_node);
- if (node) {
-- entry = rb_entry(node, struct ext4_free_data, node);
-+ entry = rb_entry(node, struct ext4_free_data, efd_node);
- if (can_merge(entry, new_entry)) {
-- new_entry->start_blk = entry->start_blk;
-- new_entry->count += entry->count;
-+ new_entry->efd_start_blk = entry->efd_start_blk;
-+ new_entry->efd_count += entry->efd_count;
- rb_erase(node, &(db->bb_free_root));
-- spin_lock(&sbi->s_md_lock);
-- list_del(&entry->list);
-- spin_unlock(&sbi->s_md_lock);
-- kmem_cache_free(ext4_free_ext_cachep, entry);
-+ ext4_journal_callback_del(handle, &entry->efd_jce);
-+ kmem_cache_free(ext4_free_data_cachep, entry);
- }
- }
-
- node = rb_next(new_node);
- if (node) {
-- entry = rb_entry(node, struct ext4_free_data, node);
-+ entry = rb_entry(node, struct ext4_free_data, efd_node);
- if (can_merge(new_entry, entry)) {
-- new_entry->count += entry->count;
-+ new_entry->efd_count += entry->efd_count;
- rb_erase(node, &(db->bb_free_root));
-- spin_lock(&sbi->s_md_lock);
-- list_del(&entry->list);
-- spin_unlock(&sbi->s_md_lock);
-- kmem_cache_free(ext4_free_ext_cachep, entry);
-+ ext4_journal_callback_del(handle, &entry->efd_jce);
-+ kmem_cache_free(ext4_free_data_cachep, entry);
- }
- }
- /* Add the extent to transaction's private list */
-- spin_lock(&sbi->s_md_lock);
-- list_add(&new_entry->list, &handle->h_transaction->t_private_list);
-- spin_unlock(&sbi->s_md_lock);
-+ ext4_journal_callback_add(handle, ext4_free_data_callback,
-+ &new_entry->efd_jce);
- return 0;
- }
-
-@@ -4851,7 +4840,7 @@ do_more:
- * be used until this transaction is committed
- */
- retry:
-- new_entry = kmem_cache_alloc(ext4_free_ext_cachep, GFP_NOFS);
-+ new_entry = kmem_cache_alloc(ext4_free_data_cachep, GFP_NOFS);
- if (!new_entry) {
- /*
- * We use a retry loop because
-@@ -4861,10 +4850,10 @@ retry:
- congestion_wait(BLK_RW_ASYNC, HZ/50);
- goto retry;
- }
-- new_entry->start_blk = bit;
-- new_entry->group = block_group;
-- new_entry->count = count;
-- new_entry->t_tid = handle->h_transaction->t_tid;
-+ new_entry->efd_start_blk = bit;
-+ new_entry->efd_group = block_group;
-+ new_entry->efd_count = count;
-+ new_entry->efd_tid = handle->h_transaction->t_tid;
-
- ext4_lock_group(sb, block_group);
- mb_clear_bits(bitmap_bh->b_data, bit, count);
-Index: linux-stage/fs/ext4/super.c
-===================================================================
---- linux-stage.orig/fs/ext4/super.c
-+++ linux-stage/fs/ext4/super.c
-@@ -336,6 +336,18 @@ void ext4_journal_abort_handle(const cha
- jbd2_journal_abort_handle(handle);
- }
-
-+static void ext4_journal_commit_callback(journal_t *journal, transaction_t *txn)
-+{
-+ struct super_block *sb = journal->j_private;
-+ int error = is_journal_aborted(journal);
-+ struct ext4_journal_cb_entry *jce, *tmp;
-+
-+ list_for_each_entry_safe(jce, tmp, &txn->t_private_list, jce_list) {
-+ list_del_init(&jce->jce_list);
-+ jce->jce_func(sb, jce, error);
-+ }
-+}
-+
- /* Deal with the reporting of failure conditions on a filesystem such as
- * inconsistencies detected or read IO failures.
- *
-@@ -3492,6 +3504,8 @@ static int ext4_fill_super(struct super_
- ext4_count_dirs(sb));
- percpu_counter_set(&sbi->s_dirtyblocks_counter, 0);
-
-+ sbi->s_journal->j_commit_callback = ext4_journal_commit_callback;
-+
- no_journal:
- if (test_opt(sb, NOBH)) {
- if (!(test_opt(sb, DATA_FLAGS) == EXT4_MOUNT_WRITEBACK_DATA)) {
+++ /dev/null
-Index: linux-2.6.32-504.3.3.el6.x86_64/include/linux/htree_lock.h
-===================================================================
---- /dev/null
-+++ linux-2.6.32-504.3.3.el6.x86_64/include/linux/htree_lock.h
-@@ -0,0 +1,187 @@
-+/*
-+ * include/linux/htree_lock.h
-+ *
-+ * Copyright (c) 2011, 2012, Intel Corporation.
-+ *
-+ * Author: Liang Zhen <liang@whamcloud.com>
-+ */
-+
-+/*
-+ * htree lock
-+ *
-+ * htree_lock is an advanced lock; it supports five lock modes (the concept
-+ * is taken from DLM) and it is a sleeping lock.
-+ *
-+ * The most common use case is:
-+ * - create an htree_lock_head for the data
-+ * - each thread (contender) creates its own htree_lock
-+ * - a contender calls htree_lock(lock_node, mode) to protect the data and
-+ *   htree_unlock to release the lock
-+ *
-+ * There is also a more complex, advanced use case: a user can take a PW/PR
-+ * lock on a particular key, mostly while already holding a shared lock
-+ * (CW, CR) on the htree:
-+ *
-+ * htree_lock(lock_node, HTREE_LOCK_CR); lock the htree with CR
-+ * htree_node_lock(lock_node, HTREE_LOCK_PR, key...); lock @key with PR
-+ * ...
-+ * htree_node_unlock(lock_node); unlock the key
-+ *
-+ * Another tip: we can have N levels of this kind of key; all we need to
-+ * do is specify N levels when creating the htree_lock_head, then we can
-+ * lock/unlock a specific level by:
-+ * htree_node_lock(lock_node, mode1, key1, level1...);
-+ * do something;
-+ * htree_node_lock(lock_node, mode1, key2, level2...);
-+ * do something;
-+ * htree_node_unlock(lock_node, level2);
-+ * htree_node_unlock(lock_node, level1);
-+ *
-+ * NB: for multi-level locking, be careful about locking order to avoid deadlock
-+ */
-+
-+#ifndef _LINUX_HTREE_LOCK_H
-+#define _LINUX_HTREE_LOCK_H
-+
-+#include <linux/list.h>
-+#include <linux/spinlock.h>
-+#include <linux/sched.h>
-+
-+/*
-+ * Lock Modes
-+ * more details can be found here:
-+ * http://en.wikipedia.org/wiki/Distributed_lock_manager
-+ */
-+typedef enum {
-+ HTREE_LOCK_EX = 0, /* exclusive lock: incompatible with all others */
-+ HTREE_LOCK_PW, /* protected write: allows only CR users */
-+ HTREE_LOCK_PR, /* protected read: allow PR, CR users */
-+ HTREE_LOCK_CW, /* concurrent write: allow CR, CW users */
-+ HTREE_LOCK_CR, /* concurrent read: allow all but EX users */
-+ HTREE_LOCK_MAX, /* number of lock modes */
-+} htree_lock_mode_t;
-+
-+#define HTREE_LOCK_NL HTREE_LOCK_MAX
-+#define HTREE_LOCK_INVAL 0xdead10c
-+
-+enum {
-+ HTREE_HBITS_MIN = 2,
-+ HTREE_HBITS_DEF = 14,
-+ HTREE_HBITS_MAX = 32,
-+};
-+
-+enum {
-+ HTREE_EVENT_DISABLE = (0),
-+ HTREE_EVENT_RD = (1 << HTREE_LOCK_PR),
-+ HTREE_EVENT_WR = (1 << HTREE_LOCK_PW),
-+ HTREE_EVENT_RDWR = (HTREE_EVENT_RD | HTREE_EVENT_WR),
-+};
-+
-+struct htree_lock;
-+
-+typedef void (*htree_event_cb_t)(void *target, void *event);
-+
-+struct htree_lock_child {
-+ struct list_head lc_list; /* granted list */
-+ htree_event_cb_t lc_callback; /* event callback */
-+ unsigned lc_events; /* event types */
-+};
-+
-+struct htree_lock_head {
-+ unsigned long lh_lock; /* bits lock */
-+ /* blocked lock list (htree_lock) */
-+ struct list_head lh_blocked_list;
-+ /* # key levels */
-+ u16 lh_depth;
-+ /* hash bits for key and limit number of locks */
-+ u16 lh_hbits;
-+ /* counters for blocked locks */
-+ u16 lh_nblocked[HTREE_LOCK_MAX];
-+ /* counters for granted locks */
-+ u16 lh_ngranted[HTREE_LOCK_MAX];
-+ /* private data */
-+ void *lh_private;
-+ /* array of children locks */
-+ struct htree_lock_child lh_children[0];
-+};
-+
-+/* htree_lock_node_t is child-lock for a specific key (ln_value) */
-+struct htree_lock_node {
-+ htree_lock_mode_t ln_mode;
-+ /* major hash key */
-+ u16 ln_major_key;
-+ /* minor hash key */
-+ u16 ln_minor_key;
-+ struct list_head ln_major_list;
-+ struct list_head ln_minor_list;
-+ /* alive list, all locks (granted, blocked, listening) are on it */
-+ struct list_head ln_alive_list;
-+ /* blocked list */
-+ struct list_head ln_blocked_list;
-+ /* granted list */
-+ struct list_head ln_granted_list;
-+ void *ln_ev_target;
-+};
-+
-+struct htree_lock {
-+ struct task_struct *lk_task;
-+ struct htree_lock_head *lk_head;
-+ void *lk_private;
-+ unsigned lk_depth;
-+ htree_lock_mode_t lk_mode;
-+ struct list_head lk_blocked_list;
-+ struct htree_lock_node lk_nodes[0];
-+};
-+
-+/* create a lock head, which stands for a resource */
-+struct htree_lock_head *htree_lock_head_alloc(unsigned depth,
-+ unsigned hbits, unsigned priv);
-+/* free a lock head */
-+void htree_lock_head_free(struct htree_lock_head *lhead);
-+/* register event callback for child lock at level @depth */
-+void htree_lock_event_attach(struct htree_lock_head *lhead, unsigned depth,
-+ unsigned events, htree_event_cb_t callback);
-+/* create a lock handle, which stands for a thread */
-+struct htree_lock *htree_lock_alloc(unsigned depth, unsigned pbytes);
-+/* free a lock handle */
-+void htree_lock_free(struct htree_lock *lck);
-+/* lock htree, when @wait is true, 0 is returned if the lock can't
-+ * be granted immediately */
-+int htree_lock_try(struct htree_lock *lck, struct htree_lock_head *lhead,
-+ htree_lock_mode_t mode, int wait);
-+/* unlock htree */
-+void htree_unlock(struct htree_lock *lck);
-+/* unlock and relock htree with @new_mode */
-+int htree_change_lock_try(struct htree_lock *lck,
-+ htree_lock_mode_t new_mode, int wait);
-+void htree_change_mode(struct htree_lock *lck, htree_lock_mode_t mode);
-+/* acquire a child lock (key) of the htree at level @dep; @event will be sent
-+ * to all listeners on this @key while the lock is being granted */
-+int htree_node_lock_try(struct htree_lock *lck, htree_lock_mode_t mode,
-+ u32 key, unsigned dep, int wait, void *event);
-+/* release child lock at level @dep, this lock will listen on it's key
-+ * if @event isn't NULL, event_cb will be called against @lck while granting
-+ * any other lock at level @dep with the same key */
-+void htree_node_unlock(struct htree_lock *lck, unsigned dep, void *event);
-+/* stop listening on child lock at level @dep */
-+void htree_node_stop_listen(struct htree_lock *lck, unsigned dep);
-+/* for debug */
-+void htree_lock_stat_print(int depth);
-+void htree_lock_stat_reset(void);
-+
-+#define htree_lock(lck, lh, mode) htree_lock_try(lck, lh, mode, 1)
-+#define htree_change_lock(lck, mode) htree_change_lock_try(lck, mode, 1)
-+
-+#define htree_lock_mode(lck) ((lck)->lk_mode)
-+
-+#define htree_node_lock(lck, mode, key, dep) \
-+ htree_node_lock_try(lck, mode, key, dep, 1, NULL)
-+/* this is only safe in thread context of lock owner */
-+#define htree_node_is_granted(lck, dep) \
-+ ((lck)->lk_nodes[dep].ln_mode != HTREE_LOCK_INVAL && \
-+ (lck)->lk_nodes[dep].ln_mode != HTREE_LOCK_NL)
-+/* this is only safe in thread context of lock owner */
-+#define htree_node_is_listening(lck, dep) \
-+ ((lck)->lk_nodes[dep].ln_mode == HTREE_LOCK_NL)
-+
-+#endif
-Index: linux-2.6.32-504.3.3.el6.x86_64/fs/ext4/htree_lock.c
-===================================================================
---- /dev/null
-+++ linux-2.6.32-504.3.3.el6.x86_64/fs/ext4/htree_lock.c
-@@ -0,0 +1,880 @@
-+/*
-+ * fs/ext4/htree_lock.c
-+ *
-+ * Copyright (c) 2011, 2012, Intel Corporation.
-+ *
-+ * Author: Liang Zhen <liang@whamcloud.com>
-+ */
-+#include <linux/jbd2.h>
-+#include <linux/hash.h>
-+#include <linux/module.h>
-+#include <linux/htree_lock.h>
-+
-+enum {
-+ HTREE_LOCK_BIT_EX = (1 << HTREE_LOCK_EX),
-+ HTREE_LOCK_BIT_PW = (1 << HTREE_LOCK_PW),
-+ HTREE_LOCK_BIT_PR = (1 << HTREE_LOCK_PR),
-+ HTREE_LOCK_BIT_CW = (1 << HTREE_LOCK_CW),
-+ HTREE_LOCK_BIT_CR = (1 << HTREE_LOCK_CR),
-+};
-+
-+enum {
-+ HTREE_LOCK_COMPAT_EX = 0,
-+ HTREE_LOCK_COMPAT_PW = HTREE_LOCK_COMPAT_EX | HTREE_LOCK_BIT_CR,
-+ HTREE_LOCK_COMPAT_PR = HTREE_LOCK_COMPAT_PW | HTREE_LOCK_BIT_PR,
-+ HTREE_LOCK_COMPAT_CW = HTREE_LOCK_COMPAT_PW | HTREE_LOCK_BIT_CW,
-+ HTREE_LOCK_COMPAT_CR = HTREE_LOCK_COMPAT_CW | HTREE_LOCK_BIT_PR |
-+ HTREE_LOCK_BIT_PW,
-+};
-+
-+static int htree_lock_compat[] = {
-+ [HTREE_LOCK_EX] HTREE_LOCK_COMPAT_EX,
-+ [HTREE_LOCK_PW] HTREE_LOCK_COMPAT_PW,
-+ [HTREE_LOCK_PR] HTREE_LOCK_COMPAT_PR,
-+ [HTREE_LOCK_CW] HTREE_LOCK_COMPAT_CW,
-+ [HTREE_LOCK_CR] HTREE_LOCK_COMPAT_CR,
-+};
-+
-+/* max allowed htree-lock depth.
-+ * We only need depth=3 for ext4 although user can have higher value. */
-+#define HTREE_LOCK_DEP_MAX 16
-+
-+#ifdef HTREE_LOCK_DEBUG
-+
-+static char *hl_name[] = {
-+ [HTREE_LOCK_EX] "EX",
-+ [HTREE_LOCK_PW] "PW",
-+ [HTREE_LOCK_PR] "PR",
-+ [HTREE_LOCK_CW] "CW",
-+ [HTREE_LOCK_CR] "CR",
-+};
-+
-+/* lock stats */
-+struct htree_lock_node_stats {
-+ unsigned long long blocked[HTREE_LOCK_MAX];
-+ unsigned long long granted[HTREE_LOCK_MAX];
-+ unsigned long long retried[HTREE_LOCK_MAX];
-+ unsigned long long events;
-+};
-+
-+struct htree_lock_stats {
-+ struct htree_lock_node_stats nodes[HTREE_LOCK_DEP_MAX];
-+ unsigned long long granted[HTREE_LOCK_MAX];
-+ unsigned long long blocked[HTREE_LOCK_MAX];
-+};
-+
-+static struct htree_lock_stats hl_stats;
-+
-+void htree_lock_stat_reset(void)
-+{
-+ memset(&hl_stats, 0, sizeof(hl_stats));
-+}
-+
-+void htree_lock_stat_print(int depth)
-+{
-+ int i;
-+ int j;
-+
-+ printk(KERN_DEBUG "HTREE LOCK STATS:\n");
-+ for (i = 0; i < HTREE_LOCK_MAX; i++) {
-+ printk(KERN_DEBUG "[%s]: G [%10llu], B [%10llu]\n",
-+ hl_name[i], hl_stats.granted[i], hl_stats.blocked[i]);
-+ }
-+ for (i = 0; i < depth; i++) {
-+ printk(KERN_DEBUG "HTREE CHILD [%d] STATS:\n", i);
-+ for (j = 0; j < HTREE_LOCK_MAX; j++) {
-+ printk(KERN_DEBUG
-+ "[%s]: G [%10llu], B [%10llu], R [%10llu]\n",
-+ hl_name[j], hl_stats.nodes[i].granted[j],
-+ hl_stats.nodes[i].blocked[j],
-+ hl_stats.nodes[i].retried[j]);
-+ }
-+ }
-+}
-+
-+#define lk_grant_inc(m) do { hl_stats.granted[m]++; } while (0)
-+#define lk_block_inc(m) do { hl_stats.blocked[m]++; } while (0)
-+#define ln_grant_inc(d, m) do { hl_stats.nodes[d].granted[m]++; } while (0)
-+#define ln_block_inc(d, m) do { hl_stats.nodes[d].blocked[m]++; } while (0)
-+#define ln_retry_inc(d, m) do { hl_stats.nodes[d].retried[m]++; } while (0)
-+#define ln_event_inc(d) do { hl_stats.nodes[d].events++; } while (0)
-+
-+#else /* !DEBUG */
-+
-+void htree_lock_stat_reset(void) {}
-+void htree_lock_stat_print(int depth) {}
-+
-+#define lk_grant_inc(m) do {} while (0)
-+#define lk_block_inc(m) do {} while (0)
-+#define ln_grant_inc(d, m) do {} while (0)
-+#define ln_block_inc(d, m) do {} while (0)
-+#define ln_retry_inc(d, m) do {} while (0)
-+#define ln_event_inc(d) do {} while (0)
-+
-+#endif /* DEBUG */
-+
-+EXPORT_SYMBOL(htree_lock_stat_reset);
-+EXPORT_SYMBOL(htree_lock_stat_print);
-+
-+#define HTREE_DEP_ROOT (-1)
-+
-+#define htree_spin_lock(lhead, dep) \
-+ bit_spin_lock((dep) + 1, &(lhead)->lh_lock)
-+#define htree_spin_unlock(lhead, dep) \
-+ bit_spin_unlock((dep) + 1, &(lhead)->lh_lock)
-+
-+#define htree_key_event_ignore(child, ln) \
-+ (!((child)->lc_events & (1 << (ln)->ln_mode)))
-+
-+static int
-+htree_key_list_empty(struct htree_lock_node *ln)
-+{
-+ return list_empty(&ln->ln_major_list) && list_empty(&ln->ln_minor_list);
-+}
-+
-+static void
-+htree_key_list_del_init(struct htree_lock_node *ln)
-+{
-+ struct htree_lock_node *tmp = NULL;
-+
-+ if (!list_empty(&ln->ln_minor_list)) {
-+ tmp = list_entry(ln->ln_minor_list.next,
-+ struct htree_lock_node, ln_minor_list);
-+ list_del_init(&ln->ln_minor_list);
-+ }
-+
-+ if (list_empty(&ln->ln_major_list))
-+ return;
-+
-+ if (tmp == NULL) { /* not on minor key list */
-+ list_del_init(&ln->ln_major_list);
-+ } else {
-+ BUG_ON(!list_empty(&tmp->ln_major_list));
-+ list_replace_init(&ln->ln_major_list, &tmp->ln_major_list);
-+ }
-+}
-+
-+static void
-+htree_key_list_replace_init(struct htree_lock_node *old,
-+ struct htree_lock_node *new)
-+{
-+ if (!list_empty(&old->ln_major_list))
-+ list_replace_init(&old->ln_major_list, &new->ln_major_list);
-+
-+ if (!list_empty(&old->ln_minor_list))
-+ list_replace_init(&old->ln_minor_list, &new->ln_minor_list);
-+}
-+
-+static void
-+htree_key_event_enqueue(struct htree_lock_child *child,
-+ struct htree_lock_node *ln, int dep, void *event)
-+{
-+ struct htree_lock_node *tmp;
-+
-+ /* NB: ALWAYS called holding lhead::lh_lock(dep) */
-+ BUG_ON(ln->ln_mode == HTREE_LOCK_NL);
-+ if (event == NULL || htree_key_event_ignore(child, ln))
-+ return;
-+
-+ /* shouldn't be a very long list */
-+ list_for_each_entry(tmp, &ln->ln_alive_list, ln_alive_list) {
-+ if (tmp->ln_mode == HTREE_LOCK_NL) {
-+ ln_event_inc(dep);
-+ if (child->lc_callback != NULL)
-+ child->lc_callback(tmp->ln_ev_target, event);
-+ }
-+ }
-+}
-+
-+static int
-+htree_node_lock_enqueue(struct htree_lock *newlk, struct htree_lock *curlk,
-+ unsigned dep, int wait, void *event)
-+{
-+ struct htree_lock_child *child = &newlk->lk_head->lh_children[dep];
-+ struct htree_lock_node *newln = &newlk->lk_nodes[dep];
-+ struct htree_lock_node *curln = &curlk->lk_nodes[dep];
-+
-+ /* NB: ALWAYS called holding lhead::lh_lock(dep) */
-+ /* NB: we only expect PR/PW lock modes here; only these two modes are
-+ * allowed for htree_node_lock (asserted in htree_node_lock_internal).
-+ * NL is only used for listeners; a user can't directly request NL mode */
-+ if ((curln->ln_mode == HTREE_LOCK_NL) ||
-+ (curln->ln_mode != HTREE_LOCK_PW &&
-+ newln->ln_mode != HTREE_LOCK_PW)) {
-+ /* no conflict, attach it on granted list of @curlk */
-+ if (curln->ln_mode != HTREE_LOCK_NL) {
-+ list_add(&newln->ln_granted_list,
-+ &curln->ln_granted_list);
-+ } else {
-+ /* replace key owner */
-+ htree_key_list_replace_init(curln, newln);
-+ }
-+
-+ list_add(&newln->ln_alive_list, &curln->ln_alive_list);
-+ htree_key_event_enqueue(child, newln, dep, event);
-+ ln_grant_inc(dep, newln->ln_mode);
-+ return 1; /* still hold lh_lock */
-+ }
-+
-+ if (!wait) { /* can't grant and don't want to wait */
-+ ln_retry_inc(dep, newln->ln_mode);
-+ newln->ln_mode = HTREE_LOCK_INVAL;
-+ return -1; /* don't wait and just return -1 */
-+ }
-+
-+ newlk->lk_task = current;
-+ set_current_state(TASK_UNINTERRUPTIBLE);
-+ /* conflict, attach it on blocked list of curlk */
-+ list_add_tail(&newln->ln_blocked_list, &curln->ln_blocked_list);
-+ list_add(&newln->ln_alive_list, &curln->ln_alive_list);
-+ ln_block_inc(dep, newln->ln_mode);
-+
-+ htree_spin_unlock(newlk->lk_head, dep);
-+ /* wait to be given the lock */
-+ if (newlk->lk_task != NULL)
-+ schedule();
-+ /* granted, no doubt, wake up will set me RUNNING */
-+ if (event == NULL || htree_key_event_ignore(child, newln))
-+ return 0; /* granted without lh_lock */
-+
-+ htree_spin_lock(newlk->lk_head, dep);
-+ htree_key_event_enqueue(child, newln, dep, event);
-+ return 1; /* still hold lh_lock */
-+}
-+
-+/*
-+ * get PR/PW access to particular tree-node according to @dep and @key,
-+ * it will return -1 if @wait is false and can't immediately grant this lock.
-+ * All listeners(HTREE_LOCK_NL) on @dep and with the same @key will get
-+ * @event if it's not NULL.
-+ * NB: ALWAYS called holding lhead::lh_lock
-+ */
-+static int
-+htree_node_lock_internal(struct htree_lock_head *lhead, struct htree_lock *lck,
-+ htree_lock_mode_t mode, u32 key, unsigned dep,
-+ int wait, void *event)
-+{
-+ LIST_HEAD (list);
-+ struct htree_lock *tmp;
-+ struct htree_lock *tmp2;
-+ u16 major;
-+ u16 minor;
-+ u8 reverse;
-+ u8 ma_bits;
-+ u8 mi_bits;
-+
-+ BUG_ON(mode != HTREE_LOCK_PW && mode != HTREE_LOCK_PR);
-+ BUG_ON(htree_node_is_granted(lck, dep));
-+
-+ key = hash_long(key, lhead->lh_hbits);
-+
-+ mi_bits = lhead->lh_hbits >> 1;
-+ ma_bits = lhead->lh_hbits - mi_bits;
-+
-+ lck->lk_nodes[dep].ln_major_key = major = key & ((1U << ma_bits) - 1);
-+ lck->lk_nodes[dep].ln_minor_key = minor = key >> ma_bits;
-+ lck->lk_nodes[dep].ln_mode = mode;
-+
-+ /*
-+ * The major key list is an ordered list, so searches are started
-+ * at the end of the list that is numerically closer to major_key,
-+ * so at most half of the list will be walked (for well-distributed
-+ * keys). The list traversal aborts early if the expected key
-+ * location is passed.
-+ */
-+ reverse = (major >= (1 << (ma_bits - 1)));
-+
-+ if (reverse) {
-+ list_for_each_entry_reverse(tmp,
-+ &lhead->lh_children[dep].lc_list,
-+ lk_nodes[dep].ln_major_list) {
-+ if (tmp->lk_nodes[dep].ln_major_key == major) {
-+ goto search_minor;
-+
-+ } else if (tmp->lk_nodes[dep].ln_major_key < major) {
-+ /* attach _after_ @tmp */
-+ list_add(&lck->lk_nodes[dep].ln_major_list,
-+ &tmp->lk_nodes[dep].ln_major_list);
-+ goto out_grant_major;
-+ }
-+ }
-+
-+ list_add(&lck->lk_nodes[dep].ln_major_list,
-+ &lhead->lh_children[dep].lc_list);
-+ goto out_grant_major;
-+
-+ } else {
-+ list_for_each_entry(tmp, &lhead->lh_children[dep].lc_list,
-+ lk_nodes[dep].ln_major_list) {
-+ if (tmp->lk_nodes[dep].ln_major_key == major) {
-+ goto search_minor;
-+
-+ } else if (tmp->lk_nodes[dep].ln_major_key > major) {
-+ /* insert _before_ @tmp */
-+ list_add_tail(&lck->lk_nodes[dep].ln_major_list,
-+ &tmp->lk_nodes[dep].ln_major_list);
-+ goto out_grant_major;
-+ }
-+ }
-+
-+ list_add_tail(&lck->lk_nodes[dep].ln_major_list,
-+ &lhead->lh_children[dep].lc_list);
-+ goto out_grant_major;
-+ }
-+
-+ search_minor:
-+ /*
-+ * NB: the minor_key list doesn't have a "head"; @list is just a
-+ * temporary stub to help with list searching, so make sure it's
-+ * removed after searching.
-+ * The minor_key list is an ordered list too.
-+ */
-+ list_add_tail(&list, &tmp->lk_nodes[dep].ln_minor_list);
-+
-+ reverse = (minor >= (1 << (mi_bits - 1)));
-+
-+ if (reverse) {
-+ list_for_each_entry_reverse(tmp2, &list,
-+ lk_nodes[dep].ln_minor_list) {
-+ if (tmp2->lk_nodes[dep].ln_minor_key == minor) {
-+ goto out_enqueue;
-+
-+ } else if (tmp2->lk_nodes[dep].ln_minor_key < minor) {
-+ /* attach _after_ @tmp2 */
-+ list_add(&lck->lk_nodes[dep].ln_minor_list,
-+ &tmp2->lk_nodes[dep].ln_minor_list);
-+ goto out_grant_minor;
-+ }
-+ }
-+
-+ list_add(&lck->lk_nodes[dep].ln_minor_list, &list);
-+
-+ } else {
-+ list_for_each_entry(tmp2, &list,
-+ lk_nodes[dep].ln_minor_list) {
-+ if (tmp2->lk_nodes[dep].ln_minor_key == minor) {
-+ goto out_enqueue;
-+
-+ } else if (tmp2->lk_nodes[dep].ln_minor_key > minor) {
-+ /* insert _before_ @tmp2 */
-+ list_add_tail(&lck->lk_nodes[dep].ln_minor_list,
-+ &tmp2->lk_nodes[dep].ln_minor_list);
-+ goto out_grant_minor;
-+ }
-+ }
-+
-+ list_add_tail(&lck->lk_nodes[dep].ln_minor_list, &list);
-+ }
-+
-+ out_grant_minor:
-+ if (list.next == &lck->lk_nodes[dep].ln_minor_list) {
-+ /* new lock @lck is the first one on minor_key list, which
-+ * means it has the smallest minor_key and it should
-+ * replace @tmp as minor_key owner */
-+ list_replace_init(&tmp->lk_nodes[dep].ln_major_list,
-+ &lck->lk_nodes[dep].ln_major_list);
-+ }
-+ /* remove the temporary head */
-+ list_del(&list);
-+
-+ out_grant_major:
-+ ln_grant_inc(dep, lck->lk_nodes[dep].ln_mode);
-+ return 1; /* granted with holding lh_lock */
-+
-+ out_enqueue:
-+ list_del(&list); /* remove temporary head */
-+ return htree_node_lock_enqueue(lck, tmp2, dep, wait, event);
-+}
-+
-+/*
-+ * release the key of @lck at level @dep, and grant any blocked locks.
-+ * the caller will still listen on @key if @event is not NULL, which means
-+ * the caller can see an event (via event_cb) while granting any lock with
-+ * the same key at level @dep.
-+ * NB: ALWAYS called holding lhead::lh_lock
-+ * NB: listener will not block anyone because listening mode is HTREE_LOCK_NL
-+ */
-+static void
-+htree_node_unlock_internal(struct htree_lock_head *lhead,
-+ struct htree_lock *curlk, unsigned dep, void *event)
-+{
-+ struct htree_lock_node *curln = &curlk->lk_nodes[dep];
-+ struct htree_lock *grtlk = NULL;
-+ struct htree_lock_node *grtln;
-+ struct htree_lock *poslk;
-+ struct htree_lock *tmplk;
-+
-+ if (!htree_node_is_granted(curlk, dep))
-+ return;
-+
-+ if (!list_empty(&curln->ln_granted_list)) {
-+ /* there is another granted lock */
-+ grtlk = list_entry(curln->ln_granted_list.next,
-+ struct htree_lock,
-+ lk_nodes[dep].ln_granted_list);
-+ list_del_init(&curln->ln_granted_list);
-+ }
-+
-+ if (grtlk == NULL && !list_empty(&curln->ln_blocked_list)) {
-+ /*
-+ * @curlk is the only granted lock, so we confirmed:
-+ * a) curln is key owner (attached on major/minor_list),
-+ * so if there is any blocked lock, it should be attached
-+ * on curln->ln_blocked_list
-+ * b) we always can grant the first blocked lock
-+ */
-+ grtlk = list_entry(curln->ln_blocked_list.next,
-+ struct htree_lock,
-+ lk_nodes[dep].ln_blocked_list);
-+ BUG_ON(grtlk->lk_task == NULL);
-+ wake_up_process(grtlk->lk_task);
-+ }
-+
-+ if (event != NULL &&
-+ lhead->lh_children[dep].lc_events != HTREE_EVENT_DISABLE) {
-+ curln->ln_ev_target = event;
-+ curln->ln_mode = HTREE_LOCK_NL; /* listen! */
-+ } else {
-+ curln->ln_mode = HTREE_LOCK_INVAL;
-+ }
-+
-+ if (grtlk == NULL) { /* I must be the only one locking this key */
-+ struct htree_lock_node *tmpln;
-+
-+ BUG_ON(htree_key_list_empty(curln));
-+
-+ if (curln->ln_mode == HTREE_LOCK_NL) /* listening */
-+ return;
-+
-+ /* not listening */
-+ if (list_empty(&curln->ln_alive_list)) { /* no more listener */
-+ htree_key_list_del_init(curln);
-+ return;
-+ }
-+
-+ tmpln = list_entry(curln->ln_alive_list.next,
-+ struct htree_lock_node, ln_alive_list);
-+
-+ BUG_ON(tmpln->ln_mode != HTREE_LOCK_NL);
-+
-+ htree_key_list_replace_init(curln, tmpln);
-+ list_del_init(&curln->ln_alive_list);
-+
-+ return;
-+ }
-+
-+ /* have a granted lock */
-+ grtln = &grtlk->lk_nodes[dep];
-+ if (!list_empty(&curln->ln_blocked_list)) {
-+ /* only key owner can be on both lists */
-+ BUG_ON(htree_key_list_empty(curln));
-+
-+ if (list_empty(&grtln->ln_blocked_list)) {
-+ list_add(&grtln->ln_blocked_list,
-+ &curln->ln_blocked_list);
-+ }
-+ list_del_init(&curln->ln_blocked_list);
-+ }
-+ /*
-+ * NB: this is the tricky part:
-+ * There are only two modes for child-locks (PR and PW), and only the
-+ * owner of the key (attached on major/minor_list) can be on both
-+ * blocked_list and granted_list, so @grtlk must be one of these two
-+ * cases:
-+ *
-+ * a) @grtlk is taken from granted_list, which means we've granted
-+ * more than one lock, so @grtlk has to be PR; the first blocked
-+ * lock must be PW and we can't grant it at all.
-+ * So even if @grtlk is not the owner of the key (empty blocked_list),
-+ * we don't care because we can't grant any lock anyway.
-+ * b) we just granted a new lock taken from the head of the blocked
-+ * list; it is the first granted lock and the first one linked on
-+ * blocked_list.
-+ *
-+ * Either way, we get the correct result by iterating the blocked_list
-+ * of @grtlk, without having to work out which lock owns the current key.
-+ */
-+ list_for_each_entry_safe(poslk, tmplk, &grtln->ln_blocked_list,
-+ lk_nodes[dep].ln_blocked_list) {
-+ if (grtlk->lk_nodes[dep].ln_mode == HTREE_LOCK_PW ||
-+ poslk->lk_nodes[dep].ln_mode == HTREE_LOCK_PW)
-+ break;
-+ /* grant all readers */
-+ list_del_init(&poslk->lk_nodes[dep].ln_blocked_list);
-+ list_add(&poslk->lk_nodes[dep].ln_granted_list,
-+ &grtln->ln_granted_list);
-+
-+ BUG_ON(poslk->lk_task == NULL);
-+ wake_up_process(poslk->lk_task);
-+ }
-+
-+ /* if @curln is the owner of this key, replace it with @grtln */
-+ if (!htree_key_list_empty(curln))
-+ htree_key_list_replace_init(curln, grtln);
-+
-+ if (curln->ln_mode == HTREE_LOCK_INVAL)
-+ list_del_init(&curln->ln_alive_list);
-+}
-+
-+/*
-+ * Wrapper of htree_node_lock_internal(); it returns 1 when granted, and 0
-+ * only if @wait is false and the lock can't be granted immediately
-+ */
-+int
-+htree_node_lock_try(struct htree_lock *lck, htree_lock_mode_t mode,
-+ u32 key, unsigned dep, int wait, void *event)
-+{
-+ struct htree_lock_head *lhead = lck->lk_head;
-+ int rc;
-+
-+ BUG_ON(dep >= lck->lk_depth);
-+ BUG_ON(lck->lk_mode == HTREE_LOCK_INVAL);
-+
-+ htree_spin_lock(lhead, dep);
-+ rc = htree_node_lock_internal(lhead, lck, mode, key, dep, wait, event);
-+ if (rc != 0)
-+ htree_spin_unlock(lhead, dep);
-+ return rc >= 0;
-+}
-+EXPORT_SYMBOL(htree_node_lock_try);
-+
-+/* wrapper of htree_node_unlock_internal() */
-+void
-+htree_node_unlock(struct htree_lock *lck, unsigned dep, void *event)
-+{
-+ struct htree_lock_head *lhead = lck->lk_head;
-+
-+ BUG_ON(dep >= lck->lk_depth);
-+ BUG_ON(lck->lk_mode == HTREE_LOCK_INVAL);
-+
-+ htree_spin_lock(lhead, dep);
-+ htree_node_unlock_internal(lhead, lck, dep, event);
-+ htree_spin_unlock(lhead, dep);
-+}
-+EXPORT_SYMBOL(htree_node_unlock);
-+
-+/* stop listening on child-lock level @dep */
-+void
-+htree_node_stop_listen(struct htree_lock *lck, unsigned dep)
-+{
-+ struct htree_lock_node *ln = &lck->lk_nodes[dep];
-+ struct htree_lock_node *tmp;
-+
-+ BUG_ON(htree_node_is_granted(lck, dep));
-+ BUG_ON(!list_empty(&ln->ln_blocked_list));
-+ BUG_ON(!list_empty(&ln->ln_granted_list));
-+
-+ if (!htree_node_is_listening(lck, dep))
-+ return;
-+
-+ htree_spin_lock(lck->lk_head, dep);
-+ ln->ln_mode = HTREE_LOCK_INVAL;
-+ ln->ln_ev_target = NULL;
-+
-+ if (htree_key_list_empty(ln)) { /* not owner */
-+ list_del_init(&ln->ln_alive_list);
-+ goto out;
-+ }
-+
-+ /* I'm the owner... */
-+ if (list_empty(&ln->ln_alive_list)) { /* no more listener */
-+ htree_key_list_del_init(ln);
-+ goto out;
-+ }
-+
-+ tmp = list_entry(ln->ln_alive_list.next,
-+ struct htree_lock_node, ln_alive_list);
-+
-+ BUG_ON(tmp->ln_mode != HTREE_LOCK_NL);
-+ htree_key_list_replace_init(ln, tmp);
-+ list_del_init(&ln->ln_alive_list);
-+ out:
-+ htree_spin_unlock(lck->lk_head, dep);
-+}
-+EXPORT_SYMBOL(htree_node_stop_listen);
-+
-+/* release all child-locks if we have any */
-+static void
-+htree_node_release_all(struct htree_lock *lck)
-+{
-+ int i;
-+
-+ for (i = 0; i < lck->lk_depth; i++) {
-+ if (htree_node_is_granted(lck, i))
-+ htree_node_unlock(lck, i, NULL);
-+ else if (htree_node_is_listening(lck, i))
-+ htree_node_stop_listen(lck, i);
-+ }
-+}
-+
-+/*
-+ * obtain htree lock, it could be blocked inside if there's conflict
-+ * with any granted or blocked lock and @wait is true.
-+ * NB: ALWAYS called holding lhead::lh_lock
-+ */
-+static int
-+htree_lock_internal(struct htree_lock *lck, int wait)
-+{
-+ struct htree_lock_head *lhead = lck->lk_head;
-+ int granted = 0;
-+ int blocked = 0;
-+ int i;
-+
-+ for (i = 0; i < HTREE_LOCK_MAX; i++) {
-+ if (lhead->lh_ngranted[i] != 0)
-+ granted |= 1 << i;
-+ if (lhead->lh_nblocked[i] != 0)
-+ blocked |= 1 << i;
-+ }
-+ if ((htree_lock_compat[lck->lk_mode] & granted) != granted ||
-+ (htree_lock_compat[lck->lk_mode] & blocked) != blocked) {
-+ /* block the current lock even if it only conflicts with another
-+ * blocked lock, so a lock like EX won't starve */
-+ if (!wait)
-+ return -1;
-+ lhead->lh_nblocked[lck->lk_mode]++;
-+ lk_block_inc(lck->lk_mode);
-+
-+ lck->lk_task = current;
-+ list_add_tail(&lck->lk_blocked_list, &lhead->lh_blocked_list);
-+
-+ set_current_state(TASK_UNINTERRUPTIBLE);
-+ htree_spin_unlock(lhead, HTREE_DEP_ROOT);
-+ /* wait to be given the lock */
-+ if (lck->lk_task != NULL)
-+ schedule();
-+ /* granted, no doubt. wake up will set me RUNNING */
-+ return 0; /* without lh_lock */
-+ }
-+ lhead->lh_ngranted[lck->lk_mode]++;
-+ lk_grant_inc(lck->lk_mode);
-+ return 1;
-+}
-+
-+/* release htree lock. NB: ALWAYS called holding lhead::lh_lock */
-+static void
-+htree_unlock_internal(struct htree_lock *lck)
-+{
-+ struct htree_lock_head *lhead = lck->lk_head;
-+ struct htree_lock *tmp;
-+ struct htree_lock *tmp2;
-+ int granted = 0;
-+ int i;
-+
-+ BUG_ON(lhead->lh_ngranted[lck->lk_mode] == 0);
-+
-+ lhead->lh_ngranted[lck->lk_mode]--;
-+ lck->lk_mode = HTREE_LOCK_INVAL;
-+
-+ for (i = 0; i < HTREE_LOCK_MAX; i++) {
-+ if (lhead->lh_ngranted[i] != 0)
-+ granted |= 1 << i;
-+ }
-+ list_for_each_entry_safe(tmp, tmp2,
-+ &lhead->lh_blocked_list, lk_blocked_list) {
-+ /* conflict with any granted lock? */
-+ if ((htree_lock_compat[tmp->lk_mode] & granted) != granted)
-+ break;
-+
-+ list_del_init(&tmp->lk_blocked_list);
-+
-+ BUG_ON(lhead->lh_nblocked[tmp->lk_mode] == 0);
-+
-+ lhead->lh_nblocked[tmp->lk_mode]--;
-+ lhead->lh_ngranted[tmp->lk_mode]++;
-+ granted |= 1 << tmp->lk_mode;
-+
-+ BUG_ON(tmp->lk_task == NULL);
-+ wake_up_process(tmp->lk_task);
-+ }
-+}
-+
-+/* Wrapper of htree_lock_internal() and the exported interface.
-+ * It always returns 1 with the lock granted if @wait is true; it returns 0
-+ * if @wait is false and the locking request can't be granted immediately */
-+int
-+htree_lock_try(struct htree_lock *lck, struct htree_lock_head *lhead,
-+ htree_lock_mode_t mode, int wait)
-+{
-+ int rc;
-+
-+ BUG_ON(lck->lk_depth > lhead->lh_depth);
-+ BUG_ON(lck->lk_head != NULL);
-+ BUG_ON(lck->lk_task != NULL);
-+
-+ lck->lk_head = lhead;
-+ lck->lk_mode = mode;
-+
-+ htree_spin_lock(lhead, HTREE_DEP_ROOT);
-+ rc = htree_lock_internal(lck, wait);
-+ if (rc != 0)
-+ htree_spin_unlock(lhead, HTREE_DEP_ROOT);
-+ return rc >= 0;
-+}
-+EXPORT_SYMBOL(htree_lock_try);
-+
-+/* Wrapper of htree_unlock_internal() and the exported interface.
-+ * It releases all htree_node_locks as well as the htree_lock itself */
-+void
-+htree_unlock(struct htree_lock *lck)
-+{
-+ BUG_ON(lck->lk_head == NULL);
-+ BUG_ON(lck->lk_mode == HTREE_LOCK_INVAL);
-+
-+ htree_node_release_all(lck);
-+
-+ htree_spin_lock(lck->lk_head, HTREE_DEP_ROOT);
-+ htree_unlock_internal(lck);
-+ htree_spin_unlock(lck->lk_head, HTREE_DEP_ROOT);
-+ lck->lk_head = NULL;
-+ lck->lk_task = NULL;
-+}
-+EXPORT_SYMBOL(htree_unlock);
-+
-+/* change lock mode */
-+void
-+htree_change_mode(struct htree_lock *lck, htree_lock_mode_t mode)
-+{
-+ BUG_ON(lck->lk_mode == HTREE_LOCK_INVAL);
-+ lck->lk_mode = mode;
-+}
-+EXPORT_SYMBOL(htree_change_mode);
-+
-+/* release the htree lock, then lock it again with a new mode.
-+ * This function first releases all htree_node_locks and the htree_lock,
-+ * then tries to take the htree_lock again with the new @mode.
-+ * It always returns 1 with the lock granted if @wait is true; it returns 0
-+ * if @wait is false and the locking request can't be granted immediately */
-+int
-+htree_change_lock_try(struct htree_lock *lck, htree_lock_mode_t mode, int wait)
-+{
-+ struct htree_lock_head *lhead = lck->lk_head;
-+ int rc;
-+
-+ BUG_ON(lhead == NULL);
-+ BUG_ON(lck->lk_mode == mode);
-+ BUG_ON(lck->lk_mode == HTREE_LOCK_INVAL || mode == HTREE_LOCK_INVAL);
-+
-+ htree_node_release_all(lck);
-+
-+ htree_spin_lock(lhead, HTREE_DEP_ROOT);
-+ htree_unlock_internal(lck);
-+ lck->lk_mode = mode;
-+ rc = htree_lock_internal(lck, wait);
-+ if (rc != 0)
-+ htree_spin_unlock(lhead, HTREE_DEP_ROOT);
-+ return rc >= 0;
-+}
-+EXPORT_SYMBOL(htree_change_lock_try);
-+
-+/* create an htree_lock head with @depth levels (number of child-locks);
-+ * it is a per-resource structure */
-+struct htree_lock_head *
-+htree_lock_head_alloc(unsigned depth, unsigned hbits, unsigned priv)
-+{
-+ struct htree_lock_head *lhead;
-+ int i;
-+
-+ if (depth > HTREE_LOCK_DEP_MAX) {
-+ printk(KERN_ERR "%d is larger than max htree_lock depth %d\n",
-+ depth, HTREE_LOCK_DEP_MAX);
-+ return NULL;
-+ }
-+
-+ lhead = kzalloc(offsetof(struct htree_lock_head,
-+ lh_children[depth]) + priv, GFP_NOFS);
-+ if (lhead == NULL)
-+ return NULL;
-+
-+ if (hbits < HTREE_HBITS_MIN)
-+ lhead->lh_hbits = HTREE_HBITS_MIN;
-+ else if (hbits > HTREE_HBITS_MAX)
-+ lhead->lh_hbits = HTREE_HBITS_MAX;
-+ else /* in-range value; otherwise lh_hbits would stay 0 from kzalloc() */
-+ lhead->lh_hbits = hbits;
-+
-+ lhead->lh_lock = 0;
-+ lhead->lh_depth = depth;
-+ INIT_LIST_HEAD(&lhead->lh_blocked_list);
-+ if (priv > 0) {
-+ lhead->lh_private = (void *)lhead +
-+ offsetof(struct htree_lock_head, lh_children[depth]);
-+ }
-+
-+ for (i = 0; i < depth; i++) {
-+ INIT_LIST_HEAD(&lhead->lh_children[i].lc_list);
-+ lhead->lh_children[i].lc_events = HTREE_EVENT_DISABLE;
-+ }
-+ return lhead;
-+}
-+EXPORT_SYMBOL(htree_lock_head_alloc);
-+
-+/* free the htree_lock head */
-+void
-+htree_lock_head_free(struct htree_lock_head *lhead)
-+{
-+ int i;
-+
-+ BUG_ON(!list_empty(&lhead->lh_blocked_list));
-+ for (i = 0; i < lhead->lh_depth; i++)
-+ BUG_ON(!list_empty(&lhead->lh_children[i].lc_list));
-+ kfree(lhead);
-+}
-+EXPORT_SYMBOL(htree_lock_head_free);
-+
-+/* register event callback for @events of child-lock at level @dep */
-+void
-+htree_lock_event_attach(struct htree_lock_head *lhead, unsigned dep,
-+ unsigned events, htree_event_cb_t callback)
-+{
-+ BUG_ON(lhead->lh_depth <= dep);
-+ lhead->lh_children[dep].lc_events = events;
-+ lhead->lh_children[dep].lc_callback = callback;
-+}
-+EXPORT_SYMBOL(htree_lock_event_attach);
-+
-+/* allocate an htree_lock, which is a per-thread structure; @pbytes is
-+ * extra bytes of private data for the caller */
-+struct htree_lock *
-+htree_lock_alloc(unsigned depth, unsigned pbytes)
-+{
-+ struct htree_lock *lck;
-+ int i = offsetof(struct htree_lock, lk_nodes[depth]);
-+
-+ if (depth > HTREE_LOCK_DEP_MAX) {
-+ printk(KERN_ERR "%d is larger than max htree_lock depth %d\n",
-+ depth, HTREE_LOCK_DEP_MAX);
-+ return NULL;
-+ }
-+ lck = kzalloc(i + pbytes, GFP_NOFS);
-+ if (lck == NULL)
-+ return NULL;
-+
-+ if (pbytes != 0)
-+ lck->lk_private = (void *)lck + i;
-+ lck->lk_mode = HTREE_LOCK_INVAL;
-+ lck->lk_depth = depth;
-+ INIT_LIST_HEAD(&lck->lk_blocked_list);
-+
-+ for (i = 0; i < depth; i++) {
-+ struct htree_lock_node *node = &lck->lk_nodes[i];
-+
-+ node->ln_mode = HTREE_LOCK_INVAL;
-+ INIT_LIST_HEAD(&node->ln_major_list);
-+ INIT_LIST_HEAD(&node->ln_minor_list);
-+ INIT_LIST_HEAD(&node->ln_alive_list);
-+ INIT_LIST_HEAD(&node->ln_blocked_list);
-+ INIT_LIST_HEAD(&node->ln_granted_list);
-+ }
-+
-+ return lck;
-+}
-+EXPORT_SYMBOL(htree_lock_alloc);
-+
-+/* free htree_lock node */
-+void
-+htree_lock_free(struct htree_lock *lck)
-+{
-+ BUG_ON(lck->lk_mode != HTREE_LOCK_INVAL);
-+ kfree(lck);
-+}
-+EXPORT_SYMBOL(htree_lock_free);
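
A minimal usage sketch of the htree_lock API above may help while reading the
rest of the series; it is not part of the patch, and the resource name, depth
and hash bits are illustrative assumptions only:

#include <linux/htree_lock.h>

#define MY_DEPTH 2 /* child-lock levels for this resource (assumption) */

static struct htree_lock_head *my_lhead; /* one head per protected resource */

static int my_init(void)
{
        /* 16 hash bits for child-lock keys; clamped to HTREE_HBITS_MIN/MAX */
        my_lhead = htree_lock_head_alloc(MY_DEPTH, 16, 0);
        return my_lhead != NULL ? 0 : -ENOMEM;
}

static int my_reader(u32 node_key)
{
        struct htree_lock *lck = htree_lock_alloc(MY_DEPTH, 0);

        if (lck == NULL)
                return -ENOMEM;

        /* shared tree lock; with wait == 1 this always returns granted */
        htree_lock_try(lck, my_lhead, HTREE_LOCK_CR, 1);
        /* PR child-lock at level 0 for the node identified by @node_key */
        htree_node_lock_try(lck, HTREE_LOCK_PR, node_key, 0, 1, NULL);

        /* ... read the protected node ... */

        htree_unlock(lck); /* drops the child-lock and the tree lock */
        htree_lock_free(lck);
        return 0;
}

A writer would take HTREE_LOCK_CW (or EX) on the tree and HTREE_LOCK_PW on the
node instead, mirroring the pattern the ext4 pdirop patch below builds on.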
-Index: linux-2.6.32-504.3.3.el6.x86_64/fs/ext4/ext4.h
-===================================================================
---- linux-2.6.32-504.3.3.el6.x86_64.orig/fs/ext4/ext4.h
-+++ linux-2.6.32-504.3.3.el6.x86_64/fs/ext4/ext4.h
-@@ -27,6 +27,7 @@
- #include <linux/mutex.h>
- #include <linux/timer.h>
- #include <linux/wait.h>
-+#include <linux/htree_lock.h>
- #include <linux/blockgroup_lock.h>
- #include <linux/percpu_counter.h>
- #ifdef __KERNEL__
-@@ -1625,6 +1626,71 @@ ext4_dir_htree_level(struct super_block
- EXT4_HTREE_LEVEL : EXT4_HTREE_LEVEL_COMPAT;
- }
-
-+/* assume name-hash is protected by upper layer */
-+#define EXT4_HTREE_LOCK_HASH 0
-+
-+enum ext4_pdo_lk_types {
-+#if EXT4_HTREE_LOCK_HASH
-+ EXT4_LK_HASH,
-+#endif
-+ EXT4_LK_DX, /* index block */
-+ EXT4_LK_DE, /* directory entry block */
-+ EXT4_LK_SPIN, /* spinlock */
-+ EXT4_LK_MAX,
-+};
-+
-+/* read-only bit */
-+#define EXT4_LB_RO(b) (1 << (b))
-+/* read + write, high bits for writer */
-+#define EXT4_LB_RW(b) ((1 << (b)) | (1 << (EXT4_LK_MAX + (b))))
-+
-+enum ext4_pdo_lock_bits {
-+ /* DX lock bits */
-+ EXT4_LB_DX_RO = EXT4_LB_RO(EXT4_LK_DX),
-+ EXT4_LB_DX = EXT4_LB_RW(EXT4_LK_DX),
-+ /* DE lock bits */
-+ EXT4_LB_DE_RO = EXT4_LB_RO(EXT4_LK_DE),
-+ EXT4_LB_DE = EXT4_LB_RW(EXT4_LK_DE),
-+ /* DX spinlock bits */
-+ EXT4_LB_SPIN_RO = EXT4_LB_RO(EXT4_LK_SPIN),
-+ EXT4_LB_SPIN = EXT4_LB_RW(EXT4_LK_SPIN),
-+ /* accurate searching */
-+ EXT4_LB_EXACT = EXT4_LB_RO(EXT4_LK_MAX << 1),
-+};
-+
-+enum ext4_pdo_lock_opc {
-+ /* external */
-+ EXT4_HLOCK_READDIR = (EXT4_LB_DE_RO | EXT4_LB_DX_RO),
-+ EXT4_HLOCK_LOOKUP = (EXT4_LB_DE_RO | EXT4_LB_SPIN_RO |
-+ EXT4_LB_EXACT),
-+ EXT4_HLOCK_DEL = (EXT4_LB_DE | EXT4_LB_SPIN_RO |
-+ EXT4_LB_EXACT),
-+ EXT4_HLOCK_ADD = (EXT4_LB_DE | EXT4_LB_SPIN_RO),
-+
-+ /* internal */
-+ EXT4_HLOCK_LOOKUP_SAFE = (EXT4_LB_DE_RO | EXT4_LB_DX_RO |
-+ EXT4_LB_EXACT),
-+ EXT4_HLOCK_DEL_SAFE = (EXT4_LB_DE | EXT4_LB_DX_RO | EXT4_LB_EXACT),
-+ EXT4_HLOCK_SPLIT = (EXT4_LB_DE | EXT4_LB_DX | EXT4_LB_SPIN),
-+};
-+
-+extern struct htree_lock_head *ext4_htree_lock_head_alloc(unsigned hbits);
-+#define ext4_htree_lock_head_free(lhead) htree_lock_head_free(lhead)
-+
-+extern struct htree_lock *ext4_htree_lock_alloc(void);
-+#define ext4_htree_lock_free(lck) htree_lock_free(lck)
-+
-+extern void ext4_htree_lock(struct htree_lock *lck,
-+ struct htree_lock_head *lhead,
-+ struct inode *dir, unsigned flags);
-+#define ext4_htree_unlock(lck) htree_unlock(lck)
-+
-+extern struct buffer_head * __ext4_find_entry(struct inode *dir,
-+ const struct qstr *d_name,
-+ struct ext4_dir_entry_2 **res_dir,
-+ struct htree_lock *lck);
-+extern int __ext4_add_entry(handle_t *handle, struct dentry *dentry,
-+ struct inode *inode, struct htree_lock *lck);
- void ext4_get_group_no_and_offset(struct super_block *sb, ext4_fsblk_t blocknr,
- ext4_group_t *blockgrpp, ext4_grpblk_t *offsetp);
-
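
For orientation while reading namei.c below: with EXT4_HTREE_LOCK_HASH left at
0, the lock types and bit masks above work out to the following values
(derived by hand, shown here only as a reading aid):

/*
 * EXT4_LK_DX = 0, EXT4_LK_DE = 1, EXT4_LK_SPIN = 2, EXT4_LK_MAX = 3
 *
 *   EXT4_LB_DX_RO   = 0x01      EXT4_LB_DX   = 0x09
 *   EXT4_LB_DE_RO   = 0x02      EXT4_LB_DE   = 0x12
 *   EXT4_LB_SPIN_RO = 0x04      EXT4_LB_SPIN = 0x24
 *   EXT4_LB_EXACT   = 0x40
 *
 * e.g. EXT4_HLOCK_LOOKUP = 0x02 | 0x04 | 0x40 = 0x46, and
 * ffz(~EXT4_LB_DE) == EXT4_LK_DE, which is how the helpers in namei.c
 * recover the child-lock level from a lock-bit mask.
 */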
-Index: linux-2.6.32-504.3.3.el6.x86_64/fs/ext4/namei.c
-===================================================================
---- linux-2.6.32-504.3.3.el6.x86_64.orig/fs/ext4/namei.c
-+++ linux-2.6.32-504.3.3.el6.x86_64/fs/ext4/namei.c
-@@ -176,7 +176,7 @@ static struct dx_frame *dx_probe(const s
- struct inode *dir,
- struct dx_hash_info *hinfo,
- struct dx_frame *frame,
-- int *err);
-+ struct htree_lock *lck, int *err);
- static void dx_release(struct dx_frame *frames);
- static int dx_make_map(struct ext4_dir_entry_2 *de, unsigned blocksize,
- struct dx_hash_info *hinfo, struct dx_map_entry map[]);
-@@ -189,13 +189,13 @@ static void dx_insert_block(struct dx_fr
- static int ext4_htree_next_block(struct inode *dir, __u32 hash,
- struct dx_frame *frame,
- struct dx_frame *frames,
-- __u32 *start_hash);
-+ __u32 *start_hash, struct htree_lock *lck);
- static struct buffer_head * ext4_dx_find_entry(struct inode *dir,
- const struct qstr *d_name,
- struct ext4_dir_entry_2 **res_dir,
-- int *err);
-+ struct htree_lock *lck, int *err);
- static int ext4_dx_add_entry(handle_t *handle, struct dentry *dentry,
-- struct inode *inode);
-+ struct inode *inode, struct htree_lock *lck);
-
- /*
- * p is at least 6 bytes before the end of page
-@@ -368,6 +368,225 @@ struct stats dx_show_entries(struct dx_h
- }
- #endif /* DX_DEBUG */
-
-+/* private data for htree_lock */
-+struct ext4_dir_lock_data {
-+ unsigned ld_flags; /* bits-map for lock types */
-+ unsigned ld_count; /* # entries of the last DX block */
-+ struct dx_entry ld_at_entry; /* copy of leaf dx_entry */
-+ struct dx_entry *ld_at; /* position of leaf dx_entry */
-+};
-+
-+#define ext4_htree_lock_data(l) ((struct ext4_dir_lock_data *)(l)->lk_private)
-+#define ext4_find_entry(dir, name, dirent) __ext4_find_entry(dir, name, dirent, NULL)
-+#define ext4_add_entry(handle, dentry, inode) __ext4_add_entry(handle, dentry, inode, NULL)
-+
-+/* NB: ext4_lblk_t is 32 bits so we use high bits to identify invalid blk */
-+#define EXT4_HTREE_NODE_CHANGED (0xcafeULL << 32)
-+
-+static void ext4_htree_event_cb(void *target, void *event)
-+{
-+ u64 *block = (u64 *)target;
-+
-+ if (*block == dx_get_block((struct dx_entry *)event))
-+ *block = EXT4_HTREE_NODE_CHANGED;
-+}
-+
-+struct htree_lock_head *ext4_htree_lock_head_alloc(unsigned hbits)
-+{
-+ struct htree_lock_head *lhead;
-+
-+ lhead = htree_lock_head_alloc(EXT4_LK_MAX, hbits, 0);
-+ if (lhead != NULL) {
-+ htree_lock_event_attach(lhead, EXT4_LK_SPIN, HTREE_EVENT_WR,
-+ ext4_htree_event_cb);
-+ }
-+ return lhead;
-+}
-+EXPORT_SYMBOL(ext4_htree_lock_head_alloc);
-+
-+struct htree_lock *ext4_htree_lock_alloc(void)
-+{
-+ return htree_lock_alloc(EXT4_LK_MAX,
-+ sizeof(struct ext4_dir_lock_data));
-+}
-+EXPORT_SYMBOL(ext4_htree_lock_alloc);
-+
-+static htree_lock_mode_t ext4_htree_mode(unsigned flags)
-+{
-+ switch (flags) {
-+ default: /* 0 or unknown flags require EX lock */
-+ return HTREE_LOCK_EX;
-+ case EXT4_HLOCK_READDIR:
-+ return HTREE_LOCK_PR;
-+ case EXT4_HLOCK_LOOKUP:
-+ return HTREE_LOCK_CR;
-+ case EXT4_HLOCK_DEL:
-+ case EXT4_HLOCK_ADD:
-+ return HTREE_LOCK_CW;
-+ }
-+}
-+
-+/* return PR for read-only operations, otherwise return EX */
-+static inline htree_lock_mode_t ext4_htree_safe_mode(unsigned flags)
-+{
-+ int writer = (flags & EXT4_LB_DE) == EXT4_LB_DE;
-+
-+ /* 0 requires EX lock */
-+ return (flags == 0 || writer) ? HTREE_LOCK_EX : HTREE_LOCK_PR;
-+}
-+
-+static int ext4_htree_safe_locked(struct htree_lock *lck)
-+{
-+ int writer;
-+
-+ if (lck == NULL || lck->lk_mode == HTREE_LOCK_EX)
-+ return 1;
-+
-+ writer = (ext4_htree_lock_data(lck)->ld_flags & EXT4_LB_DE) ==
-+ EXT4_LB_DE;
-+ if (writer) /* all readers & writers are excluded? */
-+ return lck->lk_mode == HTREE_LOCK_EX;
-+
-+ /* all writers are excluded? */
-+ return lck->lk_mode == HTREE_LOCK_PR ||
-+ lck->lk_mode == HTREE_LOCK_PW ||
-+ lck->lk_mode == HTREE_LOCK_EX;
-+}
-+
-+/* relock htree_lock with EX mode if it's a change operation, otherwise
-+ * relock it with PR mode. It's a noop if PDO is disabled. */
-+static void ext4_htree_safe_relock(struct htree_lock *lck)
-+{
-+ if (!ext4_htree_safe_locked(lck)) {
-+ unsigned flags = ext4_htree_lock_data(lck)->ld_flags;
-+
-+ htree_change_lock(lck, ext4_htree_safe_mode(flags));
-+ }
-+}
-+
-+void ext4_htree_lock(struct htree_lock *lck, struct htree_lock_head *lhead,
-+ struct inode *dir, unsigned flags)
-+{
-+ htree_lock_mode_t mode = is_dx(dir) ? ext4_htree_mode(flags) :
-+ ext4_htree_safe_mode(flags);
-+
-+ ext4_htree_lock_data(lck)->ld_flags = flags;
-+ htree_lock(lck, lhead, mode);
-+ if (!is_dx(dir))
-+ ext4_htree_safe_relock(lck); /* make sure it's safe locked */
-+}
-+EXPORT_SYMBOL(ext4_htree_lock);
-+
-+static int ext4_htree_node_lock(struct htree_lock *lck, struct dx_entry *at,
-+ unsigned lmask, int wait, void *ev)
-+{
-+ u32 key = (at == NULL) ? 0 : dx_get_block(at);
-+ u32 mode;
-+
-+ /* NOOP if htree is well protected or caller doesn't require the lock */
-+ if (ext4_htree_safe_locked(lck) ||
-+ !(ext4_htree_lock_data(lck)->ld_flags & lmask))
-+ return 1;
-+
-+ mode = (ext4_htree_lock_data(lck)->ld_flags & lmask) == lmask ?
-+ HTREE_LOCK_PW : HTREE_LOCK_PR;
-+ while (1) {
-+ if (htree_node_lock_try(lck, mode, key, ffz(~lmask), wait, ev))
-+ return 1;
-+ if (!(lmask & EXT4_LB_SPIN)) /* not a spinlock */
-+ return 0;
-+ cpu_relax(); /* spin until granted */
-+ }
-+}
-+
-+static int ext4_htree_node_locked(struct htree_lock *lck, unsigned lmask)
-+{
-+ return ext4_htree_safe_locked(lck) ||
-+ htree_node_is_granted(lck, ffz(~lmask));
-+}
-+
-+static void ext4_htree_node_unlock(struct htree_lock *lck,
-+ unsigned lmask, void *buf)
-+{
-+ /* NB: it's safe to call this multiple times, even if it's not locked */
-+ if (!ext4_htree_safe_locked(lck) &&
-+ htree_node_is_granted(lck, ffz(~lmask)))
-+ htree_node_unlock(lck, ffz(~lmask), buf);
-+}
-+
-+#define ext4_htree_dx_lock(lck, key) \
-+ ext4_htree_node_lock(lck, key, EXT4_LB_DX, 1, NULL)
-+#define ext4_htree_dx_lock_try(lck, key) \
-+ ext4_htree_node_lock(lck, key, EXT4_LB_DX, 0, NULL)
-+#define ext4_htree_dx_unlock(lck) \
-+ ext4_htree_node_unlock(lck, EXT4_LB_DX, NULL)
-+#define ext4_htree_dx_locked(lck) \
-+ ext4_htree_node_locked(lck, EXT4_LB_DX)
-+
-+static void ext4_htree_dx_need_lock(struct htree_lock *lck)
-+{
-+ struct ext4_dir_lock_data *ld;
-+
-+ if (ext4_htree_safe_locked(lck))
-+ return;
-+
-+ ld = ext4_htree_lock_data(lck);
-+ switch (ld->ld_flags) {
-+ default:
-+ return;
-+ case EXT4_HLOCK_LOOKUP:
-+ ld->ld_flags = EXT4_HLOCK_LOOKUP_SAFE;
-+ return;
-+ case EXT4_HLOCK_DEL:
-+ ld->ld_flags = EXT4_HLOCK_DEL_SAFE;
-+ return;
-+ case EXT4_HLOCK_ADD:
-+ ld->ld_flags = EXT4_HLOCK_SPLIT;
-+ return;
-+ }
-+}
-+
-+#define ext4_htree_de_lock(lck, key) \
-+ ext4_htree_node_lock(lck, key, EXT4_LB_DE, 1, NULL)
-+#define ext4_htree_de_unlock(lck) \
-+ ext4_htree_node_unlock(lck, EXT4_LB_DE, NULL)
-+
-+#define ext4_htree_spin_lock(lck, key, event) \
-+ ext4_htree_node_lock(lck, key, EXT4_LB_SPIN, 0, event)
-+#define ext4_htree_spin_unlock(lck) \
-+ ext4_htree_node_unlock(lck, EXT4_LB_SPIN, NULL)
-+#define ext4_htree_spin_unlock_listen(lck, p) \
-+ ext4_htree_node_unlock(lck, EXT4_LB_SPIN, p)
-+
-+static void ext4_htree_spin_stop_listen(struct htree_lock *lck)
-+{
-+ if (!ext4_htree_safe_locked(lck) &&
-+ htree_node_is_listening(lck, ffz(~EXT4_LB_SPIN)))
-+ htree_node_stop_listen(lck, ffz(~EXT4_LB_SPIN));
-+}
-+
-+enum {
-+ DX_HASH_COL_IGNORE, /* ignore collision while probing frames */
-+ DX_HASH_COL_YES, /* there is collision and it does matter */
-+ DX_HASH_COL_NO, /* there is no collision */
-+};
-+
-+static int dx_probe_hash_collision(struct htree_lock *lck,
-+ struct dx_entry *entries,
-+ struct dx_entry *at, u32 hash)
-+{
-+ if (!(lck && ext4_htree_lock_data(lck)->ld_flags & EXT4_LB_EXACT)) {
-+ return DX_HASH_COL_IGNORE; /* don't care about collision */
-+
-+ } else if (at == entries + dx_get_count(entries) - 1) {
-+ return DX_HASH_COL_IGNORE; /* not in any leaf of this DX */
-+
-+ } else { /* hash collision? */
-+ return ((dx_get_hash(at + 1) & ~1) == hash) ?
-+ DX_HASH_COL_YES : DX_HASH_COL_NO;
-+ }
-+}
-+
- /*
- * Probe for a directory leaf block to search.
- *
-@@ -379,10 +598,11 @@ struct stats dx_show_entries(struct dx_h
- */
- static struct dx_frame *
- dx_probe(const struct qstr *d_name, struct inode *dir,
-- struct dx_hash_info *hinfo, struct dx_frame *frame_in, int *err)
-+ struct dx_hash_info *hinfo, struct dx_frame *frame_in,
-+ struct htree_lock *lck, int *err)
- {
- unsigned count, indirect;
-- struct dx_entry *at, *entries, *p, *q, *m;
-+ struct dx_entry *at, *entries, *p, *q, *m, *dx = NULL;
- struct dx_root_info * info;
- struct buffer_head *bh;
- struct dx_frame *frame = frame_in;
-@@ -447,8 +667,15 @@ dx_probe(const struct qstr *d_name, stru
- dxtrace(printk("Look up %x", hash));
- while (1)
- {
-+ if (indirect == 0) { /* the last index level */
-+ /* NB: ext4_htree_dx_lock() could be noop if
-+ * DX-lock flag is not set for current operation */
-+ ext4_htree_dx_lock(lck, dx);
-+ ext4_htree_spin_lock(lck, dx, NULL);
-+ }
- count = dx_get_count(entries);
-- if (!count || count > dx_get_limit(entries)) {
-+ if (count == 0 || count > dx_get_limit(entries)) {
-+ ext4_htree_spin_unlock(lck); /* release spin */
- ext4_warning(dir->i_sb,
- "dx entry: no count or count > limit");
- brelse(bh);
-@@ -489,9 +716,73 @@ dx_probe(const struct qstr *d_name, stru
- frame->bh = bh;
- frame->entries = entries;
- frame->at = at;
-- if (!indirect--) return frame;
-+
-+ if (indirect == 0) { /* the last index level */
-+ struct ext4_dir_lock_data *ld;
-+ u64 myblock;
-+
-+ /* By default we only lock the DE-block, however, we will
-+ * also lock the last level DX-block if:
-+ * a) there is a hash collision
-+ * we will set the DX-lock flag (a few lines below)
-+ * and retry to lock the DX-block,
-+ * see details in dx_probe_hash_collision()
-+ * b) it's a retry from splitting
-+ * we need to lock the last level DX-block so nobody
-+ * else can split any leaf blocks under the same
-+ * DX-block, see details in ext4_dx_add_entry()
-+ */
-+ if (ext4_htree_dx_locked(lck)) {
-+ /* DX-block is locked, just lock DE-block
-+ * and return */
-+ ext4_htree_spin_unlock(lck);
-+ if (!ext4_htree_safe_locked(lck))
-+ ext4_htree_de_lock(lck, frame->at);
-+ return frame;
-+ }
-+ /* it's pdirop and no DX lock */
-+ if (dx_probe_hash_collision(lck, entries, at, hash) ==
-+ DX_HASH_COL_YES) {
-+ /* found a hash collision, set the DX-lock flag
-+ * and retry to obtain the DX-lock */
-+ ext4_htree_spin_unlock(lck);
-+ ext4_htree_dx_need_lock(lck);
-+ continue;
-+ }
-+ ld = ext4_htree_lock_data(lck);
-+ /* because we don't hold the DX lock, @at can't be trusted
-+ * after the spinlock is released, so save a copy of it */
-+ ld->ld_at = at;
-+ ld->ld_at_entry = *at;
-+ ld->ld_count = dx_get_count(entries);
-+
-+ frame->at = &ld->ld_at_entry;
-+ myblock = dx_get_block(at);
-+
-+ /* NB: ordering locking */
-+ ext4_htree_spin_unlock_listen(lck, &myblock);
-+ /* other thread can split this DE-block because:
-+ * a) I don't have lock for the DE-block yet
-+ * b) I released spinlock on DX-block
-+ * if it happened I can detect it by listening
-+ * splitting event on this DE-block */
-+ ext4_htree_de_lock(lck, frame->at);
-+ ext4_htree_spin_stop_listen(lck);
-+
-+ if (myblock == EXT4_HTREE_NODE_CHANGED) {
-+ /* someone split this DE-block before
-+ * I locked it, I need to retry and lock
-+ * valid DE-block */
-+ ext4_htree_de_unlock(lck);
-+ continue;
-+ }
-+ return frame;
-+ }
-+ dx = at;
-+ indirect--;
- if (!(bh = ext4_bread (NULL,dir, dx_get_block(at), 0, err)))
- goto fail2;
-+
- at = entries = ((struct dx_node *) bh->b_data)->entries;
- if (dx_get_limit(entries) != dx_node_limit (dir)) {
- ext4_warning(dir->i_sb,
-@@ -553,7 +844,7 @@ static void dx_release (struct dx_frame
- static int ext4_htree_next_block(struct inode *dir, __u32 hash,
- struct dx_frame *frame,
- struct dx_frame *frames,
-- __u32 *start_hash)
-+ __u32 *start_hash, struct htree_lock *lck)
- {
- struct dx_frame *p;
- struct buffer_head *bh;
-@@ -568,12 +859,22 @@ static int ext4_htree_next_block(struct
- * this loop, num_frames indicates the number of interior
- * nodes need to be read.
- */
-+ ext4_htree_de_unlock(lck);
- while (1) {
-- if (++(p->at) < p->entries + dx_get_count(p->entries))
-- break;
-+ if (num_frames > 0 || ext4_htree_dx_locked(lck)) {
-+ /* num_frames > 0 :
-+ * DX block
-+ * ext4_htree_dx_locked:
-+ * frame->at is a reliable pointer returned by dx_probe,
-+ * otherwise dx_probe already knew there was no collision */
-+ if (++(p->at) < p->entries + dx_get_count(p->entries))
-+ break;
-+ }
- if (p == frames)
- return 0;
- num_frames++;
-+ if (num_frames == 1)
-+ ext4_htree_dx_unlock(lck);
- p--;
- }
-
-@@ -596,6 +897,13 @@ static int ext4_htree_next_block(struct
- * block so no check is necessary
- */
- while (num_frames--) {
-+ if (num_frames == 0) {
-+ /* it's not always necessary, we just don't want to
-+ * detect hash collision again */
-+ ext4_htree_dx_need_lock(lck);
-+ ext4_htree_dx_lock(lck, p->at);
-+ }
-+
- if (!(bh = ext4_bread(NULL, dir, dx_get_block(p->at),
- 0, &err)))
- return err; /* Failure */
-@@ -604,6 +912,7 @@ static int ext4_htree_next_block(struct
- p->bh = bh;
- p->at = p->entries = ((struct dx_node *) bh->b_data)->entries;
- }
-+ ext4_htree_de_lock(lck, p->at);
- return 1;
- }
-
-@@ -696,10 +1005,10 @@ int ext4_htree_fill_tree(struct file *di
- }
- hinfo.hash = start_hash;
- hinfo.minor_hash = 0;
-- frame = dx_probe(NULL, dir, &hinfo, frames, &err);
-+ /* assume it's PR locked */
-+ frame = dx_probe(NULL, dir, &hinfo, frames, NULL, &err);
- if (!frame)
- return err;
--
- /* Add '.' and '..' from the htree header */
- if (!start_hash && !start_minor_hash) {
- de = (struct ext4_dir_entry_2 *) frames[0].bh->b_data;
-@@ -726,7 +1035,7 @@ int ext4_htree_fill_tree(struct file *di
- count += ret;
- hashval = ~0;
- ret = ext4_htree_next_block(dir, HASH_NB_ALWAYS,
-- frame, frames, &hashval);
-+ frame, frames, &hashval, NULL);
- *next_hash = hashval;
- if (ret < 0) {
- err = ret;
-@@ -826,9 +1135,17 @@ static void dx_insert_block(struct dx_fr
-
- static void ext4_update_dx_flag(struct inode *inode)
- {
-+ /* Disable it for ldiskfs, because going from a DX directory to
-+ * a non-DX directory while it is in use will completely break
-+ * the htree-locking.
-+ * If we really want to support this operation in the future,
-+ * we need to exclusively lock the directory here, which will
-+ * increase the complexity of the code */
-+#if 0
- if (!EXT4_HAS_COMPAT_FEATURE(inode->i_sb,
- EXT4_FEATURE_COMPAT_DIR_INDEX))
- ext4_clear_inode_flag(inode, EXT4_INODE_INDEX);
-+#endif
- }
-
- /*
-@@ -900,9 +1217,10 @@ static inline int search_dirblock(struct
- * The returned buffer_head has ->b_count elevated. The caller is expected
- * to brelse() it when appropriate.
- */
--static struct buffer_head * ext4_find_entry (struct inode *dir,
-+struct buffer_head * __ext4_find_entry(struct inode *dir,
- const struct qstr *d_name,
-- struct ext4_dir_entry_2 ** res_dir)
-+ struct ext4_dir_entry_2 **res_dir,
-+ struct htree_lock *lck)
- {
- struct super_block *sb;
- struct buffer_head *bh_use[NAMEI_RA_SIZE];
-@@ -923,7 +1241,7 @@ static struct buffer_head * ext4_find_en
- if (namelen > EXT4_NAME_LEN)
- return NULL;
- if (is_dx(dir)) {
-- bh = ext4_dx_find_entry(dir, d_name, res_dir, &err);
-+ bh = ext4_dx_find_entry(dir, d_name, res_dir, lck, &err);
- /*
- * On success, or if the error was file not found,
- * return. Otherwise, fall back to doing a search the
-@@ -933,6 +1251,7 @@ static struct buffer_head * ext4_find_en
- return bh;
- dxtrace(printk(KERN_DEBUG "ext4_find_entry: dx failed, "
- "falling back\n"));
-+ ext4_htree_safe_relock(lck);
- }
- nblocks = dir->i_size >> EXT4_BLOCK_SIZE_BITS(sb);
- start = EXT4_I(dir)->i_dir_start_lookup;
-@@ -1008,9 +1327,12 @@ cleanup_and_exit:
- brelse(bh_use[ra_ptr]);
- return ret;
- }
-+EXPORT_SYMBOL(__ext4_find_entry);
-
--static struct buffer_head * ext4_dx_find_entry(struct inode *dir, const struct qstr *d_name,
-- struct ext4_dir_entry_2 **res_dir, int *err)
-+static struct buffer_head * ext4_dx_find_entry(struct inode *dir,
-+ const struct qstr *d_name,
-+ struct ext4_dir_entry_2 **res_dir,
-+ struct htree_lock *lck, int *err)
- {
- struct super_block * sb;
- struct dx_hash_info hinfo;
-@@ -1026,13 +1348,16 @@ static struct buffer_head * ext4_dx_find
- sb = dir->i_sb;
- /* NFS may look up ".." - look at dx_root directory block */
- if (namelen > 2 || name[0] != '.'||(name[1] != '.' && name[1] != '\0')){
-- if (!(frame = dx_probe(d_name, dir, &hinfo, frames, err)))
-+ if (!(frame = dx_probe(d_name, dir, &hinfo, frames, lck, err)))
- return NULL;
- } else {
- frame = frames;
- frame->bh = NULL; /* for dx_release() */
- frame->at = (struct dx_entry *)frames; /* hack for zero entry*/
- dx_set_block(frame->at, 0); /* dx_root block is 0 */
-+ /* "." and ".." are stored in root DX lock */
-+ ext4_htree_dx_need_lock(lck);
-+ ext4_htree_dx_lock(lck, NULL);
- }
- hash = hinfo.hash;
- do {
-@@ -1061,7 +1386,7 @@ static struct buffer_head * ext4_dx_find
- brelse(bh);
- /* Check to see if we should continue to search */
- retval = ext4_htree_next_block(dir, hash, frame,
-- frames, NULL);
-+ frames, NULL, lck);
- if (retval < 0) {
- ext4_warning(sb,
- "error reading index page in directory #%lu",
-@@ -1244,8 +1569,9 @@ static struct ext4_dir_entry_2* dx_pack_
- * Returns pointer to de in block into which the new entry will be inserted.
- */
- static struct ext4_dir_entry_2 *do_split(handle_t *handle, struct inode *dir,
-- struct buffer_head **bh,struct dx_frame *frame,
-- struct dx_hash_info *hinfo, int *error)
-+ struct buffer_head **bh, struct dx_frame *frames,
-+ struct dx_frame *frame, struct dx_hash_info *hinfo,
-+ struct htree_lock *lck, int *error)
- {
- unsigned blocksize = dir->i_sb->s_blocksize;
- unsigned count, continued;
-@@ -1302,7 +1628,14 @@ static struct ext4_dir_entry_2 *do_split
- hash2, split, count-split));
-
- /* Fancy dance to stay within two buffers */
-- de2 = dx_move_dirents(data1, data2, map + split, count - split, blocksize);
-+ if (hinfo->hash < hash2) {
-+ de2 = dx_move_dirents(data1, data2, map + split,
-+ count - split, blocksize);
-+ } else {
-+ /* make sure we will add the entry to the same block that
-+ * we have already locked */
-+ de2 = dx_move_dirents(data1, data2, map, split, blocksize);
-+ }
- de = dx_pack_dirents(data1, blocksize);
- de->rec_len = ext4_rec_len_to_disk(data1 + blocksize - (char *) de,
- blocksize);
-@@ -1311,13 +1644,21 @@ static struct ext4_dir_entry_2 *do_split
- dxtrace(dx_show_leaf (hinfo, (struct ext4_dir_entry_2 *) data1, blocksize, 1));
- dxtrace(dx_show_leaf (hinfo, (struct ext4_dir_entry_2 *) data2, blocksize, 1));
-
-- /* Which block gets the new entry? */
-- if (hinfo->hash >= hash2)
-- {
-- swap(*bh, bh2);
-- de = de2;
-+ ext4_htree_spin_lock(lck, frame > frames ? (frame - 1)->at : NULL,
-+ frame->at); /* notify block is being split */
-+ if (hinfo->hash < hash2) {
-+ dx_insert_block(frame, hash2 + continued, newblock);
-+
-+ } else {
-+ /* switch block number */
-+ dx_insert_block(frame, hash2 + continued,
-+ dx_get_block(frame->at));
-+ dx_set_block(frame->at, newblock);
-+ (frame->at)++;
- }
-- dx_insert_block(frame, hash2 + continued, newblock);
-+ ext4_htree_spin_unlock(lck);
-+ ext4_htree_dx_unlock(lck);
-+
- err = ext4_handle_dirty_metadata(handle, dir, bh2);
- if (err)
- goto journal_error;
-@@ -1558,8 +1899,8 @@ static int make_indexed_dir(handle_t *ha
- retval = ext4_handle_dirty_metadata(handle, dir, bh);
- if (retval)
- goto out_frames;
-
-- de = do_split(handle,dir, &bh, frame, &hinfo, &retval);
-+ de = do_split(handle,dir, &bh, frames, frame, &hinfo, NULL, &retval);
- if (!de)
- goto out_frames;
- dx_release(frames);
-@@ -1664,8 +2005,8 @@ out:
- * may not sleep between calling this and putting something into
- * the entry, as someone else might have used it while you slept.
- */
--static int ext4_add_entry(handle_t *handle, struct dentry *dentry,
-- struct inode *inode)
-+int __ext4_add_entry(handle_t *handle, struct dentry *dentry,
-+ struct inode *inode, struct htree_lock *lck)
- {
- struct inode *dir = dentry->d_parent->d_inode;
- struct buffer_head *bh;
-@@ -1684,9 +2025,10 @@ static int ext4_add_entry(handle_t *hand
- if (dentry->d_name.len == 2 &&
- memcmp(dentry->d_name.name, "..", 2) == 0)
- return ext4_update_dotdot(handle, dentry, inode);
-- retval = ext4_dx_add_entry(handle, dentry, inode);
-+ retval = ext4_dx_add_entry(handle, dentry, inode, lck);
- if (!retval || (retval != ERR_BAD_DX_DIR))
- return retval;
-+ ext4_htree_safe_relock(lck);
- ext4_clear_inode_flag(dir, EXT4_INODE_INDEX);
- dx_fallback++;
- ext4_mark_inode_dirty(handle, dir);
-@@ -1717,12 +2059,13 @@ static int ext4_add_entry(handle_t *hand
- brelse(bh);
- return retval;
- }
-+EXPORT_SYMBOL(__ext4_add_entry);
-
- /*
- * Returns 0 for success, or a negative error value
- */
- static int ext4_dx_add_entry(handle_t *handle, struct dentry *dentry,
-- struct inode *inode)
-+ struct inode *inode, struct htree_lock *lck)
- {
- struct dx_frame frames[EXT4_HTREE_LEVEL], *frame;
- struct dx_entry *entries, *at;
-@@ -1736,7 +2079,7 @@ static int ext4_dx_add_entry(handle_t *h
-
- again:
- restart = 0;
-- frame = dx_probe(&dentry->d_name, dir, &hinfo, frames, &err);
-+ frame = dx_probe(&dentry->d_name, dir, &hinfo, frames, lck, &err);
- if (!frame)
- return err;
- entries = frame->entries;
-@@ -1763,6 +2106,11 @@ again:
- struct dx_node *node2;
- struct buffer_head *bh2;
-
-+ if (!ext4_htree_safe_locked(lck)) { /* retry with EX lock */
-+ ext4_htree_safe_relock(lck);
-+ restart = 1;
-+ goto cleanup;
-+ }
- while (frame > frames) {
- if (dx_get_count((frame - 1)->entries) <
- dx_get_limit((frame - 1)->entries)) {
-@@ -1860,16 +2208,43 @@ again:
- restart = 1;
- goto cleanup;
- }
-+ } else if (!ext4_htree_dx_locked(lck)) {
-+ struct ext4_dir_lock_data *ld = ext4_htree_lock_data(lck);
-+
-+ /* not well protected, require DX lock */
-+ ext4_htree_dx_need_lock(lck);
-+ at = frame > frames ? (frame - 1)->at : NULL;
-+
-+ /* NB: no risk of deadlock because it's just a try.
-+ *
-+ * NB: we check ld_count twice, the first time before
-+ * taking the DX lock, the second time while holding it.
-+ *
-+ * NB: we never free directory blocks so far, which means
-+ * the value returned by dx_get_count() should equal
-+ * ld->ld_count if nobody split any DE-block under @at,
-+ * and ld->ld_at still points to a valid dx_entry. */
-+ if ((ld->ld_count != dx_get_count(entries)) ||
-+ !ext4_htree_dx_lock_try(lck, at) ||
-+ (ld->ld_count != dx_get_count(entries))) {
-+ restart = 1;
-+ goto cleanup;
-+ }
-+ /* OK, I've got DX lock and nothing changed */
-+ frame->at = ld->ld_at;
- }
-- de = do_split(handle, dir, &bh, frame, &hinfo, &err);
-+ de = do_split(handle, dir, &bh, frames, frame, &hinfo, lck, &err);
- if (!de)
- goto cleanup;
-+
- err = add_dirent_to_buf(handle, dentry, inode, de, bh);
- goto cleanup;
-
- journal_error:
- ext4_std_error(dir->i_sb, err);
- cleanup:
-+ ext4_htree_dx_unlock(lck);
-+ ext4_htree_de_unlock(lck);
- if (bh)
- brelse(bh);
- dx_release(frames);
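
The listen-and-retry step in dx_probe() above is the subtle part of this
patch. The condensed sketch below (illustration only, not part of the patch;
error handling and the actual block reads are omitted) shows the ordering it
relies on:

static int pdo_lock_leaf(struct htree_lock *lck, struct dx_frame *frame)
{
        u64 myblock = dx_get_block(frame->at);

        /* drop the DX spinlock but keep listening for a split event on
         * the DE-block we are about to lock */
        ext4_htree_spin_unlock_listen(lck, &myblock);
        ext4_htree_de_lock(lck, frame->at);
        ext4_htree_spin_stop_listen(lck);

        if (myblock == EXT4_HTREE_NODE_CHANGED) {
                /* the DE-block was split before we locked it; the caller
                 * must drop the DE lock and retry the probe */
                ext4_htree_de_unlock(lck);
                return -EAGAIN;
        }
        return 0;
}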
-Index: linux-2.6.32-504.3.3.el6.x86_64/fs/ext4/Makefile
-===================================================================
---- linux-2.6.32-504.3.3.el6.x86_64.orig/fs/ext4/Makefile
-+++ linux-2.6.32-504.3.3.el6.x86_64/fs/ext4/Makefile
-@@ -6,6 +6,7 @@ obj-$(CONFIG_EXT4_FS) += ext4.o
-
- ext4-y := balloc.o bitmap.o dir.o file.o fsync.o ialloc.o inode.o \
- ioctl.o namei.o super.o symlink.o hash.o resize.o extents.o \
-+ htree_lock.o \
- ext4_jbd2.o migrate.o mballoc.o block_validity.o move_extent.o \
- mmp.o
-
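
Putting the exported pieces together, an external user of the pdirop
interface might drive a parallel lookup roughly as below. This is a sketch
only: it assumes @lhead was allocated once per directory with
ext4_htree_lock_head_alloc(), and the function name is made up:

static struct buffer_head *pdo_lookup(struct inode *dir,
                                      struct htree_lock_head *lhead,
                                      const struct qstr *name,
                                      struct ext4_dir_entry_2 **de)
{
        struct htree_lock *lck = ext4_htree_lock_alloc();
        struct buffer_head *bh;

        if (lck == NULL)
                return NULL;

        /* shared mode: concurrent lookups in the same directory may proceed */
        ext4_htree_lock(lck, lhead, dir, EXT4_HLOCK_LOOKUP);
        bh = __ext4_find_entry(dir, name, de, lck);
        ext4_htree_unlock(lck);

        ext4_htree_lock_free(lck);
        return bh;
}

Insertion follows the same shape with EXT4_HLOCK_ADD and __ext4_add_entry().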
+++ /dev/null
-commit 18aadd47f88464928b5ce57791c2e8f9f2aaece0 (v3.3-rc2-7-g18aadd4)
-Author: Bobi Jam <bobijam@whamcloud.com>
-Date: Mon Feb 20 17:53:02 2012 -0500
-
-ext4: expand commit callback and use it for mballoc
-
-The per-commit callback was used by mballoc code to manage free space
-bitmaps after deleted blocks have been released. This patch expands
-it to support multiple different callbacks, to allow other things to
-be done after the commit has been completed.
-
-Signed-off-by: Bobi Jam <bobijam@whamcloud.com>
-Signed-off-by: Andreas Dilger <adilger@whamcloud.com>
-Signed-off-by: "Theodore Ts'o" <tytso@mit.edu>
-
-Index: linux-stage/fs/ext4/ext4_jbd2.h
-===================================================================
---- linux-stage.orig/fs/ext4/ext4_jbd2.h
-+++ linux-stage/fs/ext4/ext4_jbd2.h
-@@ -104,6 +104,80 @@
- #define EXT4_MAXQUOTAS_INIT_BLOCKS(sb) (MAXQUOTAS*EXT4_QUOTA_INIT_BLOCKS(sb))
- #define EXT4_MAXQUOTAS_DEL_BLOCKS(sb) (MAXQUOTAS*EXT4_QUOTA_DEL_BLOCKS(sb))
-
-+/**
-+ * struct ext4_journal_cb_entry - Base structure for callback information.
-+ *
-+ * This struct is a 'seed' structure for use with your own callback
-+ * structs. If you are using callbacks you must allocate one of these
-+ * or another struct of your own definition which has this struct
-+ * as its first element and pass it to ext4_journal_callback_add().
-+ */
-+struct ext4_journal_cb_entry {
-+ /* list information for other callbacks attached to the same handle */
-+ struct list_head jce_list;
-+
-+ /* Function to call with this callback structure */
-+ void (*jce_func)(struct super_block *sb,
-+ struct ext4_journal_cb_entry *jce, int error);
-+
-+ /* user data goes here */
-+};
-+
-+/**
-+ * ext4_journal_callback_add: add a function to call after transaction commit
-+ * @handle: active journal transaction handle to register callback on
-+ * @func: callback function to call after the transaction has committed:
-+ * @sb: superblock of current filesystem for transaction
-+ * @jce: returned journal callback data
-+ * @rc: journal state at commit (0 = transaction committed properly)
-+ * @jce: journal callback data (internal and function private data struct)
-+ *
-+ * The registered function will be called in the context of the journal thread
-+ * after the transaction for which the handle was created has completed.
-+ *
-+ * No locks are held when the callback function is called, so it is safe to
-+ * call blocking functions from within the callback, but the callback should
-+ * not block or run for too long, or the filesystem will be blocked waiting for
-+ * the next transaction to commit. No journaling functions can be used, or
-+ * there is a risk of deadlock.
-+ *
-+ * There is no guaranteed calling order of multiple registered callbacks on
-+ * the same transaction.
-+ */
-+static inline void ext4_journal_callback_add(handle_t *handle,
-+ void (*func)(struct super_block *sb,
-+ struct ext4_journal_cb_entry *jce,
-+ int rc),
-+ struct ext4_journal_cb_entry *jce)
-+{
-+ struct ext4_sb_info *sbi =
-+ EXT4_SB(handle->h_transaction->t_journal->j_private);
-+
-+ /* Add the jce to transaction's private list */
-+ jce->jce_func = func;
-+ spin_lock(&sbi->s_md_lock);
-+ list_add(&jce->jce_list, &handle->h_transaction->t_private_list);
-+ spin_unlock(&sbi->s_md_lock);
-+}
-+
-+/**
-+ * ext4_journal_callback_del: delete a registered callback
-+ * @handle: active journal transaction handle on which callback was registered
-+ * @jce: registered journal callback entry to unregister
-+ */
-+static inline void ext4_journal_callback_del(handle_t *handle,
-+ struct ext4_journal_cb_entry *jce)
-+{
-+ struct ext4_sb_info *sbi =
-+ EXT4_SB(handle->h_transaction->t_journal->j_private);
-+
-+ spin_lock(&sbi->s_md_lock);
-+ list_del_init(&jce->jce_list);
-+ spin_unlock(&sbi->s_md_lock);
-+}
-+
-+#define HAVE_EXT4_JOURNAL_CALLBACK_ADD
-+
- int
- ext4_mark_iloc_dirty(handle_t *handle,
- struct inode *inode,
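
As a reading aid for the interface above, a minimal (hypothetical) client
would embed the entry as the first member of its own struct and register it
from inside a running handle; all names below are made up:

struct my_commit_work {
        struct ext4_journal_cb_entry mcw_jce;   /* must stay first */
        ext4_fsblk_t mcw_block;                 /* caller-private data */
};

static void my_commit_done(struct super_block *sb,
                           struct ext4_journal_cb_entry *jce, int rc)
{
        struct my_commit_work *mcw = (struct my_commit_work *)jce;

        /* runs in the journal thread after the transaction has committed;
         * rc != 0 means the journal was aborted */
        pr_debug("block %llu committed, rc=%d\n",
                 (unsigned long long)mcw->mcw_block, rc);
        kfree(mcw);
}

static int my_track_block(handle_t *handle, ext4_fsblk_t block)
{
        struct my_commit_work *mcw = kzalloc(sizeof(*mcw), GFP_NOFS);

        if (mcw == NULL)
                return -ENOMEM;
        mcw->mcw_block = block;
        ext4_journal_callback_add(handle, my_commit_done, &mcw->mcw_jce);
        return 0;
}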
-Index: linux-stage/fs/ext4/mballoc.h
-===================================================================
---- linux-stage.orig/fs/ext4/mballoc.h
-+++ linux-stage/fs/ext4/mballoc.h
-@@ -96,23 +96,24 @@ extern u8 mb_enable_debug;
- */
- #define MB_DEFAULT_GROUP_PREALLOC 512
-
--
- struct ext4_free_data {
-- /* this links the free block information from group_info */
-- struct rb_node node;
-+ /* MUST be the first member */
-+ struct ext4_journal_cb_entry efd_jce;
-
-- /* this links the free block information from ext4_sb_info */
-- struct list_head list;
-+ /* ext4_free_data private data starts from here */
-+
-+ /* this links the free block information from group_info */
-+ struct rb_node efd_node;
-
- /* group which free block extent belongs */
-- ext4_group_t group;
-+ ext4_group_t efd_group;
-
- /* free block extent */
-- ext4_grpblk_t start_blk;
-- ext4_grpblk_t count;
-+ ext4_grpblk_t efd_start_blk;
-+ ext4_grpblk_t efd_count;
-
- /* transaction which freed this extent */
-- tid_t t_tid;
-+ tid_t efd_tid;
- };
-
- struct ext4_prealloc_space {
-Index: linux-stage/fs/ext4/mballoc.c
-===================================================================
---- linux-stage.orig/fs/ext4/mballoc.c
-+++ linux-stage/fs/ext4/mballoc.c
-@@ -21,6 +21,7 @@
- * mballoc.c contains the multiblocks allocation routines
- */
-
-+#include "ext4_jbd2.h"
- #include "mballoc.h"
- #include <linux/debugfs.h>
- #include <trace/events/ext4.h>
-@@ -336,12 +337,12 @@
- */
- static struct kmem_cache *ext4_pspace_cachep;
- static struct kmem_cache *ext4_ac_cachep;
--static struct kmem_cache *ext4_free_ext_cachep;
-+static struct kmem_cache *ext4_free_data_cachep;
- static int ext4_mb_generate_from_pa(struct super_block *sb, void *bitmap,
- ext4_group_t group);
- static void ext4_mb_generate_from_freelist(struct super_block *sb, void *bitmap,
- ext4_group_t group);
--static void release_blocks_on_commit(journal_t *journal, transaction_t *txn);
-+static void ext4_free_data_callback(struct super_block *sb, struct ext4_journal_cb_entry *jce, int error);
-
- static inline void *mb_correct_addr_and_bit(int *bit, void *addr)
- {
-@@ -2583,8 +2584,6 @@ int ext4_mb_init(struct super_block *sb,
- }
- }
-
-- if (sbi->s_journal)
-- sbi->s_journal->j_commit_callback = release_blocks_on_commit;
- return 0;
- }
-
-@@ -2686,58 +2685,54 @@ static inline int ext4_issue_discard(str
- * This function is called by the jbd2 layer once the commit has finished,
- * so we know we can free the blocks that were released with that commit.
- */
--static void release_blocks_on_commit(journal_t *journal, transaction_t *txn)
-+static void ext4_free_data_callback(struct super_block *sb,
-+ struct ext4_journal_cb_entry *jce,
-+ int rc)
- {
-- struct super_block *sb = journal->j_private;
-+ struct ext4_free_data *entry = (struct ext4_free_data *)jce;
- struct ext4_buddy e4b;
- struct ext4_group_info *db;
- int err, count = 0, count2 = 0;
-- struct ext4_free_data *entry;
-- struct list_head *l, *ltmp;
-
-- list_for_each_safe(l, ltmp, &txn->t_private_list) {
-- entry = list_entry(l, struct ext4_free_data, list);
-+ mb_debug(1, "gonna free %u blocks in group %u (0x%p):",
-+ entry->efd_count, entry->efd_group, entry);
-
-- mb_debug(1, "gonna free %u blocks in group %u (0x%p):",
-- entry->count, entry->group, entry);
-+ if (test_opt(sb, DISCARD))
-+ ext4_issue_discard(sb, entry->efd_group,
-+ entry->efd_start_blk, entry->efd_count);
-+
-+ err = ext4_mb_load_buddy(sb, entry->efd_group, &e4b);
-+ /* we expect to find existing buddy because it's pinned */
-+ BUG_ON(err != 0);
-+
-+ db = e4b.bd_info;
-+ /* there are blocks to put in buddy to make them really free */
-+ count += entry->efd_count;
-+ count2++;
-+ ext4_lock_group(sb, entry->efd_group);
-+ /* Take it out of per group rb tree */
-+ rb_erase(&entry->efd_node, &(db->bb_free_root));
-+ mb_free_blocks(NULL, &e4b, entry->efd_start_blk, entry->efd_count);
-
-- if (test_opt(sb, DISCARD))
-- ext4_issue_discard(sb, entry->group,
-- entry->start_blk, entry->count);
--
-- err = ext4_mb_load_buddy(sb, entry->group, &e4b);
-- /* we expect to find existing buddy because it's pinned */
-- BUG_ON(err != 0);
--
-- db = e4b.bd_info;
-- /* there are blocks to put in buddy to make them really free */
-- count += entry->count;
-- count2++;
-- ext4_lock_group(sb, entry->group);
-- /* Take it out of per group rb tree */
-- rb_erase(&entry->node, &(db->bb_free_root));
-- mb_free_blocks(NULL, &e4b, entry->start_blk, entry->count);
-+ /*
-+ * Clear the trimmed flag for the group so that the next
-+ * ext4_trim_fs can trim it.
-+ * If the volume is mounted with -o discard, online discard
-+ * is supported and the free blocks will be trimmed online.
-+ */
-+ if (!test_opt(sb, DISCARD))
-+ EXT4_MB_GRP_CLEAR_TRIMMED(db);
-
-- /*
-- * Clear the trimmed flag for the group so that the next
-- * ext4_trim_fs can trim it.
-- * If the volume is mounted with -o discard, online discard
-- * is supported and the free blocks will be trimmed online.
-+ if (!db->bb_free_root.rb_node) {
-+ /* No more items in the per group rb tree
-+ * balance refcounts from ext4_mb_free_metadata()
- */
-- if (!test_opt(sb, DISCARD))
-- EXT4_MB_GRP_CLEAR_TRIMMED(db);
--
-- if (!db->bb_free_root.rb_node) {
-- /* No more items in the per group rb tree
-- * balance refcounts from ext4_mb_free_metadata()
-- */
-- page_cache_release(e4b.bd_buddy_page);
-- page_cache_release(e4b.bd_bitmap_page);
-- }
-- ext4_unlock_group(sb, entry->group);
-- kmem_cache_free(ext4_free_ext_cachep, entry);
-- ext4_mb_release_desc(&e4b);
-+ page_cache_release(e4b.bd_buddy_page);
-+ page_cache_release(e4b.bd_bitmap_page);
- }
-+ ext4_unlock_group(sb, entry->efd_group);
-+ kmem_cache_free(ext4_free_data_cachep, entry);
-+ ext4_mb_release_desc(&e4b);
-
- mb_debug(1, "freed %u blocks in %u structures\n", count, count2);
- }
-@@ -2789,22 +2784,22 @@ int __init init_ext4_mballoc(void)
- kmem_cache_create("ext4_alloc_context",
- sizeof(struct ext4_allocation_context),
- 0, SLAB_RECLAIM_ACCOUNT, NULL);
-- if (ext4_ac_cachep == NULL) {
-- kmem_cache_destroy(ext4_pspace_cachep);
-- return -ENOMEM;
-- }
-+ if (ext4_ac_cachep == NULL)
-+ goto out_err;
-+
-+ ext4_free_data_cachep =
-+ KMEM_CACHE(ext4_free_data, SLAB_RECLAIM_ACCOUNT);
-+ if (ext4_free_data_cachep == NULL)
-+ goto out1_err;
-
-- ext4_free_ext_cachep =
-- kmem_cache_create("ext4_free_block_extents",
-- sizeof(struct ext4_free_data),
-- 0, SLAB_RECLAIM_ACCOUNT, NULL);
-- if (ext4_free_ext_cachep == NULL) {
-- kmem_cache_destroy(ext4_pspace_cachep);
-- kmem_cache_destroy(ext4_ac_cachep);
-- return -ENOMEM;
-- }
- ext4_create_debugfs_entry();
- return 0;
-+
-+out1_err:
-+ kmem_cache_destroy(ext4_ac_cachep);
-+out_err:
-+ kmem_cache_destroy(ext4_pspace_cachep);
-+ return -ENOMEM;
- }
-
- void exit_ext4_mballoc(void)
-@@ -2816,7 +2811,7 @@ void exit_ext4_mballoc(void)
- rcu_barrier();
- kmem_cache_destroy(ext4_pspace_cachep);
- kmem_cache_destroy(ext4_ac_cachep);
-- kmem_cache_destroy(ext4_free_ext_cachep);
-+ kmem_cache_destroy(ext4_free_data_cachep);
- ext4_remove_debugfs_entry();
- }
-
-@@ -3375,8 +3370,8 @@ static void ext4_mb_generate_from_freeli
- n = rb_first(&(grp->bb_free_root));
-
- while (n) {
-- entry = rb_entry(n, struct ext4_free_data, node);
-- mb_set_bits(bitmap, entry->start_blk, entry->count);
-+ entry = rb_entry(n, struct ext4_free_data, efd_node);
-+ mb_set_bits(bitmap, entry->efd_start_blk, entry->efd_count);
- n = rb_next(n);
- }
- return;
-@@ -4631,11 +4626,11 @@ out:
- * AND the blocks are associated with the same group.
- */
- static int can_merge(struct ext4_free_data *entry1,
-- struct ext4_free_data *entry2)
-+ struct ext4_free_data *entry2)
- {
-- if ((entry1->t_tid == entry2->t_tid) &&
-- (entry1->group == entry2->group) &&
-- ((entry1->start_blk + entry1->count) == entry2->start_blk))
-+ if ((entry1->efd_tid == entry2->efd_tid) &&
-+ (entry1->efd_group == entry2->efd_group) &&
-+ ((entry1->efd_start_blk + entry1->efd_count) == entry2->efd_start_blk))
- return 1;
- return 0;
- }
-@@ -4648,7 +4643,6 @@ ext4_mb_free_metadata(handle_t *handle,
- struct ext4_free_data *entry;
- struct ext4_group_info *db = e4b->bd_info;
- struct super_block *sb = e4b->bd_sb;
-- struct ext4_sb_info *sbi = EXT4_SB(sb);
- struct rb_node **n = &db->bb_free_root.rb_node, *node;
- struct rb_node *parent = NULL, *new_node;
-
-@@ -4656,8 +4650,8 @@ ext4_mb_free_metadata(handle_t *handle,
- BUG_ON(e4b->bd_bitmap_page == NULL);
- BUG_ON(e4b->bd_buddy_page == NULL);
-
-- new_node = &new_entry->node;
-- block = new_entry->start_blk;
-+ new_node = &new_entry->efd_node;
-+ block = new_entry->efd_start_blk;
-
- if (!*n) {
- /* first free block exent. We need to
-@@ -4670,15 +4664,15 @@ ext4_mb_free_metadata(handle_t *handle,
- }
- while (*n) {
- parent = *n;
-- entry = rb_entry(parent, struct ext4_free_data, node);
-- if (block < entry->start_blk)
-+ entry = rb_entry(parent, struct ext4_free_data, efd_node);
-+ if (block < entry->efd_start_blk)
- n = &(*n)->rb_left;
-- else if (block >= (entry->start_blk + entry->count))
-+ else if (block >= (entry->efd_start_blk + entry->efd_count))
- n = &(*n)->rb_right;
- else {
- ext4_grp_locked_error(sb, e4b->bd_group, __func__,
- "Double free of blocks %d (%d %d)",
-- block, entry->start_blk, entry->count);
-+ block, entry->efd_start_blk, entry->efd_count);
- return 0;
- }
- }
-@@ -4689,34 +4683,29 @@ ext4_mb_free_metadata(handle_t *handle,
- /* Now try to see the extent can be merged to left and right */
- node = rb_prev(new_node);
- if (node) {
-- entry = rb_entry(node, struct ext4_free_data, node);
-+ entry = rb_entry(node, struct ext4_free_data, efd_node);
- if (can_merge(entry, new_entry)) {
-- new_entry->start_blk = entry->start_blk;
-- new_entry->count += entry->count;
-+ new_entry->efd_start_blk = entry->efd_start_blk;
-+ new_entry->efd_count += entry->efd_count;
- rb_erase(node, &(db->bb_free_root));
-- spin_lock(&sbi->s_md_lock);
-- list_del(&entry->list);
-- spin_unlock(&sbi->s_md_lock);
-- kmem_cache_free(ext4_free_ext_cachep, entry);
-+ ext4_journal_callback_del(handle, &entry->efd_jce);
-+ kmem_cache_free(ext4_free_data_cachep, entry);
- }
- }
-
- node = rb_next(new_node);
- if (node) {
-- entry = rb_entry(node, struct ext4_free_data, node);
-+ entry = rb_entry(node, struct ext4_free_data, efd_node);
- if (can_merge(new_entry, entry)) {
-- new_entry->count += entry->count;
-+ new_entry->efd_count += entry->efd_count;
- rb_erase(node, &(db->bb_free_root));
-- spin_lock(&sbi->s_md_lock);
-- list_del(&entry->list);
-- spin_unlock(&sbi->s_md_lock);
-- kmem_cache_free(ext4_free_ext_cachep, entry);
-+ ext4_journal_callback_del(handle, &entry->efd_jce);
-+ kmem_cache_free(ext4_free_data_cachep, entry);
- }
- }
- /* Add the extent to transaction's private list */
-- spin_lock(&sbi->s_md_lock);
-- list_add(&new_entry->list, &handle->h_transaction->t_private_list);
-- spin_unlock(&sbi->s_md_lock);
-+ ext4_journal_callback_add(handle, ext4_free_data_callback,
-+ &new_entry->efd_jce);
- return 0;
- }
-
-@@ -4851,14 +4840,14 @@ do_more:
- * be used until this transaction is committed
- *
- * We use __GFP_NOFAIL because ext4_free_blocks() is not allowed
- * to fail.
- */
-- new_entry = kmem_cache_alloc(ext4_free_ext_cachep,
-+ new_entry = kmem_cache_alloc(ext4_free_data_cachep,
- GFP_NOFS|__GFP_NOFAIL);
-- new_entry->start_blk = bit;
-- new_entry->group = block_group;
-- new_entry->count = count;
-- new_entry->t_tid = handle->h_transaction->t_tid;
-+ new_entry->efd_start_blk = bit;
-+ new_entry->efd_group = block_group;
-+ new_entry->efd_count = count;
-+ new_entry->efd_tid = handle->h_transaction->t_tid;
-
- ext4_lock_group(sb, block_group);
- mb_clear_bits(bitmap_bh->b_data, bit, count);
-Index: linux-stage/fs/ext4/super.c
-===================================================================
---- linux-stage.orig/fs/ext4/super.c
-+++ linux-stage/fs/ext4/super.c
-@@ -336,6 +336,18 @@ void ext4_journal_abort_handle(const cha
- jbd2_journal_abort_handle(handle);
- }
-
-+static void ext4_journal_commit_callback(journal_t *journal, transaction_t *txn)
-+{
-+ struct super_block *sb = journal->j_private;
-+ int error = is_journal_aborted(journal);
-+ struct ext4_journal_cb_entry *jce, *tmp;
-+
-+ list_for_each_entry_safe(jce, tmp, &txn->t_private_list, jce_list) {
-+ list_del_init(&jce->jce_list);
-+ jce->jce_func(sb, jce, error);
-+ }
-+}
-+
- /* Deal with the reporting of failure conditions on a filesystem such as
- * inconsistencies detected or read IO failures.
- *
-@@ -3492,6 +3504,8 @@ static int ext4_fill_super(struct super_
- ext4_count_dirs(sb));
- percpu_counter_set(&sbi->s_dirtyblocks_counter, 0);
-
-+ sbi->s_journal->j_commit_callback = ext4_journal_commit_callback;
-+
- no_journal:
- if (test_opt(sb, NOBH)) {
- if (!(test_opt(sb, DATA_FLAGS) == EXT4_MOUNT_WRITEBACK_DATA)) {
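The hunks above replace mballoc's hand-rolled s_md_lock/t_private_list handling with a generic per-transaction callback list: each ext4_free_data entry registers itself with ext4_journal_callback_add(), and ext4_journal_commit_callback() walks t_private_list once jbd2 commits (or aborts) the transaction, at which point ext4_free_data_callback() can return the blocks to the buddy cache. The ext4_jbd2.h side of this patch is not shown in these hunks; the following is only a sketch, under that assumption, of what the callback entry and its add/del helpers look like:

struct ext4_journal_cb_entry {
	/* list entry linked onto the transaction's t_private_list */
	struct list_head	jce_list;
	/* called once the transaction has committed (or aborted) */
	void (*jce_func)(struct super_block *sb,
			 struct ext4_journal_cb_entry *jce, int error);
	/* user data follows */
};

static inline void ext4_journal_callback_add(handle_t *handle,
			void (*func)(struct super_block *sb,
				     struct ext4_journal_cb_entry *jce,
				     int error),
			struct ext4_journal_cb_entry *jce)
{
	struct ext4_sb_info *sbi =
			EXT4_SB(handle->h_transaction->t_journal->j_private);

	jce->jce_func = func;
	spin_lock(&sbi->s_md_lock);
	list_add_tail(&jce->jce_list, &handle->h_transaction->t_private_list);
	spin_unlock(&sbi->s_md_lock);
}

static inline void ext4_journal_callback_del(handle_t *handle,
					     struct ext4_journal_cb_entry *jce)
{
	struct ext4_sb_info *sbi =
			EXT4_SB(handle->h_transaction->t_journal->j_private);

	spin_lock(&sbi->s_md_lock);
	list_del_init(&jce->jce_list);
	spin_unlock(&sbi->s_md_lock);
}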
+++ /dev/null
-Index: linux-2.6.32-504.3.3.el6.x86_64/include/linux/htree_lock.h
-===================================================================
---- /dev/null
-+++ linux-2.6.32-504.3.3.el6.x86_64/include/linux/htree_lock.h
-@@ -0,0 +1,187 @@
-+/*
-+ * include/linux/htree_lock.h
-+ *
-+ * Copyright (c) 2011, 2012, Intel Corporation.
-+ *
-+ * Author: Liang Zhen <liang@whamcloud.com>
-+ */
-+
-+/*
-+ * htree lock
-+ *
-+ * htree_lock is an advanced lock that supports five lock modes (a concept
-+ * taken from DLM) and is a sleeping lock.
-+ *
-+ * most common use case is:
-+ * - create a htree_lock_head for data
-+ * - each thread (contender) creates its own htree_lock
-+ * - contender needs to call htree_lock(lock_node, mode) to protect data and
-+ * call htree_unlock to release lock
-+ *
-+ * There is also a more advanced and complex use-case: a user can take a
-+ * PW/PR lock on a particular key, mostly while already holding a shared
-+ * lock on the htree (CW, CR)
-+ *
-+ * htree_lock(lock_node, HTREE_LOCK_CR); lock the htree with CR
-+ * htree_node_lock(lock_node, HTREE_LOCK_PR, key...); lock @key with PR
-+ * ...
-+ * htree_node_unlock(lock_node); unlock the key
-+ *
-+ * Another tip: we can have N levels of such keys; all we need to do is
-+ * specify N levels while creating the htree_lock_head, and then we can
-+ * lock/unlock a specific level by:
-+ * htree_node_lock(lock_node, mode1, key1, level1...);
-+ * do something;
-+ * htree_node_lock(lock_node, mode1, key2, level2...);
-+ * do something;
-+ * htree_node_unlock(lock_node, level2);
-+ * htree_node_unlock(lock_node, level1);
-+ *
-+ * NB: for multi-level locking, be careful about lock ordering to avoid deadlock
-+ */
-+
-+#ifndef _LINUX_HTREE_LOCK_H
-+#define _LINUX_HTREE_LOCK_H
-+
-+#include <linux/list.h>
-+#include <linux/spinlock.h>
-+#include <linux/sched.h>
-+
-+/*
-+ * Lock Modes
-+ * more details can be found here:
-+ * http://en.wikipedia.org/wiki/Distributed_lock_manager
-+ */
-+typedef enum {
-+ HTREE_LOCK_EX = 0, /* exclusive lock: incompatible with all others */
-+ HTREE_LOCK_PW, /* protected write: allows only CR users */
-+ HTREE_LOCK_PR, /* protected read: allow PR, CR users */
-+ HTREE_LOCK_CW, /* concurrent write: allow CR, CW users */
-+ HTREE_LOCK_CR, /* concurrent read: allow all but EX users */
-+ HTREE_LOCK_MAX, /* number of lock modes */
-+} htree_lock_mode_t;
-+
-+#define HTREE_LOCK_NL HTREE_LOCK_MAX
-+#define HTREE_LOCK_INVAL 0xdead10c
-+
-+enum {
-+ HTREE_HBITS_MIN = 2,
-+ HTREE_HBITS_DEF = 14,
-+ HTREE_HBITS_MAX = 32,
-+};
-+
-+enum {
-+ HTREE_EVENT_DISABLE = (0),
-+ HTREE_EVENT_RD = (1 << HTREE_LOCK_PR),
-+ HTREE_EVENT_WR = (1 << HTREE_LOCK_PW),
-+ HTREE_EVENT_RDWR = (HTREE_EVENT_RD | HTREE_EVENT_WR),
-+};
-+
-+struct htree_lock;
-+
-+typedef void (*htree_event_cb_t)(void *target, void *event);
-+
-+struct htree_lock_child {
-+ struct list_head lc_list; /* granted list */
-+ htree_event_cb_t lc_callback; /* event callback */
-+ unsigned lc_events; /* event types */
-+};
-+
-+struct htree_lock_head {
-+ unsigned long lh_lock; /* bits lock */
-+ /* blocked lock list (htree_lock) */
-+ struct list_head lh_blocked_list;
-+ /* # key levels */
-+ u16 lh_depth;
-+ /* hash bits for key and limit number of locks */
-+ u16 lh_hbits;
-+ /* counters for blocked locks */
-+ u16 lh_nblocked[HTREE_LOCK_MAX];
-+ /* counters for granted locks */
-+ u16 lh_ngranted[HTREE_LOCK_MAX];
-+ /* private data */
-+ void *lh_private;
-+ /* array of children locks */
-+ struct htree_lock_child lh_children[0];
-+};
-+
-+/* htree_lock_node_t is child-lock for a specific key (ln_value) */
-+struct htree_lock_node {
-+ htree_lock_mode_t ln_mode;
-+ /* major hash key */
-+ u16 ln_major_key;
-+ /* minor hash key */
-+ u16 ln_minor_key;
-+ struct list_head ln_major_list;
-+ struct list_head ln_minor_list;
-+ /* alive list, all locks (granted, blocked, listening) are on it */
-+ struct list_head ln_alive_list;
-+ /* blocked list */
-+ struct list_head ln_blocked_list;
-+ /* granted list */
-+ struct list_head ln_granted_list;
-+ void *ln_ev_target;
-+};
-+
-+struct htree_lock {
-+ struct task_struct *lk_task;
-+ struct htree_lock_head *lk_head;
-+ void *lk_private;
-+ unsigned lk_depth;
-+ htree_lock_mode_t lk_mode;
-+ struct list_head lk_blocked_list;
-+ struct htree_lock_node lk_nodes[0];
-+};
-+
-+/* create a lock head, which stands for a resource */
-+struct htree_lock_head *htree_lock_head_alloc(unsigned depth,
-+ unsigned hbits, unsigned priv);
-+/* free a lock head */
-+void htree_lock_head_free(struct htree_lock_head *lhead);
-+/* register event callback for child lock at level @depth */
-+void htree_lock_event_attach(struct htree_lock_head *lhead, unsigned depth,
-+ unsigned events, htree_event_cb_t callback);
-+/* create a lock handle, which stands for a thread */
-+struct htree_lock *htree_lock_alloc(unsigned depth, unsigned pbytes);
-+/* free a lock handle */
-+void htree_lock_free(struct htree_lock *lck);
-+/* lock htree; returns 1 when granted. When @wait is false, 0 is returned
-+ * if the lock can't be granted immediately */
-+int htree_lock_try(struct htree_lock *lck, struct htree_lock_head *lhead,
-+ htree_lock_mode_t mode, int wait);
-+/* unlock htree */
-+void htree_unlock(struct htree_lock *lck);
-+/* unlock and relock htree with @new_mode */
-+int htree_change_lock_try(struct htree_lock *lck,
-+ htree_lock_mode_t new_mode, int wait);
-+void htree_change_mode(struct htree_lock *lck, htree_lock_mode_t mode);
-+/* acquire the child lock (key) of the htree at level @dep; @event will be
-+ * sent to all listeners on this @key while the lock is being granted */
-+int htree_node_lock_try(struct htree_lock *lck, htree_lock_mode_t mode,
-+ u32 key, unsigned dep, int wait, void *event);
-+/* release the child lock at level @dep; this lock will keep listening on its
-+ * key if @event isn't NULL, and event_cb will be called against @lck while
-+ * granting any other lock at level @dep with the same key */
-+void htree_node_unlock(struct htree_lock *lck, unsigned dep, void *event);
-+/* stop listening on child lock at level @dep */
-+void htree_node_stop_listen(struct htree_lock *lck, unsigned dep);
-+/* for debug */
-+void htree_lock_stat_print(int depth);
-+void htree_lock_stat_reset(void);
-+
-+#define htree_lock(lck, lh, mode) htree_lock_try(lck, lh, mode, 1)
-+#define htree_change_lock(lck, mode) htree_change_lock_try(lck, mode, 1)
-+
-+#define htree_lock_mode(lck) ((lck)->lk_mode)
-+
-+#define htree_node_lock(lck, mode, key, dep) \
-+ htree_node_lock_try(lck, mode, key, dep, 1, NULL)
-+/* this is only safe in thread context of lock owner */
-+#define htree_node_is_granted(lck, dep) \
-+ ((lck)->lk_nodes[dep].ln_mode != HTREE_LOCK_INVAL && \
-+ (lck)->lk_nodes[dep].ln_mode != HTREE_LOCK_NL)
-+/* this is only safe in thread context of lock owner */
-+#define htree_node_is_listening(lck, dep) \
-+ ((lck)->lk_nodes[dep].ln_mode == HTREE_LOCK_NL)
-+
-+#endif
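The usage comment at the top of this header sketches the intended calling pattern. The following is a small, hypothetical caller written against the API declared above (a single key level, placeholder resource and key values, error handling elided) to make the tree-lock/child-lock pairing concrete; it is illustrative only, not part of the patch:

#include <linux/types.h>
#include <linux/errno.h>
#include <linux/htree_lock.h>

/* one lock head per shared resource (e.g. one per directory) */
static struct htree_lock_head *res_lhead;

static int res_init(void)
{
	res_lhead = htree_lock_head_alloc(1, HTREE_HBITS_DEF, 0);
	return res_lhead != NULL ? 0 : -ENOMEM;
}

/* each contending thread allocates its own handle, takes the tree in a
 * shared mode (CW), then takes a PW child-lock on the key it will modify */
static void res_modify_key(u32 key)
{
	struct htree_lock *lck = htree_lock_alloc(1, 0);

	if (lck == NULL)
		return;

	htree_lock(lck, res_lhead, HTREE_LOCK_CW);	/* shared tree lock */
	htree_node_lock(lck, HTREE_LOCK_PW, key, 0);	/* exclusive key lock */

	/* ... update the data guarded by @key ... */

	htree_node_unlock(lck, 0, NULL);		/* release the key */
	htree_unlock(lck);				/* release the tree */
	htree_lock_free(lck);
}

static void res_fini(void)
{
	htree_lock_head_free(res_lhead);
}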
-Index: linux-2.6.32-504.3.3.el6.x86_64/fs/ext4/htree_lock.c
-===================================================================
---- /dev/null
-+++ linux-2.6.32-504.3.3.el6.x86_64/fs/ext4/htree_lock.c
-@@ -0,0 +1,891 @@
-+/*
-+ * fs/ext4/htree_lock.c
-+ *
-+ * Copyright (c) 2011, 2012, Intel Corporation.
-+ *
-+ * Author: Liang Zhen <liang@whamcloud.com>
-+ */
-+#include <linux/jbd2.h>
-+#include <linux/hash.h>
-+#include <linux/module.h>
-+#include <linux/htree_lock.h>
-+
-+enum {
-+ HTREE_LOCK_BIT_EX = (1 << HTREE_LOCK_EX),
-+ HTREE_LOCK_BIT_PW = (1 << HTREE_LOCK_PW),
-+ HTREE_LOCK_BIT_PR = (1 << HTREE_LOCK_PR),
-+ HTREE_LOCK_BIT_CW = (1 << HTREE_LOCK_CW),
-+ HTREE_LOCK_BIT_CR = (1 << HTREE_LOCK_CR),
-+};
-+
-+enum {
-+ HTREE_LOCK_COMPAT_EX = 0,
-+ HTREE_LOCK_COMPAT_PW = HTREE_LOCK_COMPAT_EX | HTREE_LOCK_BIT_CR,
-+ HTREE_LOCK_COMPAT_PR = HTREE_LOCK_COMPAT_PW | HTREE_LOCK_BIT_PR,
-+ HTREE_LOCK_COMPAT_CW = HTREE_LOCK_COMPAT_PW | HTREE_LOCK_BIT_CW,
-+ HTREE_LOCK_COMPAT_CR = HTREE_LOCK_COMPAT_CW | HTREE_LOCK_BIT_PR |
-+ HTREE_LOCK_BIT_PW,
-+};
-+
-+static int htree_lock_compat[] = {
-+ [HTREE_LOCK_EX] HTREE_LOCK_COMPAT_EX,
-+ [HTREE_LOCK_PW] HTREE_LOCK_COMPAT_PW,
-+ [HTREE_LOCK_PR] HTREE_LOCK_COMPAT_PR,
-+ [HTREE_LOCK_CW] HTREE_LOCK_COMPAT_CW,
-+ [HTREE_LOCK_CR] HTREE_LOCK_COMPAT_CR,
-+};
-+
-+/* max allowed htree-lock depth.
-+ * We only need depth=3 for ext4, although users may specify a higher value. */
-+#define HTREE_LOCK_DEP_MAX 16
-+
-+#ifdef HTREE_LOCK_DEBUG
-+
-+static char *hl_name[] = {
-+ [HTREE_LOCK_EX] "EX",
-+ [HTREE_LOCK_PW] "PW",
-+ [HTREE_LOCK_PR] "PR",
-+ [HTREE_LOCK_CW] "CW",
-+ [HTREE_LOCK_CR] "CR",
-+};
-+
-+/* lock stats */
-+struct htree_lock_node_stats {
-+ unsigned long long blocked[HTREE_LOCK_MAX];
-+ unsigned long long granted[HTREE_LOCK_MAX];
-+ unsigned long long retried[HTREE_LOCK_MAX];
-+ unsigned long long events;
-+};
-+
-+struct htree_lock_stats {
-+ struct htree_lock_node_stats nodes[HTREE_LOCK_DEP_MAX];
-+ unsigned long long granted[HTREE_LOCK_MAX];
-+ unsigned long long blocked[HTREE_LOCK_MAX];
-+};
-+
-+static struct htree_lock_stats hl_stats;
-+
-+void htree_lock_stat_reset(void)
-+{
-+ memset(&hl_stats, 0, sizeof(hl_stats));
-+}
-+
-+void htree_lock_stat_print(int depth)
-+{
-+ int i;
-+ int j;
-+
-+ printk(KERN_DEBUG "HTREE LOCK STATS:\n");
-+ for (i = 0; i < HTREE_LOCK_MAX; i++) {
-+ printk(KERN_DEBUG "[%s]: G [%10llu], B [%10llu]\n",
-+ hl_name[i], hl_stats.granted[i], hl_stats.blocked[i]);
-+ }
-+ for (i = 0; i < depth; i++) {
-+ printk(KERN_DEBUG "HTREE CHILD [%d] STATS:\n", i);
-+ for (j = 0; j < HTREE_LOCK_MAX; j++) {
-+ printk(KERN_DEBUG
-+ "[%s]: G [%10llu], B [%10llu], R [%10llu]\n",
-+ hl_name[j], hl_stats.nodes[i].granted[j],
-+ hl_stats.nodes[i].blocked[j],
-+ hl_stats.nodes[i].retried[j]);
-+ }
-+ }
-+}
-+
-+#define lk_grant_inc(m) do { hl_stats.granted[m]++; } while (0)
-+#define lk_block_inc(m) do { hl_stats.blocked[m]++; } while (0)
-+#define ln_grant_inc(d, m) do { hl_stats.nodes[d].granted[m]++; } while (0)
-+#define ln_block_inc(d, m) do { hl_stats.nodes[d].blocked[m]++; } while (0)
-+#define ln_retry_inc(d, m) do { hl_stats.nodes[d].retried[m]++; } while (0)
-+#define ln_event_inc(d) do { hl_stats.nodes[d].events++; } while (0)
-+
-+#else /* !DEBUG */
-+
-+void htree_lock_stat_reset(void) {}
-+void htree_lock_stat_print(int depth) {}
-+
-+#define lk_grant_inc(m) do {} while (0)
-+#define lk_block_inc(m) do {} while (0)
-+#define ln_grant_inc(d, m) do {} while (0)
-+#define ln_block_inc(d, m) do {} while (0)
-+#define ln_retry_inc(d, m) do {} while (0)
-+#define ln_event_inc(d) do {} while (0)
-+
-+#endif /* DEBUG */
-+
-+EXPORT_SYMBOL(htree_lock_stat_reset);
-+EXPORT_SYMBOL(htree_lock_stat_print);
-+
-+#define HTREE_DEP_ROOT (-1)
-+
-+#define htree_spin_lock(lhead, dep) \
-+ bit_spin_lock((dep) + 1, &(lhead)->lh_lock)
-+#define htree_spin_unlock(lhead, dep) \
-+ bit_spin_unlock((dep) + 1, &(lhead)->lh_lock)
-+
-+#define htree_key_event_ignore(child, ln) \
-+ (!((child)->lc_events & (1 << (ln)->ln_mode)))
-+
-+static int
-+htree_key_list_empty(struct htree_lock_node *ln)
-+{
-+ return list_empty(&ln->ln_major_list) && list_empty(&ln->ln_minor_list);
-+}
-+
-+static void
-+htree_key_list_del_init(struct htree_lock_node *ln)
-+{
-+ struct htree_lock_node *tmp = NULL;
-+
-+ if (!list_empty(&ln->ln_minor_list)) {
-+ tmp = list_entry(ln->ln_minor_list.next,
-+ struct htree_lock_node, ln_minor_list);
-+ list_del_init(&ln->ln_minor_list);
-+ }
-+
-+ if (list_empty(&ln->ln_major_list))
-+ return;
-+
-+ if (tmp == NULL) { /* not on minor key list */
-+ list_del_init(&ln->ln_major_list);
-+ } else {
-+ BUG_ON(!list_empty(&tmp->ln_major_list));
-+ list_replace_init(&ln->ln_major_list, &tmp->ln_major_list);
-+ }
-+}
-+
-+static void
-+htree_key_list_replace_init(struct htree_lock_node *old,
-+ struct htree_lock_node *new)
-+{
-+ if (!list_empty(&old->ln_major_list))
-+ list_replace_init(&old->ln_major_list, &new->ln_major_list);
-+
-+ if (!list_empty(&old->ln_minor_list))
-+ list_replace_init(&old->ln_minor_list, &new->ln_minor_list);
-+}
-+
-+static void
-+htree_key_event_enqueue(struct htree_lock_child *child,
-+ struct htree_lock_node *ln, int dep, void *event)
-+{
-+ struct htree_lock_node *tmp;
-+
-+ /* NB: ALWAYS called holding lhead::lh_lock(dep) */
-+ BUG_ON(ln->ln_mode == HTREE_LOCK_NL);
-+ if (event == NULL || htree_key_event_ignore(child, ln))
-+ return;
-+
-+ /* shouldn't be a very long list */
-+ list_for_each_entry(tmp, &ln->ln_alive_list, ln_alive_list) {
-+ if (tmp->ln_mode == HTREE_LOCK_NL) {
-+ ln_event_inc(dep);
-+ if (child->lc_callback != NULL)
-+ child->lc_callback(tmp->ln_ev_target, event);
-+ }
-+ }
-+}
-+
-+static int
-+htree_node_lock_enqueue(struct htree_lock *newlk, struct htree_lock *curlk,
-+ unsigned dep, int wait, void *event)
-+{
-+ struct htree_lock_child *child = &newlk->lk_head->lh_children[dep];
-+ struct htree_lock_node *newln = &newlk->lk_nodes[dep];
-+ struct htree_lock_node *curln = &curlk->lk_nodes[dep];
-+
-+ /* NB: ALWAYS called holding lhead::lh_lock(dep) */
-+ /* NB: we only expect PR/PW lock modes here; only these two modes are
-+ * allowed for htree_node_lock (asserted in htree_node_lock_internal).
-+ * NL is only used for listeners; users can't directly request NL mode */
-+ if ((curln->ln_mode == HTREE_LOCK_NL) ||
-+ (curln->ln_mode != HTREE_LOCK_PW &&
-+ newln->ln_mode != HTREE_LOCK_PW)) {
-+ /* no conflict, attach it on granted list of @curlk */
-+ if (curln->ln_mode != HTREE_LOCK_NL) {
-+ list_add(&newln->ln_granted_list,
-+ &curln->ln_granted_list);
-+ } else {
-+ /* replace key owner */
-+ htree_key_list_replace_init(curln, newln);
-+ }
-+
-+ list_add(&newln->ln_alive_list, &curln->ln_alive_list);
-+ htree_key_event_enqueue(child, newln, dep, event);
-+ ln_grant_inc(dep, newln->ln_mode);
-+ return 1; /* still hold lh_lock */
-+ }
-+
-+ if (!wait) { /* can't grant and don't want to wait */
-+ ln_retry_inc(dep, newln->ln_mode);
-+ newln->ln_mode = HTREE_LOCK_INVAL;
-+ return -1; /* don't wait and just return -1 */
-+ }
-+
-+ newlk->lk_task = current;
-+ set_current_state(TASK_UNINTERRUPTIBLE);
-+ /* conflict, attach it on blocked list of curlk */
-+ list_add_tail(&newln->ln_blocked_list, &curln->ln_blocked_list);
-+ list_add(&newln->ln_alive_list, &curln->ln_alive_list);
-+ ln_block_inc(dep, newln->ln_mode);
-+
-+ htree_spin_unlock(newlk->lk_head, dep);
-+ /* wait to be given the lock */
-+ if (newlk->lk_task != NULL)
-+ schedule();
-+ /* granted, no doubt, wake up will set me RUNNING */
-+ if (event == NULL || htree_key_event_ignore(child, newln))
-+ return 0; /* granted without lh_lock */
-+
-+ htree_spin_lock(newlk->lk_head, dep);
-+ htree_key_event_enqueue(child, newln, dep, event);
-+ return 1; /* still hold lh_lock */
-+}
-+
-+/*
-+ * get PR/PW access to a particular tree-node according to @dep and @key;
-+ * it returns -1 if @wait is false and the lock can't be granted immediately.
-+ * All listeners (HTREE_LOCK_NL) on @dep and with the same @key will get
-+ * @event if it's not NULL.
-+ * NB: ALWAYS called holding lhead::lh_lock
-+ */
-+static int
-+htree_node_lock_internal(struct htree_lock_head *lhead, struct htree_lock *lck,
-+ htree_lock_mode_t mode, u32 key, unsigned dep,
-+ int wait, void *event)
-+{
-+ LIST_HEAD (list);
-+ struct htree_lock *tmp;
-+ struct htree_lock *tmp2;
-+ u16 major;
-+ u16 minor;
-+ u8 reverse;
-+ u8 ma_bits;
-+ u8 mi_bits;
-+
-+ BUG_ON(mode != HTREE_LOCK_PW && mode != HTREE_LOCK_PR);
-+ BUG_ON(htree_node_is_granted(lck, dep));
-+
-+ key = hash_long(key, lhead->lh_hbits);
-+
-+ mi_bits = lhead->lh_hbits >> 1;
-+ ma_bits = lhead->lh_hbits - mi_bits;
-+
-+ lck->lk_nodes[dep].ln_major_key = major = key & ((1U << ma_bits) - 1);
-+ lck->lk_nodes[dep].ln_minor_key = minor = key >> ma_bits;
-+ lck->lk_nodes[dep].ln_mode = mode;
-+
-+ /*
-+ * The major key list is an ordered list, so searches are started
-+ * at the end of the list that is numerically closer to major_key,
-+ * so at most half of the list will be walked (for well-distributed
-+ * keys). The list traversal aborts early if the expected key
-+ * location is passed.
-+ */
-+ reverse = (major >= (1 << (ma_bits - 1)));
-+
-+ if (reverse) {
-+ list_for_each_entry_reverse(tmp,
-+ &lhead->lh_children[dep].lc_list,
-+ lk_nodes[dep].ln_major_list) {
-+ if (tmp->lk_nodes[dep].ln_major_key == major) {
-+ goto search_minor;
-+
-+ } else if (tmp->lk_nodes[dep].ln_major_key < major) {
-+ /* attach _after_ @tmp */
-+ list_add(&lck->lk_nodes[dep].ln_major_list,
-+ &tmp->lk_nodes[dep].ln_major_list);
-+ goto out_grant_major;
-+ }
-+ }
-+
-+ list_add(&lck->lk_nodes[dep].ln_major_list,
-+ &lhead->lh_children[dep].lc_list);
-+ goto out_grant_major;
-+
-+ } else {
-+ list_for_each_entry(tmp, &lhead->lh_children[dep].lc_list,
-+ lk_nodes[dep].ln_major_list) {
-+ if (tmp->lk_nodes[dep].ln_major_key == major) {
-+ goto search_minor;
-+
-+ } else if (tmp->lk_nodes[dep].ln_major_key > major) {
-+ /* insert _before_ @tmp */
-+ list_add_tail(&lck->lk_nodes[dep].ln_major_list,
-+ &tmp->lk_nodes[dep].ln_major_list);
-+ goto out_grant_major;
-+ }
-+ }
-+
-+ list_add_tail(&lck->lk_nodes[dep].ln_major_list,
-+ &lhead->lh_children[dep].lc_list);
-+ goto out_grant_major;
-+ }
-+
-+ search_minor:
-+ /*
-+ * NB: the minor_key list doesn't have a "head"; @list is just a
-+ * temporary stub to help with list searching, so make sure it's
-+ * removed after searching.
-+ * minor_key list is an ordered list too.
-+ */
-+ list_add_tail(&list, &tmp->lk_nodes[dep].ln_minor_list);
-+
-+ reverse = (minor >= (1 << (mi_bits - 1)));
-+
-+ if (reverse) {
-+ list_for_each_entry_reverse(tmp2, &list,
-+ lk_nodes[dep].ln_minor_list) {
-+ if (tmp2->lk_nodes[dep].ln_minor_key == minor) {
-+ goto out_enqueue;
-+
-+ } else if (tmp2->lk_nodes[dep].ln_minor_key < minor) {
-+ /* attach _after_ @tmp2 */
-+ list_add(&lck->lk_nodes[dep].ln_minor_list,
-+ &tmp2->lk_nodes[dep].ln_minor_list);
-+ goto out_grant_minor;
-+ }
-+ }
-+
-+ list_add(&lck->lk_nodes[dep].ln_minor_list, &list);
-+
-+ } else {
-+ list_for_each_entry(tmp2, &list,
-+ lk_nodes[dep].ln_minor_list) {
-+ if (tmp2->lk_nodes[dep].ln_minor_key == minor) {
-+ goto out_enqueue;
-+
-+ } else if (tmp2->lk_nodes[dep].ln_minor_key > minor) {
-+ /* insert _before_ @tmp2 */
-+ list_add_tail(&lck->lk_nodes[dep].ln_minor_list,
-+ &tmp2->lk_nodes[dep].ln_minor_list);
-+ goto out_grant_minor;
-+ }
-+ }
-+
-+ list_add_tail(&lck->lk_nodes[dep].ln_minor_list, &list);
-+ }
-+
-+ out_grant_minor:
-+ if (list.next == &lck->lk_nodes[dep].ln_minor_list) {
-+ /* new lock @lck is the first one on minor_key list, which
-+ * means it has the smallest minor_key and it should
-+ * replace @tmp as minor_key owner */
-+ list_replace_init(&tmp->lk_nodes[dep].ln_major_list,
-+ &lck->lk_nodes[dep].ln_major_list);
-+ }
-+ /* remove the temporary head */
-+ list_del(&list);
-+
-+ out_grant_major:
-+ ln_grant_inc(dep, lck->lk_nodes[dep].ln_mode);
-+ return 1; /* granted with holding lh_lock */
-+
-+ out_enqueue:
-+ list_del(&list); /* remove temporary head */
-+ return htree_node_lock_enqueue(lck, tmp2, dep, wait, event);
-+}
-+
-+/*
-+ * release the key of @lck at level @dep, and grant any blocked locks.
-+ * the caller will still listen on @key if @event is not NULL, which means
-+ * the caller can see an event (via event_cb) while any lock with the same
-+ * key at level @dep is being granted.
-+ * NB: ALWAYS called holding lhead::lh_lock
-+ * NB: listener will not block anyone because listening mode is HTREE_LOCK_NL
-+ */
-+static void
-+htree_node_unlock_internal(struct htree_lock_head *lhead,
-+ struct htree_lock *curlk, unsigned dep, void *event)
-+{
-+ struct htree_lock_node *curln = &curlk->lk_nodes[dep];
-+ struct htree_lock *grtlk = NULL;
-+ struct htree_lock_node *grtln;
-+ struct htree_lock *poslk;
-+ struct htree_lock *tmplk;
-+
-+ if (!htree_node_is_granted(curlk, dep))
-+ return;
-+
-+ if (!list_empty(&curln->ln_granted_list)) {
-+ /* there is another granted lock */
-+ grtlk = list_entry(curln->ln_granted_list.next,
-+ struct htree_lock,
-+ lk_nodes[dep].ln_granted_list);
-+ list_del_init(&curln->ln_granted_list);
-+ }
-+
-+ if (grtlk == NULL && !list_empty(&curln->ln_blocked_list)) {
-+ /*
-+ * @curlk is the only granted lock, so we confirmed:
-+ * a) curln is key owner (attached on major/minor_list),
-+ * so if there is any blocked lock, it should be attached
-+ * on curln->ln_blocked_list
-+ * b) we always can grant the first blocked lock
-+ */
-+ grtlk = list_entry(curln->ln_blocked_list.next,
-+ struct htree_lock,
-+ lk_nodes[dep].ln_blocked_list);
-+ BUG_ON(grtlk->lk_task == NULL);
-+ wake_up_process(grtlk->lk_task);
-+ }
-+
-+ if (event != NULL &&
-+ lhead->lh_children[dep].lc_events != HTREE_EVENT_DISABLE) {
-+ curln->ln_ev_target = event;
-+ curln->ln_mode = HTREE_LOCK_NL; /* listen! */
-+ } else {
-+ curln->ln_mode = HTREE_LOCK_INVAL;
-+ }
-+
-+ if (grtlk == NULL) { /* I must be the only one locking this key */
-+ struct htree_lock_node *tmpln;
-+
-+ BUG_ON(htree_key_list_empty(curln));
-+
-+ if (curln->ln_mode == HTREE_LOCK_NL) /* listening */
-+ return;
-+
-+ /* not listening */
-+ if (list_empty(&curln->ln_alive_list)) { /* no more listener */
-+ htree_key_list_del_init(curln);
-+ return;
-+ }
-+
-+ tmpln = list_entry(curln->ln_alive_list.next,
-+ struct htree_lock_node, ln_alive_list);
-+
-+ BUG_ON(tmpln->ln_mode != HTREE_LOCK_NL);
-+
-+ htree_key_list_replace_init(curln, tmpln);
-+ list_del_init(&curln->ln_alive_list);
-+
-+ return;
-+ }
-+
-+ /* have a granted lock */
-+ grtln = &grtlk->lk_nodes[dep];
-+ if (!list_empty(&curln->ln_blocked_list)) {
-+ /* only key owner can be on both lists */
-+ BUG_ON(htree_key_list_empty(curln));
-+
-+ if (list_empty(&grtln->ln_blocked_list)) {
-+ list_add(&grtln->ln_blocked_list,
-+ &curln->ln_blocked_list);
-+ }
-+ list_del_init(&curln->ln_blocked_list);
-+ }
-+ /*
-+ * NB: this is the tricky part:
-+ * We have only two modes for child-lock (PR and PW), also,
-+ * only owner of the key (attached on major/minor_list) can be on
-+ * both blocked_list and granted_list, so @grtlk must be one
-+ * of these two cases:
-+ *
-+ * a) @grtlk is taken from granted_list, which means we've granted
-+ * more than one lock so @grtlk has to be PR, the first blocked
-+ * lock must be PW and we can't grant it at all.
-+ * So even @grtlk is not owner of the key (empty blocked_list),
-+ * we don't care because we can't grant any lock.
-+ * b) we just grant a new lock which is taken from head of blocked
-+ * list, and it should be the first granted lock, and it should
-+ * be the first one linked on blocked_list.
-+ *
-+ * Either way, we can get correct result by iterating blocked_list
-+ * of @grtlk, and don't have to bother on how to find out
-+ * owner of current key.
-+ */
-+ list_for_each_entry_safe(poslk, tmplk, &grtln->ln_blocked_list,
-+ lk_nodes[dep].ln_blocked_list) {
-+ if (grtlk->lk_nodes[dep].ln_mode == HTREE_LOCK_PW ||
-+ poslk->lk_nodes[dep].ln_mode == HTREE_LOCK_PW)
-+ break;
-+ /* grant all readers */
-+ list_del_init(&poslk->lk_nodes[dep].ln_blocked_list);
-+ list_add(&poslk->lk_nodes[dep].ln_granted_list,
-+ &grtln->ln_granted_list);
-+
-+ BUG_ON(poslk->lk_task == NULL);
-+ wake_up_process(poslk->lk_task);
-+ }
-+
-+ /* if @curln is the owner of this key, replace it with @grtln */
-+ if (!htree_key_list_empty(curln))
-+ htree_key_list_replace_init(curln, grtln);
-+
-+ if (curln->ln_mode == HTREE_LOCK_INVAL)
-+ list_del_init(&curln->ln_alive_list);
-+}
-+
-+/*
-+ * a wrapper of htree_node_lock_internal; it returns 1 when granted and
-+ * 0 only if @wait is false and the lock can't be granted immediately
-+ */
-+int
-+htree_node_lock_try(struct htree_lock *lck, htree_lock_mode_t mode,
-+ u32 key, unsigned dep, int wait, void *event)
-+{
-+ struct htree_lock_head *lhead = lck->lk_head;
-+ int rc;
-+
-+ BUG_ON(dep >= lck->lk_depth);
-+ BUG_ON(lck->lk_mode == HTREE_LOCK_INVAL);
-+
-+ htree_spin_lock(lhead, dep);
-+ rc = htree_node_lock_internal(lhead, lck, mode, key, dep, wait, event);
-+ if (rc != 0)
-+ htree_spin_unlock(lhead, dep);
-+ return rc >= 0;
-+}
-+EXPORT_SYMBOL(htree_node_lock_try);
-+
-+/* wrapper of htree_node_unlock_internal */
-+void
-+htree_node_unlock(struct htree_lock *lck, unsigned dep, void *event)
-+{
-+ struct htree_lock_head *lhead = lck->lk_head;
-+
-+ BUG_ON(dep >= lck->lk_depth);
-+ BUG_ON(lck->lk_mode == HTREE_LOCK_INVAL);
-+
-+ htree_spin_lock(lhead, dep);
-+ htree_node_unlock_internal(lhead, lck, dep, event);
-+ htree_spin_unlock(lhead, dep);
-+}
-+EXPORT_SYMBOL(htree_node_unlock);
-+
-+/* stop listening on child-lock level @dep */
-+void
-+htree_node_stop_listen(struct htree_lock *lck, unsigned dep)
-+{
-+ struct htree_lock_node *ln = &lck->lk_nodes[dep];
-+ struct htree_lock_node *tmp;
-+
-+ BUG_ON(htree_node_is_granted(lck, dep));
-+ BUG_ON(!list_empty(&ln->ln_blocked_list));
-+ BUG_ON(!list_empty(&ln->ln_granted_list));
-+
-+ if (!htree_node_is_listening(lck, dep))
-+ return;
-+
-+ htree_spin_lock(lck->lk_head, dep);
-+ ln->ln_mode = HTREE_LOCK_INVAL;
-+ ln->ln_ev_target = NULL;
-+
-+ if (htree_key_list_empty(ln)) { /* not owner */
-+ list_del_init(&ln->ln_alive_list);
-+ goto out;
-+ }
-+
-+ /* I'm the owner... */
-+ if (list_empty(&ln->ln_alive_list)) { /* no more listener */
-+ htree_key_list_del_init(ln);
-+ goto out;
-+ }
-+
-+ tmp = list_entry(ln->ln_alive_list.next,
-+ struct htree_lock_node, ln_alive_list);
-+
-+ BUG_ON(tmp->ln_mode != HTREE_LOCK_NL);
-+ htree_key_list_replace_init(ln, tmp);
-+ list_del_init(&ln->ln_alive_list);
-+ out:
-+ htree_spin_unlock(lck->lk_head, dep);
-+}
-+EXPORT_SYMBOL(htree_node_stop_listen);
-+
-+/* release all child-locks if we have any */
-+static void
-+htree_node_release_all(struct htree_lock *lck)
-+{
-+ int i;
-+
-+ for (i = 0; i < lck->lk_depth; i++) {
-+ if (htree_node_is_granted(lck, i))
-+ htree_node_unlock(lck, i, NULL);
-+ else if (htree_node_is_listening(lck, i))
-+ htree_node_stop_listen(lck, i);
-+ }
-+}
-+
-+/*
-+ * obtain the htree lock; the caller may block here if there is a conflict
-+ * with any granted or blocked lock and @wait is true.
-+ * NB: ALWAYS called holding lhead::lh_lock
-+ */
-+static int
-+htree_lock_internal(struct htree_lock *lck, int wait)
-+{
-+ struct htree_lock_head *lhead = lck->lk_head;
-+ int granted = 0;
-+ int blocked = 0;
-+ int i;
-+
-+ for (i = 0; i < HTREE_LOCK_MAX; i++) {
-+ if (lhead->lh_ngranted[i] != 0)
-+ granted |= 1 << i;
-+ if (lhead->lh_nblocked[i] != 0)
-+ blocked |= 1 << i;
-+ }
-+ if ((htree_lock_compat[lck->lk_mode] & granted) != granted ||
-+ (htree_lock_compat[lck->lk_mode] & blocked) != blocked) {
-+ /* block the current lock even if it only conflicts with another
-+ * blocked lock, so that a lock like EX won't starve */
-+ if (!wait)
-+ return -1;
-+ lhead->lh_nblocked[lck->lk_mode]++;
-+ lk_block_inc(lck->lk_mode);
-+
-+ lck->lk_task = current;
-+ list_add_tail(&lck->lk_blocked_list, &lhead->lh_blocked_list);
-+
-+retry:
-+ set_current_state(TASK_UNINTERRUPTIBLE);
-+ htree_spin_unlock(lhead, HTREE_DEP_ROOT);
-+ /* wait to be given the lock */
-+ if (lck->lk_task != NULL)
-+ schedule();
-+ /* granted, no doubt. wake_up will set me RUNNING.
-+ * Since the thread could be woken up spuriously,
-+ * we need to check again whether the lock was granted. */
-+ if (!list_empty(&lck->lk_blocked_list)) {
-+ htree_spin_lock(lhead, HTREE_DEP_ROOT);
-+ if (list_empty(&lck->lk_blocked_list)) {
-+ htree_spin_unlock(lhead, HTREE_DEP_ROOT);
-+ return 0;
-+ }
-+ goto retry;
-+ }
-+ return 0; /* without lh_lock */
-+ }
-+ lhead->lh_ngranted[lck->lk_mode]++;
-+ lk_grant_inc(lck->lk_mode);
-+ return 1;
-+}
-+
-+/* release htree lock. NB: ALWAYS called holding lhead::lh_lock */
-+static void
-+htree_unlock_internal(struct htree_lock *lck)
-+{
-+ struct htree_lock_head *lhead = lck->lk_head;
-+ struct htree_lock *tmp;
-+ struct htree_lock *tmp2;
-+ int granted = 0;
-+ int i;
-+
-+ BUG_ON(lhead->lh_ngranted[lck->lk_mode] == 0);
-+
-+ lhead->lh_ngranted[lck->lk_mode]--;
-+ lck->lk_mode = HTREE_LOCK_INVAL;
-+
-+ for (i = 0; i < HTREE_LOCK_MAX; i++) {
-+ if (lhead->lh_ngranted[i] != 0)
-+ granted |= 1 << i;
-+ }
-+ list_for_each_entry_safe(tmp, tmp2,
-+ &lhead->lh_blocked_list, lk_blocked_list) {
-+ /* conflict with any granted lock? */
-+ if ((htree_lock_compat[tmp->lk_mode] & granted) != granted)
-+ break;
-+
-+ list_del_init(&tmp->lk_blocked_list);
-+
-+ BUG_ON(lhead->lh_nblocked[tmp->lk_mode] == 0);
-+
-+ lhead->lh_nblocked[tmp->lk_mode]--;
-+ lhead->lh_ngranted[tmp->lk_mode]++;
-+ granted |= 1 << tmp->lk_mode;
-+
-+ BUG_ON(tmp->lk_task == NULL);
-+ wake_up_process(tmp->lk_task);
-+ }
-+}
-+
-+/* wrapper of htree_lock_internal and the exported interface.
-+ * It always returns 1 with the lock granted if @wait is true; it can return 0
-+ * if @wait is false and the locking request can't be granted immediately */
-+int
-+htree_lock_try(struct htree_lock *lck, struct htree_lock_head *lhead,
-+ htree_lock_mode_t mode, int wait)
-+{
-+ int rc;
-+
-+ BUG_ON(lck->lk_depth > lhead->lh_depth);
-+ BUG_ON(lck->lk_head != NULL);
-+ BUG_ON(lck->lk_task != NULL);
-+
-+ lck->lk_head = lhead;
-+ lck->lk_mode = mode;
-+
-+ htree_spin_lock(lhead, HTREE_DEP_ROOT);
-+ rc = htree_lock_internal(lck, wait);
-+ if (rc != 0)
-+ htree_spin_unlock(lhead, HTREE_DEP_ROOT);
-+ return rc >= 0;
-+}
-+EXPORT_SYMBOL(htree_lock_try);
-+
-+/* wrapper of htree_unlock_internal and the exported interface.
-+ * It releases all htree_node_locks as well as the htree_lock itself */
-+void
-+htree_unlock(struct htree_lock *lck)
-+{
-+ BUG_ON(lck->lk_head == NULL);
-+ BUG_ON(lck->lk_mode == HTREE_LOCK_INVAL);
-+
-+ htree_node_release_all(lck);
-+
-+ htree_spin_lock(lck->lk_head, HTREE_DEP_ROOT);
-+ htree_unlock_internal(lck);
-+ htree_spin_unlock(lck->lk_head, HTREE_DEP_ROOT);
-+ lck->lk_head = NULL;
-+ lck->lk_task = NULL;
-+}
-+EXPORT_SYMBOL(htree_unlock);
-+
-+/* change lock mode */
-+void
-+htree_change_mode(struct htree_lock *lck, htree_lock_mode_t mode)
-+{
-+ BUG_ON(lck->lk_mode == HTREE_LOCK_INVAL);
-+ lck->lk_mode = mode;
-+}
-+EXPORT_SYMBOL(htree_change_mode);
-+
-+/* release the htree lock, and lock it again with a new mode.
-+ * This function first releases all htree_node_locks and the htree_lock,
-+ * then tries to regain the htree_lock with the new @mode.
-+ * It always returns 1 with the lock granted if @wait is true; it can return 0
-+ * if @wait is false and the locking request can't be granted immediately */
-+int
-+htree_change_lock_try(struct htree_lock *lck, htree_lock_mode_t mode, int wait)
-+{
-+ struct htree_lock_head *lhead = lck->lk_head;
-+ int rc;
-+
-+ BUG_ON(lhead == NULL);
-+ BUG_ON(lck->lk_mode == mode);
-+ BUG_ON(lck->lk_mode == HTREE_LOCK_INVAL || mode == HTREE_LOCK_INVAL);
-+
-+ htree_node_release_all(lck);
-+
-+ htree_spin_lock(lhead, HTREE_DEP_ROOT);
-+ htree_unlock_internal(lck);
-+ lck->lk_mode = mode;
-+ rc = htree_lock_internal(lck, wait);
-+ if (rc != 0)
-+ htree_spin_unlock(lhead, HTREE_DEP_ROOT);
-+ return rc >= 0;
-+}
-+EXPORT_SYMBOL(htree_change_lock_try);
-+
-+/* create a htree_lock head with @depth levels (number of child-locks);
-+ * it is a per-resource structure */
-+struct htree_lock_head *
-+htree_lock_head_alloc(unsigned depth, unsigned hbits, unsigned priv)
-+{
-+ struct htree_lock_head *lhead;
-+ int i;
-+
-+ if (depth > HTREE_LOCK_DEP_MAX) {
-+ printk(KERN_ERR "%d is larger than max htree_lock depth %d\n",
-+ depth, HTREE_LOCK_DEP_MAX);
-+ return NULL;
-+ }
-+
-+ lhead = kzalloc(offsetof(struct htree_lock_head,
-+ lh_children[depth]) + priv, GFP_NOFS);
-+ if (lhead == NULL)
-+ return NULL;
-+
-+ if (hbits < HTREE_HBITS_MIN)
-+ lhead->lh_hbits = HTREE_HBITS_MIN;
-+ else if (hbits > HTREE_HBITS_MAX)
-+ lhead->lh_hbits = HTREE_HBITS_MAX;
-+ else
-+ lhead->lh_hbits = hbits;
-+
-+ lhead->lh_lock = 0;
-+ lhead->lh_depth = depth;
-+ INIT_LIST_HEAD(&lhead->lh_blocked_list);
-+ if (priv > 0) {
-+ lhead->lh_private = (void *)lhead +
-+ offsetof(struct htree_lock_head, lh_children[depth]);
-+ }
-+
-+ for (i = 0; i < depth; i++) {
-+ INIT_LIST_HEAD(&lhead->lh_children[i].lc_list);
-+ lhead->lh_children[i].lc_events = HTREE_EVENT_DISABLE;
-+ }
-+ return lhead;
-+}
-+EXPORT_SYMBOL(htree_lock_head_alloc);
-+
-+/* free the htree_lock head */
-+void
-+htree_lock_head_free(struct htree_lock_head *lhead)
-+{
-+ int i;
-+
-+ BUG_ON(!list_empty(&lhead->lh_blocked_list));
-+ for (i = 0; i < lhead->lh_depth; i++)
-+ BUG_ON(!list_empty(&lhead->lh_children[i].lc_list));
-+ kfree(lhead);
-+}
-+EXPORT_SYMBOL(htree_lock_head_free);
-+
-+/* register event callback for @events of child-lock at level @dep */
-+void
-+htree_lock_event_attach(struct htree_lock_head *lhead, unsigned dep,
-+ unsigned events, htree_event_cb_t callback)
-+{
-+ BUG_ON(lhead->lh_depth <= dep);
-+ lhead->lh_children[dep].lc_events = events;
-+ lhead->lh_children[dep].lc_callback = callback;
-+}
-+EXPORT_SYMBOL(htree_lock_event_attach);
-+
-+/* allocate a htree_lock, which is a per-thread structure; @pbytes is the
-+ * number of extra bytes reserved as private data for the caller */
-+struct htree_lock *
-+htree_lock_alloc(unsigned depth, unsigned pbytes)
-+{
-+ struct htree_lock *lck;
-+ int i = offsetof(struct htree_lock, lk_nodes[depth]);
-+
-+ if (depth > HTREE_LOCK_DEP_MAX) {
-+ printk(KERN_ERR "%d is larger than max htree_lock depth %d\n",
-+ depth, HTREE_LOCK_DEP_MAX);
-+ return NULL;
-+ }
-+ lck = kzalloc(i + pbytes, GFP_NOFS);
-+ if (lck == NULL)
-+ return NULL;
-+
-+ if (pbytes != 0)
-+ lck->lk_private = (void *)lck + i;
-+ lck->lk_mode = HTREE_LOCK_INVAL;
-+ lck->lk_depth = depth;
-+ INIT_LIST_HEAD(&lck->lk_blocked_list);
-+
-+ for (i = 0; i < depth; i++) {
-+ struct htree_lock_node *node = &lck->lk_nodes[i];
-+
-+ node->ln_mode = HTREE_LOCK_INVAL;
-+ INIT_LIST_HEAD(&node->ln_major_list);
-+ INIT_LIST_HEAD(&node->ln_minor_list);
-+ INIT_LIST_HEAD(&node->ln_alive_list);
-+ INIT_LIST_HEAD(&node->ln_blocked_list);
-+ INIT_LIST_HEAD(&node->ln_granted_list);
-+ }
-+
-+ return lck;
-+}
-+EXPORT_SYMBOL(htree_lock_alloc);
-+
-+/* free htree_lock node */
-+void
-+htree_lock_free(struct htree_lock *lck)
-+{
-+ BUG_ON(lck->lk_mode != HTREE_LOCK_INVAL);
-+ kfree(lck);
-+}
-+EXPORT_SYMBOL(htree_lock_free);
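htree_lock_internal() above grants a request only when the requested mode is compatible with every mode already granted and with every mode already blocked; testing the blocked mask as well is what keeps an EX waiter from starving behind a stream of mutually compatible readers. A one-function restatement of that bitmask test, with a hypothetical helper name, purely to spell out the convention used by the compatibility table at the top of this file:

static int htree_mode_is_grantable(htree_lock_mode_t mode,
				   unsigned granted_mask,
				   unsigned blocked_mask)
{
	/* each mask has bit (1 << m) set when at least one lock of mode m
	 * is currently granted/blocked; the request is grantable only if
	 * htree_lock_compat[mode] covers both masks */
	return (htree_lock_compat[mode] & granted_mask) == granted_mask &&
	       (htree_lock_compat[mode] & blocked_mask) == blocked_mask;
}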
-Index: linux-2.6.32-504.3.3.el6.x86_64/fs/ext4/ext4.h
-===================================================================
---- linux-2.6.32-504.3.3.el6.x86_64.orig/fs/ext4/ext4.h
-+++ linux-2.6.32-504.3.3.el6.x86_64/fs/ext4/ext4.h
-@@ -27,6 +27,7 @@
- #include <linux/mutex.h>
- #include <linux/timer.h>
- #include <linux/wait.h>
-+#include <linux/htree_lock.h>
- #include <linux/blockgroup_lock.h>
- #include <linux/percpu_counter.h>
- #ifdef __KERNEL__
-@@ -1625,6 +1626,71 @@ ext4_dir_htree_level(struct super_block
- EXT4_HTREE_LEVEL : EXT4_HTREE_LEVEL_COMPAT;
- }
-
-+/* assume name-hash is protected by upper layer */
-+#define EXT4_HTREE_LOCK_HASH 0
-+
-+enum ext4_pdo_lk_types {
-+#if EXT4_HTREE_LOCK_HASH
-+ EXT4_LK_HASH,
-+#endif
-+ EXT4_LK_DX, /* index block */
-+ EXT4_LK_DE, /* directory entry block */
-+ EXT4_LK_SPIN, /* spinlock */
-+ EXT4_LK_MAX,
-+};
-+
-+/* read-only bit */
-+#define EXT4_LB_RO(b) (1 << (b))
-+/* read + write, high bits for writer */
-+#define EXT4_LB_RW(b) ((1 << (b)) | (1 << (EXT4_LK_MAX + (b))))
-+
-+enum ext4_pdo_lock_bits {
-+ /* DX lock bits */
-+ EXT4_LB_DX_RO = EXT4_LB_RO(EXT4_LK_DX),
-+ EXT4_LB_DX = EXT4_LB_RW(EXT4_LK_DX),
-+ /* DE lock bits */
-+ EXT4_LB_DE_RO = EXT4_LB_RO(EXT4_LK_DE),
-+ EXT4_LB_DE = EXT4_LB_RW(EXT4_LK_DE),
-+ /* DX spinlock bits */
-+ EXT4_LB_SPIN_RO = EXT4_LB_RO(EXT4_LK_SPIN),
-+ EXT4_LB_SPIN = EXT4_LB_RW(EXT4_LK_SPIN),
-+ /* accurate searching */
-+ EXT4_LB_EXACT = EXT4_LB_RO(EXT4_LK_MAX << 1),
-+};
-+
-+enum ext4_pdo_lock_opc {
-+ /* external */
-+ EXT4_HLOCK_READDIR = (EXT4_LB_DE_RO | EXT4_LB_DX_RO),
-+ EXT4_HLOCK_LOOKUP = (EXT4_LB_DE_RO | EXT4_LB_SPIN_RO |
-+ EXT4_LB_EXACT),
-+ EXT4_HLOCK_DEL = (EXT4_LB_DE | EXT4_LB_SPIN_RO |
-+ EXT4_LB_EXACT),
-+ EXT4_HLOCK_ADD = (EXT4_LB_DE | EXT4_LB_SPIN_RO),
-+
-+ /* internal */
-+ EXT4_HLOCK_LOOKUP_SAFE = (EXT4_LB_DE_RO | EXT4_LB_DX_RO |
-+ EXT4_LB_EXACT),
-+ EXT4_HLOCK_DEL_SAFE = (EXT4_LB_DE | EXT4_LB_DX_RO | EXT4_LB_EXACT),
-+ EXT4_HLOCK_SPLIT = (EXT4_LB_DE | EXT4_LB_DX | EXT4_LB_SPIN),
-+};
-+
-+extern struct htree_lock_head *ext4_htree_lock_head_alloc(unsigned hbits);
-+#define ext4_htree_lock_head_free(lhead) htree_lock_head_free(lhead)
-+
-+extern struct htree_lock *ext4_htree_lock_alloc(void);
-+#define ext4_htree_lock_free(lck) htree_lock_free(lck)
-+
-+extern void ext4_htree_lock(struct htree_lock *lck,
-+ struct htree_lock_head *lhead,
-+ struct inode *dir, unsigned flags);
-+#define ext4_htree_unlock(lck) htree_unlock(lck)
-+
-+extern struct buffer_head * __ext4_find_entry(struct inode *dir,
-+ const struct qstr *d_name,
-+ struct ext4_dir_entry_2 **res_dir,
-+ struct htree_lock *lck);
-+extern int __ext4_add_entry(handle_t *handle, struct dentry *dentry,
-+ struct inode *inode, struct htree_lock *lck);
- void ext4_get_group_no_and_offset(struct super_block *sb, ext4_fsblk_t blocknr,
- ext4_group_t *blockgrpp, ext4_grpblk_t *offsetp);
-
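The lock-bit encoding above packs both a child-lock level and a read/write intent into one flags word: EXT4_LB_RO(b) sets only the low (reader) bit for level b, while EXT4_LB_RW(b) also sets a writer bit at b + EXT4_LK_MAX. The namei.c changes below recover the level with ffz(~lmask) and choose PR versus PW by testing whether the whole per-level mask is present in the operation's flags, so EXT4_HLOCK_ADD locks the DE level PW but the SPIN level only PR. Two hypothetical helpers restating that convention (namei.c open-codes the same two expressions):

/* hypothetical helpers, shown only to document the bit layout above */
static inline unsigned ext4_pdo_lock_level(unsigned lmask)
{
	/* the lowest set bit of a per-level mask is the child-lock level,
	 * e.g. ffz(~EXT4_LB_DE) == EXT4_LK_DE */
	return ffz(~lmask);
}

static inline htree_lock_mode_t ext4_pdo_lock_mode(unsigned flags,
						   unsigned lmask)
{
	/* when the flags keep both the reader and the writer bit of the
	 * mask, the level is locked PW; otherwise it is locked PR */
	return (flags & lmask) == lmask ? HTREE_LOCK_PW : HTREE_LOCK_PR;
}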
-Index: linux-2.6.32-504.3.3.el6.x86_64/fs/ext4/namei.c
-===================================================================
---- linux-2.6.32-504.3.3.el6.x86_64.orig/fs/ext4/namei.c
-+++ linux-2.6.32-504.3.3.el6.x86_64/fs/ext4/namei.c
-@@ -176,7 +176,7 @@ static struct dx_frame *dx_probe(const s
- struct inode *dir,
- struct dx_hash_info *hinfo,
- struct dx_frame *frame,
-- int *err);
-+ struct htree_lock *lck, int *err);
- static void dx_release(struct dx_frame *frames);
- static int dx_make_map(struct ext4_dir_entry_2 *de, unsigned blocksize,
- struct dx_hash_info *hinfo, struct dx_map_entry map[]);
-@@ -189,13 +189,13 @@ static void dx_insert_block(struct dx_fr
- static int ext4_htree_next_block(struct inode *dir, __u32 hash,
- struct dx_frame *frame,
- struct dx_frame *frames,
-- __u32 *start_hash);
-+ __u32 *start_hash, struct htree_lock *lck);
- static struct buffer_head * ext4_dx_find_entry(struct inode *dir,
- const struct qstr *d_name,
- struct ext4_dir_entry_2 **res_dir,
-- int *err);
-+ struct htree_lock *lck, int *err);
- static int ext4_dx_add_entry(handle_t *handle, struct dentry *dentry,
-- struct inode *inode);
-+ struct inode *inode, struct htree_lock *lck);
-
- /*
- * p is at least 6 bytes before the end of page
-@@ -368,6 +368,225 @@ struct stats dx_show_entries(struct dx_h
- }
- #endif /* DX_DEBUG */
-
-+/* private data for htree_lock */
-+struct ext4_dir_lock_data {
-+ unsigned ld_flags; /* bits-map for lock types */
-+ unsigned ld_count; /* # entries of the last DX block */
-+ struct dx_entry ld_at_entry; /* copy of leaf dx_entry */
-+ struct dx_entry *ld_at; /* position of leaf dx_entry */
-+};
-+
-+#define ext4_htree_lock_data(l) ((struct ext4_dir_lock_data *)(l)->lk_private)
-+#define ext4_find_entry(dir, name, dirent) __ext4_find_entry(dir, name, dirent, NULL)
-+#define ext4_add_entry(handle, dentry, inode) __ext4_add_entry(handle, dentry, inode, NULL)
-+
-+/* NB: ext4_lblk_t is 32 bits so we use high bits to identify invalid blk */
-+#define EXT4_HTREE_NODE_CHANGED (0xcafeULL << 32)
-+
-+static void ext4_htree_event_cb(void *target, void *event)
-+{
-+ u64 *block = (u64 *)target;
-+
-+ if (*block == dx_get_block((struct dx_entry *)event))
-+ *block = EXT4_HTREE_NODE_CHANGED;
-+}
-+
-+struct htree_lock_head *ext4_htree_lock_head_alloc(unsigned hbits)
-+{
-+ struct htree_lock_head *lhead;
-+
-+ lhead = htree_lock_head_alloc(EXT4_LK_MAX, hbits, 0);
-+ if (lhead != NULL) {
-+ htree_lock_event_attach(lhead, EXT4_LK_SPIN, HTREE_EVENT_WR,
-+ ext4_htree_event_cb);
-+ }
-+ return lhead;
-+}
-+EXPORT_SYMBOL(ext4_htree_lock_head_alloc);
-+
-+struct htree_lock *ext4_htree_lock_alloc(void)
-+{
-+ return htree_lock_alloc(EXT4_LK_MAX,
-+ sizeof(struct ext4_dir_lock_data));
-+}
-+EXPORT_SYMBOL(ext4_htree_lock_alloc);
-+
-+static htree_lock_mode_t ext4_htree_mode(unsigned flags)
-+{
-+ switch (flags) {
-+ default: /* 0 or unknown flags require EX lock */
-+ return HTREE_LOCK_EX;
-+ case EXT4_HLOCK_READDIR:
-+ return HTREE_LOCK_PR;
-+ case EXT4_HLOCK_LOOKUP:
-+ return HTREE_LOCK_CR;
-+ case EXT4_HLOCK_DEL:
-+ case EXT4_HLOCK_ADD:
-+ return HTREE_LOCK_CW;
-+ }
-+}
-+
-+/* return PR for read-only operations, otherwise return EX */
-+static inline htree_lock_mode_t ext4_htree_safe_mode(unsigned flags)
-+{
-+ int writer = (flags & EXT4_LB_DE) == EXT4_LB_DE;
-+
-+ /* 0 requires EX lock */
-+ return (flags == 0 || writer) ? HTREE_LOCK_EX : HTREE_LOCK_PR;
-+}
-+
-+static int ext4_htree_safe_locked(struct htree_lock *lck)
-+{
-+ int writer;
-+
-+ if (lck == NULL || lck->lk_mode == HTREE_LOCK_EX)
-+ return 1;
-+
-+ writer = (ext4_htree_lock_data(lck)->ld_flags & EXT4_LB_DE) ==
-+ EXT4_LB_DE;
-+ if (writer) /* all readers & writers are excluded? */
-+ return lck->lk_mode == HTREE_LOCK_EX;
-+
-+ /* all writers are excluded? */
-+ return lck->lk_mode == HTREE_LOCK_PR ||
-+ lck->lk_mode == HTREE_LOCK_PW ||
-+ lck->lk_mode == HTREE_LOCK_EX;
-+}
-+
-+/* relock htree_lock with EX mode if it's a change operation, otherwise
-+ * relock it with PR mode. It's a no-op if PDO is disabled. */
-+static void ext4_htree_safe_relock(struct htree_lock *lck)
-+{
-+ if (!ext4_htree_safe_locked(lck)) {
-+ unsigned flags = ext4_htree_lock_data(lck)->ld_flags;
-+
-+ htree_change_lock(lck, ext4_htree_safe_mode(flags));
-+ }
-+}
-+
-+void ext4_htree_lock(struct htree_lock *lck, struct htree_lock_head *lhead,
-+ struct inode *dir, unsigned flags)
-+{
-+ htree_lock_mode_t mode = is_dx(dir) ? ext4_htree_mode(flags) :
-+ ext4_htree_safe_mode(flags);
-+
-+ ext4_htree_lock_data(lck)->ld_flags = flags;
-+ htree_lock(lck, lhead, mode);
-+ if (!is_dx(dir))
-+ ext4_htree_safe_relock(lck); /* make sure it's safe locked */
-+}
-+EXPORT_SYMBOL(ext4_htree_lock);
-+
-+static int ext4_htree_node_lock(struct htree_lock *lck, struct dx_entry *at,
-+ unsigned lmask, int wait, void *ev)
-+{
-+ u32 key = (at == NULL) ? 0 : dx_get_block(at);
-+ u32 mode;
-+
-+ /* NOOP if htree is well protected or caller doesn't require the lock */
-+ if (ext4_htree_safe_locked(lck) ||
-+ !(ext4_htree_lock_data(lck)->ld_flags & lmask))
-+ return 1;
-+
-+ mode = (ext4_htree_lock_data(lck)->ld_flags & lmask) == lmask ?
-+ HTREE_LOCK_PW : HTREE_LOCK_PR;
-+ while (1) {
-+ if (htree_node_lock_try(lck, mode, key, ffz(~lmask), wait, ev))
-+ return 1;
-+ if (!(lmask & EXT4_LB_SPIN)) /* not a spinlock */
-+ return 0;
-+ cpu_relax(); /* spin until granted */
-+ }
-+}
-+
-+static int ext4_htree_node_locked(struct htree_lock *lck, unsigned lmask)
-+{
-+ return ext4_htree_safe_locked(lck) ||
-+ htree_node_is_granted(lck, ffz(~lmask));
-+}
-+
-+static void ext4_htree_node_unlock(struct htree_lock *lck,
-+ unsigned lmask, void *buf)
-+{
-+ /* NB: it's safe to call multiple times, even if it's not locked */
-+ if (!ext4_htree_safe_locked(lck) &&
-+ htree_node_is_granted(lck, ffz(~lmask)))
-+ htree_node_unlock(lck, ffz(~lmask), buf);
-+}
-+
-+#define ext4_htree_dx_lock(lck, key) \
-+ ext4_htree_node_lock(lck, key, EXT4_LB_DX, 1, NULL)
-+#define ext4_htree_dx_lock_try(lck, key) \
-+ ext4_htree_node_lock(lck, key, EXT4_LB_DX, 0, NULL)
-+#define ext4_htree_dx_unlock(lck) \
-+ ext4_htree_node_unlock(lck, EXT4_LB_DX, NULL)
-+#define ext4_htree_dx_locked(lck) \
-+ ext4_htree_node_locked(lck, EXT4_LB_DX)
-+
-+static void ext4_htree_dx_need_lock(struct htree_lock *lck)
-+{
-+ struct ext4_dir_lock_data *ld;
-+
-+ if (ext4_htree_safe_locked(lck))
-+ return;
-+
-+ ld = ext4_htree_lock_data(lck);
-+ switch (ld->ld_flags) {
-+ default:
-+ return;
-+ case EXT4_HLOCK_LOOKUP:
-+ ld->ld_flags = EXT4_HLOCK_LOOKUP_SAFE;
-+ return;
-+ case EXT4_HLOCK_DEL:
-+ ld->ld_flags = EXT4_HLOCK_DEL_SAFE;
-+ return;
-+ case EXT4_HLOCK_ADD:
-+ ld->ld_flags = EXT4_HLOCK_SPLIT;
-+ return;
-+ }
-+}
-+
-+#define ext4_htree_de_lock(lck, key) \
-+ ext4_htree_node_lock(lck, key, EXT4_LB_DE, 1, NULL)
-+#define ext4_htree_de_unlock(lck) \
-+ ext4_htree_node_unlock(lck, EXT4_LB_DE, NULL)
-+
-+#define ext4_htree_spin_lock(lck, key, event) \
-+ ext4_htree_node_lock(lck, key, EXT4_LB_SPIN, 0, event)
-+#define ext4_htree_spin_unlock(lck) \
-+ ext4_htree_node_unlock(lck, EXT4_LB_SPIN, NULL)
-+#define ext4_htree_spin_unlock_listen(lck, p) \
-+ ext4_htree_node_unlock(lck, EXT4_LB_SPIN, p)
-+
-+static void ext4_htree_spin_stop_listen(struct htree_lock *lck)
-+{
-+ if (!ext4_htree_safe_locked(lck) &&
-+ htree_node_is_listening(lck, ffz(~EXT4_LB_SPIN)))
-+ htree_node_stop_listen(lck, ffz(~EXT4_LB_SPIN));
-+}
-+
-+enum {
-+ DX_HASH_COL_IGNORE, /* ignore collision while probing frames */
-+ DX_HASH_COL_YES, /* there is collision and it does matter */
-+ DX_HASH_COL_NO, /* there is no collision */
-+};
-+
-+static int dx_probe_hash_collision(struct htree_lock *lck,
-+ struct dx_entry *entries,
-+ struct dx_entry *at, u32 hash)
-+{
-+ if (!(lck && ext4_htree_lock_data(lck)->ld_flags & EXT4_LB_EXACT)) {
-+ return DX_HASH_COL_IGNORE; /* don't care about collision */
-+
-+ } else if (at == entries + dx_get_count(entries) - 1) {
-+ return DX_HASH_COL_IGNORE; /* not in any leaf of this DX */
-+
-+ } else { /* hash collision? */
-+ return ((dx_get_hash(at + 1) & ~1) == hash) ?
-+ DX_HASH_COL_YES : DX_HASH_COL_NO;
-+ }
-+}
-+
- /*
- * Probe for a directory leaf block to search.
- *
-@@ -379,10 +598,11 @@ struct stats dx_show_entries(struct dx_h
- */
- static struct dx_frame *
- dx_probe(const struct qstr *d_name, struct inode *dir,
-- struct dx_hash_info *hinfo, struct dx_frame *frame_in, int *err)
-+ struct dx_hash_info *hinfo, struct dx_frame *frame_in,
-+ struct htree_lock *lck, int *err)
- {
- unsigned count, indirect;
-- struct dx_entry *at, *entries, *p, *q, *m;
-+ struct dx_entry *at, *entries, *p, *q, *m, *dx = NULL;
- struct dx_root_info * info;
- struct buffer_head *bh;
- struct dx_frame *frame = frame_in;
-@@ -447,8 +667,15 @@ dx_probe(const struct qstr *d_name, stru
- dxtrace(printk("Look up %x", hash));
- while (1)
- {
-+ if (indirect == 0) { /* the last index level */
-+ /* NB: ext4_htree_dx_lock() could be noop if
-+ * DX-lock flag is not set for current operation */
-+ ext4_htree_dx_lock(lck, dx);
-+ ext4_htree_spin_lock(lck, dx, NULL);
-+ }
- count = dx_get_count(entries);
-- if (!count || count > dx_get_limit(entries)) {
-+ if (count == 0 || count > dx_get_limit(entries)) {
-+ ext4_htree_spin_unlock(lck); /* release spin */
- ext4_warning(dir->i_sb,
- "dx entry: no count or count > limit");
- brelse(bh);
-@@ -489,9 +716,73 @@ dx_probe(const struct qstr *d_name, stru
- frame->bh = bh;
- frame->entries = entries;
- frame->at = at;
-- if (!indirect--) return frame;
-+
-+ if (indirect == 0) { /* the last index level */
-+ struct ext4_dir_lock_data *ld;
-+ u64 myblock;
-+
-+ /* By default we only lock DE-block, however, we will
-+ * also lock the last level DX-block if:
-+ * a) there is hash collision
-+ * we will set DX-lock flag (a few lines below)
-+ * and retry to lock the DX-block
-+ * see detail in dx_probe_hash_collision()
-+ * b) it's a retry from splitting
-+ * we need to lock the last level DX-block so nobody
-+ * else can split any leaf blocks under the same
-+ * DX-block, see detail in ext4_dx_add_entry()
-+ */
-+ if (ext4_htree_dx_locked(lck)) {
-+ /* DX-block is locked, just lock DE-block
-+ * and return */
-+ ext4_htree_spin_unlock(lck);
-+ if (!ext4_htree_safe_locked(lck))
-+ ext4_htree_de_lock(lck, frame->at);
-+ return frame;
-+ }
-+ /* it's pdirop and no DX lock */
-+ if (dx_probe_hash_collision(lck, entries, at, hash) ==
-+ DX_HASH_COL_YES) {
-+ /* found hash collision, set DX-lock flag
-+ * and retry to obtain DX-lock */
-+ ext4_htree_spin_unlock(lck);
-+ ext4_htree_dx_need_lock(lck);
-+ continue;
-+ }
-+ ld = ext4_htree_lock_data(lck);
-+ /* because I don't hold the DX lock, @at can't be trusted
-+ * after I release the spinlock, so I have to save it */
-+ ld->ld_at = at;
-+ ld->ld_at_entry = *at;
-+ ld->ld_count = dx_get_count(entries);
-+
-+ frame->at = &ld->ld_at_entry;
-+ myblock = dx_get_block(at);
-+
-+ /* NB: lock ordering */
-+ ext4_htree_spin_unlock_listen(lck, &myblock);
-+ /* other thread can split this DE-block because:
-+ * a) I don't have lock for the DE-block yet
-+ * b) I released spinlock on DX-block
-+ * if that happens I can detect it by listening for the
-+ * splitting event on this DE-block */
-+ ext4_htree_de_lock(lck, frame->at);
-+ ext4_htree_spin_stop_listen(lck);
-+
-+ if (myblock == EXT4_HTREE_NODE_CHANGED) {
-+ /* someone split this DE-block before
-+ * I locked it, I need to retry and lock
-+ * valid DE-block */
-+ ext4_htree_de_unlock(lck);
-+ continue;
-+ }
-+ return frame;
-+ }
-+ dx = at;
-+ indirect--;
- if (!(bh = ext4_bread (NULL,dir, dx_get_block(at), 0, err)))
- goto fail2;
-+
- at = entries = ((struct dx_node *) bh->b_data)->entries;
- if (dx_get_limit(entries) != dx_node_limit (dir)) {
- ext4_warning(dir->i_sb,
-@@ -553,7 +844,7 @@ static void dx_release (struct dx_frame
- static int ext4_htree_next_block(struct inode *dir, __u32 hash,
- struct dx_frame *frame,
- struct dx_frame *frames,
-- __u32 *start_hash)
-+ __u32 *start_hash, struct htree_lock *lck)
- {
- struct dx_frame *p;
- struct buffer_head *bh;
-@@ -568,12 +859,22 @@ static int ext4_htree_next_block(struct
- * this loop, num_frames indicates the number of interior
- * nodes need to be read.
- */
-+ ext4_htree_de_unlock(lck);
- while (1) {
-- if (++(p->at) < p->entries + dx_get_count(p->entries))
-- break;
-+ if (num_frames > 0 || ext4_htree_dx_locked(lck)) {
-+ /* num_frames > 0 :
-+ * DX block
-+ * ext4_htree_dx_locked:
-+ * frame->at is reliable pointer returned by dx_probe,
-+ * otherwise dx_probe already knew no collision */
-+ if (++(p->at) < p->entries + dx_get_count(p->entries))
-+ break;
-+ }
- if (p == frames)
- return 0;
- num_frames++;
-+ if (num_frames == 1)
-+ ext4_htree_dx_unlock(lck);
- p--;
- }
-
-@@ -596,6 +897,13 @@ static int ext4_htree_next_block(struct
- * block so no check is necessary
- */
- while (num_frames--) {
-+ if (num_frames == 0) {
-+ /* it's not always necessary, we just don't want to
-+ * detect hash collision again */
-+ ext4_htree_dx_need_lock(lck);
-+ ext4_htree_dx_lock(lck, p->at);
-+ }
-+
- if (!(bh = ext4_bread(NULL, dir, dx_get_block(p->at),
- 0, &err)))
- return err; /* Failure */
-@@ -604,6 +912,7 @@ static int ext4_htree_next_block(struct
- p->bh = bh;
- p->at = p->entries = ((struct dx_node *) bh->b_data)->entries;
- }
-+ ext4_htree_de_lock(lck, p->at);
- return 1;
- }
-
-@@ -696,10 +1005,10 @@ int ext4_htree_fill_tree(struct file *di
- }
- hinfo.hash = start_hash;
- hinfo.minor_hash = 0;
-- frame = dx_probe(NULL, dir, &hinfo, frames, &err);
-+ /* assume it's PR locked */
-+ frame = dx_probe(NULL, dir, &hinfo, frames, NULL, &err);
- if (!frame)
- return err;
--
- /* Add '.' and '..' from the htree header */
- if (!start_hash && !start_minor_hash) {
- de = (struct ext4_dir_entry_2 *) frames[0].bh->b_data;
-@@ -726,7 +1035,7 @@ int ext4_htree_fill_tree(struct file *di
- count += ret;
- hashval = ~0;
- ret = ext4_htree_next_block(dir, HASH_NB_ALWAYS,
-- frame, frames, &hashval);
-+ frame, frames, &hashval, NULL);
- *next_hash = hashval;
- if (ret < 0) {
- err = ret;
-@@ -826,9 +1135,17 @@ static void dx_insert_block(struct dx_fr
-
- static void ext4_update_dx_flag(struct inode *inode)
- {
-+ /* Disable it for ldiskfs, because going from a DX directory to
-+ * a non-DX directory while it is in use will completely break
-+ * the htree-locking.
-+ * If we really want to support this operation in the future,
-+ * we need to exclusively lock the directory here, which will
-+ * increase the complexity of the code */
-+#if 0
- if (!EXT4_HAS_COMPAT_FEATURE(inode->i_sb,
- EXT4_FEATURE_COMPAT_DIR_INDEX))
- ext4_clear_inode_flag(inode, EXT4_INODE_INDEX);
-+#endif
- }
-
- /*
-@@ -900,9 +1217,10 @@ static inline int search_dirblock(struct
- * The returned buffer_head has ->b_count elevated. The caller is expected
- * to brelse() it when appropriate.
- */
--static struct buffer_head * ext4_find_entry (struct inode *dir,
-+struct buffer_head * __ext4_find_entry(struct inode *dir,
- const struct qstr *d_name,
-- struct ext4_dir_entry_2 ** res_dir)
-+ struct ext4_dir_entry_2 **res_dir,
-+ struct htree_lock *lck)
- {
- struct super_block *sb;
- struct buffer_head *bh_use[NAMEI_RA_SIZE];
-@@ -923,7 +1241,7 @@ static struct buffer_head * ext4_find_en
- if (namelen > EXT4_NAME_LEN)
- return NULL;
- if (is_dx(dir)) {
-- bh = ext4_dx_find_entry(dir, d_name, res_dir, &err);
-+ bh = ext4_dx_find_entry(dir, d_name, res_dir, lck, &err);
- /*
- * On success, or if the error was file not found,
- * return. Otherwise, fall back to doing a search the
-@@ -933,6 +1251,7 @@ static struct buffer_head * ext4_find_en
- return bh;
- dxtrace(printk(KERN_DEBUG "ext4_find_entry: dx failed, "
- "falling back\n"));
-+ ext4_htree_safe_relock(lck);
- }
- nblocks = dir->i_size >> EXT4_BLOCK_SIZE_BITS(sb);
- start = EXT4_I(dir)->i_dir_start_lookup;
-@@ -1008,9 +1327,12 @@ cleanup_and_exit:
- brelse(bh_use[ra_ptr]);
- return ret;
- }
-+EXPORT_SYMBOL(__ext4_find_entry);
-
--static struct buffer_head * ext4_dx_find_entry(struct inode *dir, const struct qstr *d_name,
-- struct ext4_dir_entry_2 **res_dir, int *err)
-+static struct buffer_head * ext4_dx_find_entry(struct inode *dir,
-+ const struct qstr *d_name,
-+ struct ext4_dir_entry_2 **res_dir,
-+ struct htree_lock *lck, int *err)
- {
- struct super_block * sb;
- struct dx_hash_info hinfo;
-@@ -1026,13 +1348,16 @@ static struct buffer_head * ext4_dx_find
- sb = dir->i_sb;
- /* NFS may look up ".." - look at dx_root directory block */
- if (namelen > 2 || name[0] != '.'||(name[1] != '.' && name[1] != '\0')){
-- if (!(frame = dx_probe(d_name, dir, &hinfo, frames, err)))
-+ if (!(frame = dx_probe(d_name, dir, &hinfo, frames, lck, err)))
- return NULL;
- } else {
- frame = frames;
- frame->bh = NULL; /* for dx_release() */
- frame->at = (struct dx_entry *)frames; /* hack for zero entry*/
- dx_set_block(frame->at, 0); /* dx_root block is 0 */
-+ /* "." and ".." are stored in root DX lock */
-+ ext4_htree_dx_need_lock(lck);
-+ ext4_htree_dx_lock(lck, NULL);
- }
- hash = hinfo.hash;
- do {
-@@ -1061,7 +1386,7 @@ static struct buffer_head * ext4_dx_find
- brelse(bh);
- /* Check to see if we should continue to search */
- retval = ext4_htree_next_block(dir, hash, frame,
-- frames, NULL);
-+ frames, NULL, lck);
- if (retval < 0) {
- ext4_warning(sb,
- "error reading index page in directory #%lu",
-@@ -1244,8 +1569,9 @@ static struct ext4_dir_entry_2* dx_pack_
- * Returns pointer to de in block into which the new entry will be inserted.
- */
- static struct ext4_dir_entry_2 *do_split(handle_t *handle, struct inode *dir,
-- struct buffer_head **bh,struct dx_frame *frame,
-- struct dx_hash_info *hinfo, int *error)
-+ struct buffer_head **bh, struct dx_frame *frames,
-+ struct dx_frame *frame, struct dx_hash_info *hinfo,
-+ struct htree_lock *lck, int *error)
- {
- unsigned blocksize = dir->i_sb->s_blocksize;
- unsigned count, continued;
-@@ -1302,7 +1628,14 @@ static struct ext4_dir_entry_2 *do_split
- hash2, split, count-split));
-
- /* Fancy dance to stay within two buffers */
-- de2 = dx_move_dirents(data1, data2, map + split, count - split, blocksize);
-+ if (hinfo->hash < hash2) {
-+ de2 = dx_move_dirents(data1, data2, map + split,
-+ count - split, blocksize);
-+ } else {
-+		/* make sure we will add the entry to the same block
-+		 * that we have already locked */
-+ de2 = dx_move_dirents(data1, data2, map, split, blocksize);
-+ }
- de = dx_pack_dirents(data1, blocksize);
- de->rec_len = ext4_rec_len_to_disk(data1 + blocksize - (char *) de,
- blocksize);
-@@ -1311,13 +1644,21 @@ static struct ext4_dir_entry_2 *do_split
- dxtrace(dx_show_leaf (hinfo, (struct ext4_dir_entry_2 *) data1, blocksize, 1));
- dxtrace(dx_show_leaf (hinfo, (struct ext4_dir_entry_2 *) data2, blocksize, 1));
-
-- /* Which block gets the new entry? */
-- if (hinfo->hash >= hash2)
-- {
-- swap(*bh, bh2);
-- de = de2;
-+ ext4_htree_spin_lock(lck, frame > frames ? (frame - 1)->at : NULL,
-+ frame->at); /* notify block is being split */
-+ if (hinfo->hash < hash2) {
-+ dx_insert_block(frame, hash2 + continued, newblock);
-+
-+ } else {
-+ /* switch block number */
-+ dx_insert_block(frame, hash2 + continued,
-+ dx_get_block(frame->at));
-+ dx_set_block(frame->at, newblock);
-+ (frame->at)++;
- }
-- dx_insert_block(frame, hash2 + continued, newblock);
-+ ext4_htree_spin_unlock(lck);
-+ ext4_htree_dx_unlock(lck);
-+
- err = ext4_handle_dirty_metadata(handle, dir, bh2);
- if (err)
- goto journal_error;
-@@ -1558,8 +1899,8 @@ static int make_indexed_dir(handle_t *ha
- retval = ext4_handle_dirty_metadata(handle, dir, bh2);
- if (retval)
- goto out_frames;
-
-- de = do_split(handle,dir, &bh2, frame, &hinfo, &retval);
-+ de = do_split(handle,dir, &bh2, frames, frame, &hinfo, NULL, &retval);
- if (!de)
- goto out_frames;
-
-@@ -1664,8 +2005,8 @@ out:
- * may not sleep between calling this and putting something into
- * the entry, as someone else might have used it while you slept.
- */
--static int ext4_add_entry(handle_t *handle, struct dentry *dentry,
-- struct inode *inode)
-+int __ext4_add_entry(handle_t *handle, struct dentry *dentry,
-+ struct inode *inode, struct htree_lock *lck)
- {
- struct inode *dir = dentry->d_parent->d_inode;
- struct buffer_head *bh;
-@@ -1684,9 +2025,10 @@ static int ext4_add_entry(handle_t *hand
- if (dentry->d_name.len == 2 &&
- memcmp(dentry->d_name.name, "..", 2) == 0)
- return ext4_update_dotdot(handle, dentry, inode);
-- retval = ext4_dx_add_entry(handle, dentry, inode);
-+ retval = ext4_dx_add_entry(handle, dentry, inode, lck);
- if (!retval || (retval != ERR_BAD_DX_DIR))
- return retval;
-+ ext4_htree_safe_relock(lck);
- ext4_clear_inode_flag(dir, EXT4_INODE_INDEX);
- dx_fallback++;
- ext4_mark_inode_dirty(handle, dir);
-@@ -1717,12 +2059,13 @@ static int ext4_add_entry(handle_t *hand
- brelse(bh);
- return retval;
- }
-+EXPORT_SYMBOL(__ext4_add_entry);
-
- /*
- * Returns 0 for success, or a negative error value
- */
- static int ext4_dx_add_entry(handle_t *handle, struct dentry *dentry,
-- struct inode *inode)
-+ struct inode *inode, struct htree_lock *lck)
- {
- struct dx_frame frames[EXT4_HTREE_LEVEL], *frame;
- struct dx_entry *entries, *at;
-@@ -1736,7 +2079,7 @@ static int ext4_dx_add_entry(handle_t *h
-
- again:
- restart = 0;
-- frame = dx_probe(&dentry->d_name, dir, &hinfo, frames, &err);
-+ frame = dx_probe(&dentry->d_name, dir, &hinfo, frames, lck, &err);
- if (!frame)
- return err;
- entries = frame->entries;
-@@ -1763,6 +2106,11 @@ again:
- struct dx_node *node2;
- struct buffer_head *bh2;
-
-+ if (!ext4_htree_safe_locked(lck)) { /* retry with EX lock */
-+ ext4_htree_safe_relock(lck);
-+ restart = 1;
-+ goto cleanup;
-+ }
- while (frame > frames) {
- if (dx_get_count((frame - 1)->entries) <
- dx_get_limit((frame - 1)->entries)) {
-@@ -1860,16 +2208,43 @@ again:
- restart = 1;
- goto cleanup;
- }
-+ } else if (!ext4_htree_dx_locked(lck)) {
-+ struct ext4_dir_lock_data *ld = ext4_htree_lock_data(lck);
-+
-+ /* not well protected, require DX lock */
-+ ext4_htree_dx_need_lock(lck);
-+ at = frame > frames ? (frame - 1)->at : NULL;
-+
-+ /* NB: no risk of deadlock because it's just a try.
-+ *
-+		 * NB: we check ld_count twice, the first time before
-+		 * taking the DX lock, the second time after holding it.
-+ *
-+		 * NB: we never free directory blocks so far, which means
-+		 * the value returned by dx_get_count() should equal
-+		 * ld->ld_count if nobody split any DE-block under @at,
-+		 * and ld->ld_at still points to a valid dx_entry. */
-+ if ((ld->ld_count != dx_get_count(entries)) ||
-+ !ext4_htree_dx_lock_try(lck, at) ||
-+ (ld->ld_count != dx_get_count(entries))) {
-+ restart = 1;
-+ goto cleanup;
-+ }
-+ /* OK, I've got DX lock and nothing changed */
-+ frame->at = ld->ld_at;
- }
-- de = do_split(handle, dir, &bh, frame, &hinfo, &err);
-+ de = do_split(handle, dir, &bh, frames, frame, &hinfo, lck, &err);
- if (!de)
- goto cleanup;
-+
- err = add_dirent_to_buf(handle, dentry, inode, de, bh);
- goto cleanup;
-
- journal_error:
- ext4_std_error(dir->i_sb, err);
- cleanup:
-+ ext4_htree_dx_unlock(lck);
-+ ext4_htree_de_unlock(lck);
- if (bh)
- brelse(bh);
- dx_release(frames);
-Index: linux-2.6.32-504.3.3.el6.x86_64/fs/ext4/Makefile
-===================================================================
---- linux-2.6.32-504.3.3.el6.x86_64.orig/fs/ext4/Makefile
-+++ linux-2.6.32-504.3.3.el6.x86_64/fs/ext4/Makefile
-@@ -6,6 +6,7 @@ obj-$(CONFIG_EXT4_FS) += ext4.o
-
- ext4-y := balloc.o bitmap.o dir.o file.o fsync.o ialloc.o inode.o \
- ioctl.o namei.o super.o symlink.o hash.o resize.o extents.o \
-+ htree_lock.o \
- ext4_jbd2.o migrate.o mballoc.o block_validity.o move_extent.o \
- mmp.o
-
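
The patch above wires an htree_lock into the ext4 directory code so lookups and inserts can proceed in parallel, falling back to an exclusive lock whenever the index tree itself has to change (do_split, DX growth, DX-to-non-DX conversion). Below is a minimal standalone sketch of that try-shared-then-retry-exclusive pattern; a pthread rwlock stands in for the per-directory htree lock, and all names are illustrative rather than the ldiskfs API.

#include <pthread.h>
#include <stdbool.h>

static pthread_rwlock_t dir_lock = PTHREAD_RWLOCK_INITIALIZER;

static bool leaf_has_room(void)
{
        return true;            /* placeholder: "no split needed" */
}

static int insert_locked(void)
{
        return 0;               /* placeholder for the actual insert */
}

int add_entry(void)
{
        int rc;

        /* Fast path: a shared lock is enough while only one leaf changes. */
        pthread_rwlock_rdlock(&dir_lock);
        if (leaf_has_room()) {
                rc = insert_locked();
                pthread_rwlock_unlock(&dir_lock);
                return rc;
        }
        pthread_rwlock_unlock(&dir_lock);

        /*
         * Slow path: the index tree must be restructured, so retry the
         * whole operation under an exclusive lock, the same idea as
         * ext4_dx_add_entry() setting restart = 1 and relocking.
         */
        pthread_rwlock_wrlock(&dir_lock);
        rc = insert_locked();
        pthread_rwlock_unlock(&dir_lock);
        return rc;
}
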
+++ /dev/null
-Index: linux-stage/fs/ext4/ext4.h
-===================================================================
---- linux-stage.orig/fs/ext4/ext4.h
-+++ linux-stage/fs/ext4/ext4.h
-@@ -1136,11 +1136,14 @@ struct ext4_sb_info {
-
- /* tunables */
- unsigned long s_stripe;
-- unsigned int s_mb_stream_request;
-+ unsigned long s_mb_small_req;
-+ unsigned long s_mb_large_req;
- unsigned int s_mb_max_to_scan;
- unsigned int s_mb_min_to_scan;
- unsigned int s_mb_stats;
- unsigned int s_mb_order2_reqs;
-+ unsigned long *s_mb_prealloc_table;
-+ unsigned long s_mb_prealloc_table_size;
- unsigned int s_mb_group_prealloc;
- unsigned int s_max_writeback_mb_bump;
- /* where last allocation was done - for stream allocation */
-Index: linux-stage/fs/ext4/mballoc.c
-===================================================================
---- linux-stage.orig/fs/ext4/mballoc.c
-+++ linux-stage/fs/ext4/mballoc.c
-@@ -1838,6 +1838,26 @@ void ext4_mb_complex_scan_group(struct e
- ext4_mb_check_limits(ac, e4b, 1);
- }
-
-+static int ext4_mb_prealloc_table_add(struct ext4_sb_info *sbi, int value)
-+{
-+ int i;
-+
-+ if (value > (sbi->s_blocks_per_group - 1 - 1 - sbi->s_itb_per_group))
-+ return -1;
-+
-+ for (i = 0; i < sbi->s_mb_prealloc_table_size; i++) {
-+ if (sbi->s_mb_prealloc_table[i] == 0) {
-+ sbi->s_mb_prealloc_table[i] = value;
-+ return 0;
-+ }
-+
-+		/* values must be added in ascending order */
-+ if (value <= sbi->s_mb_prealloc_table[i])
-+ return -1;
-+ }
-+ return -1;
-+}
-+
- /*
- * This is a special case for storages like raid5
- * we try to find stripe-aligned chunks for stripe-size requests
-@@ -2155,6 +2175,82 @@ static const struct seq_operations ext4_
- .show = ext4_mb_seq_groups_show,
- };
-
-+#define EXT4_MB_PREALLOC_TABLE "prealloc_table"
-+
-+static int ext4_mb_prealloc_table_proc_read(char *page, char **start, off_t off,
-+ int count, int *eof, void *data)
-+{
-+ struct ext4_sb_info *sbi = data;
-+ int len = 0;
-+ int i;
-+
-+ *eof = 1;
-+ if (off != 0)
-+ return 0;
-+
-+ for (i = 0; i < sbi->s_mb_prealloc_table_size; i++)
-+ len += sprintf(page + len, "%ld ",
-+ sbi->s_mb_prealloc_table[i]);
-+ len += sprintf(page + len, "\n");
-+
-+ *start = page;
-+ return len;
-+}
-+
-+static int ext4_mb_prealloc_table_proc_write(struct file *file,
-+ const char __user *buf,
-+ unsigned long cnt, void *data)
-+{
-+ struct ext4_sb_info *sbi = data;
-+ unsigned long value;
-+ unsigned long prev = 0;
-+ char str[128];
-+ char *cur;
-+ char *end;
-+ unsigned long *new_table;
-+ int num = 0;
-+ int i = 0;
-+
-+ if (cnt >= sizeof(str))
-+ return -EINVAL;
-+ if (copy_from_user(str, buf, cnt))
-+ return -EFAULT;
-+
-+ num = 0;
-+ cur = str;
-+ end = str + cnt;
-+ while (cur < end) {
-+ while ((cur < end) && (*cur == ' ')) cur++;
-+ value = simple_strtol(cur, &cur, 0);
-+ if (value == 0)
-+ break;
-+ if (value <= prev)
-+ return -EINVAL;
-+ prev = value;
-+ num++;
-+ }
-+
-+ new_table = kmalloc(num * sizeof(*new_table), GFP_KERNEL);
-+ if (new_table == NULL)
-+ return -ENOMEM;
-+ kfree(sbi->s_mb_prealloc_table);
-+ memset(new_table, 0, num * sizeof(*new_table));
-+ sbi->s_mb_prealloc_table = new_table;
-+ sbi->s_mb_prealloc_table_size = num;
-+ cur = str;
-+ end = str + cnt;
-+ while (cur < end && i < num) {
-+ while ((cur < end) && (*cur == ' ')) cur++;
-+ value = simple_strtol(cur, &cur, 0);
-+ if (ext4_mb_prealloc_table_add(sbi, value) == 0)
-+ ++i;
-+ }
-+ if (i != num)
-+ sbi->s_mb_prealloc_table_size = i;
-+
-+ return cnt;
-+}
-+
- static int ext4_mb_seq_groups_open(struct inode *inode, struct file *file)
- {
- struct super_block *sb = PDE(inode)->data;
-@@ -2346,7 +2442,7 @@ err_freesgi:
- int ext4_mb_init(struct super_block *sb, int needs_recovery)
- {
- struct ext4_sb_info *sbi = EXT4_SB(sb);
-- unsigned i, j;
-+ unsigned i, j, k, l;
- unsigned offset, offset_incr;
- unsigned max;
- int ret;
-@@ -2380,26 +2476,61 @@ int ext4_mb_init(struct super_block *sb,
- i++;
- } while (i <= sb->s_blocksize_bits + 1);
-
-- /* init file for buddy data */
-- ret = ext4_mb_init_backend(sb);
-- if (ret != 0) {
-- kfree(sbi->s_mb_offsets);
-- kfree(sbi->s_mb_maxs);
-- return ret;
-- }
--
- spin_lock_init(&sbi->s_md_lock);
- spin_lock_init(&sbi->s_bal_lock);
-
- sbi->s_mb_max_to_scan = MB_DEFAULT_MAX_TO_SCAN;
- sbi->s_mb_min_to_scan = MB_DEFAULT_MIN_TO_SCAN;
- sbi->s_mb_stats = MB_DEFAULT_STATS;
-- sbi->s_mb_stream_request = MB_DEFAULT_STREAM_THRESHOLD;
- sbi->s_mb_order2_reqs = MB_DEFAULT_ORDER2_REQS;
-- sbi->s_mb_group_prealloc = MB_DEFAULT_GROUP_PREALLOC;
-+
-+ if (sbi->s_stripe == 0) {
-+ sbi->s_mb_prealloc_table_size = 10;
-+ i = sbi->s_mb_prealloc_table_size * sizeof(unsigned long);
-+ sbi->s_mb_prealloc_table = kmalloc(i, GFP_NOFS);
-+ if (sbi->s_mb_prealloc_table == NULL) {
-+ kfree(sbi->s_mb_offsets);
-+ kfree(sbi->s_mb_maxs);
-+ return -ENOMEM;
-+ }
-+ memset(sbi->s_mb_prealloc_table, 0, i);
-+
-+ for (k = 0, l = 4; k <= 9; ++k, l *= 2) {
-+ if (ext4_mb_prealloc_table_add(sbi, l) < 0) {
-+ sbi->s_mb_prealloc_table_size = k;
-+ break;
-+ }
-+ }
-+
-+ sbi->s_mb_small_req = 256;
-+ sbi->s_mb_large_req = 1024;
-+ sbi->s_mb_group_prealloc = 512;
-+ } else {
-+ sbi->s_mb_prealloc_table_size = 3;
-+ i = sbi->s_mb_prealloc_table_size * sizeof(unsigned long);
-+ sbi->s_mb_prealloc_table = kmalloc(i, GFP_NOFS);
-+ if (sbi->s_mb_prealloc_table == NULL) {
-+ kfree(sbi->s_mb_offsets);
-+ kfree(sbi->s_mb_maxs);
-+ return -ENOMEM;
-+ }
-+ memset(sbi->s_mb_prealloc_table, 0, i);
-+
-+ for (k = 0, l = sbi->s_stripe; k <= 2; ++k, l *= 2) {
-+ if (ext4_mb_prealloc_table_add(sbi, l) < 0) {
-+ sbi->s_mb_prealloc_table_size = k;
-+ break;
-+ }
-+ }
-+
-+ sbi->s_mb_small_req = sbi->s_stripe;
-+ sbi->s_mb_large_req = sbi->s_stripe * 8;
-+ sbi->s_mb_group_prealloc = sbi->s_stripe * 4;
-+ }
-
- sbi->s_locality_groups = alloc_percpu(struct ext4_locality_group);
- if (sbi->s_locality_groups == NULL) {
-+ kfree(sbi->s_mb_prealloc_table);
- kfree(sbi->s_mb_offsets);
- kfree(sbi->s_mb_maxs);
- return -ENOMEM;
-@@ -2413,9 +2544,27 @@ int ext4_mb_init(struct super_block *sb,
- spin_lock_init(&lg->lg_prealloc_lock);
- }
-
-- if (sbi->s_proc)
-+ /* init file for buddy data */
-+ ret = ext4_mb_init_backend(sb);
-+ if (ret != 0) {
-+ kfree(sbi->s_mb_prealloc_table);
-+ kfree(sbi->s_mb_offsets);
-+ kfree(sbi->s_mb_maxs);
-+ return ret;
-+ }
-+
-+ if (sbi->s_proc) {
-+ struct proc_dir_entry *p;
- proc_create_data("mb_groups", S_IRUGO, sbi->s_proc,
- &ext4_mb_seq_groups_fops, sb);
-+ p = create_proc_entry(EXT4_MB_PREALLOC_TABLE, S_IFREG |
-+ S_IRUGO | S_IWUSR, sbi->s_proc);
-+ if (p) {
-+ p->data = sbi;
-+ p->read_proc = ext4_mb_prealloc_table_proc_read;
-+ p->write_proc = ext4_mb_prealloc_table_proc_write;
-+ }
-+ }
-
- if (sbi->s_journal)
- sbi->s_journal->j_commit_callback = release_blocks_on_commit;
-@@ -2448,8 +2597,10 @@ int ext4_mb_release(struct super_block *
- struct ext4_group_info *grinfo;
- struct ext4_sb_info *sbi = EXT4_SB(sb);
-
-- if (sbi->s_proc)
-+ if (sbi->s_proc) {
- remove_proc_entry("mb_groups", sbi->s_proc);
-+ remove_proc_entry(EXT4_MB_PREALLOC_TABLE, sbi->s_proc);
-+ }
-
- if (sbi->s_group_info) {
- for (i = 0; i < ngroups; i++) {
-@@ -2469,6 +2620,7 @@ int ext4_mb_release(struct super_block *
- kfree(sbi->s_group_info[i]);
- ext4_kvfree(sbi->s_group_info);
- }
-+ kfree(sbi->s_mb_prealloc_table);
- kfree(sbi->s_mb_offsets);
- kfree(sbi->s_mb_maxs);
- if (sbi->s_buddy_cache)
-@@ -2798,11 +2950,12 @@ static noinline_for_stack void
- ext4_mb_normalize_request(struct ext4_allocation_context *ac,
- struct ext4_allocation_request *ar)
- {
-- int bsbits, max;
-+ int bsbits, i, wind;
- ext4_lblk_t end;
-- loff_t size, orig_size, start_off;
-+ loff_t size, orig_size;
- ext4_lblk_t start, orig_start;
- struct ext4_inode_info *ei = EXT4_I(ac->ac_inode);
-+ struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb);
- struct ext4_prealloc_space *pa;
-
- /* do normalize only data requests, metadata requests
-@@ -2832,49 +2985,35 @@ ext4_mb_normalize_request(struct ext4_al
- size = size << bsbits;
- if (size < i_size_read(ac->ac_inode))
- size = i_size_read(ac->ac_inode);
-+ size = (size + ac->ac_sb->s_blocksize - 1) >> bsbits;
-
-- /* max size of free chunks */
-- max = 2 << bsbits;
-+ start = wind = 0;
-
--#define NRL_CHECK_SIZE(req, size, max, chunk_size) \
-- (req <= (size) || max <= (chunk_size))
-+ /* let's choose preallocation window depending on file size */
-+ for (i = 0; i < sbi->s_mb_prealloc_table_size; i++) {
-+ if (size <= sbi->s_mb_prealloc_table[i]) {
-+ wind = sbi->s_mb_prealloc_table[i];
-+ break;
-+ }
-+ }
-+ size = wind;
-
-- /* first, try to predict filesize */
-- /* XXX: should this table be tunable? */
-- start_off = 0;
-- if (size <= 16 * 1024) {
-- size = 16 * 1024;
-- } else if (size <= 32 * 1024) {
-- size = 32 * 1024;
-- } else if (size <= 64 * 1024) {
-- size = 64 * 1024;
-- } else if (size <= 128 * 1024) {
-- size = 128 * 1024;
-- } else if (size <= 256 * 1024) {
-- size = 256 * 1024;
-- } else if (size <= 512 * 1024) {
-- size = 512 * 1024;
-- } else if (size <= 1024 * 1024) {
-- size = 1024 * 1024;
-- } else if (NRL_CHECK_SIZE(size, 4 * 1024 * 1024, max, 2 * 1024)) {
-- start_off = ((loff_t)ac->ac_o_ex.fe_logical >>
-- (21 - bsbits)) << 21;
-- size = 2 * 1024 * 1024;
-- } else if (NRL_CHECK_SIZE(size, 8 * 1024 * 1024, max, 4 * 1024)) {
-- start_off = ((loff_t)ac->ac_o_ex.fe_logical >>
-- (22 - bsbits)) << 22;
-- size = 4 * 1024 * 1024;
-- } else if (NRL_CHECK_SIZE(ac->ac_o_ex.fe_len,
-- (8<<20)>>bsbits, max, 8 * 1024)) {
-- start_off = ((loff_t)ac->ac_o_ex.fe_logical >>
-- (23 - bsbits)) << 23;
-- size = 8 * 1024 * 1024;
-- } else {
-- start_off = (loff_t)ac->ac_o_ex.fe_logical << bsbits;
-- size = ac->ac_o_ex.fe_len << bsbits;
-+ if (wind == 0) {
-+ __u64 tstart, tend;
-+ /* file is quite large, we now preallocate with
-+		 * the biggest configured window with regard to
-+ * logical offset */
-+ wind = sbi->s_mb_prealloc_table[i - 1];
-+ tstart = ac->ac_o_ex.fe_logical;
-+ do_div(tstart, wind);
-+ start = tstart * wind;
-+ tend = ac->ac_o_ex.fe_logical + ac->ac_o_ex.fe_len - 1;
-+ do_div(tend, wind);
-+ tend = tend * wind + wind;
-+ size = tend - start;
- }
-- orig_size = size = size >> bsbits;
-- orig_start = start = start_off >> bsbits;
-+ orig_size = size;
-+ orig_start = start;
-
- /* don't cover already allocated blocks in selected range */
- if (ar->pleft && start <= ar->lleft) {
-@@ -2946,7 +3085,6 @@ ext4_mb_normalize_request(struct ext4_al
- }
- BUG_ON(start + size <= ac->ac_o_ex.fe_logical &&
- start > ac->ac_o_ex.fe_logical);
-- BUG_ON(size <= 0 || size > EXT4_BLOCKS_PER_GROUP(ac->ac_sb));
-
- /* now prepare goal request */
-
-@@ -3930,11 +4068,19 @@ static void ext4_mb_group_or_file(struct
-
- /* don't use group allocation for large files */
- size = max(size, isize);
-- if (size > sbi->s_mb_stream_request) {
-+ if ((ac->ac_o_ex.fe_len >= sbi->s_mb_small_req) ||
-+ (size >= sbi->s_mb_large_req)) {
- ac->ac_flags |= EXT4_MB_STREAM_ALLOC;
- return;
- }
-
-+ /*
-+ * request is so large that we don't care about
-+	 * streaming - it outweighs any possible seek
-+ */
-+ if (ac->ac_o_ex.fe_len >= sbi->s_mb_large_req)
-+ return;
-+
- BUG_ON(ac->ac_lg != NULL);
- /*
- * locality group prealloc space are per cpu. The reason for having
-Index: linux-stage/fs/ext4/super.c
-===================================================================
---- linux-stage.orig/fs/ext4/super.c
-+++ linux-stage/fs/ext4/super.c
-@@ -2377,7 +2377,8 @@ EXT4_RW_ATTR_SBI_UI(mb_stats, s_mb_stats
- EXT4_RW_ATTR_SBI_UI(mb_max_to_scan, s_mb_max_to_scan);
- EXT4_RW_ATTR_SBI_UI(mb_min_to_scan, s_mb_min_to_scan);
- EXT4_RW_ATTR_SBI_UI(mb_order2_req, s_mb_order2_reqs);
--EXT4_RW_ATTR_SBI_UI(mb_stream_req, s_mb_stream_request);
-+EXT4_RW_ATTR_SBI_UI(mb_small_req, s_mb_small_req);
-+EXT4_RW_ATTR_SBI_UI(mb_large_req, s_mb_large_req);
- EXT4_RW_ATTR_SBI_UI(mb_group_prealloc, s_mb_group_prealloc);
- EXT4_RW_ATTR_SBI_UI(max_writeback_mb_bump, s_max_writeback_mb_bump);
-
-@@ -2391,7 +2392,8 @@ static struct attribute *ext4_attrs[] =
- ATTR_LIST(mb_max_to_scan),
- ATTR_LIST(mb_min_to_scan),
- ATTR_LIST(mb_order2_req),
-- ATTR_LIST(mb_stream_req),
-+ ATTR_LIST(mb_small_req),
-+ ATTR_LIST(mb_large_req),
- ATTR_LIST(mb_group_prealloc),
- ATTR_LIST(max_writeback_mb_bump),
- NULL,
-Index: linux-stage/fs/ext4/inode.c
-===================================================================
---- linux-stage.orig/fs/ext4/inode.c
-+++ linux-stage/fs/ext4/inode.c
-@@ -3070,6 +3070,11 @@ static int ext4_da_writepages(struct add
- if (unlikely(sbi->s_mount_flags & EXT4_MF_FS_ABORTED))
- return -EROFS;
-
-+ if (wbc->nr_to_write < sbi->s_mb_small_req) {
-+ nr_to_writebump = sbi->s_mb_small_req - wbc->nr_to_write;
-+ wbc->nr_to_write = sbi->s_mb_small_req;
-+ }
-+
- if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX)
- range_whole = 1;
-
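
The patch above replaces the hard-coded mb_stream_request heuristics with a tunable preallocation table plus mb_small_req/mb_large_req thresholds. A rough userspace sketch of the window selection that ext4_mb_normalize_request() performs after the patch follows; it uses the non-striped default table set up in ext4_mb_init(), and the function and variable names here are illustrative only.

#include <stdio.h>

/* Non-striped defaults set up by ext4_mb_init() in the patch above. */
static const unsigned long prealloc_table[] = {
        4, 8, 16, 32, 64, 128, 256, 512, 1024, 2048    /* blocks */
};
#define TABLE_SIZE (sizeof(prealloc_table) / sizeof(prealloc_table[0]))

/*
 * Pick the smallest window covering a request of size_blocks, or fall
 * back to the largest window aligned on the logical offset.
 */
static unsigned long choose_window(unsigned long size_blocks,
                                   unsigned long logical,
                                   unsigned long *start)
{
        unsigned long wind = 0;
        unsigned int i;

        for (i = 0; i < TABLE_SIZE; i++) {
                if (size_blocks <= prealloc_table[i]) {
                        wind = prealloc_table[i];
                        break;
                }
        }
        *start = 0;
        if (wind == 0) {
                /* larger than any window: use the biggest one, aligned
                 * with regard to the logical offset */
                wind = prealloc_table[TABLE_SIZE - 1];
                *start = (logical / wind) * wind;
        }
        return wind;
}

int main(void)
{
        unsigned long start;
        unsigned long wind = choose_window(300, 5000, &start);

        printf("window=%lu start=%lu\n", wind, start);  /* window=512 start=0 */
        return 0;
}

With a 300-block request the 512-block window is chosen; only requests larger than the biggest table entry get a window aligned to the logical offset.
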
+++ /dev/null
-Index: linux-2.6.27.21-0.1/fs/ext4/ext4_extents.h
-===================================================================
---- linux-2.6.27.21-0.1.orig/fs/ext4/ext4_extents.h 2009-07-07 14:47:22.000000000 +0530
-+++ linux-2.6.27.21-0.1/fs/ext4/ext4_extents.h 2009-07-07 14:49:31.000000000 +0530
-@@ -203,6 +203,11 @@
- return le16_to_cpu(ext_inode_hdr(inode)->eh_depth);
- }
-
-+static inline void ext4_ext_tree_changed(struct inode *inode)
-+{
-+ EXT4_I(inode)->i_ext_generation++;
-+}
-+
- static inline void
- ext4_ext_invalidate_cache(struct inode *inode)
- {
-Index: linux-2.6.27.21-0.1/fs/ext4/ext4.h
-===================================================================
---- linux-2.6.27.21-0.1.orig/fs/ext4/ext4.h 2009-07-07 14:47:13.000000000 +0530
-+++ linux-2.6.27.21-0.1/fs/ext4/ext4.h 2009-07-07 14:50:07.000000000 +0530
-@@ -114,6 +114,7 @@
- struct inode vfs_inode;
- struct jbd2_inode jinode;
-
-+ unsigned long i_ext_generation;
- struct ext4_ext_cache i_cached_extent;
- /*
- * File creation time. Its function is same as that of
-Index: linux-2.6.27.21-0.1/fs/ext4/extents.c
-===================================================================
---- linux-2.6.27.21-0.1.orig/fs/ext4/extents.c 2009-07-07 14:48:03.000000000 +0530
-+++ linux-2.6.27.21-0.1/fs/ext4/extents.c 2009-07-07 14:48:58.000000000 +0530
-@@ -1618,6 +1618,7 @@
- ext4_ext_drop_refs(npath);
- kfree(npath);
- }
-+ ext4_ext_tree_changed(inode);
- ext4_ext_invalidate_cache(inode);
- return err;
- }
-@@ -2279,6 +2280,7 @@
- }
- }
- out:
-+ ext4_ext_tree_changed(inode);
- ext4_ext_drop_refs(path);
- kfree(path);
- ext4_journal_stop(handle);
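
The patch above adds an i_ext_generation counter that ext4_ext_tree_changed() bumps whenever the extent tree is modified, so callers holding cached extent lookups can detect that their cache is stale. A toy sketch of that generation-validation pattern is shown below; the structures are illustrative, not the actual ext4 ones.

#include <stdbool.h>

/* Illustrative types only, not the ext4 structures. */
struct cached_extent {
        unsigned long   gen;            /* generation the entry was cached at */
        unsigned int    lblk, len, pblk;
};

struct toy_inode {
        unsigned long           ext_generation;
        struct cached_extent    cache;
};

/* What ext4_ext_tree_changed() does: invalidate every cached lookup. */
static inline void tree_changed(struct toy_inode *inode)
{
        inode->ext_generation++;
}

/* A cached mapping is only usable if nothing changed since it was taken. */
static inline bool cache_valid(const struct toy_inode *inode)
{
        return inode->cache.gen == inode->ext_generation;
}
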
+++ /dev/null
-diff -urpN linux-stage.orig/fs/ext4/super.c linux-stage/fs/ext4/super.c
---- linux-stage.orig/fs/ext4/super.c 2013-05-13 11:44:45.000000000 -0400
-+++ linux-stage/fs/ext4/super.c 2013-05-13 11:50:19.000000000 -0400
-@@ -1248,8 +1248,8 @@ enum {
- Opt_mballoc, Opt_bigendian_extents, Opt_force_over_128tb,
- Opt_extents, Opt_noextents,
- Opt_no_mbcache,
-- Opt_discard, Opt_nodiscard,
-- Opt_init_inode_table, Opt_noinit_inode_table,
-+ Opt_discard, Opt_nodiscard, Opt_init_inode_table, Opt_noinit_inode_table,
-+ Opt_max_dir_size_kb,
- };
-
- static const match_table_t tokens = {
-@@ -1326,6 +1326,7 @@ static const match_table_t tokens = {
- {Opt_noextents, "noextents"},
- {Opt_discard, "discard"},
- {Opt_nodiscard, "nodiscard"},
-+ {Opt_max_dir_size_kb, "max_dir_size_kb=%u"},
- {Opt_err, NULL},
- };
-
-@@ -1708,6 +1709,13 @@ set_qf_format:
- case Opt_nodelalloc:
- clear_opt(sbi->s_mount_opt, DELALLOC);
- break;
-+ case Opt_max_dir_size_kb:
-+ if (match_int(&args[0], &option))
-+ return 0;
-+ if (option < 0)
-+ return 0;
-+ sbi->s_max_dir_size = option * 1024;
-+ break;
- case Opt_stripe:
- if (match_int(&args[0], &option))
- return 0;
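
The patch above adds a max_dir_size_kb=%u mount option whose value is stored in bytes in sbi->s_max_dir_size. A small userspace-style sketch of the equivalent parsing and scaling follows; the kernel code uses match_int() via the match_table_t shown in the hunk, and the helper below is hypothetical.

#include <stdlib.h>
#include <string.h>

/*
 * Hypothetical helper: returns the directory size limit in bytes for an
 * option string such as "max_dir_size_kb=512", or 0 if absent/invalid,
 * mirroring sbi->s_max_dir_size = option * 1024 in the hunk above.
 */
static unsigned long parse_max_dir_size_kb(const char *opt)
{
        static const char prefix[] = "max_dir_size_kb=";
        const char *val;
        char *end;
        unsigned long kb;

        if (strncmp(opt, prefix, sizeof(prefix) - 1) != 0)
                return 0;
        val = opt + sizeof(prefix) - 1;
        kb = strtoul(val, &end, 10);
        if (end == val || *end != '\0')
                return 0;
        return kb * 1024;
}
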
+++ /dev/null
-diff -urpN linux-stage.orig/fs/ext4/balloc.c linux-stage/fs/ext4/balloc.c
---- linux-stage.orig/fs/ext4/balloc.c 2012-07-02 12:07:23.000000000 -0400
-+++ linux-stage/fs/ext4/balloc.c 2012-07-02 12:07:57.000000000 -0400
-@@ -97,7 +97,7 @@ unsigned ext4_init_block_bitmap(struct s
- /* If checksum is bad mark all blocks used to prevent allocation
- * essentially implementing a per-group read-only flag. */
- if (!ext4_group_desc_csum_verify(sbi, block_group, gdp)) {
-- ext4_error(sb, __func__,
-+ ext4_error(sb,
- "Checksum bad for group %u", block_group);
- ext4_free_blks_set(sb, gdp, 0);
- ext4_free_inodes_set(sb, gdp, 0);
-@@ -207,10 +207,8 @@ struct ext4_group_desc * ext4_get_group_
- struct ext4_sb_info *sbi = EXT4_SB(sb);
-
- if (block_group >= ngroups) {
-- ext4_error(sb, "ext4_get_group_desc",
-- "block_group >= groups_count - "
-- "block_group = %u, groups_count = %u",
-- block_group, ngroups);
-+ ext4_error(sb, "block_group >= groups_count - block_group = %u,"
-+ " groups_count = %u", block_group, ngroups);
-
- return NULL;
- }
-@@ -218,8 +216,7 @@ struct ext4_group_desc * ext4_get_group_
- group_desc = block_group >> EXT4_DESC_PER_BLOCK_BITS(sb);
- offset = block_group & (EXT4_DESC_PER_BLOCK(sb) - 1);
- if (!sbi->s_group_desc[group_desc]) {
-- ext4_error(sb, "ext4_get_group_desc",
-- "Group descriptor not loaded - "
-+ ext4_error(sb, "Group descriptor not loaded - "
- "block_group = %u, group_desc = %u, desc = %u",
- block_group, group_desc, offset);
- return NULL;
-@@ -280,7 +277,7 @@ static int ext4_valid_block_bitmap(struc
- return 1;
-
- err_out:
-- ext4_error(sb, __func__,
-+ ext4_error(sb,
- "Invalid block bitmap - "
- "block_group = %d, block = %llu",
- block_group, bitmap_blk);
-@@ -309,7 +306,7 @@ ext4_read_block_bitmap(struct super_bloc
- bitmap_blk = ext4_block_bitmap(sb, desc);
- bh = sb_getblk(sb, bitmap_blk);
- if (unlikely(!bh)) {
-- ext4_error(sb, __func__,
-+ ext4_error(sb,
- "Cannot read block bitmap - "
- "block_group = %u, block_bitmap = %llu",
- block_group, bitmap_blk);
-@@ -352,7 +349,7 @@ ext4_read_block_bitmap(struct super_bloc
- set_bitmap_uptodate(bh);
- if (bh_submit_read(bh) < 0) {
- put_bh(bh);
-- ext4_error(sb, __func__,
-+ ext4_error(sb,
- "Cannot read block bitmap - "
- "block_group = %u, block_bitmap = %llu",
- block_group, bitmap_blk);
-@@ -417,7 +414,7 @@ void ext4_add_groupblocks(handle_t *hand
- in_range(block, ext4_inode_table(sb, desc), sbi->s_itb_per_group) ||
- in_range(block + count - 1, ext4_inode_table(sb, desc),
- sbi->s_itb_per_group)) {
-- ext4_error(sb, __func__,
-+ ext4_error(sb,
- "Adding blocks in system zones - "
- "Block = %llu, count = %lu",
- block, count);
-@@ -451,7 +448,7 @@ void ext4_add_groupblocks(handle_t *hand
- BUFFER_TRACE(bitmap_bh, "clear bit");
- if (!ext4_clear_bit_atomic(ext4_group_lock_ptr(sb, block_group),
- bit + i, bitmap_bh->b_data)) {
-- ext4_error(sb, __func__,
-+ ext4_error(sb,
- "bit already cleared for block %llu",
- (ext4_fsblk_t)(block + i));
- BUFFER_TRACE(bitmap_bh, "bit already cleared");
-diff -urpN linux-stage.orig/fs/ext4/dir.c linux-stage/fs/ext4/dir.c
---- linux-stage.orig/fs/ext4/dir.c 2012-07-02 12:07:23.000000000 -0400
-+++ linux-stage/fs/ext4/dir.c 2012-07-02 12:07:57.000000000 -0400
-@@ -70,28 +70,29 @@ int ext4_check_dir_entry(const char *fun
- const int rlen = ext4_rec_len_from_disk(de->rec_len,
- dir->i_sb->s_blocksize);
-
-- if (rlen < EXT4_DIR_REC_LEN(1))
-+ if (unlikely(rlen < EXT4_DIR_REC_LEN(1)))
- error_msg = "rec_len is smaller than minimal";
-- else if (rlen % 4 != 0)
-+ else if (unlikely(rlen % 4 != 0))
- error_msg = "rec_len % 4 != 0";
-- else if (rlen < EXT4_DIR_REC_LEN(de->name_len))
-+ else if (unlikely(rlen < EXT4_DIR_REC_LEN(de->name_len)))
- error_msg = "rec_len is too small for name_len";
-- else if (((char *) de - bh->b_data) + rlen > dir->i_sb->s_blocksize)
-+ else if (unlikely(((char *) de - bh->b_data) + rlen > dir->i_sb->s_blocksize))
- error_msg = "directory entry across blocks";
-- else if (le32_to_cpu(de->inode) >
-- le32_to_cpu(EXT4_SB(dir->i_sb)->s_es->s_inodes_count))
-+ else if (unlikely(le32_to_cpu(de->inode) >
-+ le32_to_cpu(EXT4_SB(dir->i_sb)->s_es->s_inodes_count)))
- error_msg = "inode out of bounds";
-+ else
-+ return 1;
-
-- if (error_msg != NULL)
-- ext4_error(dir->i_sb, function,
-- "bad entry in directory #%lu: %s - block=%llu"
-- "offset=%u(%u), inode=%u, rec_len=%d, name_len=%d",
-- dir->i_ino, error_msg,
-- (unsigned long long) bh->b_blocknr,
-- (unsigned) (offset%bh->b_size), offset,
-- le32_to_cpu(de->inode),
-- rlen, de->name_len);
-- return error_msg == NULL ? 1 : 0;
-+ ext4_error(dir->i_sb,
-+ "bad entry in directory #%lu: %s - block=%llu"
-+ "offset=%u(%u), inode=%u, rec_len=%d, name_len=%d",
-+ dir->i_ino, error_msg,
-+ (unsigned long long) bh->b_blocknr,
-+ (unsigned) (offset%bh->b_size), offset,
-+ le32_to_cpu(de->inode),
-+ rlen, de->name_len);
-+ return 0;
- }
-
- static int ext4_readdir(struct file *filp,
-@@ -152,7 +153,7 @@ static int ext4_readdir(struct file *fil
- */
- if (!bh) {
- if (!dir_has_error) {
-- ext4_error(sb, __func__, "directory #%lu "
-+ ext4_error(sb, "directory #%lu "
- "contains a hole at offset %Lu",
- inode->i_ino,
- (unsigned long long) filp->f_pos);
-diff -urpN linux-stage.orig/fs/ext4/ext4.h linux-stage/fs/ext4/ext4.h
---- linux-stage.orig/fs/ext4/ext4.h 2012-07-02 12:07:23.000000000 -0400
-+++ linux-stage/fs/ext4/ext4.h 2012-07-02 12:10:15.000000000 -0400
-@@ -23,6 +23,7 @@
- #include <linux/quota.h>
- #include <linux/rwsem.h>
- #include <linux/rbtree.h>
-+#include <linux/kernel.h>
- #include <linux/seqlock.h>
- #include <linux/mutex.h>
- #include <linux/timer.h>
-@@ -56,6 +57,12 @@
- #define ext4_debug(f, a...) do {} while (0)
- #endif
-
-+#define EXT4_ERROR_INODE(inode, fmt, a...) \
-+ ext4_error_inode(__func__, (inode), (fmt), ## a);
-+
-+#define EXT4_ERROR_FILE(file, fmt, a...) \
-+ ext4_error_file(__func__, (file), (fmt), ## a);
-+
- /* data type for block offset of block group */
- typedef int ext4_grpblk_t;
-
-@@ -970,7 +977,27 @@ struct ext4_super_block {
- __u8 s_reserved_char_pad2;
- __le16 s_reserved_pad;
- __le64 s_kbytes_written; /* nr of lifetime kilobytes written */
-- __u32 s_reserved[160]; /* Padding to the end of the block */
-+ __le32 s_snapshot_inum; /* Inode number of active snapshot */
-+ __le32 s_snapshot_id; /* sequential ID of active snapshot */
-+ __le64 s_snapshot_r_blocks_count; /* reserved blocks for active
-+ snapshot's future use */
-+ __le32 s_snapshot_list; /* inode number of the head of the
-+ on-disk snapshot list */
-+#define EXT4_S_ERR_START offsetof(struct ext4_super_block, s_error_count)
-+ __le32 s_error_count; /* number of fs errors */
-+ __le32 s_first_error_time; /* first time an error happened */
-+ __le32 s_first_error_ino; /* inode involved in first error */
-+ __le64 s_first_error_block; /* block involved of first error */
-+ __u8 s_first_error_func[32]; /* function where the error happened */
-+ __le32 s_first_error_line; /* line number where error happened */
-+ __le32 s_last_error_time; /* most recent time of an error */
-+ __le32 s_last_error_ino; /* inode involved in last error */
-+ __le32 s_last_error_line; /* line number where error happened */
-+ __le64 s_last_error_block; /* block involved of last error */
-+ __u8 s_last_error_func[32]; /* function where the error happened */
-+#define EXT4_S_ERR_END offsetof(struct ext4_super_block, s_mount_opts)
-+ __u8 s_mount_opts[64];
-+ __le32 s_reserved[112]; /* Padding to the end of the block */
- };
-
- #ifdef __KERNEL__
-@@ -1110,6 +1137,9 @@ struct ext4_sb_info {
-
- /* workqueue for dio unwritten */
- struct workqueue_struct *dio_unwritten_wq;
-+
-+ /* Lazy inode table initialization info */
-+ struct ext4_li_request *s_li_request;
- };
-
- static inline struct ext4_sb_info *EXT4_SB(struct super_block *sb)
-@@ -1431,6 +1461,23 @@ void ext4_get_group_no_and_offset(struct
- extern struct proc_dir_entry *ext4_proc_root;
-
- /*
-+ * Timeout and state flag for lazy initialization inode thread.
-+ */
-+#define EXT4_DEF_LI_WAIT_MULT 10
-+#define EXT4_DEF_LI_MAX_START_DELAY 5
-+#define EXT4_LAZYINIT_QUIT 0x0001
-+#define EXT4_LAZYINIT_RUNNING 0x0002
-+
-+/*
-+ * Lazy inode table initialization info
-+ */
-+struct ext4_lazy_init {
-+ unsigned long li_state;
-+ struct list_head li_request_list;
-+ struct mutex li_list_mtx;
-+};
-+
-+/*
- * Function prototypes
- */
-
-@@ -1509,6 +1556,7 @@ extern struct buffer_head *ext4_read_ino
- extern void mark_bitmap_end(int start_bit, int end_bit, char *bitmap);
-
- /* mballoc.c */
-+struct fstrim_range;
- extern long ext4_mb_stats;
- extern long ext4_mb_max_to_scan;
- extern int ext4_mb_init(struct super_block *, int);
-@@ -1526,6 +1574,8 @@ extern int ext4_mb_add_groupinfo(struct
- extern int ext4_mb_get_buddy_cache_lock(struct super_block *, ext4_group_t);
- extern void ext4_mb_put_buddy_cache_lock(struct super_block *,
- ext4_group_t, int);
-+extern int ext4_trim_fs(struct super_block *, struct fstrim_range *);
-+
- /* inode.c */
- int ext4_forget(handle_t *handle, int is_metadata, struct inode *inode,
- struct buffer_head *bh, ext4_fsblk_t blocknr);
-@@ -1586,13 +1636,17 @@ extern int ext4_group_extend(struct supe
- ext4_fsblk_t n_blocks_count);
-
- /* super.c */
--extern void ext4_error(struct super_block *, const char *, const char *, ...)
-+extern void __ext4_error(struct super_block *, const char *, const char *, ...)
-+ __attribute__ ((format (printf, 3, 4)));
-+#define ext4_error(sb, message...) __ext4_error(sb, __func__, ## message)
-+extern void ext4_error_inode(const char *, struct inode *, const char *, ...)
- __attribute__ ((format (printf, 3, 4)));
- extern void __ext4_std_error(struct super_block *, const char *, int);
- extern void ext4_abort(struct super_block *, const char *, const char *, ...)
- __attribute__ ((format (printf, 3, 4)));
--extern void ext4_warning(struct super_block *, const char *, const char *, ...)
-+extern void __ext4_warning(struct super_block *, const char *, const char *, ...)
- __attribute__ ((format (printf, 3, 4)));
-+#define ext4_warning(sb, message...) __ext4_warning(sb, __func__, ## message)
- extern void ext4_msg(struct super_block *, const char *, const char *, ...)
- __attribute__ ((format (printf, 3, 4)));
- extern void ext4_grp_locked_error(struct super_block *, ext4_group_t,
-@@ -1774,11 +1828,19 @@ struct ext4_group_info {
- * 5 free 8-block regions. */
- };
-
--#define EXT4_GROUP_INFO_NEED_INIT_BIT 0
-+#define EXT4_GROUP_INFO_NEED_INIT_BIT 0
-+#define EXT4_GROUP_INFO_WAS_TRIMMED_BIT 1
-
- #define EXT4_MB_GRP_NEED_INIT(grp) \
- (test_bit(EXT4_GROUP_INFO_NEED_INIT_BIT, &((grp)->bb_state)))
-
-+#define EXT4_MB_GRP_WAS_TRIMMED(grp) \
-+ (test_bit(EXT4_GROUP_INFO_WAS_TRIMMED_BIT, &((grp)->bb_state)))
-+#define EXT4_MB_GRP_SET_TRIMMED(grp) \
-+ (set_bit(EXT4_GROUP_INFO_WAS_TRIMMED_BIT, &((grp)->bb_state)))
-+#define EXT4_MB_GRP_CLEAR_TRIMMED(grp) \
-+ (clear_bit(EXT4_GROUP_INFO_WAS_TRIMMED_BIT, &((grp)->bb_state)))
-+
- #define EXT4_MAX_CONTENTION 8
- #define EXT4_CONTENTION_THRESHOLD 2
-
-diff -urpN linux-stage.orig/fs/ext4/ext4_jbd2.c linux-stage/fs/ext4/ext4_jbd2.c
---- linux-stage.orig/fs/ext4/ext4_jbd2.c 2012-07-02 12:07:23.000000000 -0400
-+++ linux-stage/fs/ext4/ext4_jbd2.c 2012-07-02 12:07:57.000000000 -0400
-@@ -96,7 +96,7 @@ int __ext4_handle_dirty_metadata(const c
- if (inode && inode_needs_sync(inode)) {
- sync_dirty_buffer(bh);
- if (buffer_req(bh) && !buffer_uptodate(bh)) {
-- ext4_error(inode->i_sb, __func__,
-+ ext4_error(inode->i_sb,
- "IO error syncing inode, "
- "inode=%lu, block=%llu",
- inode->i_ino,
-diff -urpN linux-stage.orig/fs/ext4/extents.c linux-stage/fs/ext4/extents.c
---- linux-stage.orig/fs/ext4/extents.c 2012-07-02 12:07:23.000000000 -0400
-+++ linux-stage/fs/ext4/extents.c 2012-07-02 12:07:57.000000000 -0400
-@@ -437,7 +437,7 @@ static int __ext4_ext_check(const char *
- return 0;
-
- corrupted:
-- ext4_error(inode->i_sb, function,
-+ ext4_error(inode->i_sb,
- "bad header/extent in inode #%lu: %s - magic %x, "
- "entries %u, max %u(%u), depth %u(%u)",
- inode->i_ino, error_msg, le16_to_cpu(eh->eh_magic),
-@@ -1534,7 +1534,7 @@ int ext4_ext_try_to_merge(struct inode *
- merge_done = 1;
- WARN_ON(eh->eh_entries == 0);
- if (!eh->eh_entries)
-- ext4_error(inode->i_sb, "ext4_ext_try_to_merge",
-+ ext4_error(inode->i_sb,
- "inode#%lu, eh->eh_entries = 0!", inode->i_ino);
- }
-
-@@ -1784,7 +1784,11 @@ int ext4_ext_walk_space(struct inode *in
- }
-
- depth = ext_depth(inode);
-- BUG_ON(path[depth].p_hdr == NULL);
-+ if (unlikely(path[depth].p_hdr == NULL)) {
-+ EXT4_ERROR_INODE(inode, "path[%d].p_hdr == NULL", depth);
-+ err = -EIO;
-+ break;
-+ }
- ex = path[depth].p_ext;
- next = ext4_ext_next_allocated_block(path);
-
-@@ -1835,7 +1839,11 @@ int ext4_ext_walk_space(struct inode *in
- cbex.ec_type = EXT4_EXT_CACHE_EXTENT;
- }
-
-- BUG_ON(cbex.ec_len == 0);
-+ if (unlikely(cbex.ec_len == 0)) {
-+ EXT4_ERROR_INODE(inode, "cbex.ec_len == 0");
-+ err = -EIO;
-+ break;
-+ }
- err = func(inode, path, &cbex, ex, cbdata);
- ext4_ext_drop_refs(path);
-
-@@ -3284,7 +3292,7 @@ int ext4_ext_get_blocks(handle_t *handle
- * this is why assert can't be put in ext4_ext_find_extent()
- */
- if (path[depth].p_ext == NULL && depth != 0) {
-- ext4_error(inode->i_sb, __func__, "bad extent address "
-+ ext4_error(inode->i_sb, "bad extent address "
- "inode: %lu, iblock: %lu, depth: %d",
- inode->i_ino, (unsigned long) iblock, depth);
- err = -EIO;
-@@ -3415,7 +3423,7 @@ int ext4_ext_get_blocks(handle_t *handle
-
- if (unlikely(ext4_test_inode_flag(inode, EXT4_INODE_EOFBLOCKS))) {
- if (unlikely(!eh->eh_entries)) {
-- ext4_error(inode->i_sb, __func__,
-+ ext4_error(inode->i_sb,
- "inode#%lu, eh->eh_entries = 0 and "
- "EOFBLOCKS_FL set", inode->i_ino);
- err = -EIO;
-diff -urpN linux-stage.orig/fs/ext4/ialloc.c linux-stage/fs/ext4/ialloc.c
---- linux-stage.orig/fs/ext4/ialloc.c 2012-07-02 12:07:23.000000000 -0400
-+++ linux-stage/fs/ext4/ialloc.c 2012-07-02 12:07:57.000000000 -0400
-@@ -76,7 +76,7 @@ unsigned ext4_init_inode_bitmap(struct s
- /* If checksum is bad mark all blocks and inodes use to prevent
- * allocation, essentially implementing a per-group read-only flag. */
- if (!ext4_group_desc_csum_verify(sbi, block_group, gdp)) {
-- ext4_error(sb, __func__, "Checksum bad for group %u",
-+ ext4_error(sb, "Checksum bad for group %u",
- block_group);
- ext4_free_blks_set(sb, gdp, 0);
- ext4_free_inodes_set(sb, gdp, 0);
-@@ -111,7 +111,7 @@ ext4_read_inode_bitmap(struct super_bloc
- bitmap_blk = ext4_inode_bitmap(sb, desc);
- bh = sb_getblk(sb, bitmap_blk);
- if (unlikely(!bh)) {
-- ext4_error(sb, __func__,
-+ ext4_error(sb,
- "Cannot read inode bitmap - "
- "block_group = %u, inode_bitmap = %llu",
- block_group, bitmap_blk);
-@@ -153,7 +153,7 @@ ext4_read_inode_bitmap(struct super_bloc
- set_bitmap_uptodate(bh);
- if (bh_submit_read(bh) < 0) {
- put_bh(bh);
-- ext4_error(sb, __func__,
-+ ext4_error(sb,
- "Cannot read inode bitmap - "
- "block_group = %u, inode_bitmap = %llu",
- block_group, bitmap_blk);
-@@ -230,8 +230,7 @@ void ext4_free_inode(handle_t *handle, s
-
- es = EXT4_SB(sb)->s_es;
- if (ino < EXT4_FIRST_INO(sb) || ino > le32_to_cpu(es->s_inodes_count)) {
-- ext4_error(sb, "ext4_free_inode",
-- "reserved or nonexistent inode %lu", ino);
-+ ext4_error(sb, "reserved or nonexistent inode %lu", ino);
- goto error_return;
- }
- block_group = (ino - 1) / EXT4_INODES_PER_GROUP(sb);
-@@ -286,11 +285,10 @@ out:
- fatal = err;
- sb->s_dirt = 1;
- } else
-- ext4_error(sb, "ext4_free_inode",
-- "bit already cleared for inode %lu", ino);
-+ ext4_error(sb, "bit already cleared for inode %lu", ino);
-
- error_return:
-- brelse(bitmap_bh);
-+ brelse(bitmap_bh);
- ext4_std_error(sb, fatal);
- }
-
-@@ -730,7 +728,7 @@ static int ext4_claim_inode(struct super
- if ((group == 0 && ino < EXT4_FIRST_INO(sb)) ||
- ino > EXT4_INODES_PER_GROUP(sb)) {
- ext4_unlock_group(sb, group);
-- ext4_error(sb, __func__,
-+ ext4_error(sb,
- "reserved inode or inode > inodes count - "
- "block_group = %u, inode=%lu", group,
- ino + group * EXT4_INODES_PER_GROUP(sb));
-@@ -1094,7 +1092,7 @@ struct inode *ext4_orphan_get(struct sup
-
- /* Error cases - e2fsck has already cleaned up for us */
- if (ino > max_ino) {
-- ext4_warning(sb, __func__,
-+ ext4_warning(sb,
- "bad orphan ino %lu! e2fsck was run?", ino);
- goto error;
- }
-@@ -1103,7 +1101,7 @@ struct inode *ext4_orphan_get(struct sup
- bit = (ino - 1) % EXT4_INODES_PER_GROUP(sb);
- bitmap_bh = ext4_read_inode_bitmap(sb, block_group);
- if (!bitmap_bh) {
-- ext4_warning(sb, __func__,
-+ ext4_warning(sb,
- "inode bitmap error for orphan %lu", ino);
- goto error;
- }
-@@ -1136,7 +1134,7 @@ iget_failed:
- err = PTR_ERR(inode);
- inode = NULL;
- bad_orphan:
-- ext4_warning(sb, __func__,
-+ ext4_warning(sb,
- "bad orphan inode %lu! e2fsck was run?", ino);
- printk(KERN_NOTICE "ext4_test_bit(bit=%d, block=%llu) = %d\n",
- bit, (unsigned long long)bitmap_bh->b_blocknr,
-diff -urpN linux-stage.orig/fs/ext4/inode.c linux-stage/fs/ext4/inode.c
---- linux-stage.orig/fs/ext4/inode.c 2012-07-02 12:07:23.000000000 -0400
-+++ linux-stage/fs/ext4/inode.c 2012-07-02 12:07:57.000000000 -0400
-@@ -246,7 +246,7 @@ void ext4_delete_inode(struct inode *ino
- inode->i_size = 0;
- err = ext4_mark_inode_dirty(handle, inode);
- if (err) {
-- ext4_warning(inode->i_sb, __func__,
-+ ext4_warning(inode->i_sb,
- "couldn't mark inode dirty (err %d)", err);
- goto stop_handle;
- }
-@@ -264,7 +264,7 @@ void ext4_delete_inode(struct inode *ino
- if (err > 0)
- err = ext4_journal_restart(handle, 3);
- if (err != 0) {
-- ext4_warning(inode->i_sb, __func__,
-+ ext4_warning(inode->i_sb,
- "couldn't extend journal (err %d)", err);
- stop_handle:
- ext4_journal_stop(handle);
-@@ -375,8 +375,7 @@ static int ext4_block_to_path(struct ino
- offsets[n++] = i_block & (ptrs - 1);
- final = ptrs;
- } else {
-- ext4_warning(inode->i_sb, "ext4_block_to_path",
-- "block %lu > max in inode %lu",
-+ ext4_warning(inode->i_sb, "block %lu > max in inode %lu",
- i_block + direct_blocks +
- indirect_blocks + double_blocks, inode->i_ino);
- }
-@@ -396,7 +395,7 @@ static int __ext4_check_blockref(const c
- if (blk &&
- unlikely(!ext4_data_block_valid(EXT4_SB(inode->i_sb),
- blk, 1))) {
-- ext4_error(inode->i_sb, function,
-+ ext4_error(inode->i_sb,
- "invalid block reference %u "
- "in inode #%lu", blk, inode->i_ino);
- return -EIO;
-@@ -1167,7 +1166,7 @@ static int check_block_validity(struct i
- sector_t logical, sector_t phys, int len)
- {
- if (!ext4_data_block_valid(EXT4_SB(inode->i_sb), phys, len)) {
-- ext4_error(inode->i_sb, msg,
-+ ext4_error(inode->i_sb,
- "inode #%lu logical block %llu mapped to %llu "
- "(size %d)", inode->i_ino,
- (unsigned long long) logical,
-@@ -4316,7 +4315,7 @@ static void ext4_free_data(handle_t *han
- if ((EXT4_JOURNAL(inode) == NULL) || bh2jh(this_bh))
- ext4_handle_dirty_metadata(handle, inode, this_bh);
- else
-- ext4_error(inode->i_sb, __func__,
-+ ext4_error(inode->i_sb,
- "circular indirect block detected, "
- "inode=%lu, block=%llu",
- inode->i_ino,
-@@ -4364,7 +4363,7 @@ static void ext4_free_branches(handle_t
- * (should be rare).
- */
- if (!bh) {
-- ext4_error(inode->i_sb, "ext4_free_branches",
-+ ext4_error(inode->i_sb,
- "Read failure, inode=%lu, block=%llu",
- inode->i_ino, nr);
- continue;
-@@ -4680,9 +4679,8 @@ static int __ext4_get_inode_loc(struct i
-
- bh = sb_getblk(sb, block);
- if (!bh) {
-- ext4_error(sb, "ext4_get_inode_loc", "unable to read "
-- "inode block - inode=%lu, block=%llu",
-- inode->i_ino, block);
-+ ext4_error(sb, "unable to read inode block - "
-+ "inode=%lu, block=%llu", inode->i_ino, block);
- return -EIO;
- }
- if (!buffer_uptodate(bh)) {
-@@ -4780,7 +4778,7 @@ make_io:
- submit_bh(READ_META, bh);
- wait_on_buffer(bh);
- if (!buffer_uptodate(bh)) {
-- ext4_error(sb, __func__,
-+ ext4_error(sb,
- "unable to read inode block - inode=%lu, "
- "block=%llu", inode->i_ino, block);
- brelse(bh);
-@@ -4999,7 +4997,7 @@ struct inode *ext4_iget(struct super_blo
- ret = 0;
- if (ei->i_file_acl &&
- !ext4_data_block_valid(EXT4_SB(sb), ei->i_file_acl, 1)) {
-- ext4_error(sb, __func__,
-+ ext4_error(sb,
- "bad extended attribute block %llu in inode #%lu",
- ei->i_file_acl, inode->i_ino);
- ret = -EIO;
-@@ -5046,7 +5044,7 @@ struct inode *ext4_iget(struct super_blo
- new_decode_dev(le32_to_cpu(raw_inode->i_block[1])));
- } else {
- ret = -EIO;
-- ext4_error(inode->i_sb, __func__,
-+ ext4_error(inode->i_sb,
- "bogus i_mode (%o) for inode=%lu",
- inode->i_mode, inode->i_ino);
- goto bad_inode;
-@@ -5286,7 +5284,7 @@ int ext4_write_inode(struct inode *inode
- if (wait)
- sync_dirty_buffer(iloc.bh);
- if (buffer_req(iloc.bh) && !buffer_uptodate(iloc.bh)) {
-- ext4_error(inode->i_sb, __func__,
-+ ext4_error(inode->i_sb,
- "IO error syncing inode, "
- "inode=%lu, block=%llu",
- inode->i_ino,
-@@ -5707,7 +5705,7 @@ int ext4_mark_inode_dirty(handle_t *hand
- EXT4_STATE_NO_EXPAND);
- if (mnt_count !=
- le16_to_cpu(sbi->s_es->s_mnt_count)) {
-- ext4_warning(inode->i_sb, __func__,
-+ ext4_warning(inode->i_sb,
- "Unable to expand inode %lu. Delete"
- " some EAs or run e2fsck.",
- inode->i_ino);
-diff -urpN linux-stage.orig/fs/ext4/mballoc.c linux-stage/fs/ext4/mballoc.c
---- linux-stage.orig/fs/ext4/mballoc.c 2012-07-02 12:07:23.000000000 -0400
-+++ linux-stage/fs/ext4/mballoc.c 2012-07-02 12:07:57.000000000 -0400
-@@ -862,8 +862,6 @@ static int ext4_mb_init_cache(struct pag
-
- err = 0;
- first_block = page->index * blocks_per_page;
-- /* init the page */
-- memset(page_address(page), 0xff, PAGE_CACHE_SIZE);
- for (i = 0; i < blocks_per_page; i++) {
- int group;
- struct ext4_group_info *grinfo;
-@@ -899,6 +897,8 @@ static int ext4_mb_init_cache(struct pag
- * incore got set to the group block bitmap below
- */
- ext4_lock_group(sb, group);
-+ /* init the page */
-+ memset(data, 0xff, blocksize);
- ext4_mb_generate_buddy(sb, data, incore, group);
- ext4_unlock_group(sb, group);
- incore = NULL;
-@@ -1862,7 +1862,6 @@ void ext4_mb_scan_aligned(struct ext4_al
- }
- }
-
--/* This is now called BEFORE we load the buddy bitmap. */
- static int ext4_mb_good_group(struct ext4_allocation_context *ac,
- ext4_group_t group, int cr)
- {
-@@ -2162,6 +2161,11 @@ static void *ext4_mb_seq_groups_next(str
- return (void *) ((unsigned long) group);
- }
-
-+static inline void ext4_mb_release_desc(struct ext4_buddy *e4b)
-+{
-+ ext4_mb_unload_buddy(e4b);
-+}
-+
- static int ext4_mb_seq_groups_show(struct seq_file *seq, void *v)
- {
- struct super_block *sb = seq->private;
-@@ -2193,7 +2197,7 @@ static int ext4_mb_seq_groups_show(struc
- ext4_lock_group(sb, group);
- memcpy(&sg, ext4_get_group_info(sb, group), i);
- ext4_unlock_group(sb, group);
-- ext4_mb_unload_buddy(&e4b);
-+ ext4_mb_release_desc(&e4b);
-
- seq_printf(seq, "#%-5u: %-5u %-5u %-5u [", group, sg.info.bb_free,
- sg.info.bb_fragments, sg.info.bb_first_free);
-@@ -2562,6 +2566,17 @@ int ext4_mb_release(struct super_block *
- return 0;
- }
-
-+static inline int ext4_issue_discard(struct super_block *sb,
-+ ext4_group_t block_group, ext4_grpblk_t block, int count)
-+{
-+ ext4_fsblk_t discard_block;
-+
-+ discard_block = block + ext4_group_first_block_no(sb, block_group);
-+ trace_ext4_discard_blocks(sb,
-+ (unsigned long long) discard_block, count);
-+ return sb_issue_discard(sb, discard_block, count);
-+}
-+
- /*
- * This function is called by the jbd2 layer once the commit has finished,
- * so we know we can free the blocks that were released with that commit.
-@@ -2581,22 +2596,9 @@ static void release_blocks_on_commit(jou
- mb_debug(1, "gonna free %u blocks in group %u (0x%p):",
- entry->count, entry->group, entry);
-
-- if (test_opt(sb, DISCARD)) {
-- int ret;
-- ext4_fsblk_t discard_block;
--
-- discard_block = entry->start_blk +
-- ext4_group_first_block_no(sb, entry->group);
-- trace_ext4_discard_blocks(sb,
-- (unsigned long long)discard_block,
-- entry->count);
-- ret = sb_issue_discard(sb, discard_block, entry->count);
-- if (ret == EOPNOTSUPP) {
-- ext4_warning(sb, __func__,
-- "discard not supported, disabling");
-- clear_opt(EXT4_SB(sb)->s_mount_opt, DISCARD);
-- }
-- }
-+ if (test_opt(sb, DISCARD))
-+ ext4_issue_discard(sb, entry->group,
-+ entry->start_blk, entry->count);
-
- err = ext4_mb_load_buddy(sb, entry->group, &e4b);
- /* we expect to find existing buddy because it's pinned */
-@@ -2611,6 +2613,15 @@ static void release_blocks_on_commit(jou
- rb_erase(&entry->node, &(db->bb_free_root));
- mb_free_blocks(NULL, &e4b, entry->start_blk, entry->count);
-
-+ /*
-+ * Clear the trimmed flag for the group so that the next
-+ * ext4_trim_fs can trim it.
-+ * If the volume is mounted with -o discard, online discard
-+ * is supported and the free blocks will be trimmed online.
-+ */
-+ if (!test_opt(sb, DISCARD))
-+ EXT4_MB_GRP_CLEAR_TRIMMED(db);
-+
- if (!db->bb_free_root.rb_node) {
- /* No more items in the per group rb tree
- * balance refcounts from ext4_mb_free_metadata()
-@@ -2620,7 +2631,7 @@ static void release_blocks_on_commit(jou
- }
- ext4_unlock_group(sb, entry->group);
- kmem_cache_free(ext4_free_ext_cachep, entry);
-- ext4_mb_unload_buddy(&e4b);
-+ ext4_mb_release_desc(&e4b);
- }
-
- mb_debug(1, "freed %u blocks in %u structures\n", count, count2);
-@@ -2757,7 +2768,7 @@ ext4_mb_mark_diskspace_used(struct ext4_
-
- len = ac->ac_b_ex.fe_len;
- if (!ext4_data_block_valid(sbi, block, len)) {
-- ext4_error(sb, __func__,
-+ ext4_error(sb,
- "Allocating blocks %llu-%llu which overlap "
- "fs metadata\n", block, block+len);
- /* File system mounted not to panic on error
-@@ -3671,14 +3682,14 @@ ext4_mb_discard_group_preallocations(str
-
- bitmap_bh = ext4_read_block_bitmap(sb, group);
- if (bitmap_bh == NULL) {
-- ext4_error(sb, __func__, "Error in reading block "
-+ ext4_error(sb, "Error in reading block "
- "bitmap for %u", group);
- return 0;
- }
-
- err = ext4_mb_load_buddy(sb, group, &e4b);
- if (err) {
-- ext4_error(sb, __func__, "Error in loading buddy "
-+ ext4_error(sb, "Error in loading buddy "
- "information for %u", group);
- put_bh(bitmap_bh);
- return 0;
-@@ -3852,14 +3863,14 @@ repeat:
-
- err = ext4_mb_load_buddy(sb, group, &e4b);
- if (err) {
-- ext4_error(sb, __func__, "Error in loading buddy "
-- "information for %u", group);
-+ ext4_error(sb, "Error in loading buddy information for %u",
-+ group);
- continue;
- }
-
- bitmap_bh = ext4_read_block_bitmap(sb, group);
- if (bitmap_bh == NULL) {
-- ext4_error(sb, __func__, "Error in reading block "
-+ ext4_error(sb, "Error in reading block "
- "bitmap for %u", group);
- ext4_mb_unload_buddy(&e4b);
- continue;
-@@ -4125,7 +4136,7 @@ ext4_mb_discard_lg_preallocations(struct
-
- ext4_get_group_no_and_offset(sb, pa->pa_pstart, &group, NULL);
- if (ext4_mb_load_buddy(sb, group, &e4b)) {
-- ext4_error(sb, __func__, "Error in loading buddy "
-+ ext4_error(sb, "Error in loading buddy "
- "information for %u", group);
- continue;
- }
-@@ -4516,7 +4527,7 @@ void ext4_mb_free_blocks(handle_t *handl
- if (block < le32_to_cpu(es->s_first_data_block) ||
- block + count < block ||
- block + count > ext4_blocks_count(es)) {
-- ext4_error(sb, __func__,
-+ ext4_error(sb,
- "Freeing blocks not in datazone - "
- "block = %llu, count = %lu", block, count);
- goto error_return;
-@@ -4561,7 +4572,7 @@ do_more:
- in_range(block + count - 1, ext4_inode_table(sb, gdp),
- EXT4_SB(sb)->s_itb_per_group)) {
-
-- ext4_error(sb, __func__,
-+ ext4_error(sb,
- "Freeing blocks in system zone - "
- "Block = %llu, count = %lu", block, count);
- /* err = 0. ext4_std_error should be a no op */
-diff -urpN linux-stage.orig/fs/ext4/move_extent.c linux-stage/fs/ext4/move_extent.c
---- linux-stage.orig/fs/ext4/move_extent.c 2012-07-02 12:07:23.000000000 -0400
-+++ linux-stage/fs/ext4/move_extent.c 2012-07-02 12:07:57.000000000 -0400
-@@ -152,12 +152,12 @@ mext_check_null_inode(struct inode *inod
- int ret = 0;
-
- if (inode1 == NULL) {
-- ext4_error(inode2->i_sb, function,
-+ ext4_error(inode2->i_sb,
- "Both inodes should not be NULL: "
- "inode1 NULL inode2 %lu", inode2->i_ino);
- ret = -EIO;
- } else if (inode2 == NULL) {
-- ext4_error(inode1->i_sb, function,
-+ ext4_error(inode1->i_sb,
- "Both inodes should not be NULL: "
- "inode1 %lu inode2 NULL", inode1->i_ino);
- ret = -EIO;
-@@ -483,7 +483,7 @@ mext_leaf_block(handle_t *handle, struct
-
- o_start = o_end = oext = orig_path[depth].p_ext;
- oext_alen = ext4_ext_get_actual_len(oext);
-- start_ext.ee_len = end_ext.ee_len = 0;
-+ start_ext.ee_block = start_ext.ee_len = end_ext.ee_len = 0;
-
- new_ext.ee_block = cpu_to_le32(*from);
- ext4_ext_store_pblock(&new_ext, ext_pblock(dext));
-@@ -528,7 +528,7 @@ mext_leaf_block(handle_t *handle, struct
- * new_ext |-------|
- */
- if (le32_to_cpu(oext->ee_block) + oext_alen - 1 < new_ext_end) {
-- ext4_error(orig_inode->i_sb, __func__,
-+ ext4_error(orig_inode->i_sb,
- "new_ext_end(%u) should be less than or equal to "
- "oext->ee_block(%u) + oext_alen(%d) - 1",
- new_ext_end, le32_to_cpu(oext->ee_block),
-@@ -691,12 +691,12 @@ mext_replace_branches(handle_t *handle,
- while (1) {
- /* The extent for donor must be found. */
- if (!dext) {
-- ext4_error(donor_inode->i_sb, __func__,
-+ ext4_error(donor_inode->i_sb,
- "The extent for donor must be found");
- *err = -EIO;
- goto out;
- } else if (donor_off != le32_to_cpu(tmp_dext.ee_block)) {
-- ext4_error(donor_inode->i_sb, __func__,
-+ ext4_error(donor_inode->i_sb,
- "Donor offset(%u) and the first block of donor "
- "extent(%u) should be equal",
- donor_off,
-@@ -1356,7 +1356,7 @@ ext4_move_extents(struct file *o_filp, s
- if (ret1 < 0)
- break;
- if (*moved_len > len) {
-- ext4_error(orig_inode->i_sb, __func__,
-+ ext4_error(orig_inode->i_sb,
- "We replaced blocks too much! "
- "sum of replaced: %llu requested: %llu",
- *moved_len, len);
-diff -urpN linux-stage.orig/fs/ext4/namei.c linux-stage/fs/ext4/namei.c
---- linux-stage.orig/fs/ext4/namei.c 2012-07-02 12:07:23.000000000 -0400
-+++ linux-stage/fs/ext4/namei.c 2012-07-02 12:07:57.000000000 -0400
-@@ -394,8 +394,7 @@ dx_probe(const struct qstr *d_name, stru
- if (root->info.hash_version != DX_HASH_TEA &&
- root->info.hash_version != DX_HASH_HALF_MD4 &&
- root->info.hash_version != DX_HASH_LEGACY) {
-- ext4_warning(dir->i_sb, __func__,
-- "Unrecognised inode hash code %d",
-+ ext4_warning(dir->i_sb, "Unrecognised inode hash code %d",
- root->info.hash_version);
- brelse(bh);
- *err = ERR_BAD_DX_DIR;
-@@ -410,8 +409,7 @@ dx_probe(const struct qstr *d_name, stru
- hash = hinfo->hash;
-
- if (root->info.unused_flags & 1) {
-- ext4_warning(dir->i_sb, __func__,
-- "Unimplemented inode hash flags: %#06x",
-+ ext4_warning(dir->i_sb, "Unimplemented inode hash flags: %#06x",
- root->info.unused_flags);
- brelse(bh);
- *err = ERR_BAD_DX_DIR;
-@@ -419,8 +417,7 @@ dx_probe(const struct qstr *d_name, stru
- }
-
- if ((indirect = root->info.indirect_levels) > 1) {
-- ext4_warning(dir->i_sb, __func__,
-- "Unimplemented inode hash depth: %#06x",
-+ ext4_warning(dir->i_sb, "Unimplemented inode hash depth: %#06x",
- root->info.indirect_levels);
- brelse(bh);
- *err = ERR_BAD_DX_DIR;
-@@ -432,8 +429,7 @@ dx_probe(const struct qstr *d_name, stru
-
- if (dx_get_limit(entries) != dx_root_limit(dir,
- root->info.info_length)) {
-- ext4_warning(dir->i_sb, __func__,
-- "dx entry: limit != root limit");
-+ ext4_warning(dir->i_sb, "dx entry: limit != root limit");
- brelse(bh);
- *err = ERR_BAD_DX_DIR;
- goto fail;
-@@ -444,7 +440,7 @@ dx_probe(const struct qstr *d_name, stru
- {
- count = dx_get_count(entries);
- if (!count || count > dx_get_limit(entries)) {
-- ext4_warning(dir->i_sb, __func__,
-+ ext4_warning(dir->i_sb,
- "dx entry: no count or count > limit");
- brelse(bh);
- *err = ERR_BAD_DX_DIR;
-@@ -489,7 +485,7 @@ dx_probe(const struct qstr *d_name, stru
- goto fail2;
- at = entries = ((struct dx_node *) bh->b_data)->entries;
- if (dx_get_limit(entries) != dx_node_limit (dir)) {
-- ext4_warning(dir->i_sb, __func__,
-+ ext4_warning(dir->i_sb,
- "dx entry: limit != node limit");
- brelse(bh);
- *err = ERR_BAD_DX_DIR;
-@@ -505,7 +501,7 @@ fail2:
- }
- fail:
- if (*err == ERR_BAD_DX_DIR)
-- ext4_warning(dir->i_sb, __func__,
-+ ext4_warning(dir->i_sb,
- "Corrupt dir inode %ld, running e2fsck is "
- "recommended.", dir->i_ino);
- return NULL;
-@@ -969,7 +965,7 @@ restart:
- wait_on_buffer(bh);
- if (!buffer_uptodate(bh)) {
- /* read error, skip block & hope for the best */
-- ext4_error(sb, __func__, "reading directory #%lu "
-+ ext4_error(sb, "reading directory #%lu "
- "offset %lu", dir->i_ino,
- (unsigned long)block);
- brelse(bh);
-@@ -1012,8 +1008,9 @@ cleanup_and_exit:
- static struct buffer_head * ext4_dx_find_entry(struct inode *dir, const struct qstr *d_name,
- struct ext4_dir_entry_2 **res_dir, int *err)
- {
-- struct super_block * sb = dir->i_sb;
-+ struct super_block * sb;
- struct dx_hash_info hinfo;
-+ u32 hash;
- struct dx_frame frames[2], *frame;
- struct ext4_dir_entry_2 *de, *top;
- struct buffer_head *bh;
-@@ -1022,8 +1019,18 @@ static struct buffer_head * ext4_dx_find
- int namelen = d_name->len;
- const u8 *name = d_name->name;
-
-- if (!(frame = dx_probe(d_name, dir, &hinfo, frames, err)))
-- return NULL;
-+ sb = dir->i_sb;
-+ /* NFS may look up ".." - look at dx_root directory block */
-+ if (namelen > 2 || name[0] != '.'||(name[1] != '.' && name[1] != '\0')){
-+ if (!(frame = dx_probe(d_name, dir, &hinfo, frames, err)))
-+ return NULL;
-+ } else {
-+ frame = frames;
-+ frame->bh = NULL; /* for dx_release() */
-+ frame->at = (struct dx_entry *)frames; /* hack for zero entry*/
-+ dx_set_block(frame->at, 0); /* dx_root block is 0 */
-+ }
-+ hash = hinfo.hash;
- do {
- block = dx_get_block(frame->at);
- if (!(bh = ext4_bread (NULL,dir, block, 0, err)))
-@@ -1049,10 +1056,10 @@ static struct buffer_head * ext4_dx_find
- }
- brelse(bh);
- /* Check to see if we should continue to search */
-- retval = ext4_htree_next_block(dir, hinfo.hash, frame,
-+ retval = ext4_htree_next_block(dir, hash, frame,
- frames, NULL);
- if (retval < 0) {
-- ext4_warning(sb, __func__,
-+ ext4_warning(sb,
- "error reading index page in directory #%lu",
- dir->i_ino);
- *err = retval;
-@@ -1082,14 +1089,13 @@ static struct dentry *ext4_lookup(struct
- __u32 ino = le32_to_cpu(de->inode);
- brelse(bh);
- if (!ext4_valid_inum(dir->i_sb, ino)) {
-- ext4_error(dir->i_sb, "ext4_lookup",
-- "bad inode number: %u", ino);
-+ ext4_error(dir->i_sb, "bad inode number: %u", ino);
- return ERR_PTR(-EIO);
- }
- inode = ext4_iget(dir->i_sb, ino);
- if (unlikely(IS_ERR(inode))) {
- if (PTR_ERR(inode) == -ESTALE) {
-- ext4_error(dir->i_sb, __func__,
-+ ext4_error(dir->i_sb,
- "deleted inode referenced: %u",
- ino);
- return ERR_PTR(-EIO);
-@@ -1121,7 +1127,7 @@ struct dentry *ext4_get_parent(struct de
- brelse(bh);
-
- if (!ext4_valid_inum(child->d_inode->i_sb, ino)) {
-- ext4_error(child->d_inode->i_sb, "ext4_get_parent",
-+ ext4_error(child->d_inode->i_sb,
- "bad inode number: %u", ino);
- return ERR_PTR(-EIO);
- }
-@@ -1421,7 +1427,7 @@ static int make_indexed_dir(handle_t *ha
- de = (struct ext4_dir_entry_2 *)((char *)fde +
- ext4_rec_len_from_disk(fde->rec_len, blocksize));
- if ((char *) de >= (((char *) root) + blocksize)) {
-- ext4_error(dir->i_sb, __func__,
-+ ext4_error(dir->i_sb,
- "invalid rec_len for '..' in inode %lu",
- dir->i_ino);
- brelse(bh);
-@@ -1468,10 +1474,22 @@ static int make_indexed_dir(handle_t *ha
- frame->at = entries;
- frame->bh = bh;
- bh = bh2;
-+
-+ ext4_handle_dirty_metadata(handle, dir, frame->bh);
-+ ext4_handle_dirty_metadata(handle, dir, bh);
-+
- de = do_split(handle,dir, &bh, frame, &hinfo, &retval);
-- dx_release (frames);
-- if (!(de))
-+ if (!de) {
-+ /*
-+ * Even if the block split failed, we have to properly write
-+ * out all the changes we did so far. Otherwise we can end up
-+ * with corrupted filesystem.
-+ */
-+ ext4_mark_inode_dirty(handle, dir);
-+ dx_release(frames);
- return retval;
-+ }
-+ dx_release(frames);
-
- retval = add_dirent_to_buf(handle, dentry, inode, de, bh);
- brelse(bh);
-@@ -1588,8 +1606,7 @@ static int ext4_dx_add_entry(handle_t *h
-
- if (levels && (dx_get_count(frames->entries) ==
- dx_get_limit(frames->entries))) {
-- ext4_warning(sb, __func__,
-- "Directory index full!");
-+ ext4_warning(sb, "Directory index full!");
- err = -ENOSPC;
- goto cleanup;
- }
-@@ -1904,7 +1921,7 @@
- ext4_set_de_type(dir->i_sb, de, S_IFDIR);
- inode->i_nlink = 2;
- BUFFER_TRACE(dir_block, "call ext4_handle_dirty_metadata");
-- ext4_handle_dirty_metadata(handle, dir, dir_block);
-+ ext4_handle_dirty_metadata(handle, inode, dir_block);
- brelse(dir_block);
- ext4_mark_inode_dirty(handle, inode);
- err = ext4_add_entry(handle, dentry, inode);
-@@ -1943,11 +1960,11 @@ static int empty_dir(struct inode *inode
- if (inode->i_size < EXT4_DIR_REC_LEN(1) + EXT4_DIR_REC_LEN(2) ||
- !(bh = ext4_bread(NULL, inode, 0, 0, &err))) {
- if (err)
-- ext4_error(inode->i_sb, __func__,
-+ ext4_error(inode->i_sb,
- "error %d reading directory #%lu offset 0",
- err, inode->i_ino);
- else
-- ext4_warning(inode->i_sb, __func__,
-+ ext4_warning(inode->i_sb,
- "bad directory (dir #%lu) - no data block",
- inode->i_ino);
- return 1;
-@@ -1958,7 +1975,7 @@ static int empty_dir(struct inode *inode
- !le32_to_cpu(de1->inode) ||
- strcmp(".", de->name) ||
- strcmp("..", de1->name)) {
-- ext4_warning(inode->i_sb, "empty_dir",
-+ ext4_warning(inode->i_sb,
- "bad directory (dir #%lu) - no `.' or `..'",
- inode->i_ino);
- brelse(bh);
-@@ -1976,7 +1993,7 @@ static int empty_dir(struct inode *inode
- offset >> EXT4_BLOCK_SIZE_BITS(sb), 0, &err);
- if (!bh) {
- if (err)
-- ext4_error(sb, __func__,
-+ ext4_error(sb,
- "error %d reading directory"
- " #%lu offset %u",
- err, inode->i_ino, offset);
-@@ -2198,7 +2215,7 @@ static int ext4_rmdir(struct inode *dir,
- if (retval)
- goto end_rmdir;
- if (!EXT4_DIR_LINK_EMPTY(inode))
-- ext4_warning(inode->i_sb, "ext4_rmdir",
-+ ext4_warning(inode->i_sb,
- "empty directory has too many links (%d)",
- inode->i_nlink);
- inode->i_version++;
-@@ -2250,7 +2267,7 @@ static int ext4_unlink(struct inode *dir
- goto end_unlink;
-
- if (!inode->i_nlink) {
-- ext4_warning(inode->i_sb, "ext4_unlink",
-+ ext4_warning(inode->i_sb,
- "Deleting nonexistent file (%lu), %d",
- inode->i_ino, inode->i_nlink);
- inode->i_nlink = 1;
-@@ -2497,7 +2514,7 @@ static int ext4_rename(struct inode *old
- }
- }
- if (retval) {
-- ext4_warning(old_dir->i_sb, "ext4_rename",
-+ ext4_warning(old_dir->i_sb,
- "Deleting old file (%lu), %d, error=%d",
- old_dir->i_ino, old_dir->i_nlink, retval);
- }
-diff -urpN linux-stage.orig/fs/ext4/resize.c linux-stage/fs/ext4/resize.c
---- linux-stage.orig/fs/ext4/resize.c 2012-07-02 12:07:23.000000000 -0400
-+++ linux-stage/fs/ext4/resize.c 2012-07-02 12:07:57.000000000 -0400
-@@ -48,63 +48,63 @@ static int verify_group_input(struct sup
-
- ext4_get_group_no_and_offset(sb, start, NULL, &offset);
- if (group != sbi->s_groups_count)
-- ext4_warning(sb, __func__,
-+ ext4_warning(sb,
- "Cannot add at group %u (only %u groups)",
- input->group, sbi->s_groups_count);
- else if (offset != 0)
-- ext4_warning(sb, __func__, "Last group not full");
-+ ext4_warning(sb, "Last group not full");
- else if (input->reserved_blocks > input->blocks_count / 5)
-- ext4_warning(sb, __func__, "Reserved blocks too high (%u)",
-+ ext4_warning(sb, "Reserved blocks too high (%u)",
- input->reserved_blocks);
- else if (free_blocks_count < 0)
-- ext4_warning(sb, __func__, "Bad blocks count %u",
-+ ext4_warning(sb, "Bad blocks count %u",
- input->blocks_count);
- else if (!(bh = sb_bread(sb, end - 1)))
-- ext4_warning(sb, __func__,
-+ ext4_warning(sb,
- "Cannot read last block (%llu)",
- end - 1);
- else if (outside(input->block_bitmap, start, end))
-- ext4_warning(sb, __func__,
-+ ext4_warning(sb,
- "Block bitmap not in group (block %llu)",
- (unsigned long long)input->block_bitmap);
- else if (outside(input->inode_bitmap, start, end))
-- ext4_warning(sb, __func__,
-+ ext4_warning(sb,
- "Inode bitmap not in group (block %llu)",
- (unsigned long long)input->inode_bitmap);
- else if (outside(input->inode_table, start, end) ||
- outside(itend - 1, start, end))
-- ext4_warning(sb, __func__,
-+ ext4_warning(sb,
- "Inode table not in group (blocks %llu-%llu)",
- (unsigned long long)input->inode_table, itend - 1);
- else if (input->inode_bitmap == input->block_bitmap)
-- ext4_warning(sb, __func__,
-+ ext4_warning(sb,
- "Block bitmap same as inode bitmap (%llu)",
- (unsigned long long)input->block_bitmap);
- else if (inside(input->block_bitmap, input->inode_table, itend))
-- ext4_warning(sb, __func__,
-+ ext4_warning(sb,
- "Block bitmap (%llu) in inode table (%llu-%llu)",
- (unsigned long long)input->block_bitmap,
- (unsigned long long)input->inode_table, itend - 1);
- else if (inside(input->inode_bitmap, input->inode_table, itend))
-- ext4_warning(sb, __func__,
-+ ext4_warning(sb,
- "Inode bitmap (%llu) in inode table (%llu-%llu)",
- (unsigned long long)input->inode_bitmap,
- (unsigned long long)input->inode_table, itend - 1);
- else if (inside(input->block_bitmap, start, metaend))
-- ext4_warning(sb, __func__,
-+ ext4_warning(sb,
- "Block bitmap (%llu) in GDT table"
- " (%llu-%llu)",
- (unsigned long long)input->block_bitmap,
- start, metaend - 1);
- else if (inside(input->inode_bitmap, start, metaend))
-- ext4_warning(sb, __func__,
-+ ext4_warning(sb,
- "Inode bitmap (%llu) in GDT table"
- " (%llu-%llu)",
- (unsigned long long)input->inode_bitmap,
- start, metaend - 1);
- else if (inside(input->inode_table, start, metaend) ||
- inside(itend - 1, start, metaend))
-- ext4_warning(sb, __func__,
-+ ext4_warning(sb,
- "Inode table (%llu-%llu) overlaps"
- "GDT table (%llu-%llu)",
- (unsigned long long)input->inode_table,
-@@ -364,7 +364,7 @@ static int verify_reserved_gdb(struct su
- while ((grp = ext4_list_backups(sb, &three, &five, &seven)) < end) {
- if (le32_to_cpu(*p++) !=
- grp * EXT4_BLOCKS_PER_GROUP(sb) + blk){
-- ext4_warning(sb, __func__,
-+ ext4_warning(sb,
- "reserved GDT %llu"
- " missing grp %d (%llu)",
- blk, grp,
-@@ -420,7 +420,7 @@ static int add_new_gdb(handle_t *handle,
- */
- if (EXT4_SB(sb)->s_sbh->b_blocknr !=
- le32_to_cpu(EXT4_SB(sb)->s_es->s_first_data_block)) {
-- ext4_warning(sb, __func__,
-+ ext4_warning(sb,
- "won't resize using backup superblock at %llu",
- (unsigned long long)EXT4_SB(sb)->s_sbh->b_blocknr);
- return -EPERM;
-@@ -444,7 +444,7 @@ static int add_new_gdb(handle_t *handle,
-
- data = (__le32 *)dind->b_data;
- if (le32_to_cpu(data[gdb_num % EXT4_ADDR_PER_BLOCK(sb)]) != gdblock) {
-- ext4_warning(sb, __func__,
-+ ext4_warning(sb,
- "new group %u GDT block %llu not reserved",
- input->group, gdblock);
- err = -EINVAL;
-@@ -468,7 +468,7 @@ static int add_new_gdb(handle_t *handle,
- GFP_NOFS);
- if (!n_group_desc) {
- err = -ENOMEM;
-- ext4_warning(sb, __func__,
-+ ext4_warning(sb,
- "not enough memory for %lu groups", gdb_num + 1);
- goto exit_inode;
- }
-@@ -567,7 +567,7 @@ static int reserve_backup_gdb(handle_t *
- /* Get each reserved primary GDT block and verify it holds backups */
- for (res = 0; res < reserved_gdb; res++, blk++) {
- if (le32_to_cpu(*data) != blk) {
-- ext4_warning(sb, __func__,
-+ ext4_warning(sb,
- "reserved block %llu"
- " not at offset %ld",
- blk,
-@@ -713,7 +713,7 @@ static void update_backups(struct super_
- */
- exit_err:
- if (err) {
-- ext4_warning(sb, __func__,
-+ ext4_warning(sb,
- "can't update backup for group %u (err %d), "
- "forcing fsck on next reboot", group, err);
- sbi->s_mount_state &= ~EXT4_VALID_FS;
-@@ -753,20 +753,20 @@ int ext4_group_add(struct super_block *s
-
- if (gdb_off == 0 && !EXT4_HAS_RO_COMPAT_FEATURE(sb,
- EXT4_FEATURE_RO_COMPAT_SPARSE_SUPER)) {
-- ext4_warning(sb, __func__,
-+ ext4_warning(sb,
- "Can't resize non-sparse filesystem further");
- return -EPERM;
- }
-
- if (ext4_blocks_count(es) + input->blocks_count <
- ext4_blocks_count(es)) {
-- ext4_warning(sb, __func__, "blocks_count overflow");
-+ ext4_warning(sb, "blocks_count overflow");
- return -EINVAL;
- }
-
- if (le32_to_cpu(es->s_inodes_count) + EXT4_INODES_PER_GROUP(sb) <
- le32_to_cpu(es->s_inodes_count)) {
-- ext4_warning(sb, __func__, "inodes_count overflow");
-+ ext4_warning(sb, "inodes_count overflow");
- return -EINVAL;
- }
-
-@@ -774,13 +774,13 @@ int ext4_group_add(struct super_block *s
- if (!EXT4_HAS_COMPAT_FEATURE(sb,
- EXT4_FEATURE_COMPAT_RESIZE_INODE)
- || !le16_to_cpu(es->s_reserved_gdt_blocks)) {
-- ext4_warning(sb, __func__,
-+ ext4_warning(sb,
- "No reserved GDT blocks, can't resize");
- return -EPERM;
- }
- inode = ext4_iget(sb, EXT4_RESIZE_INO);
- if (IS_ERR(inode)) {
-- ext4_warning(sb, __func__,
-+ ext4_warning(sb,
- "Error opening resize inode");
- return PTR_ERR(inode);
- }
-@@ -810,7 +810,7 @@ int ext4_group_add(struct super_block *s
-
- mutex_lock(&sbi->s_resize_lock);
- if (input->group != sbi->s_groups_count) {
-- ext4_warning(sb, __func__,
-+ ext4_warning(sb,
- "multiple resizers run on filesystem!");
- err = -EBUSY;
- goto exit_journal;
-@@ -998,12 +998,12 @@ int ext4_group_extend(struct super_block
- " too large to resize to %llu blocks safely\n",
- sb->s_id, n_blocks_count);
- if (sizeof(sector_t) < 8)
-- ext4_warning(sb, __func__, "CONFIG_LBDAF not enabled");
-+ ext4_warning(sb, "CONFIG_LBDAF not enabled");
- return -EINVAL;
- }
-
- if (n_blocks_count < o_blocks_count) {
-- ext4_warning(sb, __func__,
-+ ext4_warning(sb,
- "can't shrink FS - resize aborted");
- return -EBUSY;
- }
-@@ -1012,7 +1012,7 @@ int ext4_group_extend(struct super_block
- ext4_get_group_no_and_offset(sb, o_blocks_count, &group, &last);
-
- if (last == 0) {
-- ext4_warning(sb, __func__,
-+ ext4_warning(sb,
- "need to use ext2online to resize further");
- return -EPERM;
- }
-@@ -1020,7 +1020,7 @@ int ext4_group_extend(struct super_block
- add = EXT4_BLOCKS_PER_GROUP(sb) - last;
-
- if (o_blocks_count + add < o_blocks_count) {
-- ext4_warning(sb, __func__, "blocks_count overflow");
-+ ext4_warning(sb, "blocks_count overflow");
- return -EINVAL;
- }
-
-@@ -1028,7 +1028,7 @@ int ext4_group_extend(struct super_block
- add = n_blocks_count - o_blocks_count;
-
- if (o_blocks_count + add < n_blocks_count)
-- ext4_warning(sb, __func__,
-+ ext4_warning(sb,
- "will only finish group (%llu"
- " blocks, %u new)",
- o_blocks_count + add, add);
-@@ -1036,7 +1036,7 @@ int ext4_group_extend(struct super_block
- /* See if the device is actually as big as what was requested */
- bh = sb_bread(sb, o_blocks_count + add - 1);
- if (!bh) {
-- ext4_warning(sb, __func__,
-+ ext4_warning(sb,
- "can't read last block, resize aborted");
- return -ENOSPC;
- }
-@@ -1048,13 +1048,13 @@ int ext4_group_extend(struct super_block
- handle = ext4_journal_start_sb(sb, 3);
- if (IS_ERR(handle)) {
- err = PTR_ERR(handle);
-- ext4_warning(sb, __func__, "error %d on journal start", err);
-+ ext4_warning(sb, "error %d on journal start", err);
- goto exit_put;
- }
-
- mutex_lock(&EXT4_SB(sb)->s_resize_lock);
- if (o_blocks_count != ext4_blocks_count(es)) {
-- ext4_warning(sb, __func__,
-+ ext4_warning(sb,
- "multiple resizers run on filesystem!");
- mutex_unlock(&EXT4_SB(sb)->s_resize_lock);
- ext4_journal_stop(handle);
-@@ -1064,7 +1064,7 @@ int ext4_group_extend(struct super_block
-
- if ((err = ext4_journal_get_write_access(handle,
- EXT4_SB(sb)->s_sbh))) {
-- ext4_warning(sb, __func__,
-+ ext4_warning(sb,
- "error %d on journal write access", err);
- mutex_unlock(&EXT4_SB(sb)->s_resize_lock);
- ext4_journal_stop(handle);
-diff -urpN linux-stage.orig/fs/ext4/super.c linux-stage/fs/ext4/super.c
---- linux-stage.orig/fs/ext4/super.c 2012-07-02 12:07:23.000000000 -0400
-+++ linux-stage/fs/ext4/super.c 2012-07-02 12:07:57.000000000 -0400
-@@ -48,11 +48,16 @@
- #include "acl.h"
- #include "mballoc.h"
-
-+#define QFMT_OCFS2 3
-+#define QFMT_VFS_V1 4
-+
- #define CREATE_TRACE_POINTS
- #include <trace/events/ext4.h>
-
- struct proc_dir_entry *ext4_proc_root;
- static struct kset *ext4_kset;
-+static struct ext4_lazy_init *ext4_li_info;
-+static struct mutex ext4_li_mtx;
-
- static int ext4_load_journal(struct super_block *, struct ext4_super_block *,
- unsigned long journal_devnum);
-@@ -337,7 +342,7 @@ static void ext4_handle_error(struct sup
- sb->s_id);
- }
-
--void ext4_error(struct super_block *sb, const char *function,
-+void __ext4_error(struct super_block *sb, const char *function,
- const char *fmt, ...)
- {
- va_list args;
-@@ -351,6 +356,42 @@ void ext4_error(struct super_block *sb,
- ext4_handle_error(sb);
- }
-
-+void ext4_error_inode(const char *function, struct inode *inode,
-+ const char *fmt, ...)
-+{
-+ va_list args;
-+
-+ va_start(args, fmt);
-+ printk(KERN_CRIT "EXT4-fs error (device %s): %s: inode #%lu: (comm %s) ",
-+ inode->i_sb->s_id, function, inode->i_ino, current->comm);
-+ vprintk(fmt, args);
-+ printk("\n");
-+ va_end(args);
-+
-+ ext4_handle_error(inode->i_sb);
-+}
-+
-+void ext4_error_file(const char *function, struct file *file,
-+ const char *fmt, ...)
-+{
-+ va_list args;
-+ struct inode *inode = file->f_dentry->d_inode;
-+ char pathname[80], *path;
-+
-+ va_start(args, fmt);
-+ path = d_path(&(file->f_path), pathname, sizeof(pathname));
-+ if (!path)
-+ path = "(unknown)";
-+ printk(KERN_CRIT
-+ "EXT4-fs error (device %s): %s: inode #%lu (comm %s path %s): ",
-+ inode->i_sb->s_id, function, inode->i_ino, current->comm, path);
-+ vprintk(fmt, args);
-+ printk("\n");
-+ va_end(args);
-+
-+ ext4_handle_error(inode->i_sb);
-+}
-+
- static const char *ext4_decode_error(struct super_block *sb, int errno,
- char nbuf[16])
- {
-@@ -454,7 +495,7 @@ void ext4_msg (struct super_block * sb,
- va_end(args);
- }
-
--void ext4_warning(struct super_block *sb, const char *function,
-+void __ext4_warning(struct super_block *sb, const char *function,
- const char *fmt, ...)
- {
- va_list args;
-@@ -511,7 +552,7 @@ void ext4_update_dynamic_rev(struct supe
- if (le32_to_cpu(es->s_rev_level) > EXT4_GOOD_OLD_REV)
- return;
-
-- ext4_warning(sb, __func__,
-+ ext4_warning(sb,
- "updating to rev %d because of new feature flag, "
- "running e2fsck is recommended",
- EXT4_DYNAMIC_REV);
-@@ -777,9 +818,22 @@ static inline void ext4_show_quota_optio
- #if defined(CONFIG_QUOTA)
- struct ext4_sb_info *sbi = EXT4_SB(sb);
-
-- if (sbi->s_jquota_fmt)
-- seq_printf(seq, ",jqfmt=%s",
-- (sbi->s_jquota_fmt == QFMT_VFS_OLD) ? "vfsold" : "vfsv0");
-+ if (sbi->s_jquota_fmt) {
-+ char *fmtname = "";
-+
-+ switch (sbi->s_jquota_fmt) {
-+ case QFMT_VFS_OLD:
-+ fmtname = "vfsold";
-+ break;
-+ case QFMT_VFS_V0:
-+ fmtname = "vfsv0";
-+ break;
-+ case QFMT_VFS_V1:
-+ fmtname = "vfsv1";
-+ break;
-+ }
-+ seq_printf(seq, ",jqfmt=%s", fmtname);
-+ }
-
- if (sbi->s_qf_names[USRQUOTA])
- seq_printf(seq, ",usrjquota=%s", sbi->s_qf_names[USRQUOTA]);
-@@ -1096,13 +1150,14 @@ enum {
- Opt_abort, Opt_data_journal, Opt_data_ordered, Opt_data_writeback,
- Opt_data_err_abort, Opt_data_err_ignore,
- Opt_usrjquota, Opt_grpjquota, Opt_offusrjquota, Opt_offgrpjquota,
-- Opt_jqfmt_vfsold, Opt_jqfmt_vfsv0, Opt_quota, Opt_noquota,
-- Opt_ignore, Opt_barrier, Opt_nobarrier, Opt_err, Opt_resize,
-- Opt_usrquota, Opt_grpquota, Opt_i_version,
-+ Opt_jqfmt_vfsold, Opt_jqfmt_vfsv0, Opt_jqfmt_vfsv1, Opt_quota,
-+ Opt_noquota, Opt_ignore, Opt_barrier, Opt_nobarrier, Opt_err,
-+ Opt_resize, Opt_usrquota, Opt_grpquota, Opt_i_version,
- Opt_stripe, Opt_delalloc, Opt_nodelalloc,
- Opt_block_validity, Opt_noblock_validity,
- Opt_inode_readahead_blks, Opt_journal_ioprio,
- Opt_discard, Opt_nodiscard,
-+ Opt_init_inode_table, Opt_noinit_inode_table,
- };
-
- static const match_table_t tokens = {
-@@ -2709,6 +2764,21 @@ static int ext4_fill_super(struct super_
- get_random_bytes(&sbi->s_next_generation, sizeof(u32));
- spin_lock_init(&sbi->s_next_gen_lock);
-
-+ err = percpu_counter_init(&sbi->s_freeblocks_counter,
-+ ext4_count_free_blocks(sb));
-+ if (!err)
-+ err = percpu_counter_init(&sbi->s_freeinodes_counter,
-+ ext4_count_free_inodes(sb));
-+ if (!err)
-+ err = percpu_counter_init(&sbi->s_dirs_counter,
-+ ext4_count_dirs(sb));
-+ if (!err)
-+ err = percpu_counter_init(&sbi->s_dirtyblocks_counter, 0);
-+ if (err) {
-+ ext4_msg(sb, KERN_ERR, "insufficient memory");
-+ goto failed_mount3;
-+ }
-+
- sbi->s_stripe = ext4_get_stripe_size(sbi);
- sbi->s_max_writeback_mb_bump = 128;
-
-@@ -2828,20 +2898,6 @@ static int ext4_fill_super(struct super_
- set_task_ioprio(sbi->s_journal->j_task, journal_ioprio);
-
- no_journal:
-- err = percpu_counter_init(&sbi->s_freeblocks_counter,
-- ext4_count_free_blocks(sb));
-- if (!err)
-- err = percpu_counter_init(&sbi->s_freeinodes_counter,
-- ext4_count_free_inodes(sb));
-- if (!err)
-- err = percpu_counter_init(&sbi->s_dirs_counter,
-- ext4_count_dirs(sb));
-- if (!err)
-- err = percpu_counter_init(&sbi->s_dirtyblocks_counter, 0);
-- if (err) {
-- ext4_msg(sb, KERN_ERR, "insufficient memory");
-- goto failed_mount_wq;
-- }
- if (test_opt(sb, NOBH)) {
- if (!(test_opt(sb, DATA_FLAGS) == EXT4_MOUNT_WRITEBACK_DATA)) {
- ext4_msg(sb, KERN_WARNING, "Ignoring nobh option - "
-@@ -2955,7 +3011,9 @@ no_journal:
- } else
- descr = "out journal";
-
-- ext4_msg(sb, KERN_INFO, "mounted filesystem with%s", descr);
-+ ext4_msg(sb, KERN_INFO, "mounted filesystem with%s. "
-+ "Opts: %s%s", descr, sbi->s_es->s_mount_opts,
-+ *sbi->s_es->s_mount_opts ? "; " : "");
-
- lock_kernel();
- return 0;
-@@ -2974,10 +3032,6 @@ failed_mount_wq:
- jbd2_journal_destroy(sbi->s_journal);
- sbi->s_journal = NULL;
- }
-- percpu_counter_destroy(&sbi->s_freeblocks_counter);
-- percpu_counter_destroy(&sbi->s_freeinodes_counter);
-- percpu_counter_destroy(&sbi->s_dirs_counter);
-- percpu_counter_destroy(&sbi->s_dirtyblocks_counter);
- failed_mount3:
- if (sbi->s_flex_groups) {
- if (is_vmalloc_addr(sbi->s_flex_groups))
-@@ -2985,6 +3039,10 @@ failed_mount3:
- else
- kfree(sbi->s_flex_groups);
- }
-+ percpu_counter_destroy(&sbi->s_freeblocks_counter);
-+ percpu_counter_destroy(&sbi->s_freeinodes_counter);
-+ percpu_counter_destroy(&sbi->s_dirs_counter);
-+ percpu_counter_destroy(&sbi->s_dirtyblocks_counter);
- failed_mount2:
- for (i = 0; i < db_count; i++)
- brelse(sbi->s_group_desc[i]);
-@@ -3377,9 +3435,9 @@ static void ext4_clear_journal_err(struc
- char nbuf[16];
-
- errstr = ext4_decode_error(sb, j_errno, nbuf);
-- ext4_warning(sb, __func__, "Filesystem error recorded "
-+ ext4_warning(sb, "Filesystem error recorded "
- "from previous mount: %s", errstr);
-- ext4_warning(sb, __func__, "Marking fs in need of "
-+ ext4_warning(sb, "Marking fs in need of "
- "filesystem check.");
-
- EXT4_SB(sb)->s_mount_state |= EXT4_ERROR_FS;
-@@ -4007,11 +4065,21 @@ static struct file_system_type ext4_fs_t
- .fs_flags = FS_REQUIRES_DEV,
- };
-
-+static int __init ext4_init_feat_adverts(void)
-+{
-+ return 0;
-+}
-+
-+static void ext4_exit_feat_adverts(void)
-+{
-+}
-+
- static int __init init_ext4_fs(void)
- {
- int err;
-
- ext4_check_flag_values();
-+
- err = init_ext4_system_zone();
- if (err)
- return err;
-@@ -4019,6 +4087,9 @@ static int __init init_ext4_fs(void)
- if (!ext4_kset)
- goto out4;
- ext4_proc_root = proc_mkdir("fs/ext4", NULL);
-+
-+ err = ext4_init_feat_adverts();
-+
- err = init_ext4_mballoc();
- if (err)
- goto out3;
-@@ -4032,6 +4103,9 @@ static int __init init_ext4_fs(void)
- err = register_filesystem(&ext4_fs_type);
- if (err)
- goto out;
-+
-+ ext4_li_info = NULL;
-+ mutex_init(&ext4_li_mtx);
- return 0;
- out:
- destroy_inodecache();
-@@ -4040,6 +4114,7 @@ out1:
- out2:
- exit_ext4_mballoc();
- out3:
-+ ext4_exit_feat_adverts();
- remove_proc_entry("fs/ext4", NULL);
- kset_unregister(ext4_kset);
- out4:
-@@ -4053,6 +4128,7 @@ static void __exit exit_ext4_fs(void)
- destroy_inodecache();
- exit_ext4_xattr();
- exit_ext4_mballoc();
-+ ext4_exit_feat_adverts();
- remove_proc_entry("fs/ext4", NULL);
- kset_unregister(ext4_kset);
- exit_ext4_system_zone();
-diff -urpN linux-stage.orig/fs/ext4/xattr.c linux-stage/fs/ext4/xattr.c
---- linux-stage.orig/fs/ext4/xattr.c 2012-07-02 12:07:23.000000000 -0400
-+++ linux-stage/fs/ext4/xattr.c 2012-07-02 12:07:57.000000000 -0400
-@@ -227,7 +227,7 @@ ext4_xattr_block_get(struct inode *inode
- ea_bdebug(bh, "b_count=%d, refcount=%d",
- atomic_read(&(bh->b_count)), le32_to_cpu(BHDR(bh)->h_refcount));
- if (ext4_xattr_check_block(bh)) {
--bad_block: ext4_error(inode->i_sb, __func__,
-+bad_block: ext4_error(inode->i_sb,
- "inode %lu: bad block %llu", inode->i_ino,
- EXT4_I(inode)->i_file_acl);
- error = -EIO;
-@@ -369,7 +369,7 @@ ext4_xattr_block_list(struct inode *inod
- ea_bdebug(bh, "b_count=%d, refcount=%d",
- atomic_read(&(bh->b_count)), le32_to_cpu(BHDR(bh)->h_refcount));
- if (ext4_xattr_check_block(bh)) {
-- ext4_error(inode->i_sb, __func__,
-+ ext4_error(inode->i_sb,
- "inode %lu: bad block %llu", inode->i_ino,
- EXT4_I(inode)->i_file_acl);
- error = -EIO;
-@@ -661,7 +661,7 @@ ext4_xattr_block_find(struct inode *inod
- atomic_read(&(bs->bh->b_count)),
- le32_to_cpu(BHDR(bs->bh)->h_refcount));
- if (ext4_xattr_check_block(bs->bh)) {
-- ext4_error(sb, __func__,
-+ ext4_error(sb,
- "inode %lu: bad block %llu", inode->i_ino,
- EXT4_I(inode)->i_file_acl);
- error = -EIO;
-@@ -875,7 +875,7 @@ cleanup_dquot:
- goto cleanup;
-
- bad_block:
-- ext4_error(inode->i_sb, __func__,
-+ ext4_error(inode->i_sb,
- "inode %lu: bad block %llu", inode->i_ino,
- EXT4_I(inode)->i_file_acl);
- goto cleanup;
-@@ -1190,7 +1190,7 @@ retry:
- if (!bh)
- goto cleanup;
- if (ext4_xattr_check_block(bh)) {
-- ext4_error(inode->i_sb, __func__,
-+ ext4_error(inode->i_sb,
- "inode %lu: bad block %llu", inode->i_ino,
- EXT4_I(inode)->i_file_acl);
- error = -EIO;
-@@ -1367,14 +1367,14 @@ ext4_xattr_delete_inode(handle_t *handle
- goto cleanup;
- bh = sb_bread(inode->i_sb, EXT4_I(inode)->i_file_acl);
- if (!bh) {
-- ext4_error(inode->i_sb, __func__,
-+ ext4_error(inode->i_sb,
- "inode %lu: block %llu read error", inode->i_ino,
- EXT4_I(inode)->i_file_acl);
- goto cleanup;
- }
- if (BHDR(bh)->h_magic != cpu_to_le32(EXT4_XATTR_MAGIC) ||
- BHDR(bh)->h_blocks != cpu_to_le32(1)) {
-- ext4_error(inode->i_sb, __func__,
-+ ext4_error(inode->i_sb,
- "inode %lu: bad block %llu", inode->i_ino,
- EXT4_I(inode)->i_file_acl);
- goto cleanup;
-@@ -1501,7 +1501,7 @@ again:
- }
- bh = sb_bread(inode->i_sb, ce->e_block);
- if (!bh) {
-- ext4_error(inode->i_sb, __func__,
-+ ext4_error(inode->i_sb,
- "inode %lu: block %lu read error",
- inode->i_ino, (unsigned long) ce->e_block);
- } else if (le32_to_cpu(BHDR(bh)->h_refcount) >=