From: Yang Sheng Date: Mon, 22 Jun 2015 12:57:45 +0000 (-0400) Subject: LU-4416 ldiskfs: enable support for SLES12 X-Git-Tag: 2.7.57~43 X-Git-Url: https://git.whamcloud.com/?p=fs%2Flustre-release.git;a=commitdiff_plain;h=9067f1b50aaeae2a79dfa9f777f96637c3d4bb05 LU-4416 ldiskfs: enable support for SLES12 This patch adds support for the 3.12-based SLE12 kernel. Additonally this work separates out the basic needed patches for the upstream 3.12 kernel which can be shared with other distros as well as future upstream kerrnels. This will help with book keeping ldiskfs patches that need to be pushed upstream. kernel version SLES12 [3.12.43-52.6] Signed-off-by: Jeff Mahoney Signed-off-by: James Simmons Change-Id: I2b8d83cddf8fd27617657348ea6d7c3a48e9472c Signed-off-by: Yang Sheng Reviewed-on: http://review.whamcloud.com/10165 Tested-by: Jenkins Tested-by: Maloo Reviewed-by: Andreas Dilger Reviewed-by: Oleg Drokin --- diff --git a/config/lustre-build-ldiskfs.m4 b/config/lustre-build-ldiskfs.m4 index 6f1ae82..2217f50 100644 --- a/config/lustre-build-ldiskfs.m4 +++ b/config/lustre-build-ldiskfs.m4 @@ -13,6 +13,7 @@ AS_IF([test x$RHEL_KERNEL = xyes], [ 6[0-3]) LDISKFS_SERIES="2.6-rhel6.series" ;; esac ], [test x$SUSE_KERNEL = xyes], [ + AS_VERSION_COMPARE([$LINUXRELEASE],[3.12.0],[ AS_VERSION_COMPARE([$LINUXRELEASE],[3.0.0],[ AS_VERSION_COMPARE([$LINUXRELEASE],[2.6.32], [], [LDISKFS_SERIES="2.6-sles11.series"],[LDISKFS_SERIES="2.6-sles11.series"])], @@ -24,7 +25,7 @@ AS_IF([test x$RHEL_KERNEL = xyes], [ 3) LDISKFS_SERIES="3.0-sles11sp3.series" ;; esac - ]) + ])],[LDISKFS_SERIES="3.12-sles12.series"],[LDISKFS_SERIES="3.12-sles12.series"]) ]) AS_IF([test -z "$LDISKFS_SERIES"], [AC_MSG_WARN([Unknown kernel version $LDISKFS_VERSIONRELEASE])]) diff --git a/ldiskfs/kernel_patches/patches/sles12/ext4-data-in-dirent.patch b/ldiskfs/kernel_patches/patches/sles12/ext4-data-in-dirent.patch new file mode 100644 index 0000000..b8e15ec --- /dev/null +++ b/ldiskfs/kernel_patches/patches/sles12/ext4-data-in-dirent.patch @@ -0,0 +1,732 @@ +this patch implements feature which allows ext4 fs users (e.g. Lustre) +to store data in ext4 dirent. +data is stored in ext4 dirent after file-name, this space is accounted +in de->rec_len. flag EXT4_DIRENT_LUFID added to d_type if extra data +is present. + +make use of dentry->d_fsdata to pass fid to ext4. so no +changes in ext4_add_entry() interface required. + +Index: linux-3.12.39-47.1/fs/ext4/dir.c +=================================================================== +--- linux-3.12.39-47.1.orig/fs/ext4/dir.c ++++ linux-3.12.39-47.1/fs/ext4/dir.c +@@ -70,11 +70,11 @@ int __ext4_check_dir_entry(const char *f + const int rlen = ext4_rec_len_from_disk(de->rec_len, + dir->i_sb->s_blocksize); + +- if (unlikely(rlen < EXT4_DIR_REC_LEN(1))) ++ if (unlikely(rlen < __EXT4_DIR_REC_LEN(1))) + error_msg = "rec_len is smaller than minimal"; + else if (unlikely(rlen % 4 != 0)) + error_msg = "rec_len % 4 != 0"; +- else if (unlikely(rlen < EXT4_DIR_REC_LEN(de->name_len))) ++ else if (unlikely(rlen < EXT4_DIR_REC_LEN(de))) + error_msg = "rec_len is too small for name_len"; + else if (unlikely(((char *) de - buf) + rlen > size)) + error_msg = "directory entry across range"; +@@ -202,7 +202,7 @@ static int ext4_readdir(struct file *fil + * failure will be detected in the + * dirent test below. 
*/ + if (ext4_rec_len_from_disk(de->rec_len, +- sb->s_blocksize) < EXT4_DIR_REC_LEN(1)) ++ sb->s_blocksize) < __EXT4_DIR_REC_LEN(1)) + break; + i += ext4_rec_len_from_disk(de->rec_len, + sb->s_blocksize); +@@ -421,12 +421,17 @@ int ext4_htree_store_dirent(struct file + struct fname *fname, *new_fn; + struct dir_private_info *info; + int len; ++ int extra_data = 0; + + info = dir_file->private_data; + p = &info->root.rb_node; + + /* Create and allocate the fname structure */ +- len = sizeof(struct fname) + dirent->name_len + 1; ++ if (dirent->file_type & EXT4_DIRENT_LUFID) ++ extra_data = ext4_get_dirent_data_len(dirent); ++ ++ len = sizeof(struct fname) + dirent->name_len + extra_data + 1; ++ + new_fn = kzalloc(len, GFP_KERNEL); + if (!new_fn) + return -ENOMEM; +@@ -435,7 +440,7 @@ int ext4_htree_store_dirent(struct file + new_fn->inode = le32_to_cpu(dirent->inode); + new_fn->name_len = dirent->name_len; + new_fn->file_type = dirent->file_type; +- memcpy(new_fn->name, dirent->name, dirent->name_len); ++ memcpy(new_fn->name, dirent->name, dirent->name_len + extra_data); + new_fn->name[dirent->name_len] = 0; + + while (*p) { +Index: linux-3.12.39-47.1/fs/ext4/ext4.h +=================================================================== +--- linux-3.12.39-47.1.orig/fs/ext4/ext4.h ++++ linux-3.12.39-47.1/fs/ext4/ext4.h +@@ -960,6 +960,7 @@ struct ext4_inode_info { + #define EXT4_MOUNT_ERRORS_MASK 0x00070 + #define EXT4_MOUNT_MINIX_DF 0x00080 /* Mimics the Minix statfs */ + #define EXT4_MOUNT_NOLOAD 0x00100 /* Don't use existing journal*/ ++#define EXT4_MOUNT_DIRDATA 0x00200 /* Data in directory entries*/ + #define EXT4_MOUNT_DATA_FLAGS 0x00C00 /* Mode for data writes: */ + #define EXT4_MOUNT_JOURNAL_DATA 0x00400 /* Write data to journal */ + #define EXT4_MOUNT_ORDERED_DATA 0x00800 /* Flush data before commit */ +@@ -1538,6 +1539,7 @@ static inline void ext4_clear_state_flag + EXT4_FEATURE_INCOMPAT_64BIT| \ + EXT4_FEATURE_INCOMPAT_FLEX_BG| \ + EXT4_FEATURE_INCOMPAT_MMP | \ ++ EXT4_FEATURE_INCOMPAT_DIRDATA| \ + EXT4_FEATURE_INCOMPAT_INLINE_DATA) + #define EXT4_FEATURE_RO_COMPAT_SUPP (EXT4_FEATURE_RO_COMPAT_SPARSE_SUPER| \ + EXT4_FEATURE_RO_COMPAT_LARGE_FILE| \ +@@ -1644,6 +1646,43 @@ struct ext4_dir_entry_tail { + #define EXT4_FT_SYMLINK 7 + + #define EXT4_FT_MAX 8 ++#define EXT4_FT_MASK 0xf ++ ++#if EXT4_FT_MAX > EXT4_FT_MASK ++#error "conflicting EXT4_FT_MAX and EXT4_FT_MASK" ++#endif ++ ++/* ++ * d_type has 4 unused bits, so it can hold four types data. these different ++ * type of data (e.g. lustre data, high 32 bits of 64-bit inode number) can be ++ * stored, in flag order, after file-name in ext4 dirent. ++*/ ++/* ++ * this flag is added to d_type if ext4 dirent has extra data after ++ * filename. this data length is variable and length is stored in first byte ++ * of data. data start after filename NUL byte. ++ * This is used by Lustre FS. 
++ */ ++#define EXT4_DIRENT_LUFID 0x10 ++ ++#define EXT4_LUFID_MAGIC 0xAD200907UL ++struct ext4_dentry_param { ++ __u32 edp_magic; /* EXT4_LUFID_MAGIC */ ++ char edp_len; /* size of edp_data in bytes */ ++ char edp_data[0]; /* packed array of data */ ++} __packed; ++ ++static inline unsigned char *ext4_dentry_get_data(struct super_block *sb, ++ struct ext4_dentry_param *p) ++ ++{ ++ if (!EXT4_HAS_INCOMPAT_FEATURE(sb, EXT4_FEATURE_INCOMPAT_DIRDATA)) ++ return NULL; ++ if (p && p->edp_magic == EXT4_LUFID_MAGIC) ++ return &p->edp_len; ++ else ++ return NULL; ++} + + #define EXT4_FT_DIR_CSUM 0xDE + +@@ -1654,8 +1693,11 @@ struct ext4_dir_entry_tail { + */ + #define EXT4_DIR_PAD 4 + #define EXT4_DIR_ROUND (EXT4_DIR_PAD - 1) +-#define EXT4_DIR_REC_LEN(name_len) (((name_len) + 8 + EXT4_DIR_ROUND) & \ ++#define __EXT4_DIR_REC_LEN(name_len) (((name_len) + 8 + EXT4_DIR_ROUND) & \ + ~EXT4_DIR_ROUND) ++#define EXT4_DIR_REC_LEN(de) (__EXT4_DIR_REC_LEN((de)->name_len +\ ++ ext4_get_dirent_data_len(de))) ++ + #define EXT4_MAX_REC_LEN ((1<<16)-1) + + /* +@@ -1991,11 +2033,11 @@ extern int ext4_find_dest_de(struct inod + struct buffer_head *bh, + void *buf, int buf_size, + const char *name, int namelen, +- struct ext4_dir_entry_2 **dest_de); ++ struct ext4_dir_entry_2 **dest_de, int *dlen); + void ext4_insert_dentry(struct inode *inode, + struct ext4_dir_entry_2 *de, + int buf_size, +- const char *name, int namelen); ++ const char *name, int namelen, void *data); + static inline void ext4_update_dx_flag(struct inode *inode) + { + if (!EXT4_HAS_COMPAT_FEATURE(inode->i_sb, +@@ -2008,11 +2050,18 @@ static unsigned char ext4_filetype_table + + static inline unsigned char get_dtype(struct super_block *sb, int filetype) + { ++ int fl_index = filetype & EXT4_FT_MASK; ++ + if (!EXT4_HAS_INCOMPAT_FEATURE(sb, EXT4_FEATURE_INCOMPAT_FILETYPE) || +- (filetype >= EXT4_FT_MAX)) ++ (fl_index >= EXT4_FT_MAX)) + return DT_UNKNOWN; + +- return ext4_filetype_table[filetype]; ++ if (!test_opt(sb, DIRDATA)) ++ return ext4_filetype_table[fl_index]; ++ ++ return (ext4_filetype_table[fl_index]) | ++ (filetype & EXT4_DIRENT_LUFID); ++ + } + + /* fsync.c */ +@@ -2159,7 +2208,7 @@ extern int ext4_delete_entry(handle_t *h + struct ext4_dir_entry_2 *de_del, + struct buffer_head *bh); + extern int ext4_add_dot_dotdot(handle_t *handle, struct inode *dir, +- struct inode *inode); ++ struct inode *inode, const void *, const void *); + extern int search_dir(struct buffer_head *bh, + char *search_buf, + int buf_size, +@@ -2836,6 +2885,28 @@ extern struct mutex ext4__aio_mutex[EXT4 + extern int ext4_resize_begin(struct super_block *sb); + extern void ext4_resize_end(struct super_block *sb); + ++/* ++ * Compute the total directory entry data length. ++ * This includes the filename and an implicit NUL terminator (always present), ++ * and optional extensions. Each extension has a bit set in the high 4 bits of ++ * de->file_type, and the extension length is the first byte in each entry. 
++ */ ++static inline int ext4_get_dirent_data_len(struct ext4_dir_entry_2 *de) ++{ ++ char *len = de->name + de->name_len + 1 /* NUL terminator */; ++ int dlen = 0; ++ __u8 extra_data_flags = (de->file_type & ~EXT4_FT_MASK) >> 4; ++ ++ while (extra_data_flags) { ++ if (extra_data_flags & 1) { ++ dlen += *len + (dlen == 0); ++ len += *len; ++ } ++ extra_data_flags >>= 1; ++ } ++ return dlen; ++} ++ + #endif /* __KERNEL__ */ + + #endif /* _EXT4_H */ +Index: linux-3.12.39-47.1/fs/ext4/namei.c +=================================================================== +--- linux-3.12.39-47.1.orig/fs/ext4/namei.c ++++ linux-3.12.39-47.1/fs/ext4/namei.c +@@ -239,7 +239,8 @@ static unsigned dx_get_count(struct dx_e + static unsigned dx_get_limit(struct dx_entry *entries); + static void dx_set_count(struct dx_entry *entries, unsigned value); + static void dx_set_limit(struct dx_entry *entries, unsigned value); +-static unsigned dx_root_limit(struct inode *dir, unsigned infosize); ++static inline unsigned dx_root_limit(struct inode *dir, ++ struct ext4_dir_entry_2 *dot_de, unsigned infosize); + static unsigned dx_node_limit(struct inode *dir); + static struct dx_frame *dx_probe(const struct qstr *d_name, + struct inode *dir, +@@ -500,11 +501,12 @@ ext4_next_entry(struct ext4_dir_entry_2 + */ + struct dx_root_info *dx_get_dx_info(struct ext4_dir_entry_2 *de) + { ++ BUG_ON(de->name_len != 1); + /* get dotdot first */ +- de = (struct ext4_dir_entry_2 *)((char *)de + EXT4_DIR_REC_LEN(1)); ++ de = (struct ext4_dir_entry_2 *)((char *)de + EXT4_DIR_REC_LEN(de)); + + /* dx root info is after dotdot entry */ +- de = (struct ext4_dir_entry_2 *)((char *)de + EXT4_DIR_REC_LEN(2)); ++ de = (struct ext4_dir_entry_2 *)((char *)de + EXT4_DIR_REC_LEN(de)); + + return (struct dx_root_info *)de; + } +@@ -549,10 +551,16 @@ static inline void dx_set_limit(struct d + ((struct dx_countlimit *) entries)->limit = cpu_to_le16(value); + } + +-static inline unsigned dx_root_limit(struct inode *dir, unsigned infosize) ++static inline unsigned dx_root_limit(struct inode *dir, ++ struct ext4_dir_entry_2 *dot_de, unsigned infosize) + { +- unsigned entry_space = dir->i_sb->s_blocksize - EXT4_DIR_REC_LEN(1) - +- EXT4_DIR_REC_LEN(2) - infosize; ++ struct ext4_dir_entry_2 *dotdot_de; ++ unsigned entry_space; ++ ++ BUG_ON(dot_de->name_len != 1); ++ dotdot_de = ext4_next_entry(dot_de, dir->i_sb->s_blocksize); ++ entry_space = dir->i_sb->s_blocksize - EXT4_DIR_REC_LEN(dot_de) - ++ EXT4_DIR_REC_LEN(dotdot_de) - infosize; + + if (ext4_has_metadata_csum(dir->i_sb)) + entry_space -= sizeof(struct dx_tail); +@@ -561,7 +569,7 @@ static inline unsigned dx_root_limit(str + + static inline unsigned dx_node_limit(struct inode *dir) + { +- unsigned entry_space = dir->i_sb->s_blocksize - EXT4_DIR_REC_LEN(0); ++ unsigned entry_space = dir->i_sb->s_blocksize - __EXT4_DIR_REC_LEN(0); + + if (ext4_has_metadata_csum(dir->i_sb)) + entry_space -= sizeof(struct dx_tail); +@@ -611,7 +619,7 @@ static struct stats dx_show_leaf(struct + printk(":%x.%u ", h.hash, + (unsigned) ((char *) de - base)); + } +- space += EXT4_DIR_REC_LEN(de->name_len); ++ space += EXT4_DIR_REC_LEN(de); + names++; + } + de = ext4_next_entry(de, size); +@@ -719,12 +727,15 @@ dx_probe(const struct qstr *d_name, stru + + entries = (struct dx_entry *)(((char *)info) + info->info_length); + +- if (dx_get_limit(entries) != dx_root_limit(dir, +- info->info_length)) { ++ if (dx_get_limit(entries) != ++ dx_root_limit(dir, (struct ext4_dir_entry_2 *)bh->b_data, ++ info->info_length)) { + 
ext4_warning(dir->i_sb, "dx entry: limit != root limit " + "inode #%lu: dx entry: limit %u != root limit %u", + dir->i_ino, dx_get_limit(entries), +- dx_root_limit(dir, info->info_length)); ++ dx_root_limit(dir, ++ (struct ext4_dir_entry_2 *)bh->b_data, ++ info->info_length)); + brelse(bh); + *err = ERR_BAD_DX_DIR; + goto fail; +@@ -916,7 +927,7 @@ static int htree_dirblock_to_tree(struct + de = (struct ext4_dir_entry_2 *) bh->b_data; + top = (struct ext4_dir_entry_2 *) ((char *) de + + dir->i_sb->s_blocksize - +- EXT4_DIR_REC_LEN(0)); ++ __EXT4_DIR_REC_LEN(0)); + for (; de < top; de = ext4_next_entry(de, dir->i_sb->s_blocksize)) { + if (ext4_check_dir_entry(dir, NULL, de, bh, + bh->b_data, bh->b_size, +@@ -1508,7 +1519,7 @@ dx_move_dirents(char *from, char *to, st + while (count--) { + struct ext4_dir_entry_2 *de = (struct ext4_dir_entry_2 *) + (from + (map->offs<<2)); +- rec_len = EXT4_DIR_REC_LEN(de->name_len); ++ rec_len = EXT4_DIR_REC_LEN(de); + memcpy (to, de, rec_len); + ((struct ext4_dir_entry_2 *) to)->rec_len = + ext4_rec_len_to_disk(rec_len, blocksize); +@@ -1532,7 +1543,7 @@ static struct ext4_dir_entry_2* dx_pack_ + while ((char*)de < base + blocksize) { + next = ext4_next_entry(de, blocksize); + if (de->inode && de->name_len) { +- rec_len = EXT4_DIR_REC_LEN(de->name_len); ++ rec_len = EXT4_DIR_REC_LEN(de); + if (de > to) + memmove(to, de, rec_len); + to->rec_len = ext4_rec_len_to_disk(rec_len, blocksize); +@@ -1663,14 +1674,16 @@ int ext4_find_dest_de(struct inode *dir, + struct buffer_head *bh, + void *buf, int buf_size, + const char *name, int namelen, +- struct ext4_dir_entry_2 **dest_de) ++ struct ext4_dir_entry_2 **dest_de, int *dlen) + { + struct ext4_dir_entry_2 *de; +- unsigned short reclen = EXT4_DIR_REC_LEN(namelen); ++ unsigned short reclen = __EXT4_DIR_REC_LEN(namelen) + ++ (dlen ? *dlen : 0); + int nlen, rlen; + unsigned int offset = 0; + char *top; + ++ dlen ? *dlen = 0 : 0; /* default set to 0 */ + de = (struct ext4_dir_entry_2 *)buf; + top = buf + buf_size - reclen; + while ((char *) de <= top) { +@@ -1679,10 +1692,26 @@ int ext4_find_dest_de(struct inode *dir, + return -EIO; + if (ext4_match(namelen, name, de)) + return -EEXIST; +- nlen = EXT4_DIR_REC_LEN(de->name_len); ++ nlen = EXT4_DIR_REC_LEN(de); + rlen = ext4_rec_len_from_disk(de->rec_len, buf_size); + if ((de->inode ? rlen - nlen : rlen) >= reclen) + break; ++ /* Then for dotdot entries, check for the smaller space ++ * required for just the entry, no FID */ ++ if (namelen == 2 && memcmp(name, "..", 2) == 0) { ++ if ((de->inode ? rlen - nlen : rlen) >= ++ __EXT4_DIR_REC_LEN(namelen)) { ++ /* set dlen=1 to indicate not ++ * enough space store fid */ ++ dlen ? *dlen = 1 : 0; ++ break; ++ } ++ /* The new ".." entry must be written over the ++ * previous ".." entry, which is the first ++ * entry traversed by this scan. If it doesn't ++ * fit, something is badly wrong, so -EIO. 
*/ ++ return -EIO; ++ } + de = (struct ext4_dir_entry_2 *)((char *)de + rlen); + offset += rlen; + } +@@ -1696,12 +1725,12 @@ int ext4_find_dest_de(struct inode *dir, + void ext4_insert_dentry(struct inode *inode, + struct ext4_dir_entry_2 *de, + int buf_size, +- const char *name, int namelen) ++ const char *name, int namelen, void *data) + { + + int nlen, rlen; + +- nlen = EXT4_DIR_REC_LEN(de->name_len); ++ nlen = EXT4_DIR_REC_LEN(de); + rlen = ext4_rec_len_from_disk(de->rec_len, buf_size); + if (de->inode) { + struct ext4_dir_entry_2 *de1 = +@@ -1715,6 +1744,11 @@ void ext4_insert_dentry(struct inode *in + ext4_set_de_type(inode->i_sb, de, inode->i_mode); + de->name_len = namelen; + memcpy(de->name, name, namelen); ++ if (data) { ++ de->name[namelen] = 0; ++ memcpy(&de->name[namelen + 1], data, *(char *)data); ++ de->file_type |= EXT4_DIRENT_LUFID; ++ } + } + /* + * Add a new entry into a directory (leaf) block. If de is non-NULL, +@@ -1733,15 +1767,20 @@ static int add_dirent_to_buf(handle_t *h + int namelen = dentry->d_name.len; + unsigned int blocksize = dir->i_sb->s_blocksize; + int csum_size = 0; +- int err; ++ int err, dlen = 0; ++ unsigned char *data; + ++ data = ext4_dentry_get_data(inode->i_sb, (struct ext4_dentry_param *) ++ dentry->d_fsdata); + if (ext4_has_metadata_csum(inode->i_sb)) + csum_size = sizeof(struct ext4_dir_entry_tail); + + if (!de) { ++ if (data) ++ dlen = (*data) + 1; + err = ext4_find_dest_de(dir, inode, + bh, bh->b_data, blocksize - csum_size, +- name, namelen, &de); ++ name, namelen, &de, &dlen); + if (err) + return err; + } +@@ -1753,7 +1792,10 @@ static int add_dirent_to_buf(handle_t *h + } + + /* By now the buffer is marked for journaling */ +- ext4_insert_dentry(inode, de, blocksize, name, namelen); ++ /* If writing the short form of "dotdot", don't add the data section */ ++ if (dlen == 1) ++ data = NULL; ++ ext4_insert_dentry(inode, de, blocksize, name, namelen, data); + + /* + * XXX shouldn't update any times until successful +@@ -1864,7 +1906,8 @@ static int make_indexed_dir(handle_t *ha + + dx_set_block(entries, 1); + dx_set_count(entries, 1); +- dx_set_limit(entries, dx_root_limit(dir, sizeof(*dx_info))); ++ dx_set_limit(entries, dx_root_limit(dir, ++ dot_de, sizeof(*dx_info))); + + /* Initialize as for dx_probe */ + hinfo.hash_version = dx_info->hash_version; +@@ -1907,6 +1950,8 @@ static int ext4_update_dotdot(handle_t * + struct buffer_head *dir_block; + struct ext4_dir_entry_2 *de; + int len, journal = 0, err = 0; ++ int dlen = 0; ++ char *data; + + if (IS_ERR(handle)) + return PTR_ERR(handle); +@@ -1922,19 +1967,24 @@ static int ext4_update_dotdot(handle_t * + /* the first item must be "." 
*/ + assert(de->name_len == 1 && de->name[0] == '.'); + len = le16_to_cpu(de->rec_len); +- assert(len >= EXT4_DIR_REC_LEN(1)); +- if (len > EXT4_DIR_REC_LEN(1)) { ++ assert(len >= __EXT4_DIR_REC_LEN(1)); ++ if (len > __EXT4_DIR_REC_LEN(1)) { + BUFFER_TRACE(dir_block, "get_write_access"); + err = ext4_journal_get_write_access(handle, dir_block); + if (err) + goto out_journal; + + journal = 1; +- de->rec_len = cpu_to_le16(EXT4_DIR_REC_LEN(1)); ++ de->rec_len = cpu_to_le16(EXT4_DIR_REC_LEN(de)); + } + +- len -= EXT4_DIR_REC_LEN(1); +- assert(len == 0 || len >= EXT4_DIR_REC_LEN(2)); ++ len -= EXT4_DIR_REC_LEN(de); ++ data = ext4_dentry_get_data(dir->i_sb, ++ (struct ext4_dentry_param *)dentry->d_fsdata); ++ if (data) ++ dlen = *data + 1; ++ assert(len == 0 || len >= __EXT4_DIR_REC_LEN(2 + dlen)); ++ + de = (struct ext4_dir_entry_2 *) + ((char *) de + le16_to_cpu(de->rec_len)); + if (!journal) { +@@ -1948,10 +1998,15 @@ static int ext4_update_dotdot(handle_t * + if (len > 0) + de->rec_len = cpu_to_le16(len); + else +- assert(le16_to_cpu(de->rec_len) >= EXT4_DIR_REC_LEN(2)); ++ assert(le16_to_cpu(de->rec_len) >= __EXT4_DIR_REC_LEN(2)); + de->name_len = 2; + strcpy(de->name, ".."); +- ext4_set_de_type(dir->i_sb, de, S_IFDIR); ++ if (data != NULL && ext4_get_dirent_data_len(de) >= dlen) { ++ de->name[2] = 0; ++ memcpy(&de->name[2 + 1], data, *data); ++ ext4_set_de_type(dir->i_sb, de, S_IFDIR); ++ de->file_type |= EXT4_DIRENT_LUFID; ++ } + + out_journal: + if (journal) { +@@ -2461,37 +2516,70 @@ err_unlock_inode: + return err; + } + ++struct tp_block { ++ struct inode *inode; ++ void *data1; ++ void *data2; ++}; ++ + struct ext4_dir_entry_2 *ext4_init_dot_dotdot(struct inode *inode, + struct ext4_dir_entry_2 *de, + int blocksize, int csum_size, + unsigned int parent_ino, int dotdot_real_len) + { ++ void *data1 = NULL, *data2 = NULL; ++ int dot_reclen = 0; ++ ++ if (dotdot_real_len == 10) { ++ struct tp_block *tpb = (struct tp_block *)inode; ++ data1 = tpb->data1; ++ data2 = tpb->data2; ++ inode = tpb->inode; ++ dotdot_real_len = 0; ++ } + de->inode = cpu_to_le32(inode->i_ino); + de->name_len = 1; +- de->rec_len = ext4_rec_len_to_disk(EXT4_DIR_REC_LEN(de->name_len), +- blocksize); + strcpy(de->name, "."); + ext4_set_de_type(inode->i_sb, de, S_IFDIR); + ++ /* get packed fid data*/ ++ data1 = ext4_dentry_get_data(inode->i_sb, ++ (struct ext4_dentry_param *) data1); ++ if (data1) { ++ de->name[1] = 0; ++ memcpy(&de->name[2], data1, *(char *) data1); ++ de->file_type |= EXT4_DIRENT_LUFID; ++ } ++ de->rec_len = cpu_to_le16(EXT4_DIR_REC_LEN(de)); ++ dot_reclen = cpu_to_le16(de->rec_len); + de = ext4_next_entry(de, blocksize); + de->inode = cpu_to_le32(parent_ino); + de->name_len = 2; ++ strcpy(de->name, ".."); ++ ext4_set_de_type(inode->i_sb, de, S_IFDIR); ++ data2 = ext4_dentry_get_data(inode->i_sb, ++ (struct ext4_dentry_param *) data2); ++ if (data2) { ++ de->name[2] = 0; ++ memcpy(&de->name[3], data2, *(char *) data2); ++ de->file_type |= EXT4_DIRENT_LUFID; ++ } + if (!dotdot_real_len) + de->rec_len = ext4_rec_len_to_disk(blocksize - +- (csum_size + EXT4_DIR_REC_LEN(1)), ++ (csum_size + dot_reclen), + blocksize); + else + de->rec_len = ext4_rec_len_to_disk( +- EXT4_DIR_REC_LEN(de->name_len), blocksize); +- strcpy(de->name, ".."); +- ext4_set_de_type(inode->i_sb, de, S_IFDIR); ++ EXT4_DIR_REC_LEN(de), blocksize); + + return ext4_next_entry(de, blocksize); + } + + static int ext4_init_new_dir(handle_t *handle, struct inode *dir, +- struct inode *inode) ++ struct inode *inode, ++ const void *data1, const 
void *data2) + { ++ struct tp_block param; + struct buffer_head *dir_block = NULL; + struct ext4_dir_entry_2 *de; + struct ext4_dir_entry_tail *t; +@@ -2520,7 +2608,11 @@ static int ext4_init_new_dir(handle_t *h + if (err) + goto out; + de = (struct ext4_dir_entry_2 *)dir_block->b_data; +- ext4_init_dot_dotdot(inode, de, blocksize, csum_size, dir->i_ino, 0); ++ param.inode = inode; ++ param.data1 = (void *)data1; ++ param.data2 = (void *)data2; ++ ext4_init_dot_dotdot((struct inode *)(¶m), de, blocksize, ++ csum_size, dir->i_ino, 10); + set_nlink(inode, 2); + if (csum_size) { + t = EXT4_DIRENT_TAIL(dir_block->b_data, blocksize); +@@ -2540,7 +2632,8 @@ out: + /* Initialize @inode as a subdirectory of @dir, and add the + * "." and ".." entries into the first directory block. */ + int ext4_add_dot_dotdot(handle_t *handle, struct inode *dir, +- struct inode *inode) ++ struct inode *inode, ++ const void *data1, const void *data2) + { + if (IS_ERR(handle)) + return PTR_ERR(handle); +@@ -2550,7 +2643,7 @@ int ext4_add_dot_dotdot(handle_t *handle + + inode->i_op = &ext4_dir_inode_operations; + inode->i_fop = &ext4_dir_operations; +- return ext4_init_new_dir(handle, dir, inode); ++ return ext4_init_new_dir(handle, dir, inode, data1, data2); + } + EXPORT_SYMBOL(ext4_add_dot_dotdot); + +@@ -2578,7 +2671,7 @@ retry: + + inode->i_op = &ext4_dir_inode_operations; + inode->i_fop = &ext4_dir_operations; +- err = ext4_init_new_dir(handle, dir, inode); ++ err = ext4_init_new_dir(handle, dir, inode, NULL, NULL); + if (err) + goto out_clear_inode; + err = ext4_mark_inode_dirty(handle, inode); +@@ -2630,7 +2723,7 @@ static int empty_dir(struct inode *inode + } + + sb = inode->i_sb; +- if (inode->i_size < EXT4_DIR_REC_LEN(1) + EXT4_DIR_REC_LEN(2)) { ++ if (inode->i_size < __EXT4_DIR_REC_LEN(1) + __EXT4_DIR_REC_LEN(2)) { + EXT4_ERROR_INODE(inode, "invalid size"); + return 1; + } +Index: linux-3.12.39-47.1/fs/ext4/inline.c +=================================================================== +--- linux-3.12.39-47.1.orig/fs/ext4/inline.c ++++ linux-3.12.39-47.1/fs/ext4/inline.c +@@ -998,17 +998,17 @@ static int ext4_add_dirent_to_inline(han + int err; + struct ext4_dir_entry_2 *de; + +- reclen = EXT4_DIR_REC_LEN(namelen); ++ reclen = __EXT4_DIR_REC_LEN(namelen); + err = ext4_find_dest_de(dir, inode, iloc->bh, + inline_start, inline_size, +- name, namelen, &de); ++ name, namelen, &de, NULL); + if (err) + return err; + + err = ext4_journal_get_write_access(handle, iloc->bh); + if (err) + return err; +- ext4_insert_dentry(inode, de, inline_size, name, namelen); ++ ext4_insert_dentry(inode, de, inline_size, name, namelen, NULL); + + ext4_show_inline_dir(dir, iloc->bh, inline_start, inline_size); + +@@ -1078,7 +1078,7 @@ static int ext4_update_inline_dir(handle + int old_size = EXT4_I(dir)->i_inline_size - EXT4_MIN_INLINE_DATA_SIZE; + int new_size = get_max_inline_xattr_value_size(dir, iloc); + +- if (new_size - old_size <= EXT4_DIR_REC_LEN(1)) ++ if (new_size - old_size <= __EXT4_DIR_REC_LEN(1)) + return -ENOSPC; + + ret = ext4_update_inline_data(handle, dir, +@@ -1347,7 +1347,7 @@ int htree_inlinedir_to_tree(struct file + fake.name_len = 1; + strcpy(fake.name, "."); + fake.rec_len = ext4_rec_len_to_disk( +- EXT4_DIR_REC_LEN(fake.name_len), ++ EXT4_DIR_REC_LEN(&fake), + inline_size); + ext4_set_de_type(inode->i_sb, &fake, S_IFDIR); + de = &fake; +@@ -1357,7 +1357,7 @@ int htree_inlinedir_to_tree(struct file + fake.name_len = 2; + strcpy(fake.name, ".."); + fake.rec_len = ext4_rec_len_to_disk( +- 
EXT4_DIR_REC_LEN(fake.name_len), ++ EXT4_DIR_REC_LEN(&fake), + inline_size); + ext4_set_de_type(inode->i_sb, &fake, S_IFDIR); + de = &fake; +@@ -1452,8 +1452,8 @@ int ext4_read_inline_dir(struct file *fi + * So we will use extra_offset and extra_size to indicate them + * during the inline dir iteration. + */ +- dotdot_offset = EXT4_DIR_REC_LEN(1); +- dotdot_size = dotdot_offset + EXT4_DIR_REC_LEN(2); ++ dotdot_offset = __EXT4_DIR_REC_LEN(1); ++ dotdot_size = dotdot_offset + __EXT4_DIR_REC_LEN(2); + extra_offset = dotdot_size - EXT4_INLINE_DOTDOT_SIZE; + extra_size = extra_offset + inline_size; + +@@ -1488,7 +1488,7 @@ int ext4_read_inline_dir(struct file *fi + * failure will be detected in the + * dirent test below. */ + if (ext4_rec_len_from_disk(de->rec_len, extra_size) +- < EXT4_DIR_REC_LEN(1)) ++ < __EXT4_DIR_REC_LEN(1)) + break; + i += ext4_rec_len_from_disk(de->rec_len, + extra_size); +Index: linux-3.12.39-47.1/fs/ext4/super.c +=================================================================== +--- linux-3.12.39-47.1.orig/fs/ext4/super.c ++++ linux-3.12.39-47.1/fs/ext4/super.c +@@ -1133,7 +1133,7 @@ enum { + Opt_data_err_abort, Opt_data_err_ignore, + Opt_usrjquota, Opt_grpjquota, Opt_offusrjquota, Opt_offgrpjquota, + Opt_jqfmt_vfsold, Opt_jqfmt_vfsv0, Opt_jqfmt_vfsv1, Opt_quota, +- Opt_noquota, Opt_barrier, Opt_nobarrier, Opt_err, ++ Opt_noquota, Opt_barrier, Opt_nobarrier, Opt_err, Opt_dirdata, + Opt_usrquota, Opt_grpquota, Opt_i_version, + Opt_stripe, Opt_delalloc, Opt_nodelalloc, Opt_mblk_io_submit, + Opt_nomblk_io_submit, Opt_block_validity, Opt_noblock_validity, +@@ -1199,6 +1199,7 @@ static const match_table_t tokens = { + {Opt_stripe, "stripe=%u"}, + {Opt_delalloc, "delalloc"}, + {Opt_nodelalloc, "nodelalloc"}, ++ {Opt_dirdata, "dirdata"}, + {Opt_removed, "mblk_io_submit"}, + {Opt_removed, "nomblk_io_submit"}, + {Opt_block_validity, "block_validity"}, +@@ -1409,6 +1410,7 @@ static const struct mount_opts { + {Opt_usrjquota, 0, MOPT_Q}, + {Opt_grpjquota, 0, MOPT_Q}, + {Opt_offusrjquota, 0, MOPT_Q}, ++ {Opt_dirdata, EXT4_MOUNT_DIRDATA, MOPT_SET}, + {Opt_offgrpjquota, 0, MOPT_Q}, + {Opt_jqfmt_vfsold, QFMT_VFS_OLD, MOPT_QFMT}, + {Opt_jqfmt_vfsv0, QFMT_VFS_V0, MOPT_QFMT}, diff --git a/ldiskfs/kernel_patches/patches/sles12/ext4-inode-version.patch b/ldiskfs/kernel_patches/patches/sles12/ext4-inode-version.patch new file mode 100644 index 0000000..6aff973 --- /dev/null +++ b/ldiskfs/kernel_patches/patches/sles12/ext4-inode-version.patch @@ -0,0 +1,59 @@ +Index: linux-3.12.39-47.1/fs/ext4/inode.c +=================================================================== +--- linux-3.12.39-47.1.orig/fs/ext4/inode.c ++++ linux-3.12.39-47.1/fs/ext4/inode.c +@@ -4166,11 +4166,11 @@ struct inode *ext4_iget(struct super_blo + EXT4_INODE_GET_XTIME(i_atime, inode, raw_inode); + EXT4_EINODE_GET_XTIME(i_crtime, ei, raw_inode); + +- inode->i_version = le32_to_cpu(raw_inode->i_disk_version); ++ ei->i_fs_version = le32_to_cpu(raw_inode->i_disk_version); + if (EXT4_INODE_SIZE(inode->i_sb) > EXT4_GOOD_OLD_INODE_SIZE) { + if (EXT4_FITS_IN_INODE(raw_inode, ei, i_version_hi)) +- inode->i_version |= +- (__u64)(le32_to_cpu(raw_inode->i_version_hi)) << 32; ++ ei->i_fs_version |= (__u64)(le32_to_cpu(raw_inode->i_version_hi)) ++ << 32; + } + + ret = 0; +@@ -4393,11 +4393,11 @@ static int ext4_do_update_inode(handle_t + raw_inode->i_block[block] = ei->i_data[block]; + } + +- raw_inode->i_disk_version = cpu_to_le32(inode->i_version); ++ raw_inode->i_disk_version = cpu_to_le32(ei->i_fs_version); + if 
(ei->i_extra_isize) { + if (EXT4_FITS_IN_INODE(raw_inode, ei, i_version_hi)) +- raw_inode->i_version_hi = +- cpu_to_le32(inode->i_version >> 32); ++ raw_inode->i_version_hi = cpu_to_le32(ei->i_fs_version ++ >> 32); + raw_inode->i_extra_isize = cpu_to_le16(ei->i_extra_isize); + } + +Index: linux-3.12.39-47.1/fs/ext4/ialloc.c +=================================================================== +--- linux-3.12.39-47.1.orig/fs/ext4/ialloc.c ++++ linux-3.12.39-47.1/fs/ext4/ialloc.c +@@ -973,6 +973,7 @@ got: + ei->i_dtime = 0; + ei->i_block_group = group; + ei->i_last_alloc_group = ~0; ++ ei->i_fs_version = 0; + + ext4_set_inode_flags(inode); + if (IS_DIRSYNC(inode)) +Index: linux-3.12.39-47.1/fs/ext4/ext4.h +=================================================================== +--- linux-3.12.39-47.1.orig/fs/ext4/ext4.h ++++ linux-3.12.39-47.1/fs/ext4/ext4.h +@@ -929,6 +929,8 @@ struct ext4_inode_info { + tid_t i_sync_tid; + tid_t i_datasync_tid; + ++ __u64 i_fs_version; ++ + /* Precomputed uuid+inum+igen checksum for seeding inode checksums */ + __u32 i_csum_seed; + }; diff --git a/ldiskfs/kernel_patches/patches/sles12/ext4-large-eas.patch b/ldiskfs/kernel_patches/patches/sles12/ext4-large-eas.patch new file mode 100644 index 0000000..8f972ac --- /dev/null +++ b/ldiskfs/kernel_patches/patches/sles12/ext4-large-eas.patch @@ -0,0 +1,1009 @@ +This patch implements the large EA support in ext4. If the size of +an EA value is larger than the blocksize, then the EA value would +not be saved in the external EA block, instead it would be saved +in an external EA inode. So, the patch also helps support a larger +number of EAs. + +Index: linux-3.12.39-47.1/fs/ext4/ext4.h +=================================================================== +--- linux-3.12.39-47.1.orig/fs/ext4/ext4.h ++++ linux-3.12.39-47.1/fs/ext4/ext4.h +@@ -1538,6 +1538,7 @@ static inline void ext4_clear_state_flag + EXT4_FEATURE_INCOMPAT_EXTENTS| \ + EXT4_FEATURE_INCOMPAT_64BIT| \ + EXT4_FEATURE_INCOMPAT_FLEX_BG| \ ++ EXT4_FEATURE_INCOMPAT_EA_INODE| \ + EXT4_FEATURE_INCOMPAT_MMP | \ + EXT4_FEATURE_INCOMPAT_DIRDATA| \ + EXT4_FEATURE_INCOMPAT_INLINE_DATA) +@@ -1938,6 +1939,12 @@ struct mmpd_data { + #define EXT4_MMP_MAX_CHECK_INTERVAL 300UL + + /* ++ * Maximum size of xattr attributes for FEATURE_INCOMPAT_EA_INODE 1Mb ++ * This limit is arbitrary, but is reasonable for the xattr API. 
++ */ ++#define EXT4_XATTR_MAX_LARGE_EA_SIZE (1024 * 1024) ++ ++/* + * Function prototypes + */ + +@@ -1949,6 +1956,10 @@ struct mmpd_data { + # define ATTRIB_NORET __attribute__((noreturn)) + # define NORET_AND noreturn, + ++struct ext4_xattr_ino_array { ++ unsigned int xia_count; /* # of used item in the array */ ++ unsigned int xia_inodes[0]; ++}; + /* bitmap.c */ + extern unsigned int ext4_count_free(char *bitmap, unsigned numchars); + void ext4_inode_bitmap_csum_set(struct super_block *sb, ext4_group_t group, +@@ -2162,6 +2173,7 @@ extern void ext4_set_inode_flags(struct + extern void ext4_get_inode_flags(struct ext4_inode_info *); + extern int ext4_alloc_da_blocks(struct inode *inode); + extern void ext4_set_aops(struct inode *inode); ++extern int ext4_meta_trans_blocks(struct inode *, int nrblocks, int chunk); + extern int ext4_writepage_trans_blocks(struct inode *); + extern int ext4_chunk_trans_blocks(struct inode *, int nrblocks); + extern int ext4_block_truncate_page(handle_t *handle, +Index: linux-3.12.39-47.1/fs/ext4/inode.c +=================================================================== +--- linux-3.12.39-47.1.orig/fs/ext4/inode.c ++++ linux-3.12.39-47.1/fs/ext4/inode.c +@@ -135,8 +135,6 @@ static void ext4_invalidatepage(struct p + unsigned int length); + static int __ext4_journalled_writepage(struct page *page, unsigned int len); + static int ext4_bh_delay_or_unwritten(handle_t *handle, struct buffer_head *bh); +-static int ext4_meta_trans_blocks(struct inode *inode, int lblocks, +- int pextents); + + /* + * Test whether an inode is a fast symlink. +@@ -182,6 +180,8 @@ void ext4_evict_inode(struct inode *inod + { + handle_t *handle; + int err; ++ int extra_credits = 3; ++ struct ext4_xattr_ino_array *lea_ino_array = NULL; + + trace_ext4_evict_inode(inode); + +@@ -235,8 +235,8 @@ void ext4_evict_inode(struct inode *inod + * protection against it + */ + sb_start_intwrite(inode->i_sb); +- handle = ext4_journal_start(inode, EXT4_HT_TRUNCATE, +- ext4_blocks_for_truncate(inode)+3); ++ ++ handle = ext4_journal_start(inode, EXT4_HT_TRUNCATE, extra_credits); + if (IS_ERR(handle)) { + ext4_std_error(inode->i_sb, PTR_ERR(handle)); + /* +@@ -248,9 +248,33 @@ void ext4_evict_inode(struct inode *inod + sb_end_intwrite(inode->i_sb); + goto no_delete; + } +- + if (IS_SYNC(inode)) + ext4_handle_sync(handle); ++ ++ /* ++ * Delete xattr inode before deleting the main inode. ++ */ ++ err = ext4_xattr_delete_inode(handle, inode, &lea_ino_array); ++ if (err) { ++ ext4_warning(inode->i_sb, ++ "couldn't delete inode's xattr (err %d)", err); ++ goto stop_handle; ++ } ++ ++ if (!ext4_handle_has_enough_credits(handle, ++ ext4_blocks_for_truncate(inode) + extra_credits)) { ++ err = ext4_journal_extend(handle, ++ ext4_blocks_for_truncate(inode) + extra_credits); ++ if (err > 0) ++ err = ext4_journal_restart(handle, ++ ext4_blocks_for_truncate(inode) + extra_credits); ++ if (err != 0) { ++ ext4_warning(inode->i_sb, ++ "couldn't extend journal (err %d)", err); ++ goto stop_handle; ++ } ++ } ++ + inode->i_size = 0; + err = ext4_mark_inode_dirty(handle, inode); + if (err) { +@@ -305,8 +329,12 @@ void ext4_evict_inode(struct inode *inod + ext4_clear_inode(inode); + else + ext4_free_inode(handle, inode); ++ + ext4_journal_stop(handle); + sb_end_intwrite(inode->i_sb); ++ ++ if (lea_ino_array != NULL) ++ ext4_xattr_inode_array_free(inode, lea_ino_array); + return; + no_delete: + ext4_clear_inode(inode); /* We must guarantee clearing of inode... 
*/ +@@ -4754,7 +4782,7 @@ static int ext4_index_trans_blocks(struc + * + * Also account for superblock, inode, quota and xattr blocks + */ +-static int ext4_meta_trans_blocks(struct inode *inode, int lblocks, ++int ext4_meta_trans_blocks(struct inode *inode, int lblocks, + int pextents) + { + ext4_group_t groups, ngroups = ext4_get_groups_count(inode->i_sb); +Index: linux-3.12.39-47.1/fs/ext4/xattr.c +=================================================================== +--- linux-3.12.39-47.1.orig/fs/ext4/xattr.c ++++ linux-3.12.39-47.1/fs/ext4/xattr.c +@@ -233,19 +233,26 @@ ext4_xattr_check_block(struct inode *ino + } + + static inline int +-ext4_xattr_check_entry(struct ext4_xattr_entry *entry, size_t size) ++ext4_xattr_check_entry(struct ext4_xattr_entry *entry, size_t size, ++ struct inode *inode) + { + size_t value_size = le32_to_cpu(entry->e_value_size); + +- if (entry->e_value_block != 0 || value_size > size || +- le16_to_cpu(entry->e_value_offs) + value_size > size) ++ if ((entry->e_value_inum == 0) && ++ (le16_to_cpu(entry->e_value_offs) + value_size > size)) ++ return -EIO; ++ if (entry->e_value_inum != 0 && ++ (le32_to_cpu(entry->e_value_inum) < EXT4_FIRST_INO(inode->i_sb) || ++ le32_to_cpu(entry->e_value_inum) > ++ le32_to_cpu(EXT4_SB(inode->i_sb)->s_es->s_inodes_count))) + return -EIO; + return 0; + } + + static int + ext4_xattr_find_entry(struct ext4_xattr_entry **pentry, int name_index, +- const char *name, size_t size, int sorted) ++ const char *name, size_t size, int sorted, ++ struct inode *inode) + { + struct ext4_xattr_entry *entry; + size_t name_len; +@@ -265,11 +272,103 @@ ext4_xattr_find_entry(struct ext4_xattr_ + break; + } + *pentry = entry; +- if (!cmp && ext4_xattr_check_entry(entry, size)) ++ if (!cmp && ext4_xattr_check_entry(entry, size, inode)) + return -EIO; + return cmp ? -ENODATA : 0; + } + ++/* ++ * Read the EA value from an inode. ++ */ ++static int ++ext4_xattr_inode_read(struct inode *ea_inode, void *buf, size_t *size) ++{ ++ unsigned long block = 0; ++ struct buffer_head *bh = NULL; ++ int err, blocksize; ++ size_t csize, ret_size = 0; ++ ++ if (*size == 0) ++ return 0; ++ ++ blocksize = ea_inode->i_sb->s_blocksize; ++ ++ while (ret_size < *size) { ++ csize = (*size - ret_size) > blocksize ? blocksize : ++ *size - ret_size; ++ bh = ext4_bread(NULL, ea_inode, block, 0, &err); ++ if (!bh) { ++ *size = ret_size; ++ return err; ++ } ++ memcpy(buf, bh->b_data, csize); ++ brelse(bh); ++ ++ buf += csize; ++ block += 1; ++ ret_size += csize; ++ } ++ ++ *size = ret_size; ++ ++ return err; ++} ++ ++struct inode *ext4_xattr_inode_iget(struct inode *parent, int ea_ino, int *err) ++{ ++ struct inode *ea_inode = NULL; ++ ++ ea_inode = ext4_iget(parent->i_sb, ea_ino); ++ if (IS_ERR(ea_inode) || is_bad_inode(ea_inode)) { ++ ext4_error(parent->i_sb, "error while reading EA inode %d", ++ ea_ino); ++ *err = -EIO; ++ return NULL; ++ } ++ ++ if (ea_inode->i_xattr_inode_parent != parent->i_ino || ++ ea_inode->i_generation != parent->i_generation) { ++ ext4_error(parent->i_sb, "Backpointer from EA inode %d " ++ "to parent invalid.", ea_ino); ++ *err = -EINVAL; ++ goto error; ++ } ++ ++ if (!(EXT4_I(ea_inode)->i_flags & EXT4_EA_INODE_FL)) { ++ ext4_error(parent->i_sb, "EA inode %d does not have " ++ "EXT4_EA_INODE_FL flag set.\n", ea_ino); ++ *err = -EINVAL; ++ goto error; ++ } ++ ++ *err = 0; ++ return ea_inode; ++ ++error: ++ iput(ea_inode); ++ return NULL; ++} ++ ++/* ++ * Read the value from the EA inode. 
++ */ ++static int ++ext4_xattr_inode_get(struct inode *inode, int ea_ino, void *buffer, ++ size_t *size) ++{ ++ struct inode *ea_inode = NULL; ++ int err; ++ ++ ea_inode = ext4_xattr_inode_iget(inode, ea_ino, &err); ++ if (err) ++ return err; ++ ++ err = ext4_xattr_inode_read(ea_inode, buffer, size); ++ iput(ea_inode); ++ ++ return err; ++} ++ + static int + ext4_xattr_block_get(struct inode *inode, int name_index, const char *name, + void *buffer, size_t buffer_size) +@@ -301,7 +400,8 @@ bad_block: + } + ext4_xattr_cache_insert(bh); + entry = BFIRST(bh); +- error = ext4_xattr_find_entry(&entry, name_index, name, bh->b_size, 1); ++ error = ext4_xattr_find_entry(&entry, name_index, name, bh->b_size, 1, ++ inode); + if (error == -EIO) + goto bad_block; + if (error) +@@ -311,8 +411,16 @@ bad_block: + error = -ERANGE; + if (size > buffer_size) + goto cleanup; +- memcpy(buffer, bh->b_data + le16_to_cpu(entry->e_value_offs), +- size); ++ if (entry->e_value_inum != 0) { ++ error = ext4_xattr_inode_get(inode, ++ le32_to_cpu(entry->e_value_inum), ++ buffer, &size); ++ if (error) ++ goto cleanup; ++ } else { ++ memcpy(buffer, bh->b_data + ++ le16_to_cpu(entry->e_value_offs), size); ++ } + } + error = size; + +@@ -346,7 +454,7 @@ ext4_xattr_ibody_get(struct inode *inode + if (error) + goto cleanup; + error = ext4_xattr_find_entry(&entry, name_index, name, +- end - (void *)entry, 0); ++ end - (void *)entry, 0, inode); + if (error) + goto cleanup; + size = le32_to_cpu(entry->e_value_size); +@@ -354,8 +462,16 @@ ext4_xattr_ibody_get(struct inode *inode + error = -ERANGE; + if (size > buffer_size) + goto cleanup; +- memcpy(buffer, (void *)IFIRST(header) + +- le16_to_cpu(entry->e_value_offs), size); ++ if (entry->e_value_inum != 0) { ++ error = ext4_xattr_inode_get(inode, ++ le32_to_cpu(entry->e_value_inum), ++ buffer, &size); ++ if (error) ++ goto cleanup; ++ } else { ++ memcpy(buffer, (void *)IFIRST(header) + ++ le16_to_cpu(entry->e_value_offs), size); ++ } + } + error = size; + +@@ -597,7 +713,7 @@ static size_t ext4_xattr_free_space(stru + { + for (; !IS_LAST_ENTRY(last); last = EXT4_XATTR_NEXT(last)) { + *total += EXT4_XATTR_LEN(last->e_name_len); +- if (!last->e_value_block && last->e_value_size) { ++ if (last->e_value_inum == 0 && last->e_value_size > 0) { + size_t offs = le16_to_cpu(last->e_value_offs); + if (offs < *min_offs) + *min_offs = offs; +@@ -606,16 +722,172 @@ static size_t ext4_xattr_free_space(stru + return (*min_offs - ((void *)last - base) - sizeof(__u32)); + } + ++/* ++ * Write the value of the EA in an inode. ++ */ ++static int ++ext4_xattr_inode_write(handle_t *handle, struct inode *ea_inode, ++ const void *buf, int bufsize) ++{ ++ struct buffer_head *bh = NULL; ++ unsigned long block = 0; ++ unsigned blocksize = ea_inode->i_sb->s_blocksize; ++ unsigned max_blocks = (bufsize + blocksize - 1) >> ea_inode->i_blkbits; ++ int csize, wsize = 0; ++ int ret = 0; ++ int retries = 0; ++ ++retry: ++ while (ret >= 0 && ret < max_blocks) { ++ struct ext4_map_blocks map; ++ map.m_lblk = block += ret; ++ map.m_len = max_blocks -= ret; ++ ++ ret = ext4_map_blocks(handle, ea_inode, &map, ++ EXT4_GET_BLOCKS_CREATE); ++ if (ret <= 0) { ++ ext4_mark_inode_dirty(handle, ea_inode); ++ if (ret == -ENOSPC && ++ ext4_should_retry_alloc(ea_inode->i_sb, &retries)) { ++ ret = 0; ++ goto retry; ++ } ++ break; ++ } ++ } ++ ++ if (ret < 0) ++ return ret; ++ ++ block = 0; ++ while (wsize < bufsize) { ++ if (bh != NULL) ++ brelse(bh); ++ csize = (bufsize - wsize) > blocksize ? 
blocksize : ++ bufsize - wsize; ++ bh = ext4_getblk(handle, ea_inode, block, 0, &ret); ++ if (!bh) ++ goto out; ++ ret = ext4_journal_get_write_access(handle, bh); ++ if (ret) ++ goto out; ++ ++ memcpy(bh->b_data, buf, csize); ++ set_buffer_uptodate(bh); ++ ext4_handle_dirty_metadata(handle, ea_inode, bh); ++ ++ buf += csize; ++ wsize += csize; ++ block += 1; ++ } ++ ++ i_size_write(ea_inode, wsize); ++ ext4_update_i_disksize(ea_inode, wsize); ++ ++ ext4_mark_inode_dirty(handle, ea_inode); ++ ++out: ++ brelse(bh); ++ ++ return ret; ++} ++ ++/* ++ * Create an inode to store the value of a large EA. ++ */ ++static struct inode * ++ext4_xattr_inode_create(handle_t *handle, struct inode *inode) ++{ ++ struct inode *ea_inode = NULL; ++ ++ /* ++ * Let the next inode be the goal, so we try and allocate the EA inode ++ * in the same group, or nearby one. ++ */ ++ ea_inode = ext4_new_inode(handle, inode->i_sb->s_root->d_inode, ++ S_IFREG|0600, NULL, inode->i_ino + 1, NULL); ++ ++ if (!IS_ERR(ea_inode)) { ++ ea_inode->i_op = &ext4_file_inode_operations; ++ ea_inode->i_fop = &ext4_file_operations; ++ ext4_set_aops(ea_inode); ++ ea_inode->i_generation = inode->i_generation; ++ EXT4_I(ea_inode)->i_flags |= EXT4_EA_INODE_FL; ++ ++ /* ++ * A back-pointer from EA inode to parent inode will be useful ++ * for e2fsck. ++ */ ++ ea_inode->i_xattr_inode_parent = inode->i_ino; ++ unlock_new_inode(ea_inode); ++ } ++ ++ return ea_inode; ++} ++ ++/* ++ * Unlink the inode storing the value of the EA. ++ */ ++int ++ext4_xattr_inode_unlink(struct inode *inode, int ea_ino) ++{ ++ struct inode *ea_inode = NULL; ++ int err; ++ ++ ea_inode = ext4_xattr_inode_iget(inode, ea_ino, &err); ++ if (err) ++ return err; ++ ++ clear_nlink(ea_inode); ++ iput(ea_inode); ++ ++ return 0; ++} ++ ++/* ++ * Add value of the EA in an inode. ++ */ + static int +-ext4_xattr_set_entry(struct ext4_xattr_info *i, struct ext4_xattr_search *s) ++ext4_xattr_inode_set(handle_t *handle, struct inode *inode, int *ea_ino, ++ const void *value, size_t value_len) ++{ ++ struct inode *ea_inode = NULL; ++ int err; ++ ++ /* Create an inode for the EA value */ ++ ea_inode = ext4_xattr_inode_create(handle, inode); ++ if (IS_ERR(ea_inode)) ++ return -1; ++ ++ err = ext4_xattr_inode_write(handle, ea_inode, value, value_len); ++ if (err) ++ clear_nlink(ea_inode); ++ else ++ *ea_ino = ea_inode->i_ino; ++ ++ iput(ea_inode); ++ ++ return err; ++} ++ ++static int ++ext4_xattr_set_entry(struct ext4_xattr_info *i, struct ext4_xattr_search *s, ++ handle_t *handle, struct inode *inode) + { + struct ext4_xattr_entry *last; + size_t free, min_offs = s->end - s->base, name_len = strlen(i->name); ++ int in_inode = i->in_inode; ++ ++ if (EXT4_HAS_INCOMPAT_FEATURE(inode->i_sb, ++ EXT4_FEATURE_INCOMPAT_EA_INODE) && ++ (EXT4_XATTR_SIZE(i->value_len) > ++ EXT4_XATTR_MIN_LARGE_EA_SIZE(inode->i_sb->s_blocksize))) ++ in_inode = 1; + + /* Compute min_offs and last. 
*/ + last = s->first; + for (; !IS_LAST_ENTRY(last); last = EXT4_XATTR_NEXT(last)) { +- if (!last->e_value_block && last->e_value_size) { ++ if (last->e_value_inum == 0 && last->e_value_size > 0) { + size_t offs = le16_to_cpu(last->e_value_offs); + if (offs < min_offs) + min_offs = offs; +@@ -623,16 +895,21 @@ ext4_xattr_set_entry(struct ext4_xattr_i + } + free = min_offs - ((void *)last - s->base) - sizeof(__u32); + if (!s->not_found) { +- if (!s->here->e_value_block && s->here->e_value_size) { ++ if (!in_inode && s->here->e_value_inum == 0 && ++ s->here->e_value_size > 0) { + size_t size = le32_to_cpu(s->here->e_value_size); + free += EXT4_XATTR_SIZE(size); + } + free += EXT4_XATTR_LEN(name_len); + } + if (i->value) { +- if (free < EXT4_XATTR_SIZE(i->value_len) || +- free < EXT4_XATTR_LEN(name_len) + +- EXT4_XATTR_SIZE(i->value_len)) ++ size_t value_len = EXT4_XATTR_SIZE(i->value_len); ++ ++ if (in_inode) ++ value_len = 0; ++ ++ if (free < value_len || ++ free < EXT4_XATTR_LEN(name_len) + value_len) + return -ENOSPC; + } + +@@ -646,7 +923,8 @@ ext4_xattr_set_entry(struct ext4_xattr_i + s->here->e_name_len = name_len; + memcpy(s->here->e_name, i->name, name_len); + } else { +- if (!s->here->e_value_block && s->here->e_value_size) { ++ if (s->here->e_value_offs > 0 && s->here->e_value_inum == 0 && ++ s->here->e_value_size > 0) { + void *first_val = s->base + min_offs; + size_t offs = le16_to_cpu(s->here->e_value_offs); + void *val = s->base + offs; +@@ -680,13 +958,17 @@ ext4_xattr_set_entry(struct ext4_xattr_i + last = s->first; + while (!IS_LAST_ENTRY(last)) { + size_t o = le16_to_cpu(last->e_value_offs); +- if (!last->e_value_block && +- last->e_value_size && o < offs) ++ if (last->e_value_size > 0 && o < offs) + last->e_value_offs = + cpu_to_le16(o + size); + last = EXT4_XATTR_NEXT(last); + } + } ++ if (s->here->e_value_inum != 0) { ++ ext4_xattr_inode_unlink(inode, ++ le32_to_cpu(s->here->e_value_inum)); ++ s->here->e_value_inum = 0; ++ } + if (!i->value) { + /* Remove the old name. */ + size_t size = EXT4_XATTR_LEN(name_len); +@@ -700,10 +982,17 @@ ext4_xattr_set_entry(struct ext4_xattr_i + if (i->value) { + /* Insert the new value. 
*/ + s->here->e_value_size = cpu_to_le32(i->value_len); +- if (i->value_len) { ++ if (in_inode) { ++ int ea_ino = le32_to_cpu(s->here->e_value_inum); ++ ext4_xattr_inode_set(handle, inode, &ea_ino, i->value, ++ i->value_len); ++ s->here->e_value_inum = cpu_to_le32(ea_ino); ++ s->here->e_value_offs = 0; ++ } else if (i->value_len) { + size_t size = EXT4_XATTR_SIZE(i->value_len); + void *val = s->base + min_offs - size; + s->here->e_value_offs = cpu_to_le16(min_offs - size); ++ s->here->e_value_inum = 0; + if (i->value == EXT4_ZERO_XATTR_VALUE) { + memset(val, 0, size); + } else { +@@ -753,7 +1042,7 @@ ext4_xattr_block_find(struct inode *inod + bs->s.end = bs->bh->b_data + bs->bh->b_size; + bs->s.here = bs->s.first; + error = ext4_xattr_find_entry(&bs->s.here, i->name_index, +- i->name, bs->bh->b_size, 1); ++ i->name, bs->bh->b_size, 1, inode); + if (error && error != -ENODATA) + goto cleanup; + bs->s.not_found = error; +@@ -777,8 +1066,6 @@ ext4_xattr_block_set(handle_t *handle, s + + #define header(x) ((struct ext4_xattr_header *)(x)) + +- if (i->value && i->value_len > sb->s_blocksize) +- return -ENOSPC; + if (s->base) { + ce = mb_cache_entry_get(ext4_xattr_cache, bs->bh->b_bdev, + bs->bh->b_blocknr); +@@ -794,7 +1081,7 @@ ext4_xattr_block_set(handle_t *handle, s + ce = NULL; + } + ea_bdebug(bs->bh, "modifying in-place"); +- error = ext4_xattr_set_entry(i, s); ++ error = ext4_xattr_set_entry(i, s, handle, inode); + if (!error) { + if (!IS_LAST_ENTRY(s->first)) + ext4_xattr_rehash(header(s->base), +@@ -845,7 +1132,7 @@ ext4_xattr_block_set(handle_t *handle, s + s->end = s->base + sb->s_blocksize; + } + +- error = ext4_xattr_set_entry(i, s); ++ error = ext4_xattr_set_entry(i, s, handle, inode); + if (error == -EIO) + goto bad_block; + if (error) +@@ -994,7 +1281,7 @@ int ext4_xattr_ibody_find(struct inode * + /* Find the named attribute. 
*/ + error = ext4_xattr_find_entry(&is->s.here, i->name_index, + i->name, is->s.end - +- (void *)is->s.base, 0); ++ (void *)is->s.base, 0, inode); + if (error && error != -ENODATA) + return error; + is->s.not_found = error; +@@ -1012,7 +1299,7 @@ int ext4_xattr_ibody_inline_set(handle_t + + if (EXT4_I(inode)->i_extra_isize == 0) + return -ENOSPC; +- error = ext4_xattr_set_entry(i, s); ++ error = ext4_xattr_set_entry(i, s, handle, inode); + if (error) { + if (error == -ENOSPC && + ext4_has_inline_data(inode)) { +@@ -1024,7 +1311,7 @@ int ext4_xattr_ibody_inline_set(handle_t + error = ext4_xattr_ibody_find(inode, i, is); + if (error) + return error; +- error = ext4_xattr_set_entry(i, s); ++ error = ext4_xattr_set_entry(i, s, handle, inode); + } + if (error) + return error; +@@ -1050,7 +1337,7 @@ static int ext4_xattr_ibody_set(handle_t + + if (EXT4_I(inode)->i_extra_isize == 0) + return -ENOSPC; +- error = ext4_xattr_set_entry(i, s); ++ error = ext4_xattr_set_entry(i, s, handle, inode); + if (error) + return error; + header = IHDR(inode, ext4_raw_inode(&is->iloc)); +@@ -1086,7 +1373,7 @@ ext4_xattr_set_handle(handle_t *handle, + .name = name, + .value = value, + .value_len = value_len, +- ++ .in_inode = 0, + }; + struct ext4_xattr_ibody_find is = { + .s = { .not_found = -ENODATA, }, +@@ -1151,6 +1438,15 @@ ext4_xattr_set_handle(handle_t *handle, + goto cleanup; + } + error = ext4_xattr_block_set(handle, inode, &i, &bs); ++ if (EXT4_HAS_INCOMPAT_FEATURE(inode->i_sb, ++ EXT4_FEATURE_INCOMPAT_EA_INODE) && ++ error == -ENOSPC) { ++ /* xattr not fit to block, store at external ++ * inode */ ++ i.in_inode = 1; ++ error = ext4_xattr_ibody_set(handle, inode, ++ &i, &is); ++ } + if (error) + goto cleanup; + if (!is.s.not_found) { +@@ -1197,9 +1493,22 @@ ext4_xattr_set(struct inode *inode, int + const void *value, size_t value_len, int flags) + { + handle_t *handle; ++ struct super_block *sb = inode->i_sb; + int error, retries = 0; + int credits = ext4_jbd2_credits_xattr(inode); + ++ if ((value_len >= EXT4_XATTR_MIN_LARGE_EA_SIZE(sb->s_blocksize)) && ++ EXT4_HAS_INCOMPAT_FEATURE(sb, EXT4_FEATURE_INCOMPAT_EA_INODE)) { ++ int nrblocks = (value_len + sb->s_blocksize - 1) >> ++ sb->s_blocksize_bits; ++ ++ /* For new inode */ ++ credits += EXT4_SINGLEDATA_TRANS_BLOCKS(sb) + 3; ++ ++ /* For data blocks of EA inode */ ++ credits += ext4_meta_trans_blocks(inode, nrblocks, 0); ++ } ++ + retry: + handle = ext4_journal_start(inode, EXT4_HT_XATTR, credits); + if (IS_ERR(handle)) { +@@ -1211,7 +1520,7 @@ retry: + value, value_len, flags); + error2 = ext4_journal_stop(handle); + if (error == -ENOSPC && +- ext4_should_retry_alloc(inode->i_sb, &retries)) ++ ext4_should_retry_alloc(sb, &retries)) + goto retry; + if (error == 0) + error = error2; +@@ -1233,7 +1542,7 @@ static void ext4_xattr_shift_entries(str + + /* Adjust the value offsets of the entries */ + for (; !IS_LAST_ENTRY(last); last = EXT4_XATTR_NEXT(last)) { +- if (!last->e_value_block && last->e_value_size) { ++ if (last->e_value_inum == 0 && last->e_value_size > 0) { + new_offs = le16_to_cpu(last->e_value_offs) + + value_offs_shift; + BUG_ON(new_offs + le32_to_cpu(last->e_value_size) +@@ -1472,19 +1781,91 @@ cleanup: + } + + +- ++#define EIA_INCR 16 /* must be 2^n */ ++#define EIA_MASK (EIA_INCR - 1) ++/* Add the large xattr @ino into @lea_ino_array for later deletion. ++ * If @lea_ino_array is new or full it will be grown and the old ++ * contents copied over. 
++ */ ++static int ++ext4_expand_ino_array(struct ext4_xattr_ino_array **lea_ino_array, __u32 ino) ++{ ++ if (*lea_ino_array == NULL) { ++ /* ++ * Start with 15 inodes, so it fits into a power-of-two size. ++ * If *lea_ino_array is NULL, this is essentially offsetof() ++ */ ++ (*lea_ino_array) = ++ kmalloc(offsetof(struct ext4_xattr_ino_array, ++ xia_inodes[EIA_MASK]), ++ GFP_NOFS); ++ if (*lea_ino_array == NULL) ++ return -ENOMEM; ++ (*lea_ino_array)->xia_count = 0; ++ } else if (((*lea_ino_array)->xia_count & EIA_MASK) == EIA_MASK) { ++ /* expand the array once all 15 + n * 16 slots are full */ ++ struct ext4_xattr_ino_array *new_array = NULL; ++ int count = (*lea_ino_array)->xia_count; ++ ++ /* if new_array is NULL, this is essentially offsetof() */ ++ new_array = kmalloc( ++ offsetof(struct ext4_xattr_ino_array, ++ xia_inodes[count + EIA_INCR]), ++ GFP_NOFS); ++ if (new_array == NULL) ++ return -ENOMEM; ++ memcpy(new_array, *lea_ino_array, ++ offsetof(struct ext4_xattr_ino_array, ++ xia_inodes[count])); ++ kfree(*lea_ino_array); ++ *lea_ino_array = new_array; ++ } ++ (*lea_ino_array)->xia_inodes[(*lea_ino_array)->xia_count++] = ino; ++ return 0; ++} + /* + * ext4_xattr_delete_inode() + * +- * Free extended attribute resources associated with this inode. This ++ * Free extended attribute resources associated with this inode. Traverse ++ * all entries and unlink any xattr inodes associated with this inode. This + * is called immediately before an inode is freed. We have exclusive +- * access to the inode. ++ * access to the inode. If an orphan inode is deleted it will also delete any ++ * xattr block and all xattr inodes. They are checked by ext4_xattr_inode_iget() ++ * to ensure they belong to the parent inode and were not deleted already. + */ +-void +-ext4_xattr_delete_inode(handle_t *handle, struct inode *inode) ++int ++ext4_xattr_delete_inode(handle_t *handle, struct inode *inode, ++ struct ext4_xattr_ino_array **lea_ino_array) + { + struct buffer_head *bh = NULL; ++ struct ext4_xattr_ibody_header *header; ++ struct ext4_inode *raw_inode; ++ struct ext4_iloc iloc; ++ struct ext4_xattr_entry *entry; ++ int error = 0; ++ ++ if (!ext4_test_inode_state(inode, EXT4_STATE_XATTR)) ++ goto delete_external_ea; ++ ++ error = ext4_get_inode_loc(inode, &iloc); ++ if (error) ++ goto cleanup; ++ raw_inode = ext4_raw_inode(&iloc); ++ header = IHDR(inode, raw_inode); ++ entry = IFIRST(header); ++ for (; !IS_LAST_ENTRY(entry); entry = EXT4_XATTR_NEXT(entry)) { ++ if (entry->e_value_inum == 0) ++ continue; ++ if (ext4_expand_ino_array(lea_ino_array, ++ entry->e_value_inum) != 0) { ++ brelse(iloc.bh); ++ goto cleanup; ++ } ++ entry->e_value_inum = 0; ++ } ++ brelse(iloc.bh); + ++delete_external_ea: + if (!EXT4_I(inode)->i_file_acl) + goto cleanup; + bh = sb_bread(inode->i_sb, EXT4_I(inode)->i_file_acl); +@@ -1499,11 +1880,74 @@ ext4_xattr_delete_inode(handle_t *handle + EXT4_I(inode)->i_file_acl); + goto cleanup; + } ++ ++ entry = BFIRST(bh); ++ for (; !IS_LAST_ENTRY(entry); entry = EXT4_XATTR_NEXT(entry)) { ++ if (entry->e_value_inum == 0) ++ continue; ++ if (ext4_expand_ino_array(lea_ino_array, ++ entry->e_value_inum) != 0) ++ goto cleanup; ++ entry->e_value_inum = 0; ++ } ++ ++ /* add xattr inode to orphan list */ ++ if (*lea_ino_array != NULL) { ++ struct inode *ea_inode = NULL; ++ int idx = 0; ++ ++ for (; idx < (*lea_ino_array)->xia_count; ++idx) { ++ if (!ext4_handle_has_enough_credits(handle, 3)) { ++ error = ext4_journal_extend(handle, 3); ++ if (error > 0) ++ error = 
ext4_journal_restart(handle, 3); ++ if (error != 0) { ++ ext4_warning(inode->i_sb, ++ "couldn't extend journal " ++ "(err %d)", error); ++ goto cleanup; ++ } ++ } ++ ea_inode = ext4_xattr_inode_iget(inode, ++ (*lea_ino_array)->xia_inodes[idx], &error); ++ if (error) ++ continue; ++ ext4_orphan_add(handle, ea_inode); ++ /* the inode's i_count will be released by caller */ ++ } ++ } ++ + ext4_xattr_release_block(handle, inode, bh); + EXT4_I(inode)->i_file_acl = 0; + + cleanup: + brelse(bh); ++ ++ return error; ++} ++ ++void ++ext4_xattr_inode_array_free(struct inode *inode, ++ struct ext4_xattr_ino_array *lea_ino_array) ++{ ++ struct inode *ea_inode = NULL; ++ int idx = 0; ++ int err; ++ ++ if (lea_ino_array == NULL) ++ return; ++ ++ for (; idx < lea_ino_array->xia_count; ++idx) { ++ ea_inode = ext4_xattr_inode_iget(inode, ++ lea_ino_array->xia_inodes[idx], &err); ++ if (err) ++ continue; ++ clear_nlink(ea_inode); ++ iput(ea_inode); ++ /* for inode's i_count get from ext4_xattr_delete_inode */ ++ iput(ea_inode); ++ } ++ kfree(lea_ino_array); + } + + /* +@@ -1573,10 +2017,9 @@ ext4_xattr_cmp(struct ext4_xattr_header + entry1->e_name_index != entry2->e_name_index || + entry1->e_name_len != entry2->e_name_len || + entry1->e_value_size != entry2->e_value_size || ++ entry1->e_value_inum != entry2->e_value_inum || + memcmp(entry1->e_name, entry2->e_name, entry1->e_name_len)) + return 1; +- if (entry1->e_value_block != 0 || entry2->e_value_block != 0) +- return -EIO; + if (memcmp((char *)header1 + le16_to_cpu(entry1->e_value_offs), + (char *)header2 + le16_to_cpu(entry2->e_value_offs), + le32_to_cpu(entry1->e_value_size))) +@@ -1660,7 +2103,7 @@ static inline void ext4_xattr_hash_entry + *name++; + } + +- if (entry->e_value_block == 0 && entry->e_value_size != 0) { ++ if (entry->e_value_inum == 0 && entry->e_value_size != 0) { + __le32 *value = (__le32 *)((char *)header + + le16_to_cpu(entry->e_value_offs)); + for (n = (le32_to_cpu(entry->e_value_size) + +Index: linux-3.12.39-47.1/fs/ext4/xattr.h +=================================================================== +--- linux-3.12.39-47.1.orig/fs/ext4/xattr.h ++++ linux-3.12.39-47.1/fs/ext4/xattr.h +@@ -42,7 +42,7 @@ struct ext4_xattr_entry { + __u8 e_name_len; /* length of name */ + __u8 e_name_index; /* attribute name index */ + __le16 e_value_offs; /* offset in disk block of value */ +- __le32 e_value_block; /* disk block attribute is stored on (n/i) */ ++ __le32 e_value_inum; /* inode in which the value is stored */ + __le32 e_value_size; /* size of attribute value */ + __le32 e_hash; /* hash value of name and value */ + char e_name[0]; /* attribute name */ +@@ -67,6 +67,15 @@ struct ext4_xattr_entry { + EXT4_I(inode)->i_extra_isize)) + #define IFIRST(hdr) ((struct ext4_xattr_entry *)((hdr)+1)) + ++#define i_xattr_inode_parent i_mtime.tv_sec ++ ++/* ++ * The minimum size of EA value when you start storing it in an external inode ++ * size of block - size of header - size of 1 entry - 4 null bytes ++*/ ++#define EXT4_XATTR_MIN_LARGE_EA_SIZE(b) \ ++ ((b) - EXT4_XATTR_LEN(3) - sizeof(struct ext4_xattr_header) - 4) ++ + #define BHDR(bh) ((struct ext4_xattr_header *)((bh)->b_data)) + #define ENTRY(ptr) ((struct ext4_xattr_entry *)(ptr)) + #define BFIRST(bh) ENTRY(BHDR(bh)+1) +@@ -75,10 +84,11 @@ struct ext4_xattr_entry { + #define EXT4_ZERO_XATTR_VALUE ((void *)-1) + + struct ext4_xattr_info { +- int name_index; + const char *name; + const void *value; + size_t value_len; ++ int name_index; ++ int in_inode; + }; + + struct ext4_xattr_search { +@@ 
-106,7 +116,13 @@ extern int ext4_xattr_get(struct inode * + extern int ext4_xattr_set(struct inode *, int, const char *, const void *, size_t, int); + extern int ext4_xattr_set_handle(handle_t *, struct inode *, int, const char *, const void *, size_t, int); + +-extern void ext4_xattr_delete_inode(handle_t *, struct inode *); ++extern struct inode *ext4_xattr_inode_iget(struct inode *parent, int ea_ino, ++ int *err); ++extern int ext4_xattr_inode_unlink(struct inode *inode, int ea_ino); ++extern int ext4_xattr_delete_inode(handle_t *handle, struct inode *inode, ++ struct ext4_xattr_ino_array **array); ++extern void ext4_xattr_inode_array_free(struct inode *inode, ++ struct ext4_xattr_ino_array *array); + extern void ext4_xattr_put_super(struct super_block *); + + extern int ext4_expand_extra_isize_ea(struct inode *inode, int new_extra_isize, +Index: linux-3.12.39-47.1/fs/ext4/ialloc.c +=================================================================== +--- linux-3.12.39-47.1.orig/fs/ext4/ialloc.c ++++ linux-3.12.39-47.1/fs/ext4/ialloc.c +@@ -252,7 +252,6 @@ void ext4_free_inode(handle_t *handle, s + * as writing the quota to disk may need the lock as well. + */ + dquot_initialize(inode); +- ext4_xattr_delete_inode(handle, inode); + dquot_free_inode(inode); + dquot_drop(inode); + +Index: linux-3.12.39-47.1/fs/ext4/inline.c +=================================================================== +--- linux-3.12.39-47.1.orig/fs/ext4/inline.c ++++ linux-3.12.39-47.1/fs/ext4/inline.c +@@ -59,7 +59,7 @@ static int get_max_inline_xattr_value_si + + /* Compute min_offs. */ + for (; !IS_LAST_ENTRY(entry); entry = EXT4_XATTR_NEXT(entry)) { +- if (!entry->e_value_block && entry->e_value_size) { ++ if (!entry->e_value_inum && entry->e_value_size) { + size_t offs = le16_to_cpu(entry->e_value_offs); + if (offs < min_offs) + min_offs = offs; diff --git a/ldiskfs/kernel_patches/patches/sles12/ext4-misc.patch b/ldiskfs/kernel_patches/patches/sles12/ext4-misc.patch new file mode 100644 index 0000000..65d49d4 --- /dev/null +++ b/ldiskfs/kernel_patches/patches/sles12/ext4-misc.patch @@ -0,0 +1,169 @@ +Index: linux-3.12.39-47.1/fs/ext4/ext4.h +=================================================================== +--- linux-3.12.39-47.1.orig/fs/ext4/ext4.h ++++ linux-3.12.39-47.1/fs/ext4/ext4.h +@@ -1431,6 +1431,8 @@ static inline void ext4_clear_state_flag + + #define NEXT_ORPHAN(inode) EXT4_I(inode)->i_dtime + ++#define JOURNAL_START_HAS_3ARGS 1 ++ + /* + * Codes for operating systems + */ +@@ -2694,6 +2696,11 @@ struct ext4_extent; + + extern int ext4_ext_tree_init(handle_t *handle, struct inode *); + extern int ext4_ext_writepage_trans_blocks(struct inode *, int); ++extern struct buffer_head *ext4_read_inode_bitmap(struct super_block *sb, ++ ext4_group_t block_group); ++extern struct buffer_head *ext4_append(handle_t *handle, ++ struct inode *inode, ++ ext4_lblk_t *block); + extern int ext4_ext_index_trans_blocks(struct inode *inode, int extents); + extern int ext4_ext_map_blocks(handle_t *handle, struct inode *inode, + struct ext4_map_blocks *map, int flags); +Index: linux-3.12.39-47.1/fs/ext4/namei.c +=================================================================== +--- linux-3.12.39-47.1.orig/fs/ext4/namei.c ++++ linux-3.12.39-47.1/fs/ext4/namei.c +@@ -48,7 +48,7 @@ + #define NAMEI_RA_BLOCKS 4 + #define NAMEI_RA_SIZE (NAMEI_RA_CHUNKS * NAMEI_RA_BLOCKS) + +-static struct buffer_head *ext4_append(handle_t *handle, ++struct buffer_head *ext4_append(handle_t *handle, + struct inode *inode, + ext4_lblk_t 
*block) + { +@@ -71,6 +2200,7 @@ out: + return ERR_PTR(err); + inode->i_size += inode->i_sb->s_blocksize; + EXT4_I(inode)->i_disksize = inode->i_size; ++ BUFFER_TRACE(bh, "get_write_access"); + err = ext4_journal_get_write_access(handle, bh); + if (err) { + brelse(bh); +@@ -154,6 +154,7 @@ static struct buffer_head *__ext4_read_d + } + return bh; + } ++EXPORT_SYMBOL(ext4_append); + + #ifndef assert + #define assert(test) J_ASSERT(test) +@@ -2199,7 +2200,7 @@ out: + * DIR_NLINK feature is set if 1) nlinks > EXT4_LINK_MAX or 2) nlinks == 2, + * since this indicates that nlinks count was previously 1. + */ +-static void ext4_inc_count(handle_t *handle, struct inode *inode) ++void ext4_inc_count(handle_t *handle, struct inode *inode) + { + inc_nlink(inode); + if (is_dx(inode) && inode->i_nlink > 1) { +@@ -2211,16 +2212,18 @@ static void ext4_inc_count(handle_t *han + } + } + } ++EXPORT_SYMBOL(ext4_inc_count); + + /* + * If a directory had nlink == 1, then we should let it be 1. This indicates + * directory has >EXT4_LINK_MAX subdirs. + */ +-static void ext4_dec_count(handle_t *handle, struct inode *inode) ++void ext4_dec_count(handle_t *handle, struct inode *inode) + { + if (!S_ISDIR(inode->i_mode) || inode->i_nlink > 2) + drop_nlink(inode); + } ++EXPORT_SYMBOL(ext4_dec_count); + + + static int ext4_add_nondir(handle_t *handle, +@@ -2402,7 +2426,7 @@ out: + if (IS_DIRSYNC(dir)) + ext4_handle_sync(handle); + +- inode->i_op = &ext4_dir_inode_operations.ops; ++ inode->i_op = &ext4_dir_inode_operations; + inode->i_fop = &ext4_dir_operations; + return ext4_init_new_dir(handle, dir, inode); + } +Index: linux-3.12.39-47.1/fs/ext4/ialloc.c +=================================================================== +--- linux-3.12.39-47.1.orig/fs/ext4/ialloc.c ++++ linux-3.12.39-47.1/fs/ext4/ialloc.c +@@ -109,7 +109,7 @@ void ext4_end_bitmap_read(struct buffer_ + * + * Return buffer_head of bitmap on success or NULL. + */ +-static struct buffer_head * ++struct buffer_head * + ext4_read_inode_bitmap(struct super_block *sb, ext4_group_t block_group) + { + struct ext4_group_desc *desc; +@@ -192,6 +192,7 @@ verify: + set_buffer_verified(bh); + return bh; + } ++EXPORT_SYMBOL(ext4_read_inode_bitmap); + + /* + * NOTE! 
When we get the inode, we're the only people +Index: linux-3.12.39-47.1/fs/ext4/inode.c +=================================================================== +--- linux-3.12.39-47.1.orig/fs/ext4/inode.c ++++ linux-3.12.39-47.1/fs/ext4/inode.c +@@ -5186,3 +5186,17 @@ out: + sb_end_pagefault(inode->i_sb); + return ret; + } ++EXPORT_SYMBOL(ext4_map_blocks); ++EXPORT_SYMBOL(ext4_truncate); ++EXPORT_SYMBOL(ext4_iget); ++EXPORT_SYMBOL(ext4_bread); ++EXPORT_SYMBOL(ext4_itable_unused_count); ++EXPORT_SYMBOL(ext4_force_commit); ++EXPORT_SYMBOL(ext4_mark_inode_dirty); ++EXPORT_SYMBOL(ext4_get_group_desc); ++EXPORT_SYMBOL(__ext4_journal_get_write_access); ++EXPORT_SYMBOL(__ext4_journal_start_sb); ++EXPORT_SYMBOL(__ext4_journal_stop); ++EXPORT_SYMBOL(__ext4_handle_dirty_metadata); ++EXPORT_SYMBOL(__ext4_std_error); ++EXPORT_SYMBOL(ext4fs_dirhash); +Index: linux-3.12.39-47.1/fs/ext4/mballoc.c +=================================================================== +--- linux-3.12.39-47.1.orig/fs/ext4/mballoc.c ++++ linux-3.12.39-47.1/fs/ext4/mballoc.c +@@ -759,6 +759,9 @@ void ext4_mb_generate_buddy(struct super + * corrupt and update bb_free using bitmap value + */ + grp->bb_free = free; ++ if (!EXT4_MB_GRP_BBITMAP_CORRUPT(grp)) ++ percpu_counter_sub(&sbi->s_freeclusters_counter, ++ grp->bb_free); + set_bit(EXT4_GROUP_INFO_BBITMAP_CORRUPT_BIT, &grp->bb_state); + } + mb_set_largest_free_order(sb, grp); +Index: linux-3.12.39-47.1/fs/ext4/xattr.c +=================================================================== +--- linux-3.12.39-47.1.orig/fs/ext4/xattr.c ++++ linux-3.12.39-47.1/fs/ext4/xattr.c +@@ -541,6 +541,7 @@ ext4_xattr_release_block(handle_t *handl + int error = 0; + + ce = mb_cache_entry_get(ext4_xattr_cache, bh->b_bdev, bh->b_blocknr); ++ BUFFER_TRACE(bh, "get_write_access"); + error = ext4_journal_get_write_access(handle, bh); + if (error) + goto out; +@@ -781,6 +782,7 @@ ext4_xattr_block_set(handle_t *handle, s + if (s->base) { + ce = mb_cache_entry_get(ext4_xattr_cache, bs->bh->b_bdev, + bs->bh->b_blocknr); ++ BUFFER_TRACE(bs->bh, "get_write_access"); + error = ext4_journal_get_write_access(handle, bs->bh); + if (error) + goto cleanup; diff --git a/ldiskfs/kernel_patches/patches/sles12/ext4-pdirop.patch b/ldiskfs/kernel_patches/patches/sles12/ext4-pdirop.patch new file mode 100644 index 0000000..2ff2e23 --- /dev/null +++ b/ldiskfs/kernel_patches/patches/sles12/ext4-pdirop.patch @@ -0,0 +1,1931 @@ +Single directory performance is a critical for HPC workloads. In a +typical use case an application creates a separate output file for +each node and task in a job. As nodes and tasks increase, hundreds +of thousands of files may be created in a single directory within +a short window of time. +Today, both filename lookup and file system modifying operations +(such as create and unlink) are protected with a single lock for +an entire ldiskfs directory. PDO project will remove this +bottleneck by introducing a parallel locking mechanism for entire +ldiskfs directories. This work will enable multiple application +threads to simultaneously lookup, create and unlink in parallel. + +This patch contains: + - pdirops support for ldiskfs + - integrate with osd-ldiskfs + +Index: linux-3.10.0-229.1.2.fc21.x86_64/include/linux/htree_lock.h +=================================================================== +--- /dev/null ++++ linux-3.10.0-229.1.2.fc21.x86_64/include/linux/htree_lock.h +@@ -0,0 +1,187 @@ ++/* ++ * include/linux/htree_lock.h ++ * ++ * Copyright (c) 2011, 2012, Intel Corporation. 
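
For illustration (not part of the patch): the pdirops idea described above — replacing the single per-directory lock with per-bucket locks keyed by name hash — can be sketched in a few lines of user-space C. Everything here (NBUCKETS, dir_locks, name_lock) is hypothetical and far simpler than the htree_lock machinery the patch actually adds; the GNU C range initializer is assumed.

#include <pthread.h>

#define NBUCKETS 64	/* one directory, NBUCKETS independent locks */

static pthread_mutex_t dir_locks[NBUCKETS] = {
	[0 ... NBUCKETS - 1] = PTHREAD_MUTEX_INITIALIZER
};

static unsigned name_hash(const char *name)
{
	unsigned h = 5381;	/* djb2, a stand-in for the dx hash */

	while (*name)
		h = h * 33 + (unsigned char)*name++;
	return h;
}

/* lock only the bucket covering @name, not the whole directory */
static pthread_mutex_t *name_lock(const char *name)
{
	pthread_mutex_t *lk = &dir_locks[name_hash(name) % NBUCKETS];

	pthread_mutex_lock(lk);
	return lk;
}

int main(void)
{
	pthread_mutex_t *lk = name_lock("file-0001");

	/* ... create/unlink "file-0001" while other buckets stay free ... */
	pthread_mutex_unlock(lk);
	return 0;
}

Two threads creating names that hash to different buckets never serialize; the patch achieves the same effect with PR/PW child-locks held under a shared (CW/CR) tree lock.
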
++ * ++ * Author: Liang Zhen ++ */ ++ ++/* ++ * htree lock ++ * ++ * htree_lock is an advanced lock, it can support five lock modes (concept is ++ * taken from DLM) and it's a sleeping lock. ++ * ++ * most common use case is: ++ * - create a htree_lock_head for data ++ * - each thread (contender) creates it's own htree_lock ++ * - contender needs to call htree_lock(lock_node, mode) to protect data and ++ * call htree_unlock to release lock ++ * ++ * Also, there is advanced use-case which is more complex, user can have ++ * PW/PR lock on particular key, it's mostly used while user holding shared ++ * lock on the htree (CW, CR) ++ * ++ * htree_lock(lock_node, HTREE_LOCK_CR); lock the htree with CR ++ * htree_node_lock(lock_node, HTREE_LOCK_PR, key...); lock @key with PR ++ * ... ++ * htree_node_unlock(lock_node);; unlock the key ++ * ++ * Another tip is, we can have N-levels of this kind of keys, all we need to ++ * do is specifying N-levels while creating htree_lock_head, then we can ++ * lock/unlock a specific level by: ++ * htree_node_lock(lock_node, mode1, key1, level1...); ++ * do something; ++ * htree_node_lock(lock_node, mode1, key2, level2...); ++ * do something; ++ * htree_node_unlock(lock_node, level2); ++ * htree_node_unlock(lock_node, level1); ++ * ++ * NB: for multi-level, should be careful about locking order to avoid deadlock ++ */ ++ ++#ifndef _LINUX_HTREE_LOCK_H ++#define _LINUX_HTREE_LOCK_H ++ ++#include ++#include ++#include ++ ++/* ++ * Lock Modes ++ * more details can be found here: ++ * http://en.wikipedia.org/wiki/Distributed_lock_manager ++ */ ++typedef enum { ++ HTREE_LOCK_EX = 0, /* exclusive lock: incompatible with all others */ ++ HTREE_LOCK_PW, /* protected write: allows only CR users */ ++ HTREE_LOCK_PR, /* protected read: allow PR, CR users */ ++ HTREE_LOCK_CW, /* concurrent write: allow CR, CW users */ ++ HTREE_LOCK_CR, /* concurrent read: allow all but EX users */ ++ HTREE_LOCK_MAX, /* number of lock modes */ ++} htree_lock_mode_t; ++ ++#define HTREE_LOCK_NL HTREE_LOCK_MAX ++#define HTREE_LOCK_INVAL 0xdead10c ++ ++enum { ++ HTREE_HBITS_MIN = 2, ++ HTREE_HBITS_DEF = 14, ++ HTREE_HBITS_MAX = 32, ++}; ++ ++enum { ++ HTREE_EVENT_DISABLE = (0), ++ HTREE_EVENT_RD = (1 << HTREE_LOCK_PR), ++ HTREE_EVENT_WR = (1 << HTREE_LOCK_PW), ++ HTREE_EVENT_RDWR = (HTREE_EVENT_RD | HTREE_EVENT_WR), ++}; ++ ++struct htree_lock; ++ ++typedef void (*htree_event_cb_t)(void *target, void *event); ++ ++struct htree_lock_child { ++ struct list_head lc_list; /* granted list */ ++ htree_event_cb_t lc_callback; /* event callback */ ++ unsigned lc_events; /* event types */ ++}; ++ ++struct htree_lock_head { ++ unsigned long lh_lock; /* bits lock */ ++ /* blocked lock list (htree_lock) */ ++ struct list_head lh_blocked_list; ++ /* # key levels */ ++ u16 lh_depth; ++ /* hash bits for key and limit number of locks */ ++ u16 lh_hbits; ++ /* counters for blocked locks */ ++ u16 lh_nblocked[HTREE_LOCK_MAX]; ++ /* counters for granted locks */ ++ u16 lh_ngranted[HTREE_LOCK_MAX]; ++ /* private data */ ++ void *lh_private; ++ /* array of children locks */ ++ struct htree_lock_child lh_children[0]; ++}; ++ ++/* htree_lock_node_t is child-lock for a specific key (ln_value) */ ++struct htree_lock_node { ++ htree_lock_mode_t ln_mode; ++ /* major hash key */ ++ u16 ln_major_key; ++ /* minor hash key */ ++ u16 ln_minor_key; ++ struct list_head ln_major_list; ++ struct list_head ln_minor_list; ++ /* alive list, all locks (granted, blocked, listening) are on it */ ++ struct list_head ln_alive_list; ++ /* 
blocked list */ ++ struct list_head ln_blocked_list; ++ /* granted list */ ++ struct list_head ln_granted_list; ++ void *ln_ev_target; ++}; ++ ++struct htree_lock { ++ struct task_struct *lk_task; ++ struct htree_lock_head *lk_head; ++ void *lk_private; ++ unsigned lk_depth; ++ htree_lock_mode_t lk_mode; ++ struct list_head lk_blocked_list; ++ struct htree_lock_node lk_nodes[0]; ++}; ++ ++/* create a lock head, which stands for a resource */ ++struct htree_lock_head *htree_lock_head_alloc(unsigned depth, ++ unsigned hbits, unsigned priv); ++/* free a lock head */ ++void htree_lock_head_free(struct htree_lock_head *lhead); ++/* register event callback for child lock at level @depth */ ++void htree_lock_event_attach(struct htree_lock_head *lhead, unsigned depth, ++ unsigned events, htree_event_cb_t callback); ++/* create a lock handle, which stands for a thread */ ++struct htree_lock *htree_lock_alloc(unsigned depth, unsigned pbytes); ++/* free a lock handle */ ++void htree_lock_free(struct htree_lock *lck); ++/* lock htree, when @wait is true, 0 is returned if the lock can't ++ * be granted immediately */ ++int htree_lock_try(struct htree_lock *lck, struct htree_lock_head *lhead, ++ htree_lock_mode_t mode, int wait); ++/* unlock htree */ ++void htree_unlock(struct htree_lock *lck); ++/* unlock and relock htree with @new_mode */ ++int htree_change_lock_try(struct htree_lock *lck, ++ htree_lock_mode_t new_mode, int wait); ++void htree_change_mode(struct htree_lock *lck, htree_lock_mode_t mode); ++/* require child lock (key) of htree at level @dep, @event will be sent to all ++ * listeners on this @key while lock being granted */ ++int htree_node_lock_try(struct htree_lock *lck, htree_lock_mode_t mode, ++ u32 key, unsigned dep, int wait, void *event); ++/* release child lock at level @dep, this lock will listen on it's key ++ * if @event isn't NULL, event_cb will be called against @lck while granting ++ * any other lock at level @dep with the same key */ ++void htree_node_unlock(struct htree_lock *lck, unsigned dep, void *event); ++/* stop listening on child lock at level @dep */ ++void htree_node_stop_listen(struct htree_lock *lck, unsigned dep); ++/* for debug */ ++void htree_lock_stat_print(int depth); ++void htree_lock_stat_reset(void); ++ ++#define htree_lock(lck, lh, mode) htree_lock_try(lck, lh, mode, 1) ++#define htree_change_lock(lck, mode) htree_change_lock_try(lck, mode, 1) ++ ++#define htree_lock_mode(lck) ((lck)->lk_mode) ++ ++#define htree_node_lock(lck, mode, key, dep) \ ++ htree_node_lock_try(lck, mode, key, dep, 1, NULL) ++/* this is only safe in thread context of lock owner */ ++#define htree_node_is_granted(lck, dep) \ ++ ((lck)->lk_nodes[dep].ln_mode != HTREE_LOCK_INVAL && \ ++ (lck)->lk_nodes[dep].ln_mode != HTREE_LOCK_NL) ++/* this is only safe in thread context of lock owner */ ++#define htree_node_is_listening(lck, dep) \ ++ ((lck)->lk_nodes[dep].ln_mode == HTREE_LOCK_NL) ++ ++#endif +Index: linux-3.10.0-229.1.2.fc21.x86_64/fs/ext4/htree_lock.c +=================================================================== +--- /dev/null ++++ linux-3.10.0-229.1.2.fc21.x86_64/fs/ext4/htree_lock.c +@@ -0,0 +1,880 @@ ++/* ++ * fs/ext4/htree_lock.c ++ * ++ * Copyright (c) 2011, 2012, Intel Corporation. 
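
For illustration (not part of the patch): a sketch of the calling convention the header comment describes, using only the functions and macros declared above. resource_init() and reader_op() are hypothetical callers; error paths and teardown (htree_lock_head_free) are elided.

#include <linux/errno.h>
#include <linux/types.h>
#include <linux/htree_lock.h>

static struct htree_lock_head *res_lhead;

static int resource_init(void)
{
	/* one head per shared resource: two key levels, default hash bits */
	res_lhead = htree_lock_head_alloc(2, HTREE_HBITS_DEF, 0);
	return res_lhead != NULL ? 0 : -ENOMEM;
}

static void reader_op(u32 key)
{
	/* each contending thread brings its own handle */
	struct htree_lock *lck = htree_lock_alloc(2, 0);

	if (lck == NULL)
		return;
	htree_lock(lck, res_lhead, HTREE_LOCK_CR);	/* shared tree lock */
	htree_node_lock(lck, HTREE_LOCK_PR, key, 0);	/* PR on one key */
	/* ... read the data guarded by @key ... */
	htree_node_unlock(lck, 0, NULL);
	htree_unlock(lck);	/* also drops any remaining node locks */
	htree_lock_free(lck);
}
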
++ * ++ * Author: Liang Zhen ++ */ ++#include ++#include ++#include ++#include ++ ++enum { ++ HTREE_LOCK_BIT_EX = (1 << HTREE_LOCK_EX), ++ HTREE_LOCK_BIT_PW = (1 << HTREE_LOCK_PW), ++ HTREE_LOCK_BIT_PR = (1 << HTREE_LOCK_PR), ++ HTREE_LOCK_BIT_CW = (1 << HTREE_LOCK_CW), ++ HTREE_LOCK_BIT_CR = (1 << HTREE_LOCK_CR), ++}; ++ ++enum { ++ HTREE_LOCK_COMPAT_EX = 0, ++ HTREE_LOCK_COMPAT_PW = HTREE_LOCK_COMPAT_EX | HTREE_LOCK_BIT_CR, ++ HTREE_LOCK_COMPAT_PR = HTREE_LOCK_COMPAT_PW | HTREE_LOCK_BIT_PR, ++ HTREE_LOCK_COMPAT_CW = HTREE_LOCK_COMPAT_PW | HTREE_LOCK_BIT_CW, ++ HTREE_LOCK_COMPAT_CR = HTREE_LOCK_COMPAT_CW | HTREE_LOCK_BIT_PR | ++ HTREE_LOCK_BIT_PW, ++}; ++ ++static int htree_lock_compat[] = { ++ [HTREE_LOCK_EX] HTREE_LOCK_COMPAT_EX, ++ [HTREE_LOCK_PW] HTREE_LOCK_COMPAT_PW, ++ [HTREE_LOCK_PR] HTREE_LOCK_COMPAT_PR, ++ [HTREE_LOCK_CW] HTREE_LOCK_COMPAT_CW, ++ [HTREE_LOCK_CR] HTREE_LOCK_COMPAT_CR, ++}; ++ ++/* max allowed htree-lock depth. ++ * We only need depth=3 for ext4 although user can have higher value. */ ++#define HTREE_LOCK_DEP_MAX 16 ++ ++#ifdef HTREE_LOCK_DEBUG ++ ++static char *hl_name[] = { ++ [HTREE_LOCK_EX] "EX", ++ [HTREE_LOCK_PW] "PW", ++ [HTREE_LOCK_PR] "PR", ++ [HTREE_LOCK_CW] "CW", ++ [HTREE_LOCK_CR] "CR", ++}; ++ ++/* lock stats */ ++struct htree_lock_node_stats { ++ unsigned long long blocked[HTREE_LOCK_MAX]; ++ unsigned long long granted[HTREE_LOCK_MAX]; ++ unsigned long long retried[HTREE_LOCK_MAX]; ++ unsigned long long events; ++}; ++ ++struct htree_lock_stats { ++ struct htree_lock_node_stats nodes[HTREE_LOCK_DEP_MAX]; ++ unsigned long long granted[HTREE_LOCK_MAX]; ++ unsigned long long blocked[HTREE_LOCK_MAX]; ++}; ++ ++static struct htree_lock_stats hl_stats; ++ ++void htree_lock_stat_reset(void) ++{ ++ memset(&hl_stats, 0, sizeof(hl_stats)); ++} ++ ++void htree_lock_stat_print(int depth) ++{ ++ int i; ++ int j; ++ ++ printk(KERN_DEBUG "HTREE LOCK STATS:\n"); ++ for (i = 0; i < HTREE_LOCK_MAX; i++) { ++ printk(KERN_DEBUG "[%s]: G [%10llu], B [%10llu]\n", ++ hl_name[i], hl_stats.granted[i], hl_stats.blocked[i]); ++ } ++ for (i = 0; i < depth; i++) { ++ printk(KERN_DEBUG "HTREE CHILD [%d] STATS:\n", i); ++ for (j = 0; j < HTREE_LOCK_MAX; j++) { ++ printk(KERN_DEBUG ++ "[%s]: G [%10llu], B [%10llu], R [%10llu]\n", ++ hl_name[j], hl_stats.nodes[i].granted[j], ++ hl_stats.nodes[i].blocked[j], ++ hl_stats.nodes[i].retried[j]); ++ } ++ } ++} ++ ++#define lk_grant_inc(m) do { hl_stats.granted[m]++; } while (0) ++#define lk_block_inc(m) do { hl_stats.blocked[m]++; } while (0) ++#define ln_grant_inc(d, m) do { hl_stats.nodes[d].granted[m]++; } while (0) ++#define ln_block_inc(d, m) do { hl_stats.nodes[d].blocked[m]++; } while (0) ++#define ln_retry_inc(d, m) do { hl_stats.nodes[d].retried[m]++; } while (0) ++#define ln_event_inc(d) do { hl_stats.nodes[d].events++; } while (0) ++ ++#else /* !DEBUG */ ++ ++void htree_lock_stat_reset(void) {} ++void htree_lock_stat_print(int depth) {} ++ ++#define lk_grant_inc(m) do {} while (0) ++#define lk_block_inc(m) do {} while (0) ++#define ln_grant_inc(d, m) do {} while (0) ++#define ln_block_inc(d, m) do {} while (0) ++#define ln_retry_inc(d, m) do {} while (0) ++#define ln_event_inc(d) do {} while (0) ++ ++#endif /* DEBUG */ ++ ++EXPORT_SYMBOL(htree_lock_stat_reset); ++EXPORT_SYMBOL(htree_lock_stat_print); ++ ++#define HTREE_DEP_ROOT (-1) ++ ++#define htree_spin_lock(lhead, dep) \ ++ bit_spin_lock((dep) + 1, &(lhead)->lh_lock) ++#define htree_spin_unlock(lhead, dep) \ ++ bit_spin_unlock((dep) + 1, &(lhead)->lh_lock) ++ ++#define 
htree_key_event_ignore(child, ln) \ ++ (!((child)->lc_events & (1 << (ln)->ln_mode))) ++ ++static int ++htree_key_list_empty(struct htree_lock_node *ln) ++{ ++ return list_empty(&ln->ln_major_list) && list_empty(&ln->ln_minor_list); ++} ++ ++static void ++htree_key_list_del_init(struct htree_lock_node *ln) ++{ ++ struct htree_lock_node *tmp = NULL; ++ ++ if (!list_empty(&ln->ln_minor_list)) { ++ tmp = list_entry(ln->ln_minor_list.next, ++ struct htree_lock_node, ln_minor_list); ++ list_del_init(&ln->ln_minor_list); ++ } ++ ++ if (list_empty(&ln->ln_major_list)) ++ return; ++ ++ if (tmp == NULL) { /* not on minor key list */ ++ list_del_init(&ln->ln_major_list); ++ } else { ++ BUG_ON(!list_empty(&tmp->ln_major_list)); ++ list_replace_init(&ln->ln_major_list, &tmp->ln_major_list); ++ } ++} ++ ++static void ++htree_key_list_replace_init(struct htree_lock_node *old, ++ struct htree_lock_node *new) ++{ ++ if (!list_empty(&old->ln_major_list)) ++ list_replace_init(&old->ln_major_list, &new->ln_major_list); ++ ++ if (!list_empty(&old->ln_minor_list)) ++ list_replace_init(&old->ln_minor_list, &new->ln_minor_list); ++} ++ ++static void ++htree_key_event_enqueue(struct htree_lock_child *child, ++ struct htree_lock_node *ln, int dep, void *event) ++{ ++ struct htree_lock_node *tmp; ++ ++ /* NB: ALWAYS called holding lhead::lh_lock(dep) */ ++ BUG_ON(ln->ln_mode == HTREE_LOCK_NL); ++ if (event == NULL || htree_key_event_ignore(child, ln)) ++ return; ++ ++ /* shouldn't be a very long list */ ++ list_for_each_entry(tmp, &ln->ln_alive_list, ln_alive_list) { ++ if (tmp->ln_mode == HTREE_LOCK_NL) { ++ ln_event_inc(dep); ++ if (child->lc_callback != NULL) ++ child->lc_callback(tmp->ln_ev_target, event); ++ } ++ } ++} ++ ++static int ++htree_node_lock_enqueue(struct htree_lock *newlk, struct htree_lock *curlk, ++ unsigned dep, int wait, void *event) ++{ ++ struct htree_lock_child *child = &newlk->lk_head->lh_children[dep]; ++ struct htree_lock_node *newln = &newlk->lk_nodes[dep]; ++ struct htree_lock_node *curln = &curlk->lk_nodes[dep]; ++ ++ /* NB: ALWAYS called holding lhead::lh_lock(dep) */ ++ /* NB: we only expect PR/PW lock mode at here, only these two modes are ++ * allowed for htree_node_lock(asserted in htree_node_lock_internal), ++ * NL is only used for listener, user can't directly require NL mode */ ++ if ((curln->ln_mode == HTREE_LOCK_NL) || ++ (curln->ln_mode != HTREE_LOCK_PW && ++ newln->ln_mode != HTREE_LOCK_PW)) { ++ /* no conflict, attach it on granted list of @curlk */ ++ if (curln->ln_mode != HTREE_LOCK_NL) { ++ list_add(&newln->ln_granted_list, ++ &curln->ln_granted_list); ++ } else { ++ /* replace key owner */ ++ htree_key_list_replace_init(curln, newln); ++ } ++ ++ list_add(&newln->ln_alive_list, &curln->ln_alive_list); ++ htree_key_event_enqueue(child, newln, dep, event); ++ ln_grant_inc(dep, newln->ln_mode); ++ return 1; /* still hold lh_lock */ ++ } ++ ++ if (!wait) { /* can't grant and don't want to wait */ ++ ln_retry_inc(dep, newln->ln_mode); ++ newln->ln_mode = HTREE_LOCK_INVAL; ++ return -1; /* don't wait and just return -1 */ ++ } ++ ++ newlk->lk_task = current; ++ set_current_state(TASK_UNINTERRUPTIBLE); ++ /* conflict, attach it on blocked list of curlk */ ++ list_add_tail(&newln->ln_blocked_list, &curln->ln_blocked_list); ++ list_add(&newln->ln_alive_list, &curln->ln_alive_list); ++ ln_block_inc(dep, newln->ln_mode); ++ ++ htree_spin_unlock(newlk->lk_head, dep); ++ /* wait to be given the lock */ ++ if (newlk->lk_task != NULL) ++ schedule(); ++ /* granted, no doubt, wake up 
will set me RUNNING */ ++ if (event == NULL || htree_key_event_ignore(child, newln)) ++ return 0; /* granted without lh_lock */ ++ ++ htree_spin_lock(newlk->lk_head, dep); ++ htree_key_event_enqueue(child, newln, dep, event); ++ return 1; /* still hold lh_lock */ ++} ++ ++/* ++ * get PR/PW access to particular tree-node according to @dep and @key, ++ * it will return -1 if @wait is false and can't immediately grant this lock. ++ * All listeners(HTREE_LOCK_NL) on @dep and with the same @key will get ++ * @event if it's not NULL. ++ * NB: ALWAYS called holding lhead::lh_lock ++ */ ++static int ++htree_node_lock_internal(struct htree_lock_head *lhead, struct htree_lock *lck, ++ htree_lock_mode_t mode, u32 key, unsigned dep, ++ int wait, void *event) ++{ ++ LIST_HEAD(list); ++ struct htree_lock *tmp; ++ struct htree_lock *tmp2; ++ u16 major; ++ u16 minor; ++ u8 reverse; ++ u8 ma_bits; ++ u8 mi_bits; ++ ++ BUG_ON(mode != HTREE_LOCK_PW && mode != HTREE_LOCK_PR); ++ BUG_ON(htree_node_is_granted(lck, dep)); ++ ++ key = hash_long(key, lhead->lh_hbits); ++ ++ mi_bits = lhead->lh_hbits >> 1; ++ ma_bits = lhead->lh_hbits - mi_bits; ++ ++ lck->lk_nodes[dep].ln_major_key = major = key & ((1U << ma_bits) - 1); ++ lck->lk_nodes[dep].ln_minor_key = minor = key >> ma_bits; ++ lck->lk_nodes[dep].ln_mode = mode; ++ ++ /* ++ * The major key list is an ordered list, so searches are started ++ * at the end of the list that is numerically closer to major_key, ++ * so at most half of the list will be walked (for well-distributed ++ * keys). The list traversal aborts early if the expected key ++ * location is passed. ++ */ ++ reverse = (major >= (1 << (ma_bits - 1))); ++ ++ if (reverse) { ++ list_for_each_entry_reverse(tmp, ++ &lhead->lh_children[dep].lc_list, ++ lk_nodes[dep].ln_major_list) { ++ if (tmp->lk_nodes[dep].ln_major_key == major) { ++ goto search_minor; ++ ++ } else if (tmp->lk_nodes[dep].ln_major_key < major) { ++ /* attach _after_ @tmp */ ++ list_add(&lck->lk_nodes[dep].ln_major_list, ++ &tmp->lk_nodes[dep].ln_major_list); ++ goto out_grant_major; ++ } ++ } ++ ++ list_add(&lck->lk_nodes[dep].ln_major_list, ++ &lhead->lh_children[dep].lc_list); ++ goto out_grant_major; ++ ++ } else { ++ list_for_each_entry(tmp, &lhead->lh_children[dep].lc_list, ++ lk_nodes[dep].ln_major_list) { ++ if (tmp->lk_nodes[dep].ln_major_key == major) { ++ goto search_minor; ++ ++ } else if (tmp->lk_nodes[dep].ln_major_key > major) { ++ /* insert _before_ @tmp */ ++ list_add_tail(&lck->lk_nodes[dep].ln_major_list, ++ &tmp->lk_nodes[dep].ln_major_list); ++ goto out_grant_major; ++ } ++ } ++ ++ list_add_tail(&lck->lk_nodes[dep].ln_major_list, ++ &lhead->lh_children[dep].lc_list); ++ goto out_grant_major; ++ } ++ ++ search_minor: ++ /* ++ * NB: minor_key list doesn't have a "head", @list is just a ++ * temporary stub for helping list searching, make sure it's removed ++ * after searching. ++ * minor_key list is an ordered list too. 
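
For illustration (not part of the patch): the "start from the nearer end" search used on the ordered major-key list above, reduced to a sorted array. find_pred() and RANGE are hypothetical; with uniformly hashed keys the expected walk is halved, as the comment claims.

#include <stdio.h>

#define RANGE 256	/* e.g. 1 << ma_bits in the code above */

/* index of the largest key <= @key, or -1; scan from the nearer end */
static int find_pred(const unsigned *keys, int n, unsigned key)
{
	int i;

	if (key >= RANGE / 2) {
		for (i = n - 1; i >= 0; i--)	/* walk from the tail */
			if (keys[i] <= key)
				return i;
		return -1;
	}
	for (i = 0; i < n; i++)			/* walk from the head */
		if (keys[i] > key)
			return i - 1;
	return n - 1;
}

int main(void)
{
	unsigned keys[] = { 3, 40, 97, 130, 201, 250 };

	printf("%d\n", find_pred(keys, 6, 200));	/* 3: reached from tail */
	printf("%d\n", find_pred(keys, 6, 41));		/* 1: reached from head */
	return 0;
}
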
++ */ ++ list_add_tail(&list, &tmp->lk_nodes[dep].ln_minor_list); ++ ++ reverse = (minor >= (1 << (mi_bits - 1))); ++ ++ if (reverse) { ++ list_for_each_entry_reverse(tmp2, &list, ++ lk_nodes[dep].ln_minor_list) { ++ if (tmp2->lk_nodes[dep].ln_minor_key == minor) { ++ goto out_enqueue; ++ ++ } else if (tmp2->lk_nodes[dep].ln_minor_key < minor) { ++ /* attach _after_ @tmp2 */ ++ list_add(&lck->lk_nodes[dep].ln_minor_list, ++ &tmp2->lk_nodes[dep].ln_minor_list); ++ goto out_grant_minor; ++ } ++ } ++ ++ list_add(&lck->lk_nodes[dep].ln_minor_list, &list); ++ ++ } else { ++ list_for_each_entry(tmp2, &list, ++ lk_nodes[dep].ln_minor_list) { ++ if (tmp2->lk_nodes[dep].ln_minor_key == minor) { ++ goto out_enqueue; ++ ++ } else if (tmp2->lk_nodes[dep].ln_minor_key > minor) { ++ /* insert _before_ @tmp2 */ ++ list_add_tail(&lck->lk_nodes[dep].ln_minor_list, ++ &tmp2->lk_nodes[dep].ln_minor_list); ++ goto out_grant_minor; ++ } ++ } ++ ++ list_add_tail(&lck->lk_nodes[dep].ln_minor_list, &list); ++ } ++ ++ out_grant_minor: ++ if (list.next == &lck->lk_nodes[dep].ln_minor_list) { ++ /* new lock @lck is the first one on minor_key list, which ++ * means it has the smallest minor_key and it should ++ * replace @tmp as minor_key owner */ ++ list_replace_init(&tmp->lk_nodes[dep].ln_major_list, ++ &lck->lk_nodes[dep].ln_major_list); ++ } ++ /* remove the temporary head */ ++ list_del(&list); ++ ++ out_grant_major: ++ ln_grant_inc(dep, lck->lk_nodes[dep].ln_mode); ++ return 1; /* granted with holding lh_lock */ ++ ++ out_enqueue: ++ list_del(&list); /* remove temprary head */ ++ return htree_node_lock_enqueue(lck, tmp2, dep, wait, event); ++} ++ ++/* ++ * release the key of @lck at level @dep, and grant any blocked locks. ++ * caller will still listen on @key if @event is not NULL, which means ++ * caller can see a event (by event_cb) while granting any lock with ++ * the same key at level @dep. ++ * NB: ALWAYS called holding lhead::lh_lock ++ * NB: listener will not block anyone because listening mode is HTREE_LOCK_NL ++ */ ++static void ++htree_node_unlock_internal(struct htree_lock_head *lhead, ++ struct htree_lock *curlk, unsigned dep, void *event) ++{ ++ struct htree_lock_node *curln = &curlk->lk_nodes[dep]; ++ struct htree_lock *grtlk = NULL; ++ struct htree_lock_node *grtln; ++ struct htree_lock *poslk; ++ struct htree_lock *tmplk; ++ ++ if (!htree_node_is_granted(curlk, dep)) ++ return; ++ ++ if (!list_empty(&curln->ln_granted_list)) { ++ /* there is another granted lock */ ++ grtlk = list_entry(curln->ln_granted_list.next, ++ struct htree_lock, ++ lk_nodes[dep].ln_granted_list); ++ list_del_init(&curln->ln_granted_list); ++ } ++ ++ if (grtlk == NULL && !list_empty(&curln->ln_blocked_list)) { ++ /* ++ * @curlk is the only granted lock, so we confirmed: ++ * a) curln is key owner (attached on major/minor_list), ++ * so if there is any blocked lock, it should be attached ++ * on curln->ln_blocked_list ++ * b) we always can grant the first blocked lock ++ */ ++ grtlk = list_entry(curln->ln_blocked_list.next, ++ struct htree_lock, ++ lk_nodes[dep].ln_blocked_list); ++ BUG_ON(grtlk->lk_task == NULL); ++ wake_up_process(grtlk->lk_task); ++ } ++ ++ if (event != NULL && ++ lhead->lh_children[dep].lc_events != HTREE_EVENT_DISABLE) { ++ curln->ln_ev_target = event; ++ curln->ln_mode = HTREE_LOCK_NL; /* listen! 
*/ ++ } else { ++ curln->ln_mode = HTREE_LOCK_INVAL; ++ } ++ ++ if (grtlk == NULL) { /* I must be the only one locking this key */ ++ struct htree_lock_node *tmpln; ++ ++ BUG_ON(htree_key_list_empty(curln)); ++ ++ if (curln->ln_mode == HTREE_LOCK_NL) /* listening */ ++ return; ++ ++ /* not listening */ ++ if (list_empty(&curln->ln_alive_list)) { /* no more listener */ ++ htree_key_list_del_init(curln); ++ return; ++ } ++ ++ tmpln = list_entry(curln->ln_alive_list.next, ++ struct htree_lock_node, ln_alive_list); ++ ++ BUG_ON(tmpln->ln_mode != HTREE_LOCK_NL); ++ ++ htree_key_list_replace_init(curln, tmpln); ++ list_del_init(&curln->ln_alive_list); ++ ++ return; ++ } ++ ++ /* have a granted lock */ ++ grtln = &grtlk->lk_nodes[dep]; ++ if (!list_empty(&curln->ln_blocked_list)) { ++ /* only key owner can be on both lists */ ++ BUG_ON(htree_key_list_empty(curln)); ++ ++ if (list_empty(&grtln->ln_blocked_list)) { ++ list_add(&grtln->ln_blocked_list, ++ &curln->ln_blocked_list); ++ } ++ list_del_init(&curln->ln_blocked_list); ++ } ++ /* ++ * NB: this is the tricky part: ++ * We have only two modes for child-lock (PR and PW), also, ++ * only owner of the key (attached on major/minor_list) can be on ++ * both blocked_list and granted_list, so @grtlk must be one ++ * of these two cases: ++ * ++ * a) @grtlk is taken from granted_list, which means we've granted ++ * more than one lock so @grtlk has to be PR, the first blocked ++ * lock must be PW and we can't grant it at all. ++ * So even @grtlk is not owner of the key (empty blocked_list), ++ * we don't care because we can't grant any lock. ++ * b) we just grant a new lock which is taken from head of blocked ++ * list, and it should be the first granted lock, and it should ++ * be the first one linked on blocked_list. ++ * ++ * Either way, we can get correct result by iterating blocked_list ++ * of @grtlk, and don't have to bother on how to find out ++ * owner of current key. 
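
For illustration (not part of the patch): the grant loop that follows wakes blocked waiters in queue order and stops at the first conflict, so a run of readers is granted as a batch. The same logic reduced to an array of waiters (grant_batch() and struct waiter are hypothetical):

#include <stdio.h>

enum mode { PR, PW };

struct waiter { enum mode m; int woken; };

static void grant_batch(enum mode holder, struct waiter *q, int n)
{
	int i;

	for (i = 0; i < n; i++) {
		if (holder == PW || q[i].m == PW)
			break;		/* stop at the first conflict */
		q[i].woken = 1;		/* grant another reader */
	}
}

int main(void)
{
	struct waiter q[] = { {PR, 0}, {PR, 0}, {PW, 0}, {PR, 0} };
	int i;

	grant_batch(PR, q, 4);
	for (i = 0; i < 4; i++)
		printf("waiter %d: %s\n", i, q[i].woken ? "granted" : "blocked");
	return 0;	/* the two leading PRs are granted; PW and the PR behind it wait */
}
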
++ */ ++ list_for_each_entry_safe(poslk, tmplk, &grtln->ln_blocked_list, ++ lk_nodes[dep].ln_blocked_list) { ++ if (grtlk->lk_nodes[dep].ln_mode == HTREE_LOCK_PW || ++ poslk->lk_nodes[dep].ln_mode == HTREE_LOCK_PW) ++ break; ++ /* grant all readers */ ++ list_del_init(&poslk->lk_nodes[dep].ln_blocked_list); ++ list_add(&poslk->lk_nodes[dep].ln_granted_list, ++ &grtln->ln_granted_list); ++ ++ BUG_ON(poslk->lk_task == NULL); ++ wake_up_process(poslk->lk_task); ++ } ++ ++ /* if @curln is the owner of this key, replace it with @grtln */ ++ if (!htree_key_list_empty(curln)) ++ htree_key_list_replace_init(curln, grtln); ++ ++ if (curln->ln_mode == HTREE_LOCK_INVAL) ++ list_del_init(&curln->ln_alive_list); ++} ++ ++/* ++ * it's just wrapper of htree_node_lock_internal, it returns 1 on granted ++ * and 0 only if @wait is false and can't grant it immediately ++ */ ++int ++htree_node_lock_try(struct htree_lock *lck, htree_lock_mode_t mode, ++ u32 key, unsigned dep, int wait, void *event) ++{ ++ struct htree_lock_head *lhead = lck->lk_head; ++ int rc; ++ ++ BUG_ON(dep >= lck->lk_depth); ++ BUG_ON(lck->lk_mode == HTREE_LOCK_INVAL); ++ ++ htree_spin_lock(lhead, dep); ++ rc = htree_node_lock_internal(lhead, lck, mode, key, dep, wait, event); ++ if (rc != 0) ++ htree_spin_unlock(lhead, dep); ++ return rc >= 0; ++} ++EXPORT_SYMBOL(htree_node_lock_try); ++ ++/* it's wrapper of htree_node_unlock_internal */ ++void ++htree_node_unlock(struct htree_lock *lck, unsigned dep, void *event) ++{ ++ struct htree_lock_head *lhead = lck->lk_head; ++ ++ BUG_ON(dep >= lck->lk_depth); ++ BUG_ON(lck->lk_mode == HTREE_LOCK_INVAL); ++ ++ htree_spin_lock(lhead, dep); ++ htree_node_unlock_internal(lhead, lck, dep, event); ++ htree_spin_unlock(lhead, dep); ++} ++EXPORT_SYMBOL(htree_node_unlock); ++ ++/* stop listening on child-lock level @dep */ ++void ++htree_node_stop_listen(struct htree_lock *lck, unsigned dep) ++{ ++ struct htree_lock_node *ln = &lck->lk_nodes[dep]; ++ struct htree_lock_node *tmp; ++ ++ BUG_ON(htree_node_is_granted(lck, dep)); ++ BUG_ON(!list_empty(&ln->ln_blocked_list)); ++ BUG_ON(!list_empty(&ln->ln_granted_list)); ++ ++ if (!htree_node_is_listening(lck, dep)) ++ return; ++ ++ htree_spin_lock(lck->lk_head, dep); ++ ln->ln_mode = HTREE_LOCK_INVAL; ++ ln->ln_ev_target = NULL; ++ ++ if (htree_key_list_empty(ln)) { /* not owner */ ++ list_del_init(&ln->ln_alive_list); ++ goto out; ++ } ++ ++ /* I'm the owner... */ ++ if (list_empty(&ln->ln_alive_list)) { /* no more listener */ ++ htree_key_list_del_init(ln); ++ goto out; ++ } ++ ++ tmp = list_entry(ln->ln_alive_list.next, ++ struct htree_lock_node, ln_alive_list); ++ ++ BUG_ON(tmp->ln_mode != HTREE_LOCK_NL); ++ htree_key_list_replace_init(ln, tmp); ++ list_del_init(&ln->ln_alive_list); ++ out: ++ htree_spin_unlock(lck->lk_head, dep); ++} ++EXPORT_SYMBOL(htree_node_stop_listen); ++ ++/* release all child-locks if we have any */ ++static void ++htree_node_release_all(struct htree_lock *lck) ++{ ++ int i; ++ ++ for (i = 0; i < lck->lk_depth; i++) { ++ if (htree_node_is_granted(lck, i)) ++ htree_node_unlock(lck, i, NULL); ++ else if (htree_node_is_listening(lck, i)) ++ htree_node_stop_listen(lck, i); ++ } ++} ++ ++/* ++ * obtain htree lock, it could be blocked inside if there's conflict ++ * with any granted or blocked lock and @wait is true. 
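
For illustration (not part of the patch): the admission test in htree_lock_internal() below is a pure bit-mask check against the HTREE_LOCK_COMPAT_* tables defined earlier in this file. A user-space rendering of the same tables and test:

#include <stdio.h>

enum { EX, PW, PR, CW, CR, MAX };

#define B(m) (1 << (m))

/* mirrors HTREE_LOCK_COMPAT_EX .. HTREE_LOCK_COMPAT_CR above */
static const int compat[MAX] = {
	[EX] = 0,
	[PW] = B(CR),
	[PR] = B(CR) | B(PR),
	[CW] = B(CR) | B(CW),
	[CR] = B(CR) | B(CW) | B(PR) | B(PW),
};

/* a mode is admissible iff every currently held mode is in its mask */
static int admissible(int mode, int granted)
{
	return (compat[mode] & granted) == granted;
}

int main(void)
{
	printf("PR vs granted{PR}: %d\n", admissible(PR, B(PR)));	/* 1 */
	printf("PW vs granted{PR}: %d\n", admissible(PW, B(PR)));	/* 0 */
	printf("CR vs granted{PW}: %d\n", admissible(CR, B(PW)));	/* 1 */
	printf("EX vs granted{CR}: %d\n", admissible(EX, B(CR)));	/* 0 */
	return 0;
}

The same mask is also checked against the blocked set, which is why a new lock queues behind any incompatible waiter instead of overtaking it — that is what keeps EX from starving.
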
++ * NB: ALWAYS called holding lhead::lh_lock ++ */ ++static int ++htree_lock_internal(struct htree_lock *lck, int wait) ++{ ++ struct htree_lock_head *lhead = lck->lk_head; ++ int granted = 0; ++ int blocked = 0; ++ int i; ++ ++ for (i = 0; i < HTREE_LOCK_MAX; i++) { ++ if (lhead->lh_ngranted[i] != 0) ++ granted |= 1 << i; ++ if (lhead->lh_nblocked[i] != 0) ++ blocked |= 1 << i; ++ } ++ if ((htree_lock_compat[lck->lk_mode] & granted) != granted || ++ (htree_lock_compat[lck->lk_mode] & blocked) != blocked) { ++ /* will block current lock even it just conflicts with any ++ * other blocked lock, so lock like EX wouldn't starve */ ++ if (!wait) ++ return -1; ++ lhead->lh_nblocked[lck->lk_mode]++; ++ lk_block_inc(lck->lk_mode); ++ ++ lck->lk_task = current; ++ list_add_tail(&lck->lk_blocked_list, &lhead->lh_blocked_list); ++ ++ set_current_state(TASK_UNINTERRUPTIBLE); ++ htree_spin_unlock(lhead, HTREE_DEP_ROOT); ++ /* wait to be given the lock */ ++ if (lck->lk_task != NULL) ++ schedule(); ++ /* granted, no doubt. wake up will set me RUNNING */ ++ return 0; /* without lh_lock */ ++ } ++ lhead->lh_ngranted[lck->lk_mode]++; ++ lk_grant_inc(lck->lk_mode); ++ return 1; ++} ++ ++/* release htree lock. NB: ALWAYS called holding lhead::lh_lock */ ++static void ++htree_unlock_internal(struct htree_lock *lck) ++{ ++ struct htree_lock_head *lhead = lck->lk_head; ++ struct htree_lock *tmp; ++ struct htree_lock *tmp2; ++ int granted = 0; ++ int i; ++ ++ BUG_ON(lhead->lh_ngranted[lck->lk_mode] == 0); ++ ++ lhead->lh_ngranted[lck->lk_mode]--; ++ lck->lk_mode = HTREE_LOCK_INVAL; ++ ++ for (i = 0; i < HTREE_LOCK_MAX; i++) { ++ if (lhead->lh_ngranted[i] != 0) ++ granted |= 1 << i; ++ } ++ list_for_each_entry_safe(tmp, tmp2, ++ &lhead->lh_blocked_list, lk_blocked_list) { ++ /* conflict with any granted lock? */ ++ if ((htree_lock_compat[tmp->lk_mode] & granted) != granted) ++ break; ++ ++ list_del_init(&tmp->lk_blocked_list); ++ ++ BUG_ON(lhead->lh_nblocked[tmp->lk_mode] == 0); ++ ++ lhead->lh_nblocked[tmp->lk_mode]--; ++ lhead->lh_ngranted[tmp->lk_mode]++; ++ granted |= 1 << tmp->lk_mode; ++ ++ BUG_ON(tmp->lk_task == NULL); ++ wake_up_process(tmp->lk_task); ++ } ++} ++ ++/* it's wrapper of htree_lock_internal and exported interface. ++ * It always return 1 with granted lock if @wait is true, it can return 0 ++ * if @wait is false and locking request can't be granted immediately */ ++int ++htree_lock_try(struct htree_lock *lck, struct htree_lock_head *lhead, ++ htree_lock_mode_t mode, int wait) ++{ ++ int rc; ++ ++ BUG_ON(lck->lk_depth > lhead->lh_depth); ++ BUG_ON(lck->lk_head != NULL); ++ BUG_ON(lck->lk_task != NULL); ++ ++ lck->lk_head = lhead; ++ lck->lk_mode = mode; ++ ++ htree_spin_lock(lhead, HTREE_DEP_ROOT); ++ rc = htree_lock_internal(lck, wait); ++ if (rc != 0) ++ htree_spin_unlock(lhead, HTREE_DEP_ROOT); ++ return rc >= 0; ++} ++EXPORT_SYMBOL(htree_lock_try); ++ ++/* it's wrapper of htree_unlock_internal and exported interface. 
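
For illustration (not part of the patch): both blocking paths in this file use the same direct hand-off — the sleeper publishes its task (lk_task = current), drops the spinlock, and calls schedule(); the granter calls wake_up_process(). A user-space analogue built on a per-waiter POSIX semaphore, where the post cannot be lost even if it arrives before the wait (struct waiter and the thread bodies are hypothetical):

#include <pthread.h>
#include <semaphore.h>
#include <stdio.h>

struct waiter {
	sem_t wake;	/* plays the role of the blocked task's state */
};

static struct waiter w;

static void *blocked_thread(void *arg)
{
	sem_wait(&w.wake);	/* "schedule()": sleep until granted */
	printf("granted\n");
	return NULL;
}

int main(void)
{
	pthread_t t;

	sem_init(&w.wake, 0, 0);
	pthread_create(&t, NULL, blocked_thread, NULL);
	sem_post(&w.wake);	/* "wake_up_process()": hand the lock on */
	pthread_join(t, NULL);
	sem_destroy(&w.wake);
	return 0;
}
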
++ * It will release all htree_node_locks and htree_lock */
++void
++htree_unlock(struct htree_lock *lck)
++{
++	BUG_ON(lck->lk_head == NULL);
++	BUG_ON(lck->lk_mode == HTREE_LOCK_INVAL);
++
++	htree_node_release_all(lck);
++
++	htree_spin_lock(lck->lk_head, HTREE_DEP_ROOT);
++	htree_unlock_internal(lck);
++	htree_spin_unlock(lck->lk_head, HTREE_DEP_ROOT);
++	lck->lk_head = NULL;
++	lck->lk_task = NULL;
++}
++EXPORT_SYMBOL(htree_unlock);
++
++/* change lock mode */
++void
++htree_change_mode(struct htree_lock *lck, htree_lock_mode_t mode)
++{
++	BUG_ON(lck->lk_mode == HTREE_LOCK_INVAL);
++	lck->lk_mode = mode;
++}
++EXPORT_SYMBOL(htree_change_mode);
++
++/* release htree lock, and lock it again with new mode.
++ * This function will first release all htree_node_locks and htree_lock,
++ * then try to gain htree_lock with the new @mode.
++ * It always returns 1 with the lock granted if @wait is true; it can
++ * return 0 if @wait is false and the locking request can't be granted
++ * immediately */
++int
++htree_change_lock_try(struct htree_lock *lck, htree_lock_mode_t mode, int wait)
++{
++	struct htree_lock_head *lhead = lck->lk_head;
++	int rc;
++
++	BUG_ON(lhead == NULL);
++	BUG_ON(lck->lk_mode == mode);
++	BUG_ON(lck->lk_mode == HTREE_LOCK_INVAL || mode == HTREE_LOCK_INVAL);
++
++	htree_node_release_all(lck);
++
++	htree_spin_lock(lhead, HTREE_DEP_ROOT);
++	htree_unlock_internal(lck);
++	lck->lk_mode = mode;
++	rc = htree_lock_internal(lck, wait);
++	if (rc != 0)
++		htree_spin_unlock(lhead, HTREE_DEP_ROOT);
++	return rc >= 0;
++}
++EXPORT_SYMBOL(htree_change_lock_try);
++
++/* create a htree_lock head with @depth levels (number of child-locks),
++ * it is a per-resource structure */
++struct htree_lock_head *
++htree_lock_head_alloc(unsigned depth, unsigned hbits, unsigned priv)
++{
++	struct htree_lock_head *lhead;
++	int i;
++
++	if (depth > HTREE_LOCK_DEP_MAX) {
++		printk(KERN_ERR "%d is larger than max htree_lock depth %d\n",
++			depth, HTREE_LOCK_DEP_MAX);
++		return NULL;
++	}
++
++	lhead = kzalloc(offsetof(struct htree_lock_head,
++			 lh_children[depth]) + priv, GFP_NOFS);
++	if (lhead == NULL)
++		return NULL;
++
++	if (hbits < HTREE_HBITS_MIN)
++		lhead->lh_hbits = HTREE_HBITS_MIN;
++	else if (hbits > HTREE_HBITS_MAX)
++		lhead->lh_hbits = HTREE_HBITS_MAX;
++	else
++		lhead->lh_hbits = hbits;	/* in-range value is used as-is */
++
++	lhead->lh_lock = 0;
++	lhead->lh_depth = depth;
++	INIT_LIST_HEAD(&lhead->lh_blocked_list);
++	if (priv > 0) {
++		lhead->lh_private = (void *)lhead +
++			offsetof(struct htree_lock_head, lh_children[depth]);
++	}
++
++	for (i = 0; i < depth; i++) {
++		INIT_LIST_HEAD(&lhead->lh_children[i].lc_list);
++		lhead->lh_children[i].lc_events = HTREE_EVENT_DISABLE;
++	}
++	return lhead;
++}
++EXPORT_SYMBOL(htree_lock_head_alloc);
++
++/* free the htree_lock head */
++void
++htree_lock_head_free(struct htree_lock_head *lhead)
++{
++	int i;
++
++	BUG_ON(!list_empty(&lhead->lh_blocked_list));
++	for (i = 0; i < lhead->lh_depth; i++)
++		BUG_ON(!list_empty(&lhead->lh_children[i].lc_list));
++	kfree(lhead);
++}
++EXPORT_SYMBOL(htree_lock_head_free);
++
++/* register event callback for @events of child-lock at level @dep */
++void
++htree_lock_event_attach(struct htree_lock_head *lhead, unsigned dep,
++			unsigned events, htree_event_cb_t callback)
++{
++	BUG_ON(lhead->lh_depth <= dep);
++	lhead->lh_children[dep].lc_events = events;
++	lhead->lh_children[dep].lc_callback = callback;
++}
++EXPORT_SYMBOL(htree_lock_event_attach);
++
++/* allocate a htree_lock, which is a per-thread structure; @pbytes is
++ * extra bytes of private data for the caller */
++struct 
htree_lock * ++htree_lock_alloc(unsigned depth, unsigned pbytes) ++{ ++ struct htree_lock *lck; ++ int i = offsetof(struct htree_lock, lk_nodes[depth]); ++ ++ if (depth > HTREE_LOCK_DEP_MAX) { ++ printk(KERN_ERR "%d is larger than max htree_lock depth %d\n", ++ depth, HTREE_LOCK_DEP_MAX); ++ return NULL; ++ } ++ lck = kzalloc(i + pbytes, GFP_NOFS); ++ if (lck == NULL) ++ return NULL; ++ ++ if (pbytes != 0) ++ lck->lk_private = (void *)lck + i; ++ lck->lk_mode = HTREE_LOCK_INVAL; ++ lck->lk_depth = depth; ++ INIT_LIST_HEAD(&lck->lk_blocked_list); ++ ++ for (i = 0; i < depth; i++) { ++ struct htree_lock_node *node = &lck->lk_nodes[i]; ++ ++ node->ln_mode = HTREE_LOCK_INVAL; ++ INIT_LIST_HEAD(&node->ln_major_list); ++ INIT_LIST_HEAD(&node->ln_minor_list); ++ INIT_LIST_HEAD(&node->ln_alive_list); ++ INIT_LIST_HEAD(&node->ln_blocked_list); ++ INIT_LIST_HEAD(&node->ln_granted_list); ++ } ++ ++ return lck; ++} ++EXPORT_SYMBOL(htree_lock_alloc); ++ ++/* free htree_lock node */ ++void ++htree_lock_free(struct htree_lock *lck) ++{ ++ BUG_ON(lck->lk_mode != HTREE_LOCK_INVAL); ++ kfree(lck); ++} ++EXPORT_SYMBOL(htree_lock_free); +Index: linux-3.10.0-229.1.2.fc21.x86_64/fs/ext4/Makefile +=================================================================== +--- linux-3.10.0-229.1.2.fc21.x86_64.orig/fs/ext4/Makefile ++++ linux-3.10.0-229.1.2.fc21.x86_64/fs/ext4/Makefile +@@ -6,6 +6,7 @@ obj-$(CONFIG_EXT4_FS) += ext4.o + + ext4-y := balloc.o bitmap.o dir.o file.o fsync.o ialloc.o inode.o page-io.o \ + ioctl.o namei.o super.o symlink.o hash.o resize.o extents.o \ ++ htree_lock.o \ + ext4_jbd2.o migrate.o mballoc.o block_validity.o move_extent.o \ + mmp.o indirect.o extents_status.o xattr.o xattr_user.o \ + xattr_trusted.o inline.o +Index: linux-3.10.0-229.1.2.fc21.x86_64/fs/ext4/ext4.h +=================================================================== +--- linux-3.10.0-229.1.2.fc21.x86_64.orig/fs/ext4/ext4.h ++++ linux-3.10.0-229.1.2.fc21.x86_64/fs/ext4/ext4.h +@@ -27,6 +27,7 @@ + #include + #include + #include ++#include + #include + #include + #include +@@ -821,6 +822,9 @@ struct ext4_inode_info { + __u32 i_dtime; + ext4_fsblk_t i_file_acl; + ++ /* following fields for parallel directory operations -bzzz */ ++ struct semaphore i_append_sem; ++ + /* + * i_block_group is the number of the block group which contains + * this file's inode. 
Constant across the lifetime of the inode, +@@ -1846,6 +1850,71 @@ struct dx_hash_info + */ + #define HASH_NB_ALWAYS 1 + ++/* assume name-hash is protected by upper layer */ ++#define EXT4_HTREE_LOCK_HASH 0 ++ ++enum ext4_pdo_lk_types { ++#if EXT4_HTREE_LOCK_HASH ++ EXT4_LK_HASH, ++#endif ++ EXT4_LK_DX, /* index block */ ++ EXT4_LK_DE, /* directory entry block */ ++ EXT4_LK_SPIN, /* spinlock */ ++ EXT4_LK_MAX, ++}; ++ ++/* read-only bit */ ++#define EXT4_LB_RO(b) (1 << (b)) ++/* read + write, high bits for writer */ ++#define EXT4_LB_RW(b) ((1 << (b)) | (1 << (EXT4_LK_MAX + (b)))) ++ ++enum ext4_pdo_lock_bits { ++ /* DX lock bits */ ++ EXT4_LB_DX_RO = EXT4_LB_RO(EXT4_LK_DX), ++ EXT4_LB_DX = EXT4_LB_RW(EXT4_LK_DX), ++ /* DE lock bits */ ++ EXT4_LB_DE_RO = EXT4_LB_RO(EXT4_LK_DE), ++ EXT4_LB_DE = EXT4_LB_RW(EXT4_LK_DE), ++ /* DX spinlock bits */ ++ EXT4_LB_SPIN_RO = EXT4_LB_RO(EXT4_LK_SPIN), ++ EXT4_LB_SPIN = EXT4_LB_RW(EXT4_LK_SPIN), ++ /* accurate searching */ ++ EXT4_LB_EXACT = EXT4_LB_RO(EXT4_LK_MAX << 1), ++}; ++ ++enum ext4_pdo_lock_opc { ++ /* external */ ++ EXT4_HLOCK_READDIR = (EXT4_LB_DE_RO | EXT4_LB_DX_RO), ++ EXT4_HLOCK_LOOKUP = (EXT4_LB_DE_RO | EXT4_LB_SPIN_RO | ++ EXT4_LB_EXACT), ++ EXT4_HLOCK_DEL = (EXT4_LB_DE | EXT4_LB_SPIN_RO | ++ EXT4_LB_EXACT), ++ EXT4_HLOCK_ADD = (EXT4_LB_DE | EXT4_LB_SPIN_RO), ++ ++ /* internal */ ++ EXT4_HLOCK_LOOKUP_SAFE = (EXT4_LB_DE_RO | EXT4_LB_DX_RO | ++ EXT4_LB_EXACT), ++ EXT4_HLOCK_DEL_SAFE = (EXT4_LB_DE | EXT4_LB_DX_RO | EXT4_LB_EXACT), ++ EXT4_HLOCK_SPLIT = (EXT4_LB_DE | EXT4_LB_DX | EXT4_LB_SPIN), ++}; ++ ++extern struct htree_lock_head *ext4_htree_lock_head_alloc(unsigned hbits); ++#define ext4_htree_lock_head_free(lhead) htree_lock_head_free(lhead) ++ ++extern struct htree_lock *ext4_htree_lock_alloc(void); ++#define ext4_htree_lock_free(lck) htree_lock_free(lck) ++ ++extern void ext4_htree_lock(struct htree_lock *lck, ++ struct htree_lock_head *lhead, ++ struct inode *dir, unsigned flags); ++#define ext4_htree_unlock(lck) htree_unlock(lck) ++ ++extern struct buffer_head *__ext4_find_entry(struct inode *dir, ++ const struct qstr *d_name, ++ struct ext4_dir_entry_2 **res_dir, ++ int *inlined, struct htree_lock *lck); ++extern int __ext4_add_entry(handle_t *handle, struct dentry *dentry, ++ struct inode *inode, struct htree_lock *lck); + + /* + * Describe an inode's exact location on disk and in memory +@@ -2088,9 +2157,17 @@ void ext4_insert_dentry(struct inode *in + const char *name, int namelen, void *data); + static inline void ext4_update_dx_flag(struct inode *inode) + { ++ /* Disable it for ldiskfs, because going from a DX directory to ++ * a non-DX directory while it is in use will completely break ++ * the htree-locking. 
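
For illustration (not part of the patch): how the EXT4_LB_* helpers defined above encode one slot per EXT4_LK_* type — the low bit for readers, the mirrored high bit for writers — so that ffz(~mask) recovers the slot index and "(flags & EXT4_LB_DE) == EXT4_LB_DE" detects a writer. A simplified user-space mirror (the real EXT4_LK_MAX can differ when EXT4_HTREE_LOCK_HASH is enabled; __builtin_ctz is the GCC stand-in for ffz(~mask)):

#include <stdio.h>

enum { LK_DX, LK_DE, LK_SPIN, LK_MAX };

#define LB_RO(b)	(1 << (b))
#define LB_RW(b)	((1 << (b)) | (1 << (LK_MAX + (b))))

static int slot_of(unsigned mask)	/* ffz(~mask) */
{
	return __builtin_ctz(mask);
}

static int is_writer(unsigned flags, unsigned rw_mask)
{
	return (flags & rw_mask) == rw_mask;	/* both bits set => writer */
}

int main(void)
{
	unsigned de_ro = LB_RO(LK_DE), de_rw = LB_RW(LK_DE);

	printf("DE slot from RO mask: %d\n", slot_of(de_ro));		/* 1 */
	printf("DE slot from RW mask: %d\n", slot_of(de_rw));		/* 1 */
	printf("RO flags are a writer? %d\n", is_writer(de_ro, de_rw));	/* 0 */
	printf("RW flags are a writer? %d\n", is_writer(de_rw, de_rw));	/* 1 */
	return 0;
}
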
++ * If we really want to support this operation in the future, ++ * we need to exclusively lock the directory at here which will ++ * increase complexity of code */ ++#if 0 + if (!EXT4_HAS_COMPAT_FEATURE(inode->i_sb, + EXT4_FEATURE_COMPAT_DIR_INDEX)) + ext4_clear_inode_flag(inode, EXT4_INODE_INDEX); ++#endif + } + static unsigned char ext4_filetype_table[] = { + DT_UNKNOWN, DT_REG, DT_DIR, DT_CHR, DT_BLK, DT_FIFO, DT_SOCK, DT_LNK +Index: linux-3.10.0-229.1.2.fc21.x86_64/fs/ext4/namei.c +=================================================================== +--- linux-3.10.0-229.1.2.fc21.x86_64.orig/fs/ext4/namei.c ++++ linux-3.10.0-229.1.2.fc21.x86_64/fs/ext4/namei.c +@@ -53,6 +53,7 @@ struct buffer_head *ext4_append(handle_t + ext4_lblk_t *block) + { + struct buffer_head *bh; ++ struct ext4_inode_info *ei = EXT4_I(inode); + int err = 0; + + if (unlikely(EXT4_SB(inode->i_sb)->s_max_dir_size_kb && +@@ -60,15 +61,22 @@ struct buffer_head *ext4_append(handle_t + EXT4_SB(inode->i_sb)->s_max_dir_size_kb))) + return ERR_PTR(-ENOSPC); + ++ /* with parallel dir operations all appends ++ * have to be serialized -bzzz */ ++ down(&ei->i_append_sem); ++ + *block = inode->i_size >> inode->i_sb->s_blocksize_bits; + + bh = ext4_bread(handle, inode, *block, 1, &err); +- if (!bh) ++ if (!bh) { ++ up(&ei->i_append_sem); + return ERR_PTR(err); ++ } + inode->i_size += inode->i_sb->s_blocksize; + EXT4_I(inode)->i_disksize = inode->i_size; + BUFFER_TRACE(bh, "get_write_access"); + err = ext4_journal_get_write_access(handle, bh); ++ up(&ei->i_append_sem); + if (err) { + brelse(bh); + ext4_std_error(inode->i_sb, err); +@@ -246,7 +254,7 @@ static struct dx_frame *dx_probe(const s + struct inode *dir, + struct dx_hash_info *hinfo, + struct dx_frame *frame, +- int *err); ++ struct htree_lock *lck, int *err); + static void dx_release(struct dx_frame *frames); + static int dx_make_map(struct ext4_dir_entry_2 *de, unsigned blocksize, + struct dx_hash_info *hinfo, struct dx_map_entry map[]); +@@ -259,13 +267,13 @@ static void dx_insert_block(struct dx_fr + static int ext4_htree_next_block(struct inode *dir, __u32 hash, + struct dx_frame *frame, + struct dx_frame *frames, +- __u32 *start_hash); ++ __u32 *start_hash, struct htree_lock *lck); + static struct buffer_head * ext4_dx_find_entry(struct inode *dir, + const struct qstr *d_name, + struct ext4_dir_entry_2 **res_dir, +- int *err); ++ struct htree_lock *lck, int *err); + static int ext4_dx_add_entry(handle_t *handle, struct dentry *dentry, +- struct inode *inode); ++ struct inode *inode, struct htree_lock *lck); + + /* checksumming functions */ + void initialize_dirent_tail(struct ext4_dir_entry_tail *t, +@@ -668,6 +676,227 @@ struct stats dx_show_entries(struct dx_h + } + #endif /* DX_DEBUG */ + ++/* private data for htree_lock */ ++struct ext4_dir_lock_data { ++ unsigned ld_flags; /* bits-map for lock types */ ++ unsigned ld_count; /* # entries of the last DX block */ ++ struct dx_entry ld_at_entry; /* copy of leaf dx_entry */ ++ struct dx_entry *ld_at; /* position of leaf dx_entry */ ++}; ++ ++#define ext4_htree_lock_data(l) ((struct ext4_dir_lock_data *)(l)->lk_private) ++#define ext4_find_entry(dir, name, dirent, inline) \ ++ __ext4_find_entry(dir, name, dirent, inline, NULL) ++#define ext4_add_entry(handle, dentry, inode) \ ++ __ext4_add_entry(handle, dentry, inode, NULL) ++ ++/* NB: ext4_lblk_t is 32 bits so we use high bits to identify invalid blk */ ++#define EXT4_HTREE_NODE_CHANGED (0xcafeULL << 32) ++ ++static void ext4_htree_event_cb(void *target, void 
*event) ++{ ++ u64 *block = (u64 *)target; ++ ++ if (*block == dx_get_block((struct dx_entry *)event)) ++ *block = EXT4_HTREE_NODE_CHANGED; ++} ++ ++struct htree_lock_head *ext4_htree_lock_head_alloc(unsigned hbits) ++{ ++ struct htree_lock_head *lhead; ++ ++ lhead = htree_lock_head_alloc(EXT4_LK_MAX, hbits, 0); ++ if (lhead != NULL) { ++ htree_lock_event_attach(lhead, EXT4_LK_SPIN, HTREE_EVENT_WR, ++ ext4_htree_event_cb); ++ } ++ return lhead; ++} ++EXPORT_SYMBOL(ext4_htree_lock_head_alloc); ++ ++struct htree_lock *ext4_htree_lock_alloc(void) ++{ ++ return htree_lock_alloc(EXT4_LK_MAX, ++ sizeof(struct ext4_dir_lock_data)); ++} ++EXPORT_SYMBOL(ext4_htree_lock_alloc); ++ ++static htree_lock_mode_t ext4_htree_mode(unsigned flags) ++{ ++ switch (flags) { ++ default: /* 0 or unknown flags require EX lock */ ++ return HTREE_LOCK_EX; ++ case EXT4_HLOCK_READDIR: ++ return HTREE_LOCK_PR; ++ case EXT4_HLOCK_LOOKUP: ++ return HTREE_LOCK_CR; ++ case EXT4_HLOCK_DEL: ++ case EXT4_HLOCK_ADD: ++ return HTREE_LOCK_CW; ++ } ++} ++ ++/* return PR for read-only operations, otherwise return EX */ ++static inline htree_lock_mode_t ext4_htree_safe_mode(unsigned flags) ++{ ++ int writer = (flags & EXT4_LB_DE) == EXT4_LB_DE; ++ ++ /* 0 requires EX lock */ ++ return (flags == 0 || writer) ? HTREE_LOCK_EX : HTREE_LOCK_PR; ++} ++ ++static int ext4_htree_safe_locked(struct htree_lock *lck) ++{ ++ int writer; ++ ++ if (lck == NULL || lck->lk_mode == HTREE_LOCK_EX) ++ return 1; ++ ++ writer = (ext4_htree_lock_data(lck)->ld_flags & EXT4_LB_DE) == ++ EXT4_LB_DE; ++ if (writer) /* all readers & writers are excluded? */ ++ return lck->lk_mode == HTREE_LOCK_EX; ++ ++ /* all writers are excluded? */ ++ return lck->lk_mode == HTREE_LOCK_PR || ++ lck->lk_mode == HTREE_LOCK_PW || ++ lck->lk_mode == HTREE_LOCK_EX; ++} ++ ++/* relock htree_lock with EX mode if it's change operation, otherwise ++ * relock it with PR mode. It's noop if PDO is disabled. */ ++static void ext4_htree_safe_relock(struct htree_lock *lck) ++{ ++ if (!ext4_htree_safe_locked(lck)) { ++ unsigned flags = ext4_htree_lock_data(lck)->ld_flags; ++ ++ htree_change_lock(lck, ext4_htree_safe_mode(flags)); ++ } ++} ++ ++void ext4_htree_lock(struct htree_lock *lck, struct htree_lock_head *lhead, ++ struct inode *dir, unsigned flags) ++{ ++ htree_lock_mode_t mode = is_dx(dir) ? ext4_htree_mode(flags) : ++ ext4_htree_safe_mode(flags); ++ ++ ext4_htree_lock_data(lck)->ld_flags = flags; ++ htree_lock(lck, lhead, mode); ++ if (!is_dx(dir)) ++ ext4_htree_safe_relock(lck); /* make sure it's safe locked */ ++} ++EXPORT_SYMBOL(ext4_htree_lock); ++ ++static int ext4_htree_node_lock(struct htree_lock *lck, struct dx_entry *at, ++ unsigned lmask, int wait, void *ev) ++{ ++ u32 key = (at == NULL) ? 0 : dx_get_block(at); ++ u32 mode; ++ ++ /* NOOP if htree is well protected or caller doesn't require the lock */ ++ if (ext4_htree_safe_locked(lck) || ++ !(ext4_htree_lock_data(lck)->ld_flags & lmask)) ++ return 1; ++ ++ mode = (ext4_htree_lock_data(lck)->ld_flags & lmask) == lmask ? 
++ HTREE_LOCK_PW : HTREE_LOCK_PR; ++ while (1) { ++ if (htree_node_lock_try(lck, mode, key, ffz(~lmask), wait, ev)) ++ return 1; ++ if (!(lmask & EXT4_LB_SPIN)) /* not a spinlock */ ++ return 0; ++ cpu_relax(); /* spin until granted */ ++ } ++} ++ ++static int ext4_htree_node_locked(struct htree_lock *lck, unsigned lmask) ++{ ++ return ext4_htree_safe_locked(lck) || ++ htree_node_is_granted(lck, ffz(~lmask)); ++} ++ ++static void ext4_htree_node_unlock(struct htree_lock *lck, ++ unsigned lmask, void *buf) ++{ ++ /* NB: it's safe to call mutiple times or even it's not locked */ ++ if (!ext4_htree_safe_locked(lck) && ++ htree_node_is_granted(lck, ffz(~lmask))) ++ htree_node_unlock(lck, ffz(~lmask), buf); ++} ++ ++#define ext4_htree_dx_lock(lck, key) \ ++ ext4_htree_node_lock(lck, key, EXT4_LB_DX, 1, NULL) ++#define ext4_htree_dx_lock_try(lck, key) \ ++ ext4_htree_node_lock(lck, key, EXT4_LB_DX, 0, NULL) ++#define ext4_htree_dx_unlock(lck) \ ++ ext4_htree_node_unlock(lck, EXT4_LB_DX, NULL) ++#define ext4_htree_dx_locked(lck) \ ++ ext4_htree_node_locked(lck, EXT4_LB_DX) ++ ++static void ext4_htree_dx_need_lock(struct htree_lock *lck) ++{ ++ struct ext4_dir_lock_data *ld; ++ ++ if (ext4_htree_safe_locked(lck)) ++ return; ++ ++ ld = ext4_htree_lock_data(lck); ++ switch (ld->ld_flags) { ++ default: ++ return; ++ case EXT4_HLOCK_LOOKUP: ++ ld->ld_flags = EXT4_HLOCK_LOOKUP_SAFE; ++ return; ++ case EXT4_HLOCK_DEL: ++ ld->ld_flags = EXT4_HLOCK_DEL_SAFE; ++ return; ++ case EXT4_HLOCK_ADD: ++ ld->ld_flags = EXT4_HLOCK_SPLIT; ++ return; ++ } ++} ++ ++#define ext4_htree_de_lock(lck, key) \ ++ ext4_htree_node_lock(lck, key, EXT4_LB_DE, 1, NULL) ++#define ext4_htree_de_unlock(lck) \ ++ ext4_htree_node_unlock(lck, EXT4_LB_DE, NULL) ++ ++#define ext4_htree_spin_lock(lck, key, event) \ ++ ext4_htree_node_lock(lck, key, EXT4_LB_SPIN, 0, event) ++#define ext4_htree_spin_unlock(lck) \ ++ ext4_htree_node_unlock(lck, EXT4_LB_SPIN, NULL) ++#define ext4_htree_spin_unlock_listen(lck, p) \ ++ ext4_htree_node_unlock(lck, EXT4_LB_SPIN, p) ++ ++static void ext4_htree_spin_stop_listen(struct htree_lock *lck) ++{ ++ if (!ext4_htree_safe_locked(lck) && ++ htree_node_is_listening(lck, ffz(~EXT4_LB_SPIN))) ++ htree_node_stop_listen(lck, ffz(~EXT4_LB_SPIN)); ++} ++ ++enum { ++ DX_HASH_COL_IGNORE, /* ignore collision while probing frames */ ++ DX_HASH_COL_YES, /* there is collision and it does matter */ ++ DX_HASH_COL_NO, /* there is no collision */ ++}; ++ ++static int dx_probe_hash_collision(struct htree_lock *lck, ++ struct dx_entry *entries, ++ struct dx_entry *at, u32 hash) ++{ ++ if (!(ext4_htree_lock_data(lck)->ld_flags & EXT4_LB_EXACT)) { ++ return DX_HASH_COL_IGNORE; /* don't care about collision */ ++ ++ } else if (at == entries + dx_get_count(entries) - 1) { ++ return DX_HASH_COL_IGNORE; /* not in any leaf of this DX */ ++ ++ } else { /* hash collision? */ ++ return ((dx_get_hash(at + 1) & ~1) == hash) ? ++ DX_HASH_COL_YES : DX_HASH_COL_NO; ++ } ++} ++ + /* + * Probe for a directory leaf block to search. 
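
For illustration (not part of the patch): dx_probe() in the hunk below saves the leaf block number in a u64 and keeps listening for split events while it sleeps on the DE-lock. Because ext4_lblk_t is only 32 bits, the callback shown earlier (ext4_htree_event_cb) can overwrite the saved value in place with the out-of-range sentinel EXT4_HTREE_NODE_CHANGED, which the prober detects afterwards to force a retry. The trick in miniature (split_event_cb() is a hypothetical stand-in for the real callback):

#include <stdio.h>
#include <stdint.h>

#define NODE_CHANGED (0xcafeULL << 32)	/* mirrors EXT4_HTREE_NODE_CHANGED */

/* hypothetical stand-in for ext4_htree_event_cb() */
static void split_event_cb(uint64_t *watched, uint32_t split_block)
{
	if (*watched == split_block)	/* the leaf we depend on was split */
		*watched = NODE_CHANGED;
}

int main(void)
{
	uint64_t myblock = 1234;	/* 32-bit leaf block widened to 64 bits */

	split_event_cb(&myblock, 1234);
	printf("retry needed: %s\n",
	       myblock == NODE_CHANGED ? "yes" : "no");	/* prints "yes" */
	return 0;
}

No real block number ever has the high 32 bits set, so the sentinel can never be confused with a valid leaf.
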
+ * +@@ -679,10 +908,11 @@ struct stats dx_show_entries(struct dx_h + */ + static struct dx_frame * + dx_probe(const struct qstr *d_name, struct inode *dir, +- struct dx_hash_info *hinfo, struct dx_frame *frame_in, int *err) ++ struct dx_hash_info *hinfo, struct dx_frame *frame_in, ++ struct htree_lock *lck, int *err) + { + unsigned count, indirect; +- struct dx_entry *at, *entries, *p, *q, *m; ++ struct dx_entry *at, *entries, *p, *q, *m, *dx = NULL; + struct dx_root_info *info; + struct buffer_head *bh; + struct dx_frame *frame = frame_in; +@@ -750,8 +980,15 @@ dx_probe(const struct qstr *d_name, stru + dxtrace(printk("Look up %x", hash)); + while (1) + { ++ if (indirect == 0) { /* the last index level */ ++ /* NB: ext4_htree_dx_lock() could be noop if ++ * DX-lock flag is not set for current operation */ ++ ext4_htree_dx_lock(lck, dx); ++ ext4_htree_spin_lock(lck, dx, NULL); ++ } + count = dx_get_count(entries); +- if (!count || count > dx_get_limit(entries)) { ++ if (count == 0 || count > dx_get_limit(entries)) { ++ ext4_htree_spin_unlock(lck); /* release spin */ + ext4_warning(dir->i_sb, + "dx entry: no count or count > limit"); + brelse(bh); +@@ -792,7 +1029,70 @@ dx_probe(const struct qstr *d_name, stru + frame->bh = bh; + frame->entries = entries; + frame->at = at; +- if (!indirect--) return frame; ++ ++ if (indirect == 0) { /* the last index level */ ++ struct ext4_dir_lock_data *ld; ++ u64 myblock; ++ ++ /* By default we only lock DE-block, however, we will ++ * also lock the last level DX-block if: ++ * a) there is hash collision ++ * we will set DX-lock flag (a few lines below) ++ * and redo to lock DX-block ++ * see detail in dx_probe_hash_collision() ++ * b) it's a retry from splitting ++ * we need to lock the last level DX-block so nobody ++ * else can split any leaf blocks under the same ++ * DX-block, see detail in ext4_dx_add_entry() ++ */ ++ if (ext4_htree_dx_locked(lck)) { ++ /* DX-block is locked, just lock DE-block ++ * and return */ ++ ext4_htree_spin_unlock(lck); ++ if (!ext4_htree_safe_locked(lck)) ++ ext4_htree_de_lock(lck, frame->at); ++ return frame; ++ } ++ /* it's pdirop and no DX lock */ ++ if (dx_probe_hash_collision(lck, entries, at, hash) == ++ DX_HASH_COL_YES) { ++ /* found hash collision, set DX-lock flag ++ * and retry to abtain DX-lock */ ++ ext4_htree_spin_unlock(lck); ++ ext4_htree_dx_need_lock(lck); ++ continue; ++ } ++ ld = ext4_htree_lock_data(lck); ++ /* because I don't lock DX, so @at can't be trusted ++ * after I release spinlock so I have to save it */ ++ ld->ld_at = at; ++ ld->ld_at_entry = *at; ++ ld->ld_count = dx_get_count(entries); ++ ++ frame->at = &ld->ld_at_entry; ++ myblock = dx_get_block(at); ++ ++ /* NB: ordering locking */ ++ ext4_htree_spin_unlock_listen(lck, &myblock); ++ /* other thread can split this DE-block because: ++ * a) I don't have lock for the DE-block yet ++ * b) I released spinlock on DX-block ++ * if it happened I can detect it by listening ++ * splitting event on this DE-block */ ++ ext4_htree_de_lock(lck, frame->at); ++ ext4_htree_spin_stop_listen(lck); ++ ++ if (myblock == EXT4_HTREE_NODE_CHANGED) { ++ /* someone split this DE-block before ++ * I locked it, I need to retry and lock ++ * valid DE-block */ ++ ext4_htree_de_unlock(lck); ++ continue; ++ } ++ return frame; ++ } ++ dx = at; ++ indirect--; + bh = ext4_read_dirblock(dir, dx_get_block(at), INDEX); + if (IS_ERR(bh)) { + *err = PTR_ERR(bh); +@@ -860,7 +1160,7 @@ static void dx_release (struct dx_frame + static int ext4_htree_next_block(struct inode *dir, 
__u32 hash, + struct dx_frame *frame, + struct dx_frame *frames, +- __u32 *start_hash) ++ __u32 *start_hash, struct htree_lock *lck) + { + struct dx_frame *p; + struct buffer_head *bh; +@@ -875,12 +1175,22 @@ static int ext4_htree_next_block(struct + * this loop, num_frames indicates the number of interior + * nodes need to be read. + */ ++ ext4_htree_de_unlock(lck); + while (1) { +- if (++(p->at) < p->entries + dx_get_count(p->entries)) +- break; ++ if (num_frames > 0 || ext4_htree_dx_locked(lck)) { ++ /* num_frames > 0 : ++ * DX block ++ * ext4_htree_dx_locked: ++ * frame->at is reliable pointer returned by dx_probe, ++ * otherwise dx_probe already knew no collision */ ++ if (++(p->at) < p->entries + dx_get_count(p->entries)) ++ break; ++ } + if (p == frames) + return 0; + num_frames++; ++ if (num_frames == 1) ++ ext4_htree_dx_unlock(lck); + p--; + } + +@@ -903,6 +1213,13 @@ static int ext4_htree_next_block(struct + * block so no check is necessary + */ + while (num_frames--) { ++ if (num_frames == 0) { ++ /* it's not always necessary, we just don't want to ++ * detect hash collision again */ ++ ext4_htree_dx_need_lock(lck); ++ ext4_htree_dx_lock(lck, p->at); ++ } ++ + bh = ext4_read_dirblock(dir, dx_get_block(p->at), INDEX); + if (IS_ERR(bh)) + return PTR_ERR(bh); +@@ -911,6 +1228,7 @@ static int ext4_htree_next_block(struct + p->bh = bh; + p->at = p->entries = ((struct dx_node *) bh->b_data)->entries; + } ++ ext4_htree_de_lock(lck, p->at); + return 1; + } + +@@ -1013,10 +1331,10 @@ int ext4_htree_fill_tree(struct file *di + } + hinfo.hash = start_hash; + hinfo.minor_hash = 0; +- frame = dx_probe(NULL, dir, &hinfo, frames, &err); ++ /* assume it's PR locked */ ++ frame = dx_probe(NULL, dir, &hinfo, frames, NULL, &err); + if (!frame) + return err; +- + /* Add '.' and '..' from the htree header */ + if (!start_hash && !start_minor_hash) { + de = (struct ext4_dir_entry_2 *) frames[0].bh->b_data; +@@ -1043,7 +1361,7 @@ int ext4_htree_fill_tree(struct file *di + count += ret; + hashval = ~0; + ret = ext4_htree_next_block(dir, HASH_NB_ALWAYS, +- frame, frames, &hashval); ++ frame, frames, &hashval, NULL); + *next_hash = hashval; + if (ret < 0) { + err = ret; +@@ -1236,10 +1554,10 @@ static int is_dx_internal_node(struct in + * The returned buffer_head has ->b_count elevated. The caller is expected + * to brelse() it when appropriate. + */ +-static struct buffer_head * ext4_find_entry (struct inode *dir, ++struct buffer_head *__ext4_find_entry(struct inode *dir, + const struct qstr *d_name, + struct ext4_dir_entry_2 **res_dir, +- int *inlined) ++ int *inlined, struct htree_lock *lck) + { + struct super_block *sb; + struct buffer_head *bh_use[NAMEI_RA_SIZE]; +@@ -1283,7 +1601,7 @@ static struct buffer_head * ext4_find_en + goto restart; + } + if (is_dx(dir)) { +- bh = ext4_dx_find_entry(dir, d_name, res_dir, &err); ++ bh = ext4_dx_find_entry(dir, d_name, res_dir, lck, &err); + /* + * On success, or if the error was file not found, + * return. 
Otherwise, fall back to doing a search the +@@ -1297,6 +1615,7 @@ static struct buffer_head * ext4_find_en + return bh; + dxtrace(printk(KERN_DEBUG "ext4_find_entry: dx failed, " + "falling back\n")); ++ ext4_htree_safe_relock(lck); + } + nblocks = dir->i_size >> EXT4_BLOCK_SIZE_BITS(sb); + start = EXT4_I(dir)->i_dir_start_lookup; +@@ -1389,9 +1708,12 @@ cleanup_and_exit: + brelse(bh_use[ra_ptr]); + return ret; + } ++EXPORT_SYMBOL(__ext4_find_entry); + +-static struct buffer_head * ext4_dx_find_entry(struct inode *dir, const struct qstr *d_name, +- struct ext4_dir_entry_2 **res_dir, int *err) ++static struct buffer_head *ext4_dx_find_entry(struct inode *dir, ++ const struct qstr *d_name, ++ struct ext4_dir_entry_2 **res_dir, ++ struct htree_lock *lck, int *err) + { + struct super_block * sb = dir->i_sb; + struct dx_hash_info hinfo; +@@ -1400,7 +1722,7 @@ static struct buffer_head * ext4_dx_find + ext4_lblk_t block; + int retval; + +- if (!(frame = dx_probe(d_name, dir, &hinfo, frames, err))) ++ if (!(frame = dx_probe(d_name, dir, &hinfo, frames, lck, err))) + return NULL; + do { + block = dx_get_block(frame->at); +@@ -1424,7 +1746,7 @@ static struct buffer_head * ext4_dx_find + + /* Check to see if we should continue to search */ + retval = ext4_htree_next_block(dir, hinfo.hash, frame, +- frames, NULL); ++ frames, NULL, lck); + if (retval < 0) { + ext4_warning(sb, + "error reading index page in directory #%lu", +@@ -1583,8 +1905,9 @@ static struct ext4_dir_entry_2* dx_pack_ + * Returns pointer to de in block into which the new entry will be inserted. + */ + static struct ext4_dir_entry_2 *do_split(handle_t *handle, struct inode *dir, +- struct buffer_head **bh,struct dx_frame *frame, +- struct dx_hash_info *hinfo, int *error) ++ struct buffer_head **bh, struct dx_frame *frames, ++ struct dx_frame *frame, struct dx_hash_info *hinfo, ++ struct htree_lock *lck, int *error) + { + unsigned blocksize = dir->i_sb->s_blocksize; + unsigned count, continued; +@@ -1647,7 +1970,14 @@ static struct ext4_dir_entry_2 *do_split + hash2, split, count-split)); + + /* Fancy dance to stay within two buffers */ +- de2 = dx_move_dirents(data1, data2, map + split, count - split, blocksize); ++ if (hinfo->hash < hash2) { ++ de2 = dx_move_dirents(data1, data2, map + split, ++ count - split, blocksize); ++ } else { ++ /* make sure we will add entry to the same block which ++ * we have already locked */ ++ de2 = dx_move_dirents(data1, data2, map, split, blocksize); ++ } + de = dx_pack_dirents(data1, blocksize); + de->rec_len = ext4_rec_len_to_disk(data1 + (blocksize - csum_size) - + (char *) de, +@@ -1666,13 +1996,21 @@ static struct ext4_dir_entry_2 *do_split + dxtrace(dx_show_leaf (hinfo, (struct ext4_dir_entry_2 *) data1, blocksize, 1)); + dxtrace(dx_show_leaf (hinfo, (struct ext4_dir_entry_2 *) data2, blocksize, 1)); + +- /* Which block gets the new entry? */ +- if (hinfo->hash >= hash2) +- { +- swap(*bh, bh2); +- de = de2; ++ ext4_htree_spin_lock(lck, frame > frames ? 
(frame - 1)->at : NULL, ++ frame->at); /* notify block is being split */ ++ if (hinfo->hash < hash2) { ++ dx_insert_block(frame, hash2 + continued, newblock); ++ ++ } else { ++ /* switch block number */ ++ dx_insert_block(frame, hash2 + continued, ++ dx_get_block(frame->at)); ++ dx_set_block(frame->at, newblock); ++ (frame->at)++; + } +- dx_insert_block(frame, hash2 + continued, newblock); ++ ext4_htree_spin_unlock(lck); ++ ext4_htree_dx_unlock(lck); ++ + err = ext4_handle_dirty_dirent_node(handle, dir, bh2); + if (err) + goto journal_error; +@@ -1945,7 +2283,7 @@ static int make_indexed_dir(handle_t *ha + ext4_handle_dirty_dx_node(handle, dir, frame->bh); + ext4_handle_dirty_dirent_node(handle, dir, bh); + +- de = do_split(handle,dir, &bh, frame, &hinfo, &retval); ++ de = do_split(handle, dir, &bh, frames, frame, &hinfo, NULL, &retval); + if (!de) { + /* + * Even if the block split failed, we have to properly write +@@ -2051,8 +2389,8 @@ out: + * may not sleep between calling this and putting something into + * the entry, as someone else might have used it while you slept. + */ +-static int ext4_add_entry(handle_t *handle, struct dentry *dentry, +- struct inode *inode) ++int __ext4_add_entry(handle_t *handle, struct dentry *dentry, ++ struct inode *inode, struct htree_lock *lck) + { + struct inode *dir = dentry->d_parent->d_inode; + struct buffer_head *bh; +@@ -2087,9 +2425,10 @@ static int ext4_add_entry(handle_t *hand + if (dentry->d_name.len == 2 && + memcmp(dentry->d_name.name, "..", 2) == 0) + return ext4_update_dotdot(handle, dentry, inode); +- retval = ext4_dx_add_entry(handle, dentry, inode); ++ retval = ext4_dx_add_entry(handle, dentry, inode, lck); + if (!retval || (retval != ERR_BAD_DX_DIR)) + goto out; ++ ext4_htree_safe_relock(lck); + ext4_clear_inode_flag(dir, EXT4_INODE_INDEX); + dx_fallback++; + ext4_mark_inode_dirty(handle, dir); +@@ -2129,12 +2468,13 @@ static int ext4_add_entry(handle_t *hand + ext4_set_inode_state(inode, EXT4_STATE_NEWENTRY); + return retval; + } ++EXPORT_SYMBOL(__ext4_add_entry); + + /* + * Returns 0 for success, or a negative error value + */ + static int ext4_dx_add_entry(handle_t *handle, struct dentry *dentry, +- struct inode *inode) ++ struct inode *inode, struct htree_lock *lck) + { + struct dx_frame frames[EXT4_HTREE_LEVEL], *frame; + struct dx_entry *entries, *at; +@@ -2148,7 +2488,7 @@ static int ext4_dx_add_entry(handle_t *h + + again: + restart = 0; +- frame = dx_probe(&dentry->d_name, dir, &hinfo, frames, &err); ++ frame = dx_probe(&dentry->d_name, dir, &hinfo, frames, lck, &err); + if (!frame) + return err; + entries = frame->entries; +@@ -2178,6 +2518,11 @@ again: + struct dx_node *node2; + struct buffer_head *bh2; + ++ if (!ext4_htree_safe_locked(lck)) { /* retry with EX lock */ ++ ext4_htree_safe_relock(lck); ++ restart = 1; ++ goto cleanup; ++ } + while (frame > frames) { + if (dx_get_count((frame - 1)->entries) < + dx_get_limit((frame - 1)->entries)) { +@@ -2277,16 +2622,43 @@ again: + restart = 1; + goto cleanup; + } ++ } else if (!ext4_htree_dx_locked(lck)) { ++ struct ext4_dir_lock_data *ld = ext4_htree_lock_data(lck); ++ ++ /* not well protected, require DX lock */ ++ ext4_htree_dx_need_lock(lck); ++ at = frame > frames ? (frame - 1)->at : NULL; ++ ++ /* NB: no risk of deadlock because it's just a try. ++ * ++ * NB: we check ld_count for twice, the first time before ++ * having DX lock, the second time after holding DX lock. 
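The comment above describes a classic optimistic double-check: validate the cached state without the lock, take the lock with a deadlock-free trylock, then validate again because the state may have changed in the window. A rough user-space analogue using a pthread mutex (all names here are hypothetical stand-ins for the DX-block state in ext4_dx_add_entry()):

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t dx_lock = PTHREAD_MUTEX_INITIALIZER;
static int entry_count = 4;	/* plays the role of dx_get_count() */

/* Returns 1 if we hold dx_lock and the count is unchanged,
 * 0 if the caller must restart the descent. */
static int trylock_if_unchanged(int cached_count)
{
	if (cached_count != entry_count)	/* 1st check, lock-free */
		return 0;
	if (pthread_mutex_trylock(&dx_lock) != 0)
		return 0;		/* no deadlock risk: just a try */
	if (cached_count != entry_count) {	/* 2nd check, locked */
		pthread_mutex_unlock(&dx_lock);
		return 0;
	}
	return 1;	/* locked and nothing changed: cache is valid */
}

int main(void)
{
	if (trylock_if_unchanged(4)) {
		printf("locked, cached state still valid\n");
		pthread_mutex_unlock(&dx_lock);
	} else {
		printf("restart\n");
	}
	return 0;
}

If either check or the trylock fails, the whole lookup is redone from the top, exactly as @restart = 1 does in the code that follows.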
++ * ++ * NB: We never free blocks for directory so far, which ++ * means value returned by dx_get_count() should equal to ++ * ld->ld_count if nobody split any DE-block under @at, ++ * and ld->ld_at still points to valid dx_entry. */ ++ if ((ld->ld_count != dx_get_count(entries)) || ++ !ext4_htree_dx_lock_try(lck, at) || ++ (ld->ld_count != dx_get_count(entries))) { ++ restart = 1; ++ goto cleanup; ++ } ++ /* OK, I've got DX lock and nothing changed */ ++ frame->at = ld->ld_at; + } +- de = do_split(handle, dir, &bh, frame, &hinfo, &err); ++ de = do_split(handle, dir, &bh, frames, frame, &hinfo, lck, &err); + if (!de) + goto cleanup; ++ + err = add_dirent_to_buf(handle, dentry, inode, de, bh); + goto cleanup; + + journal_error: + ext4_std_error(dir->i_sb, err); + cleanup: ++ ext4_htree_dx_unlock(lck); ++ ext4_htree_de_unlock(lck); + brelse(bh); + dx_release(frames); + /* @restart is true means htree-path has been changed, we need to +Index: linux-3.10.0-229.1.2.fc21.x86_64/fs/ext4/super.c +=================================================================== +--- linux-3.10.0-229.1.2.fc21.x86_64.orig/fs/ext4/super.c ++++ linux-3.10.0-229.1.2.fc21.x86_64/fs/ext4/super.c +@@ -875,5 +875,6 @@ static struct inode *ext4_alloc_inode(st + + ei->vfs_inode.i_version = 1; ++ sema_init(&ei->i_append_sem, 1); + INIT_LIST_HEAD(&ei->i_prealloc_list); + spin_lock_init(&ei->i_prealloc_lock); + ext4_es_init_tree(&ei->i_es_tree); diff --git a/ldiskfs/kernel_patches/patches/sles12/ext4-prealloc.patch b/ldiskfs/kernel_patches/patches/sles12/ext4-prealloc.patch new file mode 100644 index 0000000..394c3e9 --- /dev/null +++ b/ldiskfs/kernel_patches/patches/sles12/ext4-prealloc.patch @@ -0,0 +1,391 @@ +Index: linux-3.12.39-47.1/fs/ext4/ext4.h +=================================================================== +--- linux-3.12.39-47.1.orig/fs/ext4/ext4.h ++++ linux-3.12.39-47.1/fs/ext4/ext4.h +@@ -1251,11 +1251,14 @@ struct ext4_sb_info { + + /* tunables */ + unsigned long s_stripe; +- unsigned int s_mb_stream_request; ++ unsigned long s_mb_small_req; ++ unsigned long s_mb_large_req; + unsigned int s_mb_max_to_scan; + unsigned int s_mb_min_to_scan; + unsigned int s_mb_stats; + unsigned int s_mb_order2_reqs; ++ unsigned long *s_mb_prealloc_table; ++ unsigned long s_mb_prealloc_table_size; + unsigned int s_mb_group_prealloc; + unsigned int s_max_dir_size_kb; + /* where last allocation was done - for stream allocation */ +Index: linux-3.12.39-47.1/fs/ext4/mballoc.c +=================================================================== +--- linux-3.12.39-47.1.orig/fs/ext4/mballoc.c ++++ linux-3.12.39-47.1/fs/ext4/mballoc.c +@@ -1847,6 +1847,25 @@ int ext4_mb_find_by_goal(struct ext4_all + return 0; + } + ++static void ext4_mb_prealloc_table_add(struct ext4_sb_info *sbi, int value) ++{ ++ int i; ++ ++ if (value > (sbi->s_blocks_per_group - 1 - 1 - sbi->s_itb_per_group)) ++ return; ++ ++ for (i = 0; i < sbi->s_mb_prealloc_table_size; i++) { ++ if (sbi->s_mb_prealloc_table[i] == 0) { ++ sbi->s_mb_prealloc_table[i] = value; ++ return; ++ } ++ ++ /* they should add values in order */ ++ if (value <= sbi->s_mb_prealloc_table[i]) ++ return; ++ } ++} ++ + /* + * The routine scans buddy structures (not bitmap!) 
from given order
+  * to max order and tries to find big enough chunk to satisfy the req
+@@ -2285,6 +2304,91 @@ static const struct seq_operations ext4_
+ 	.show   = ext4_mb_seq_groups_show,
+ };
+ 
++#define EXT4_MB_PREALLOC_TABLE          "prealloc_table"
++
++static ssize_t ext4_mb_prealloc_table_proc_write(struct file *file,
++						 const char __user *buf,
++						 size_t cnt, loff_t *pos)
++{
++	struct ext4_sb_info *sbi = EXT4_SB(PDE_DATA(file_inode(file)));
++	unsigned long value;
++	unsigned long prev = 0;
++	char str[128];
++	char *cur;
++	char *end;
++	unsigned long *new_table;
++	int num = 0;
++	int i = 0;
++
++	if (cnt >= sizeof(str))
++		return -EINVAL;
++	if (copy_from_user(str, buf, cnt))
++		return -EFAULT;
++
++	num = 0;
++	cur = str;
++	end = str + cnt;
++	while (cur < end) {
++		while ((cur < end) && (*cur == ' '))
++			cur++;
++		/* simple_strtoul() advances @cur past the number; a plain
++		 * kstrtol() on the remainder would reject the trailing
++		 * entries and leave @cur stuck on the same token */
++		value = simple_strtoul(cur, &cur, 0);
++		if (value == 0)
++			break;
++		if (value <= prev)
++			return -EINVAL;
++		prev = value;
++		num++;
++	}
++
++	new_table = kmalloc(num * sizeof(*new_table), GFP_KERNEL);
++	if (new_table == NULL)
++		return -ENOMEM;
++	kfree(sbi->s_mb_prealloc_table);
++	memset(new_table, 0, num * sizeof(*new_table));
++	sbi->s_mb_prealloc_table = new_table;
++	sbi->s_mb_prealloc_table_size = num;
++	cur = str;
++	end = str + cnt;
++	while (cur < end && i < num) {
++		while (cur < end && *cur == ' ')
++			cur++;
++		value = simple_strtol(cur, &cur, 0);
++		ext4_mb_prealloc_table_add(sbi, value);
++		i++;
++	}
++
++	return cnt;
++}
++
++static int mb_prealloc_table_seq_show(struct seq_file *m, void *v)
++{
++	struct ext4_sb_info *sbi = EXT4_SB(m->private);
++	int i;
++
++	for (i = 0; i < sbi->s_mb_prealloc_table_size; i++)
++		seq_printf(m, "%lu ", sbi->s_mb_prealloc_table[i]);
++	seq_printf(m, "\n");
++
++	return 0;
++}
++
++static int mb_prealloc_table_seq_open(struct inode *inode, struct file *file)
++{
++	return single_open(file, mb_prealloc_table_seq_show, PDE_DATA(inode));
++}
++
++static const struct file_operations ext4_mb_prealloc_seq_fops = {
++	.owner   = THIS_MODULE,
++	.open    = mb_prealloc_table_seq_open,
++	.read    = seq_read,
++	.llseek  = seq_lseek,
++	.release = single_release,
++	.write   = ext4_mb_prealloc_table_proc_write,
++};
++
+ static int ext4_mb_seq_groups_open(struct inode *inode, struct file *file)
+ {
+ 	struct super_block *sb = PDE_DATA(inode);
+@@ -2579,7 +2683,6 @@ int ext4_mb_init(struct super_block *sb)
+ 	sbi->s_mb_max_to_scan = MB_DEFAULT_MAX_TO_SCAN;
+ 	sbi->s_mb_min_to_scan = MB_DEFAULT_MIN_TO_SCAN;
+ 	sbi->s_mb_stats = MB_DEFAULT_STATS;
+-	sbi->s_mb_stream_request = MB_DEFAULT_STREAM_THRESHOLD;
+ 	sbi->s_mb_order2_reqs = MB_DEFAULT_ORDER2_REQS;
+ 	/*
+ 	 * The default group preallocation is 512, which for 4k block
+@@ -2603,9 +2706,48 @@ int ext4_mb_init(struct super_block *sb)
+ 	 * RAID stripe size so that preallocations don't fragment
+ 	 * the stripes.
+ */ +- if (sbi->s_stripe > 1) { +- sbi->s_mb_group_prealloc = roundup( +- sbi->s_mb_group_prealloc, sbi->s_stripe); ++ ++ if (sbi->s_stripe == 0) { ++ sbi->s_mb_prealloc_table_size = 10; ++ i = sbi->s_mb_prealloc_table_size * sizeof(unsigned long); ++ sbi->s_mb_prealloc_table = kmalloc(i, GFP_NOFS); ++ if (sbi->s_mb_prealloc_table == NULL) { ++ ret = -ENOMEM; ++ goto out; ++ } ++ memset(sbi->s_mb_prealloc_table, 0, i); ++ ++ ext4_mb_prealloc_table_add(sbi, 4); ++ ext4_mb_prealloc_table_add(sbi, 8); ++ ext4_mb_prealloc_table_add(sbi, 16); ++ ext4_mb_prealloc_table_add(sbi, 32); ++ ext4_mb_prealloc_table_add(sbi, 64); ++ ext4_mb_prealloc_table_add(sbi, 128); ++ ext4_mb_prealloc_table_add(sbi, 256); ++ ext4_mb_prealloc_table_add(sbi, 512); ++ ext4_mb_prealloc_table_add(sbi, 1024); ++ ext4_mb_prealloc_table_add(sbi, 2048); ++ ++ sbi->s_mb_small_req = 256; ++ sbi->s_mb_large_req = 1024; ++ sbi->s_mb_group_prealloc = 512; ++ } else { ++ sbi->s_mb_prealloc_table_size = 3; ++ i = sbi->s_mb_prealloc_table_size * sizeof(unsigned long); ++ sbi->s_mb_prealloc_table = kmalloc(i, GFP_NOFS); ++ if (sbi->s_mb_prealloc_table == NULL) { ++ ret = -ENOMEM; ++ goto out; ++ } ++ memset(sbi->s_mb_prealloc_table, 0, i); ++ ++ ext4_mb_prealloc_table_add(sbi, sbi->s_stripe); ++ ext4_mb_prealloc_table_add(sbi, sbi->s_stripe * 2); ++ ext4_mb_prealloc_table_add(sbi, sbi->s_stripe * 4); ++ ++ sbi->s_mb_small_req = sbi->s_stripe; ++ sbi->s_mb_large_req = sbi->s_stripe * 8; ++ sbi->s_mb_group_prealloc = sbi->s_stripe * 4; + } + + sbi->s_locality_groups = alloc_percpu(struct ext4_locality_group); +@@ -2627,9 +2769,13 @@ int ext4_mb_init(struct super_block *sb) + if (ret != 0) + goto out_free_locality_groups; + +- if (sbi->s_proc) ++ if (sbi->s_proc) { + proc_create_data("mb_groups", S_IRUGO, sbi->s_proc, + &ext4_mb_seq_groups_fops, sb); ++ proc_create_data(EXT4_MB_PREALLOC_TABLE, S_IFREG | S_IRUGO | ++ S_IWUSR, sbi->s_proc, ++ &ext4_mb_prealloc_seq_fops, sb); ++ } + + return 0; + +@@ -2639,6 +2785,7 @@ out_free_locality_groups: + out_free_groupinfo_slab: + ext4_groupinfo_destroy_slabs(); + out: ++ kfree(sbi->s_mb_prealloc_table); + kfree(sbi->s_mb_offsets); + sbi->s_mb_offsets = NULL; + kfree(sbi->s_mb_maxs); +@@ -2673,8 +2820,10 @@ int ext4_mb_release(struct super_block * + struct ext4_sb_info *sbi = EXT4_SB(sb); + struct kmem_cache *cachep = get_groupinfo_cache(sb->s_blocksize_bits); + +- if (sbi->s_proc) ++ if (sbi->s_proc) { + remove_proc_entry("mb_groups", sbi->s_proc); ++ remove_proc_entry(EXT4_MB_PREALLOC_TABLE, sbi->s_proc); ++ } + + if (sbi->s_group_info) { + for (i = 0; i < ngroups; i++) { +@@ -2985,9 +3134,9 @@ ext4_mb_normalize_request(struct ext4_al + struct ext4_allocation_request *ar) + { + struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb); +- int bsbits, max; ++ int bsbits, i, wind; + ext4_lblk_t end; +- loff_t size, start_off; ++ loff_t size; + loff_t orig_size __maybe_unused; + ext4_lblk_t start; + struct ext4_inode_info *ei = EXT4_I(ac->ac_inode); +@@ -3020,50 +3169,34 @@ ext4_mb_normalize_request(struct ext4_al + size = size << bsbits; + if (size < i_size_read(ac->ac_inode)) + size = i_size_read(ac->ac_inode); +- orig_size = size; ++ size = (size + ac->ac_sb->s_blocksize - 1) >> bsbits; + +- /* max size of free chunks */ +- max = 2 << bsbits; ++ start = wind = 0; + +-#define NRL_CHECK_SIZE(req, size, max, chunk_size) \ +- (req <= (size) || max <= (chunk_size)) ++ /* let's choose preallocation window depending on file size */ ++ for (i = 0; i < sbi->s_mb_prealloc_table_size; i++) { ++ if (size <= 
sbi->s_mb_prealloc_table[i]) {
++			wind = sbi->s_mb_prealloc_table[i];
++			break;
++		}
++	}
++	size = wind;
+ 
+-	/* first, try to predict filesize */
+-	/* XXX: should this table be tunable? */
+-	start_off = 0;
+-	if (size <= 16 * 1024) {
+-		size = 16 * 1024;
+-	} else if (size <= 32 * 1024) {
+-		size = 32 * 1024;
+-	} else if (size <= 64 * 1024) {
+-		size = 64 * 1024;
+-	} else if (size <= 128 * 1024) {
+-		size = 128 * 1024;
+-	} else if (size <= 256 * 1024) {
+-		size = 256 * 1024;
+-	} else if (size <= 512 * 1024) {
+-		size = 512 * 1024;
+-	} else if (size <= 1024 * 1024) {
+-		size = 1024 * 1024;
+-	} else if (NRL_CHECK_SIZE(size, 4 * 1024 * 1024, max, 2 * 1024)) {
+-		start_off = ((loff_t)ac->ac_o_ex.fe_logical >>
+-						(21 - bsbits)) << 21;
+-		size = 2 * 1024 * 1024;
+-	} else if (NRL_CHECK_SIZE(size, 8 * 1024 * 1024, max, 4 * 1024)) {
+-		start_off = ((loff_t)ac->ac_o_ex.fe_logical >>
+-							(22 - bsbits)) << 22;
+-		size = 4 * 1024 * 1024;
+-	} else if (NRL_CHECK_SIZE(ac->ac_o_ex.fe_len,
+-					(8<<20)>>bsbits, max, 8 * 1024)) {
+-		start_off = ((loff_t)ac->ac_o_ex.fe_logical >>
+-							(23 - bsbits)) << 23;
+-		size = 8 * 1024 * 1024;
+-	} else {
+-		start_off = (loff_t)ac->ac_o_ex.fe_logical << bsbits;
+-		size = ac->ac_o_ex.fe_len << bsbits;
++	if (wind == 0) {
++		__u64 tstart, tend;
++		/* the file is quite large, so preallocate using the
++		 * biggest configured window, aligned with regard to the
++		 * logical offset */
++		wind = sbi->s_mb_prealloc_table[i - 1];
++		tstart = ac->ac_o_ex.fe_logical;
++		do_div(tstart, wind);
++		start = tstart * wind;
++		tend = ac->ac_o_ex.fe_logical + ac->ac_o_ex.fe_len - 1;
++		do_div(tend, wind);
++		tend = tend * wind + wind;
++		size = tend - start;
+ 	}
+-	size = size >> bsbits;
+-	start = start_off >> bsbits;
++	orig_size = size;
+ 
+ 	/* don't cover already allocated blocks in selected range */
+ 	if (ar->pleft && start <= ar->lleft) {
+@@ -3139,7 +3272,6 @@ ext4_mb_normalize_request(struct ext4_al
+ 	}
+ 	BUG_ON(start + size <= ac->ac_o_ex.fe_logical &&
+ 			start > ac->ac_o_ex.fe_logical);
+-	BUG_ON(size <= 0 || size > EXT4_BLOCKS_PER_GROUP(ac->ac_sb));
+ 
+ 	/* now prepare goal request */
+ 
+@@ -4105,11 +4237,19 @@ static void ext4_mb_group_or_file(struct
+ 
+ 	/* don't use group allocation for large files */
+ 	size = max(size, isize);
+-	if (size > sbi->s_mb_stream_request) {
++	if ((ac->ac_o_ex.fe_len >= sbi->s_mb_small_req) ||
++	    (size >= sbi->s_mb_large_req)) {
+ 		ac->ac_flags |= EXT4_MB_STREAM_ALLOC;
+ 		return;
+ 	}
+ 
++	/*
++	 * the request is so large that we don't care about
++	 * streaming - it outweighs any possible seek
++	 */
++	if (ac->ac_o_ex.fe_len >= sbi->s_mb_large_req)
++		return;
++
+ 	BUG_ON(ac->ac_lg != NULL);
+ 	/*
+ 	 * locality group prealloc space are per cpu. The reason for having
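Taken together, the rewritten ext4_mb_normalize_request() path reduces to: round the request up to the smallest table window that fits, or, for files larger than every window, align both start and size to the largest window. A compact user-space sketch of that selection, assuming the non-RAID default table from ext4_mb_init() and plain 64-bit division in place of do_div():

#include <stdio.h>

/* Assumed example table: the non-RAID defaults installed by
 * ext4_mb_init() above, in units of filesystem blocks. */
static const unsigned long long table[] = {
	4, 8, 16, 32, 64, 128, 256, 512, 1024, 2048
};
static const int table_size = sizeof(table) / sizeof(table[0]);

/* Sketch of the window selection: @size is the request size in
 * blocks, @logical the first logical block, @len the request
 * length.  Stores the aligned start in *start, returns the size. */
static unsigned long long choose_window(unsigned long long size,
					unsigned long long logical,
					unsigned long long len,
					unsigned long long *start)
{
	unsigned long long wind = table[table_size - 1];
	int i;

	*start = 0;
	for (i = 0; i < table_size; i++) {
		if (size <= table[i])
			return table[i];  /* smallest window that fits */
	}
	/* larger than every window: take the biggest one and align
	 * to the logical offset, as in the wind == 0 branch above */
	*start = logical / wind * wind;
	return (logical + len - 1) / wind * wind + wind - *start;
}

int main(void)
{
	unsigned long long start, size;

	size = choose_window(100, 0, 100, &start);
	printf("100 blocks  -> window %llu at %llu\n", size, start);
	size = choose_window(5000, 4100, 100, &start);
	printf("5000 blocks -> window %llu at %llu\n", size, start);
	return 0;
}

With these defaults a 100-block file is padded to a 128-block window, while a request beyond 2048 blocks falls into the alignment branch and gets a 2048-block window starting on a 2048-block boundary.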
+Index: linux-3.12.39-47.1/fs/ext4/super.c
+===================================================================
+--- linux-3.12.39-47.1.orig/fs/ext4/super.c
++++ linux-3.12.39-47.1/fs/ext4/super.c
+@@ -2592,7 +2592,8 @@ EXT4_RW_ATTR_SBI_UI(mb_stats, s_mb_stats
+ EXT4_RW_ATTR_SBI_UI(mb_max_to_scan, s_mb_max_to_scan);
+ EXT4_RW_ATTR_SBI_UI(mb_min_to_scan, s_mb_min_to_scan);
+ EXT4_RW_ATTR_SBI_UI(mb_order2_req, s_mb_order2_reqs);
+-EXT4_RW_ATTR_SBI_UI(mb_stream_req, s_mb_stream_request);
++EXT4_RW_ATTR_SBI_UI(mb_small_req, s_mb_small_req);
++EXT4_RW_ATTR_SBI_UI(mb_large_req, s_mb_large_req);
+ EXT4_RW_ATTR_SBI_UI(mb_group_prealloc, s_mb_group_prealloc);
+ EXT4_DEPRECATED_ATTR(max_writeback_mb_bump, 128);
+ EXT4_RW_ATTR_SBI_UI(extent_max_zeroout_kb, s_extent_max_zeroout_kb);
+@@ -2609,7 +2610,8 @@ static struct attribute *ext4_attrs[] =
+ 	ATTR_LIST(mb_max_to_scan),
+ 	ATTR_LIST(mb_min_to_scan),
+ 	ATTR_LIST(mb_order2_req),
+-	ATTR_LIST(mb_stream_req),
++	ATTR_LIST(mb_small_req),
++	ATTR_LIST(mb_large_req),
+ 	ATTR_LIST(mb_group_prealloc),
+ 	ATTR_LIST(max_writeback_mb_bump),
+ 	ATTR_LIST(extent_max_zeroout_kb),
+Index: linux-3.12.39-47.1/fs/ext4/inode.c
+===================================================================
+--- linux-3.12.39-47.1.orig/fs/ext4/inode.c
++++ linux-3.12.39-47.1/fs/ext4/inode.c
+@@ -2457,6 +2457,9 @@ static int ext4_writepages(struct addres
+ 		ext4_journal_stop(handle);
+ 	}
+ 
++	if (wbc->nr_to_write < sbi->s_mb_small_req)
++		wbc->nr_to_write = sbi->s_mb_small_req;
++
+ 	if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX)
+ 		range_whole = 1;
+ 
diff --git a/ldiskfs/kernel_patches/series/ldiskfs-3.12-sles12.series b/ldiskfs/kernel_patches/series/ldiskfs-3.12-sles12.series
new file mode 100644
index 0000000..c2e386a
--- /dev/null
+++ b/ldiskfs/kernel_patches/series/ldiskfs-3.12-sles12.series
@@ -0,0 +1,18 @@
+sles12/ext4-inode-version.patch
+rhel7/ext4-lookup-dotdot.patch
+rhel6.3/ext4-print-inum-in-htree-warning.patch
+sles12/ext4-prealloc.patch
+rhel7/ext4-osd-iop-common.patch
+sles12/ext4-misc.patch
+rhel7/ext4-mballoc-extra-checks.patch
+rhel7/ext4-hash-indexed-dir-dotdot-update.patch
+rhel7/ext4-kill-dx-root.patch
+rhel7/ext4-mballoc-pa-free-mismatch.patch
+sles12/ext4-data-in-dirent.patch
+sles12/ext4-large-eas.patch
+rhel7/ext4-disable-mb-cache.patch
+rhel7/ext4-nocmtime.patch
+rhel7/ext4-large-dir.patch
+sles12/ext4-pdirop.patch
+rhel7/ext4-max-dir-size.patch
+rhel7/ext4-remove-truncate-warning.patch
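For reference, the prealloc_table proc file added by ext4-prealloc.patch accepts a strictly increasing list of window sizes: the write handler rejects out-of-order input with -EINVAL, and ext4_mb_prealloc_table_add() additionally drops any value that does not fit inside a block group. A small user-space sketch of that insertion rule (the group-size limit below is an assumed example standing in for sbi->s_blocks_per_group - 2 - sbi->s_itb_per_group):

#include <stdio.h>

#define TABLE_SIZE	3
#define GROUP_LIMIT	32254	/* assumed example limit */

static unsigned long table[TABLE_SIZE];	/* zeroed, like kzalloc */

/* Mirrors ext4_mb_prealloc_table_add(): values must arrive in
 * increasing order; oversized or out-of-order values are dropped. */
static void prealloc_table_add(unsigned long value)
{
	int i;

	if (value > GROUP_LIMIT)
		return;
	for (i = 0; i < TABLE_SIZE; i++) {
		if (table[i] == 0) {
			table[i] = value;	/* first free slot */
			return;
		}
		if (value <= table[i])	/* not increasing: drop it */
			return;
	}
}

int main(void)
{
	int i;

	prealloc_table_add(16);
	prealloc_table_add(8);	/* silently ignored */
	prealloc_table_add(32);
	prealloc_table_add(64);
	for (i = 0; i < TABLE_SIZE; i++)
		printf("%lu ", table[i]);
	printf("\n");	/* prints: 16 32 64 */
	return 0;
}

On a patched kernel the table would be driven from user space with something like echo "16 32 64" > /proc/fs/ldiskfs/<dev>/prealloc_table and read back with cat; the exact path is an assumption here, as it is simply wherever sbi->s_proc points for the mounted device.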