Index: linux-stage/fs/ext4/ext4.h
===================================================================
---- linux-stage.orig/fs/ext4/ext4.h 2011-03-05 12:34:16.458850451 +0800
-+++ linux-stage/fs/ext4/ext4.h 2011-03-05 12:35:25.338882364 +0800
-@@ -405,7 +405,7 @@
+--- linux-stage.orig/fs/ext4/ext4.h
++++ linux-stage/fs/ext4/ext4.h
+@@ -405,7 +405,7 @@ struct ext4_new_group_data {
#define EXT4_IOC_GROUP_ADD _IOW('f', 8, struct ext4_new_group_input)
#define EXT4_IOC_MIGRATE _IO('f', 9)
/* note ioctl 10 reserved for an early version of the FIEMAP ioctl */
Index: linux-stage/fs/ext4/ioctl.c
===================================================================
---- linux-stage.orig/fs/ext4/ioctl.c 2011-03-05 12:34:11.299779163 +0800
-+++ linux-stage/fs/ext4/ioctl.c 2011-03-05 12:34:16.862856069 +0800
+--- linux-stage.orig/fs/ext4/ioctl.c
++++ linux-stage/fs/ext4/ioctl.c
@@ -18,6 +18,71 @@
#include "ext4_jbd2.h"
#include "ext4.h"
long ext4_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
struct inode *inode = filp->f_dentry->d_inode;
-@@ -330,6 +395,9 @@
+@@ -330,6 +395,9 @@ mext_out:
mnt_drop_write(filp->f_path.mnt);
return err;
}
return -ENOTTY;
Index: linux-stage/fs/ext4/fiemap.h
===================================================================
---- /dev/null 1970-01-01 00:00:00.000000000 +0000
-+++ linux-stage/fs/ext4/fiemap.h 2011-03-05 12:36:24.606879702 +0800
+--- /dev/null
++++ linux-stage/fs/ext4/fiemap.h
@@ -0,0 +1,2 @@
+
+#include_next <fiemap.h>
default:
ext4_msg(sb, KERN_ERR,
"Unrecognized mount option \"%s\" "
-@@ -3031,6 +3037,26 @@
+@@ -3031,6 +3037,16 @@
goto failed_mount;
}
+ }
+ }
+
-+ if (ext4_blocks_count(es) >= (1ULL << 32)) {
-+ if (force_over_16tb == 0) {
-+ printk(KERN_ERR "EXT4-fs does not support filesystems "
-+ "greater than 16TB and can cause data corruption."
-+ "Use \"force_over_16tb\" mount option to override."
-+ "\n");
-+ goto failed_mount;
-+ }
-+ }
-+
if (EXT4_BLOCKS_PER_GROUP(sb) == 0)
goto cantfind_ext4;
+++ /dev/null
-Index: linux-2.6.32-el6-beta/fs/ext4/inode.c
-===================================================================
---- linux-2.6.32-el6-beta.orig/fs/ext4/inode.c
-+++ linux-2.6.32-el6-beta/fs/ext4/inode.c
-@@ -5834,3 +5834,67 @@ out_unlock:
- up_read(&inode->i_alloc_sem);
- return ret;
- }
-+
-+int ext4_map_inode_page(struct inode *inode, struct page *page,
-+ unsigned long *blocks, int *created, int create)
-+{
-+ unsigned int blocksize, blocks_per_page;
-+ unsigned long iblock;
-+ struct buffer_head dummy;
-+ void *handle;
-+ int i, rc = 0, failed = 0, needed_blocks;
-+
-+ blocksize = inode->i_sb->s_blocksize;
-+ blocks_per_page = PAGE_SIZE >> inode->i_sb->s_blocksize_bits;
-+ iblock = page->index * blocks_per_page;
-+
-+ for (i = 0; i < blocks_per_page; i++, iblock++) {
-+ blocks[i] = ext4_bmap(inode->i_mapping, iblock);
-+ if (blocks[i] == 0) {
-+ failed++;
-+ if (created)
-+ created[i] = -1;
-+ } else if (created) {
-+ created[i] = 0;
-+ }
-+ }
-+
-+ if (failed == 0 || create == 0)
-+ return 0;
-+
-+ needed_blocks = ext4_writepage_trans_blocks(inode);
-+ handle = ext4_journal_start(inode, needed_blocks);
-+ if (IS_ERR(handle))
-+ return PTR_ERR(handle);
-+
-+ iblock = page->index * blocks_per_page;
-+ for (i = 0; i < blocks_per_page; i++, iblock++) {
-+ if (blocks[i] != 0)
-+ continue;
-+
-+ rc = ext4_get_blocks(handle, inode, iblock, 1, &dummy, 1);
-+ if (rc < 0) {
-+ printk(KERN_INFO "ext4_map_inode_page: error reading "
-+ "block %ld\n", iblock);
-+ goto out;
-+ } else {
-+ if (rc > 1)
-+ WARN_ON(1);
-+
-+ rc = 0;
-+ }
-+ /* Unmap any metadata buffers from the block mapping, to avoid
-+ * data corruption due to direct-write from Lustre being
-+ * clobbered by a later flush of the blockdev metadata buffer.*/
-+ if (buffer_new(&dummy))
-+ unmap_underlying_metadata(dummy.b_bdev,
-+ dummy.b_blocknr);
-+ blocks[i] = dummy.b_blocknr;
-+ if (created)
-+ created[i] = 1;
-+ }
-+
-+out:
-+ ext4_journal_stop(handle);
-+ return rc;
-+}
-Index: linux-2.6.32-el6-beta/fs/ext4/super.c
-===================================================================
---- linux-2.6.32-el6-beta.orig/fs/ext4/super.c
-+++ linux-2.6.32-el6-beta/fs/ext4/super.c
-@@ -4084,6 +4084,10 @@ static void __exit exit_ext4_fs(void)
- exit_ext4_system_zone();
- }
-
-+int ext4_map_inode_page(struct inode *inode, struct page *page,
-+ unsigned long *blocks, int *created, int create);
-+EXPORT_SYMBOL(ext4_map_inode_page);
-+
- MODULE_AUTHOR("Remy Card, Stephen Tweedie, Andrew Morton, Andreas Dilger, Theodore Ts'o and others");
- MODULE_DESCRIPTION("Fourth Extended Filesystem");
- MODULE_LICENSE("GPL");
+ ext4_journal_stop(handle);
+ return rc;
+}
++EXPORT_SYMBOL(ext4_map_inode_page);
Index: linux-2.6.18.i386/fs/ext4/super.c
===================================================================
--- linux-2.6.18.i386.orig/fs/ext4/super.c
-Index: linux-2.6.32.i386/fs/ext4/ext4.h
+Index: linux-stage/fs/ext4/ext4.h
===================================================================
---- linux-2.6.32.i386.orig/fs/ext4/ext4.h 2010-04-16 04:57:39.000000000 +0530
-+++ linux-2.6.32.i386/fs/ext4/ext4.h 2010-04-16 05:27:02.000000000 +0530
-@@ -1512,6 +1512,19 @@
+--- linux-stage.orig/fs/ext4/ext4.h
++++ linux-stage/fs/ext4/ext4.h
+@@ -1599,6 +1599,19 @@ extern int ext4_orphan_add(handle_t *, s
extern int ext4_orphan_del(handle_t *, struct inode *);
extern int ext4_htree_fill_tree(struct file *dir_file, __u32 start_hash,
__u32 start_minor_hash, __u32 *next_hash);
/* resize.c */
extern int ext4_group_add(struct super_block *sb,
-Index: linux-2.6.32.i386/fs/ext4/namei.c
+Index: linux-stage/fs/ext4/namei.c
===================================================================
---- linux-2.6.32.i386.orig/fs/ext4/namei.c 2010-04-16 04:57:39.000000000 +0530
-+++ linux-2.6.32.i386/fs/ext4/namei.c 2010-04-16 05:28:25.000000000 +0530
+--- linux-stage.orig/fs/ext4/namei.c
++++ linux-stage/fs/ext4/namei.c
@@ -24,6 +24,7 @@
* Theodore Ts'o, 2002
*/
#include <linux/fs.h>
#include <linux/pagemap.h>
#include <linux/jbd2.h>
-@@ -902,9 +903,9 @@
+@@ -873,9 +874,9 @@ static inline int search_dirblock(struct
* The returned buffer_head has ->b_count elevated. The caller is expected
* to brelse() it when appropriate.
*/
{
struct super_block *sb;
struct buffer_head *bh_use[NAMEI_RA_SIZE];
-@@ -1011,6 +1012,7 @@
+@@ -981,6 +982,7 @@ cleanup_and_exit:
brelse(bh_use[ra_ptr]);
return ret;
}
static struct buffer_head * ext4_dx_find_entry(struct inode *dir, const struct qstr *d_name,
struct ext4_dir_entry_2 **res_dir, int *err)
-@@ -1538,8 +1540,8 @@
+@@ -1503,8 +1505,8 @@ static int make_indexed_dir(handle_t *ha
* may not sleep between calling this and putting something into
* the entry, as someone else might have used it while you slept.
*/
{
struct inode *dir = dentry->d_parent->d_inode;
struct buffer_head *bh;
-@@ -1588,6 +1590,7 @@
+@@ -1553,6 +1555,7 @@ static int ext4_add_entry(handle_t *hand
brelse(bh);
return retval;
}
/*
* Returns 0 for success, or a negative error value
-@@ -1728,10 +1731,10 @@
+@@ -1692,10 +1695,10 @@ cleanup:
* ext4_delete_entry deletes a directory entry by merging it with the
* previous entry
*/
{
struct ext4_dir_entry_2 *de, *pde;
unsigned int blocksize = dir->i_sb->s_blocksize;
-@@ -1766,7 +1769,7 @@
+@@ -1730,7 +1733,7 @@ static int ext4_delete_entry(handle_t *h
}
return -ENOENT;
}
/*
* DIR_NLINK feature is set if 1) nlinks > EXT4_LINK_MAX or 2) nlinks == 2,
* since this indicates that nlinks count was previously 1.
-@@ -1831,6 +1834,26 @@
+@@ -1795,6 +1798,27 @@ static unsigned ext4_dentry_goal(struct
return inum;
}
+{
+ struct inode *inode;
+
-+ inode = ext4_new_inode(handle, dir, mode, 0, 0);
++ inode = ext4_new_inode(handle, dir, mode, 0, EXT4_SB(dir->i_sb)->s_inode_goal);
+ if (!IS_ERR(inode)) {
+ if (S_ISCHR(mode) || S_ISBLK(mode) || S_ISFIFO(mode)) {
+#ifdef CONFIG_LDISKFS_FS_XATTR
+ inode->i_fop = &ext4_file_operations;
+ ext4_set_aops(inode);
+ }
++ unlock_new_inode(inode);
+ }
+ return inode;
+}
/*
* By the time this is called, we already have created
* the directory cache entry for the new file, but it
-@@ -1905,40 +1928,33 @@
+@@ -1871,40 +1895,33 @@ retry:
return err;
}
ext4_handle_sync(handle);
- inode = ext4_new_inode(handle, dir, S_IFDIR | mode,
-- &dentry->d_name, 0);
+- &dentry->d_name, ext4_dentry_goal(dir->i_sb, dentry));
- err = PTR_ERR(inode);
- if (IS_ERR(inode))
- goto out_stop;
BUFFER_TRACE(dir_block, "get_write_access");
ext4_journal_get_write_access(handle, dir_block);
de = (struct ext4_dir_entry_2 *) dir_block->b_data;
-@@ -1960,9 +1976,43 @@
+@@ -1926,9 +1943,45 @@ retry:
ext4_handle_dirty_metadata(handle, dir, dir_block);
brelse(dir_block);
ext4_mark_inode_dirty(handle, inode);
+ goto out_stop;
+
+ err = ext4_add_dot_dotdot(handle, dir, inode);
-+ if (err)
++ if (err) {
++ unlock_new_inode(inode);
+ goto out_stop;
++ }
+
err = ext4_add_entry(handle, dentry, inode);
if (err) {
unsigned int s_mb_group_prealloc;
unsigned int s_max_writeback_mb_bump;
/* where last allocation was done - for stream allocation */
+Index: linux-stage/fs/ext4/inode.c
+===================================================================
+@@ -3028,6 +3028,11 @@ static int ext4_da_writepages(struct add
+ if (unlikely(sbi->s_mount_flags & EXT4_MF_FS_ABORTED))
+ return -EROFS;
+
++ if (wbc->nr_to_write < sbi->s_mb_small_req) {
++ nr_to_writebump = sbi->s_mb_small_req - wbc->nr_to_write;
++ wbc->nr_to_write = sbi->s_mb_small_req;
++ }
++
+ if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX)
+ range_whole = 1;
+
Index: linux-stage/fs/ext4/mballoc.c
===================================================================
--- linux-stage.orig/fs/ext4/mballoc.c 2011-03-11 14:03:32.000000000 +0800
+Index: linux-2.6.18-128.1.6/fs/ext4/super.c
+===================================================================
+--- linux-2.6.18-128.1.6.orig/fs/ext4/super.c
++++ linux-2.6.18-128.1.6/fs/ext4/super.c
+@@ -108,7 +108,8 @@
+ EXT4_RW_ATTR_SBI_UI(mb_max_to_scan, s_mb_max_to_scan);
+ EXT4_RW_ATTR_SBI_UI(mb_min_to_scan, s_mb_min_to_scan);
+ EXT4_RW_ATTR_SBI_UI(mb_order2_req, s_mb_order2_reqs);
+-EXT4_RW_ATTR_SBI_UI(mb_stream_req, s_mb_stream_request);
++EXT4_RW_ATTR_SBI_UI(mb_small_req, s_mb_small_req);
++EXT4_RW_ATTR_SBI_UI(mb_large_req, s_mb_large_req);
+ EXT4_RW_ATTR_SBI_UI(mb_group_prealloc, s_mb_group_prealloc);
+ EXT4_RW_ATTR_SBI_UI(max_dir_size, s_max_dir_size);
+
+@@ -108,7 +108,8 @@
+ ATTR_LIST(mb_max_to_scan),
+ ATTR_LIST(mb_min_to_scan),
+ ATTR_LIST(mb_order2_req),
+- ATTR_LIST(mb_stream_req),
++ ATTR_LIST(mb_small_req),
++ ATTR_LIST(mb_large_req),
+ ATTR_LIST(mb_group_prealloc),
+ ATTR_LIST(max_dir_size),
+ NULL,
Index: linux-2.6.27.21-0.1/fs/ext4/ext4_sb.h
===================================================================
--- linux-2.6.27.21-0.1.orig/fs/ext4/ext4_sb.h 2009-05-28 11:13:24.000000000 +0530
===================================================================
--- linux-2.6.27.21-0.1.orig/fs/ext4/mballoc.c 2009-05-28 11:12:43.000000000 +0530
+++ linux-2.6.27.21-0.1/fs/ext4/mballoc.c 2009-05-28 11:18:09.000000000 +0530
-@@ -1996,7 +1996,7 @@
- if (size < isize)
- size = isize;
-
-- if (size < sbi->s_mb_stream_request &&
-+ if ((ac->ac_g_ex.fe_len < sbi->s_mb_large_req) &&
- (ac->ac_flags & EXT4_MB_HINT_DATA)) {
- /* TBD: may be hot point */
- spin_lock(&sbi->s_md_lock);
@@ -2686,6 +2686,26 @@
return -ENOMEM;
}
+}
+
+
- int ext4_mb_init(struct super_block *sb, int needs_recovery)
+ static int ext4_mb_good_group(struct ext4_allocation_context *ac,
+ ext4_group_t group, int cr)
{
- struct ext4_sb_info *sbi = EXT4_SB(sb);
-@@ -2738,13 +2758,57 @@
- sbi->s_mb_max_to_scan = MB_DEFAULT_MAX_TO_SCAN;
- sbi->s_mb_min_to_scan = MB_DEFAULT_MIN_TO_SCAN;
- sbi->s_mb_stats = MB_DEFAULT_STATS;
-- sbi->s_mb_stream_request = MB_DEFAULT_STREAM_THRESHOLD;
- sbi->s_mb_order2_reqs = MB_DEFAULT_ORDER2_REQS;
- sbi->s_mb_history_filter = EXT4_MB_HISTORY_DEFAULT;
-- sbi->s_mb_group_prealloc = MB_DEFAULT_GROUP_PREALLOC;
-+
-+ if (sbi->s_stripe == 0) {
-+ sbi->s_mb_prealloc_table_size = 10;
-+ i = sbi->s_mb_prealloc_table_size * sizeof(unsigned long);
-+ sbi->s_mb_prealloc_table = kmalloc(i, GFP_NOFS);
-+ if (sbi->s_mb_prealloc_table == NULL) {
-+ kfree(sbi->s_mb_offsets);
-+ kfree(sbi->s_mb_maxs);
-+ return -ENOMEM;
-+ }
-+ memset(sbi->s_mb_prealloc_table, 0, i);
+@@ -2325,6 +2389,80 @@
+ .llseek = seq_lseek,
+ .release = seq_release,
+ };
+
-+ ext4_mb_prealloc_table_add(sbi, 4);
-+ ext4_mb_prealloc_table_add(sbi, 8);
-+ ext4_mb_prealloc_table_add(sbi, 16);
-+ ext4_mb_prealloc_table_add(sbi, 32);
-+ ext4_mb_prealloc_table_add(sbi, 64);
-+ ext4_mb_prealloc_table_add(sbi, 128);
-+ ext4_mb_prealloc_table_add(sbi, 256);
-+ ext4_mb_prealloc_table_add(sbi, 512);
-+ ext4_mb_prealloc_table_add(sbi, 1024);
-+ ext4_mb_prealloc_table_add(sbi, 2048);
++#define EXT4_MB_PREALLOC_TABLE "prealloc_table"
+
-+ sbi->s_mb_small_req = 256;
-+ sbi->s_mb_large_req = 1024;
-+ sbi->s_mb_group_prealloc = 512;
-+ } else {
-+ sbi->s_mb_prealloc_table_size = 3;
-+ i = sbi->s_mb_prealloc_table_size * sizeof(unsigned long);
-+ sbi->s_mb_prealloc_table = kmalloc(i, GFP_NOFS);
-+ if (sbi->s_mb_prealloc_table == NULL) {
-+ kfree(sbi->s_mb_offsets);
-+ kfree(sbi->s_mb_maxs);
-+ return -ENOMEM;
-+ }
-+ memset(sbi->s_mb_prealloc_table, 0, i);
-+
-+ ext4_mb_prealloc_table_add(sbi, sbi->s_stripe);
-+ ext4_mb_prealloc_table_add(sbi, sbi->s_stripe * 2);
-+ ext4_mb_prealloc_table_add(sbi, sbi->s_stripe * 4);
-+
-+ sbi->s_mb_small_req = sbi->s_stripe;
-+ sbi->s_mb_large_req = sbi->s_stripe * 8;
-+ sbi->s_mb_group_prealloc = sbi->s_stripe * 4;
-+ }
-
- sbi->s_locality_groups = alloc_percpu(struct ext4_locality_group);
- if (sbi->s_locality_groups == NULL) {
-+ kfree(sbi->s_mb_prealloc_table);
- kfree(sbi->s_mb_offsets);
- kfree(sbi->s_mb_maxs);
- return -ENOMEM;
-@@ -2915,15 +2977,90 @@
- #define EXT4_MB_MAX_TO_SCAN_NAME "max_to_scan"
- #define EXT4_MB_MIN_TO_SCAN_NAME "min_to_scan"
- #define EXT4_MB_ORDER2_REQ "order2_req"
--#define EXT4_MB_STREAM_REQ "stream_req"
-+#define EXT4_MB_SMALL_REQ "small_req"
-+#define EXT4_MB_LARGE_REQ "large_req"
-+#define EXT4_MB_PREALLOC_TABLE "prealloc_table"
- #define EXT4_MB_GROUP_PREALLOC "group_prealloc"
-
+static int ext4_mb_prealloc_table_proc_read(char *page, char **start, off_t off,
+ int count, int *eof, void *data)
+{
+
+ return cnt;
+}
-+
- static int ext4_mb_init_per_dev_proc(struct super_block *sb)
- {
- #ifdef CONFIG_PROC_FS
- mode_t mode = S_IFREG | S_IRUGO | S_IWUSR;
- struct ext4_sb_info *sbi = EXT4_SB(sb);
- struct proc_dir_entry *proc;
-+ struct proc_dir_entry *proc_entry;
- if (sbi->s_proc == NULL)
- return -EINVAL;
-@@ -2932,13 +3069,28 @@
- EXT4_PROC_HANDLER(EXT4_MB_MAX_TO_SCAN_NAME, mb_max_to_scan);
- EXT4_PROC_HANDLER(EXT4_MB_MIN_TO_SCAN_NAME, mb_min_to_scan);
- EXT4_PROC_HANDLER(EXT4_MB_ORDER2_REQ, mb_order2_reqs);
-- EXT4_PROC_HANDLER(EXT4_MB_STREAM_REQ, mb_stream_request);
-+ EXT4_PROC_HANDLER(EXT4_MB_SMALL_REQ, mb_small_req);
-+ EXT4_PROC_HANDLER(EXT4_MB_LARGE_REQ, mb_large_req);
- EXT4_PROC_HANDLER(EXT4_MB_GROUP_PREALLOC, mb_group_prealloc);
+ static void ext4_mb_history_release(struct super_block *sb)
+ {
+@@ -2400,6 +2400,7 @@
+ remove_proc_entry("mb_groups", sbi->s_proc);
+ if (sbi->s_mb_history_max)
+ remove_proc_entry("mb_history", sbi->s_proc);
++ remove_proc_entry(EXT4_MB_PREALLOC_TABLE, sbi->s_proc);
+ }
+ kfree(sbi->s_mb_history);
+ }
+@@ -2408,6 +2446,13 @@
+ p->proc_fops = &ext4_mb_seq_groups_fops;
+ p->data = sb;
+ }
++ p = create_proc_entry(EXT4_MB_PREALLOC_TABLE, S_IFREG |
++ S_IRUGO | S_IWUSR, sbi->s_proc);
++ if (p) {
++ p->data = sbi;
++ p->read_proc = ext4_mb_prealloc_table_proc_read;
++ p->write_proc = ext4_mb_prealloc_table_proc_write;
++ }
+ }
+
+ sbi->s_mb_history_cur = 0;
+@@ -2542,13 +2562,57 @@
+ sbi->s_mb_max_to_scan = MB_DEFAULT_MAX_TO_SCAN;
+ sbi->s_mb_min_to_scan = MB_DEFAULT_MIN_TO_SCAN;
+ sbi->s_mb_stats = MB_DEFAULT_STATS;
+- sbi->s_mb_stream_request = MB_DEFAULT_STREAM_THRESHOLD;
+ sbi->s_mb_order2_reqs = MB_DEFAULT_ORDER2_REQS;
+ sbi->s_mb_history_filter = EXT4_MB_HISTORY_DEFAULT;
+- sbi->s_mb_group_prealloc = MB_DEFAULT_GROUP_PREALLOC;
+
-+ proc_entry = create_proc_entry(EXT4_MB_PREALLOC_TABLE, S_IFREG |
-+ S_IRUGO | S_IWUSR, sbi->s_proc);
-+ if (proc_entry == NULL) {
-+ printk(KERN_ERR "EXT4-fs: unable to create %s\n",
-+ EXT4_MB_PREALLOC_TABLE);
-+ goto err_out;
-+ }
-+ proc_entry->data = sbi;
-+ proc_entry->read_proc = ext4_mb_prealloc_table_proc_read;
-+ proc_entry->write_proc = ext4_mb_prealloc_table_proc_write;
++ if (sbi->s_stripe == 0) {
++ sbi->s_mb_prealloc_table_size = 10;
++ i = sbi->s_mb_prealloc_table_size * sizeof(unsigned long);
++ sbi->s_mb_prealloc_table = kmalloc(i, GFP_NOFS);
++ if (sbi->s_mb_prealloc_table == NULL) {
++ kfree(sbi->s_mb_offsets);
++ kfree(sbi->s_mb_maxs);
++ return -ENOMEM;
++ }
++ memset(sbi->s_mb_prealloc_table, 0, i);
+
- return 0;
-
- err_out:
- remove_proc_entry(EXT4_MB_GROUP_PREALLOC, sbi->s_proc);
-- remove_proc_entry(EXT4_MB_STREAM_REQ, sbi->s_proc);
-+ remove_proc_entry(EXT4_MB_PREALLOC_TABLE, sbi->s_proc);
-+ remove_proc_entry(EXT4_MB_LARGE_REQ, sbi->s_proc);
-+ remove_proc_entry(EXT4_MB_SMALL_REQ, sbi->s_proc);
- remove_proc_entry(EXT4_MB_ORDER2_REQ, sbi->s_proc);
- remove_proc_entry(EXT4_MB_MIN_TO_SCAN_NAME, sbi->s_proc);
- remove_proc_entry(EXT4_MB_MAX_TO_SCAN_NAME, sbi->s_proc);
-@@ -2959,7 +3111,9 @@
- return -EINVAL;
++ ext4_mb_prealloc_table_add(sbi, 4);
++ ext4_mb_prealloc_table_add(sbi, 8);
++ ext4_mb_prealloc_table_add(sbi, 16);
++ ext4_mb_prealloc_table_add(sbi, 32);
++ ext4_mb_prealloc_table_add(sbi, 64);
++ ext4_mb_prealloc_table_add(sbi, 128);
++ ext4_mb_prealloc_table_add(sbi, 256);
++ ext4_mb_prealloc_table_add(sbi, 512);
++ ext4_mb_prealloc_table_add(sbi, 1024);
++ ext4_mb_prealloc_table_add(sbi, 2048);
++
++ sbi->s_mb_small_req = 256;
++ sbi->s_mb_large_req = 1024;
++ sbi->s_mb_group_prealloc = 512;
++ } else {
++ sbi->s_mb_prealloc_table_size = 3;
++ i = sbi->s_mb_prealloc_table_size * sizeof(unsigned long);
++ sbi->s_mb_prealloc_table = kmalloc(i, GFP_NOFS);
++ if (sbi->s_mb_prealloc_table == NULL) {
++ kfree(sbi->s_mb_offsets);
++ kfree(sbi->s_mb_maxs);
++ return -ENOMEM;
++ }
++ memset(sbi->s_mb_prealloc_table, 0, i);
++
++ ext4_mb_prealloc_table_add(sbi, sbi->s_stripe);
++ ext4_mb_prealloc_table_add(sbi, sbi->s_stripe * 2);
++ ext4_mb_prealloc_table_add(sbi, sbi->s_stripe * 4);
++
++ sbi->s_mb_small_req = sbi->s_stripe;
++ sbi->s_mb_large_req = sbi->s_stripe * 8;
++ sbi->s_mb_group_prealloc = sbi->s_stripe * 4;
++ }
- remove_proc_entry(EXT4_MB_GROUP_PREALLOC, sbi->s_proc);
-- remove_proc_entry(EXT4_MB_STREAM_REQ, sbi->s_proc);
-+ remove_proc_entry(EXT4_MB_PREALLOC_TABLE, sbi->s_proc);
-+ remove_proc_entry(EXT4_MB_LARGE_REQ, sbi->s_proc);
-+ remove_proc_entry(EXT4_MB_SMALL_REQ, sbi->s_proc);
- remove_proc_entry(EXT4_MB_ORDER2_REQ, sbi->s_proc);
- remove_proc_entry(EXT4_MB_MIN_TO_SCAN_NAME, sbi->s_proc);
- remove_proc_entry(EXT4_MB_MAX_TO_SCAN_NAME, sbi->s_proc);
-@@ -3162,11 +3316,12 @@
+ sbi->s_locality_groups = alloc_percpu(struct ext4_locality_group);
+ if (sbi->s_locality_groups == NULL) {
++ kfree(sbi->s_mb_prealloc_table);
+ kfree(sbi->s_mb_offsets);
+ kfree(sbi->s_mb_maxs);
+ return -ENOMEM;
+@@ -3032,11 +3186,12 @@
ext4_mb_normalize_request(struct ext4_allocation_context *ac,
struct ext4_allocation_request *ar)
{
/* don't cover already allocated blocks in selected range */
if (ar->pleft && start <= ar->lleft) {
-@@ -3315,7 +3456,6 @@
+@@ -3185,7 +3326,6 @@
}
BUG_ON(start + size <= ac->ac_o_ex.fe_logical &&
- start > ac->ac_o_ex.fe_logical);
-- BUG_ON(size <= 0 || size >= EXT4_BLOCKS_PER_GROUP(ac->ac_sb));
+ start > ac->ac_o_ex.fe_logical);
+- BUG_ON(size <= 0 || size > EXT4_BLOCKS_PER_GROUP(ac->ac_sb));
/* now prepare goal request */
-@@ -4236,22 +4376,32 @@
- {
- struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb);
- int bsbits = ac->ac_sb->s_blocksize_bits;
-- loff_t size, isize;
-+ loff_t size;
-
- if (!(ac->ac_flags & EXT4_MB_HINT_DATA))
- return;
+@@ -4077,11 +4217,17 @@
-- size = ac->ac_o_ex.fe_logical + ac->ac_o_ex.fe_len;
-- isize = i_size_read(ac->ac_inode) >> bsbits;
-- size = max(size, isize);
--
-- /* don't use group allocation for large files */
-- if (size >= sbi->s_mb_stream_request)
-+ if (ac->ac_o_ex.fe_len >= sbi->s_mb_small_req)
- return;
-
- if (unlikely(ac->ac_flags & EXT4_MB_HINT_GOAL_ONLY))
- return;
+ /* don't use group allocation for large files */
+ size = max(size, isize);
+- if (size >= sbi->s_mb_stream_request) {
++ if ((ac->ac_o_ex.fe_len >= sbi->s_mb_small_req) ||
++ (size >= sbi->s_mb_large_req)) {
+ ac->ac_flags |= EXT4_MB_STREAM_ALLOC;
+ return;
+ }
+ /* request is so large that we don't care about
+ * streaming - it overweights any possible seek */
+ if (ac->ac_o_ex.fe_len >= sbi->s_mb_large_req)
+ return;
+
-+ size = ac->ac_o_ex.fe_logical + ac->ac_o_ex.fe_len;
-+ size = size << bsbits;
-+ if (size < i_size_read(ac->ac_inode))
-+ size = i_size_read(ac->ac_inode);
-+ size = (size + ac->ac_sb->s_blocksize - 1) >> bsbits;
-+
-+ /* don't use group allocation for large files */
-+ if (size >= sbi->s_mb_large_req)
-+ return;
-+
BUG_ON(ac->ac_lg != NULL);
/*
* locality group prealloc space are per cpu. The reason for having
/*
* By the time this is called, we already have created
* the directory cache entry for the new file, but it
+@@ -1745,7 +1769,8 @@ retry:
+ if (IS_DIRSYNC(dir))
+ ext4_handle_sync(handle);
+
+- inode = ext4_new_inode(handle, dir, mode, &dentry->d_name, 0);
++ inode = ext4_new_inode(handle, dir, mode, &dentry->d_name,
++ ext4_dentry_goal(dir->i_sb, dentry));
+ err = PTR_ERR(inode);
+ if (!IS_ERR(inode)) {
+ inode->i_op = &ext4_file_inode_operations;
+@@ -1779,7 +1804,8 @@ retry:
+ if (IS_DIRSYNC(dir))
+ ext4_handle_sync(handle);
+
+- inode = ext4_new_inode(handle, dir, mode, &dentry->d_name, 0);
++ inode = ext4_new_inode(handle, dir, mode, &dentry->d_name,
++ ext4_dentry_goal(dir->i_sb, dentry));
+ err = PTR_ERR(inode);
+ if (!IS_ERR(inode)) {
+ init_special_inode(inode, inode->i_mode, rdev);
+@@ -1817,7 +1843,7 @@ retry:
+ ext4_handle_sync(handle);
+
+ inode = ext4_new_inode(handle, dir, S_IFDIR | mode,
+- &dentry->d_name, 0);
++ &dentry->d_name, ext4_dentry_goal(dir->i_sb, dentry));
+ err = PTR_ERR(inode);
+ if (IS_ERR(inode))
+ goto out_stop;
+@@ -2238,7 +2264,7 @@ retry:
+ ext4_handle_sync(handle);
+
+ inode = ext4_new_inode(handle, dir, S_IFLNK|S_IRWXUGO,
+- &dentry->d_name, 0);
++ &dentry->d_name, ext4_dentry_goal(dir->i_sb, dentry));
+ err = PTR_ERR(inode);
+ if (IS_ERR(inode))
+ goto out_stop;
make use of dentry->d_fsdata to pass fid to ext4. so no
changes in ext4_add_entry() interface required.
-Index: linux-2.6.32.i386/fs/ext4/dir.c
+Index: linux-stage/fs/ext4/dir.c
===================================================================
---- linux-2.6.32.i386.orig/fs/ext4/dir.c 2009-12-03 09:21:21.000000000 +0530
-+++ linux-2.6.32.i386/fs/ext4/dir.c 2010-04-16 06:25:43.000000000 +0530
-@@ -53,11 +53,18 @@
+--- linux-stage.orig/fs/ext4/dir.c
++++ linux-stage/fs/ext4/dir.c
+@@ -53,11 +53,18 @@ const struct file_operations ext4_dir_op
static unsigned char get_dtype(struct super_block *sb, int filetype)
{
}
-@@ -70,11 +77,11 @@
+@@ -70,11 +77,11 @@ int ext4_check_dir_entry(const char *fun
const int rlen = ext4_rec_len_from_disk(de->rec_len,
dir->i_sb->s_blocksize);
error_msg = "rec_len is too small for name_len";
else if (((char *) de - bh->b_data) + rlen > dir->i_sb->s_blocksize)
error_msg = "directory entry across blocks";
-@@ -179,7 +186,7 @@
+@@ -181,7 +188,7 @@ revalidate:
* failure will be detected in the
* dirent test below. */
if (ext4_rec_len_from_disk(de->rec_len,
break;
i += ext4_rec_len_from_disk(de->rec_len,
sb->s_blocksize);
-@@ -342,12 +349,17 @@
+@@ -344,12 +351,17 @@ int ext4_htree_store_dirent(struct file
struct fname *fname, *new_fn;
struct dir_private_info *info;
int len;
new_fn = kzalloc(len, GFP_KERNEL);
if (!new_fn)
return -ENOMEM;
-@@ -356,7 +368,7 @@
+@@ -358,7 +370,7 @@ int ext4_htree_store_dirent(struct file
new_fn->inode = le32_to_cpu(dirent->inode);
new_fn->name_len = dirent->name_len;
new_fn->file_type = dirent->file_type;
new_fn->name[dirent->name_len] = 0;
while (*p) {
-Index: linux-2.6.32.i386/fs/ext4/ext4.h
+Index: linux-stage/fs/ext4/ext4.h
===================================================================
---- linux-2.6.32.i386.orig/fs/ext4/ext4.h 2010-04-16 06:10:06.000000000 +0530
-+++ linux-2.6.32.i386/fs/ext4/ext4.h 2010-04-16 06:27:40.000000000 +0530
-@@ -1135,6 +1135,7 @@
+--- linux-stage.orig/fs/ext4/ext4.h
++++ linux-stage/fs/ext4/ext4.h
+@@ -1187,6 +1187,7 @@ static inline void ext4_clear_inode_stat
#define EXT4_FEATURE_INCOMPAT_64BIT 0x0080
#define EXT4_FEATURE_INCOMPAT_MMP 0x0100
#define EXT4_FEATURE_INCOMPAT_FLEX_BG 0x0200
#define EXT4_FEATURE_COMPAT_SUPP EXT2_FEATURE_COMPAT_EXT_ATTR
#define EXT4_FEATURE_INCOMPAT_SUPP (EXT4_FEATURE_INCOMPAT_FILETYPE| \
-@@ -1143,7 +1144,9 @@
+@@ -1195,7 +1196,9 @@ static inline void ext4_clear_inode_stat
EXT4_FEATURE_INCOMPAT_EXTENTS| \
EXT4_FEATURE_INCOMPAT_64BIT| \
EXT4_FEATURE_INCOMPAT_FLEX_BG| \
#define EXT4_FEATURE_RO_COMPAT_SUPP (EXT4_FEATURE_RO_COMPAT_SPARSE_SUPER| \
EXT4_FEATURE_RO_COMPAT_LARGE_FILE| \
EXT4_FEATURE_RO_COMPAT_GDT_CSUM| \
-@@ -1225,6 +1228,43 @@
+@@ -1277,6 +1280,43 @@ struct ext4_dir_entry_2 {
#define EXT4_FT_SYMLINK 7
#define EXT4_FT_MAX 8
/*
* EXT4_DIR_PAD defines the directory entries boundaries
-@@ -1233,8 +1273,11 @@
+@@ -1285,8 +1325,11 @@ struct ext4_dir_entry_2 {
*/
#define EXT4_DIR_PAD 4
#define EXT4_DIR_ROUND (EXT4_DIR_PAD - 1)
+
#define EXT4_MAX_REC_LEN ((1<<16)-1)
- /*
-@@ -1524,7 +1567,7 @@
+ static inline unsigned int
+@@ -1613,7 +1656,7 @@ extern struct buffer_head * ext4_find_en
struct ext4_dir_entry_2 ** res_dir);
#define ll_ext4_find_entry(inode, dentry, res_dir) ext4_find_entry(inode, &(dentry)->d_name, res_dir)
extern int ext4_add_dot_dotdot(handle_t *handle, struct inode *dir,
extern struct buffer_head *ext4_append(handle_t *handle,
struct inode *inode,
ext4_lblk_t *block, int *err);
-@@ -1851,6 +1894,28 @@
- set_bit(BH_BITMAP_UPTODATE, &(bh)->b_state);
- }
+@@ -1944,6 +1987,28 @@ static inline void set_bitmap_uptodate(s
+
+ #define in_range(b, first, len) ((b) >= (first) && (b) <= (first) + (len) - 1)
+/*
+ * Compute the total directory entry data length.
#endif /* __KERNEL__ */
#endif /* _EXT4_H */
-Index: linux-2.6.32.i386/fs/ext4/namei.c
+Index: linux-stage/fs/ext4/namei.c
===================================================================
---- linux-2.6.32.i386.orig/fs/ext4/namei.c 2010-04-16 05:47:41.000000000 +0530
-+++ linux-2.6.32.i386/fs/ext4/namei.c 2010-04-16 06:40:38.000000000 +0530
-@@ -170,7 +170,8 @@
+--- linux-stage.orig/fs/ext4/namei.c
++++ linux-stage/fs/ext4/namei.c
+@@ -169,7 +169,8 @@ static unsigned dx_get_count(struct dx_e
static unsigned dx_get_limit(struct dx_entry *entries);
static void dx_set_count(struct dx_entry *entries, unsigned value);
static void dx_set_limit(struct dx_entry *entries, unsigned value);
static unsigned dx_node_limit(struct inode *dir);
static struct dx_frame *dx_probe(const struct qstr *d_name,
struct inode *dir,
-@@ -237,11 +238,12 @@
+@@ -212,11 +213,12 @@ ext4_next_entry(struct ext4_dir_entry_2
*/
struct dx_root_info * dx_get_dx_info(struct ext4_dir_entry_2 *de)
{
return (struct dx_root_info *) de;
}
-@@ -286,16 +288,23 @@
+@@ -261,16 +263,23 @@ static inline void dx_set_limit(struct d
((struct dx_countlimit *) entries)->limit = cpu_to_le16(value);
}
return entry_space / sizeof(struct dx_entry);
}
-@@ -342,7 +351,7 @@
+@@ -317,7 +326,7 @@ static struct stats dx_show_leaf(struct
printk(":%x.%u ", h.hash,
((char *) de - base));
}
names++;
}
de = ext4_next_entry(de, size);
-@@ -447,7 +456,8 @@
+@@ -419,7 +428,8 @@ dx_probe(const struct qstr *d_name, stru
entries = (struct dx_entry *) (((char *)info) + info->info_length);
+ if (dx_get_limit(entries) != dx_root_limit(dir->i_sb->s_blocksize,
+ (struct ext4_dir_entry_2*)bh->b_data,
info->info_length)) {
- ext4_warning(dir->i_sb, __func__,
- "dx entry: limit != root limit");
-@@ -637,7 +647,7 @@
+ ext4_warning(dir->i_sb, "dx entry: limit != root limit");
+ brelse(bh);
+@@ -608,7 +618,7 @@ static int htree_dirblock_to_tree(struct
de = (struct ext4_dir_entry_2 *) bh->b_data;
top = (struct ext4_dir_entry_2 *) ((char *) de +
dir->i_sb->s_blocksize -
for (; de < top; de = ext4_next_entry(de, dir->i_sb->s_blocksize)) {
if (!ext4_check_dir_entry("htree_dirblock_to_tree", dir, de, bh,
(block<<EXT4_BLOCK_SIZE_BITS(dir->i_sb))
-@@ -1050,7 +1060,7 @@
+@@ -1020,7 +1030,7 @@ static struct buffer_head * ext4_dx_find
goto errout;
de = (struct ext4_dir_entry_2 *) bh->b_data;
top = (struct ext4_dir_entry_2 *) ((char *) de + sb->s_blocksize -
for (; de < top; de = ext4_next_entry(de, sb->s_blocksize)) {
int off = (block << EXT4_BLOCK_SIZE_BITS(sb))
+ ((char *) de - bh->b_data);
-@@ -1216,7 +1226,7 @@
+@@ -1181,7 +1191,7 @@ dx_move_dirents(char *from, char *to, st
while (count--) {
struct ext4_dir_entry_2 *de = (struct ext4_dir_entry_2 *)
(from + (map->offs<<2));
memcpy (to, de, rec_len);
((struct ext4_dir_entry_2 *) to)->rec_len =
ext4_rec_len_to_disk(rec_len, blocksize);
-@@ -1240,7 +1250,7 @@
+@@ -1205,7 +1215,7 @@ static struct ext4_dir_entry_2* dx_pack_
while ((char*)de < base + blocksize) {
next = ext4_next_entry(de, blocksize);
if (de->inode && de->name_len) {
if (de > to)
memmove(to, de, rec_len);
to->rec_len = ext4_rec_len_to_disk(rec_len, blocksize);
-@@ -1370,10 +1380,16 @@
+@@ -1335,10 +1345,16 @@ static int add_dirent_to_buf(handle_t *h
unsigned int offset = 0;
unsigned int blocksize = dir->i_sb->s_blocksize;
unsigned short reclen;
if (!de) {
de = (struct ext4_dir_entry_2 *)bh->b_data;
top = bh->b_data + blocksize - reclen;
-@@ -1383,7 +1399,7 @@
+@@ -1348,7 +1364,7 @@ static int add_dirent_to_buf(handle_t *h
return -EIO;
if (ext4_match(namelen, name, de))
return -EEXIST;
rlen = ext4_rec_len_from_disk(de->rec_len, blocksize);
if ((de->inode? rlen - nlen: rlen) >= reclen)
break;
-@@ -1401,7 +1417,7 @@
+@@ -1366,7 +1382,7 @@ static int add_dirent_to_buf(handle_t *h
}
/* By now the buffer is marked for journaling */
rlen = ext4_rec_len_from_disk(de->rec_len, blocksize);
if (de->inode) {
struct ext4_dir_entry_2 *de1 = (struct ext4_dir_entry_2 *)((char *)de + nlen);
-@@ -1417,6 +1433,12 @@
+@@ -1382,6 +1398,12 @@ static int add_dirent_to_buf(handle_t *h
de->inode = 0;
de->name_len = namelen;
memcpy(de->name, name, namelen);
/*
* XXX shouldn't update any times until successful
* completion of syscall, but too many callers depend
-@@ -1515,7 +1537,8 @@
+@@ -1480,7 +1502,8 @@ static int make_indexed_dir(handle_t *ha
dx_set_block(entries, 1);
dx_set_count(entries, 1);
/* Initialize as for dx_probe */
hinfo.hash_version = dx_info->hash_version;
-@@ -1546,6 +1569,8 @@
+@@ -1511,6 +1534,8 @@ static int ext4_update_dotdot(handle_t *
struct buffer_head * dir_block;
struct ext4_dir_entry_2 * de;
int len, journal = 0, err = 0;
if (IS_ERR(handle))
return PTR_ERR(handle);
-@@ -1561,19 +1586,24 @@
+@@ -1526,19 +1551,24 @@ static int ext4_update_dotdot(handle_t *
/* the first item must be "." */
assert(de->name_len == 1 && de->name[0] == '.');
len = le16_to_cpu(de->rec_len);
de = (struct ext4_dir_entry_2 *)
((char *) de + le16_to_cpu(de->rec_len));
if (!journal) {
-@@ -1587,10 +1617,15 @@
+@@ -1552,10 +1582,15 @@ static int ext4_update_dotdot(handle_t *
if (len > 0)
de->rec_len = cpu_to_le16(len);
else
out_journal:
if (journal) {
-@@ -2011,12 +2046,13 @@
+@@ -1978,12 +2013,13 @@ retry:
/* Initialize @inode as a subdirectory of @dir, and add the
* "." and ".." entries into the first directory block. */
int ext4_add_dot_dotdot(handle_t *handle, struct inode * dir,
if (IS_ERR(handle))
return PTR_ERR(handle);
-@@ -2040,17 +2076,32 @@
+@@ -2007,17 +2043,32 @@ int ext4_add_dot_dotdot(handle_t *handle
de = (struct ext4_dir_entry_2 *) dir_block->b_data;
de->inode = cpu_to_le32(inode->i_ino);
de->name_len = 1;
inode->i_nlink = 2;
BUFFER_TRACE(dir_block, "call ext4_handle_dirty_metadata");
ext4_handle_dirty_metadata(handle, dir, dir_block);
-@@ -2087,7 +2138,7 @@
+@@ -2054,7 +2105,7 @@ retry:
if (IS_ERR(inode))
goto out_stop;
- err = ext4_add_dot_dotdot(handle, dir, inode);
+ err = ext4_add_dot_dotdot(handle, dir, inode, NULL, NULL);
- if (err)
+ if (err) {
+ unlock_new_inode(inode);
goto out_stop;
-
-@@ -2123,7 +2174,7 @@
+@@ -2092,7 +2143,7 @@ static int empty_dir(struct inode *inode
int err = 0;
sb = inode->i_sb;
+ if (inode->i_size < __EXT4_DIR_REC_LEN(1) + __EXT4_DIR_REC_LEN(2) ||
!(bh = ext4_bread(NULL, inode, 0, 0, &err))) {
if (err)
- ext4_error(inode->i_sb, __func__,
+ ext4_error(inode->i_sb,
ext4-wantedi-2.6-rhel6.patch
-ext4-map_inode_page-2.6-rhel6.patch
-export-ext4-2.6-rhel6.patch
+ext4-map_inode_page-2.6.18-rhel5.patch
+export-ext4-2.6-rhel5.patch
ext4-remove-cond_resched-calls-rhel5.patch
ext4-ext_generation-sles11.patch
ext4-inode-version-rhel6.patch
ext4-wantedi-2.6-sles11.patch
-iopen-sles11.patch
ext4-map_inode_page-sles11.patch
export-ext4-2.6-sles11.patch
ext4-include-fixes-2.6-sles11.patch
backfs_sources := $(filter-out %.mod.c,$(wildcard @LINUX@/fs/@BACKFS@/*.c))
-ext3_new_sources := iopen.c iopen.h extents.c mballoc.c group.h dynlocks.c fiemap.h
+ext3_new_sources := extents.c mballoc.c group.h dynlocks.c fiemap.h
ext3_new_headers := ext3_extents.h
-ext4_new_sources := iopen.c iopen.h dynlocks.c fiemap.h
+ext4_new_sources := dynlocks.c fiemap.h
ext4_new_headers :=
new_sources := $(@BACKFS@_new_sources)
])
])
+#2.6.23 has new shrinker API
+AC_DEFUN([LC_REGISTER_SHRINKER],
+[AC_MSG_CHECKING([if kernel has register_shrinker])
+LB_LINUX_TRY_COMPILE([
+ #include <linux/mm.h>
+],[
+ register_shrinker(NULL);
+], [
+ AC_MSG_RESULT([yes])
+ AC_DEFINE(HAVE_REGISTER_SHRINKER, 1,
+ [kernel has register_shrinker])
+],[
+ AC_MSG_RESULT([no])
+])
+])
+
# 2.6.24
AC_DEFUN([LIBCFS_NETLINK_CBMUTEX],
[AC_MSG_CHECKING([for mutex in netlink_kernel_create])
])
])
+# RHEL6/2.6.32 want to have pointer to shrinker self pointer in handler function
+AC_DEFUN([LC_SHRINKER_WANT_SHRINK_PTR],
+[AC_MSG_CHECKING([shrinker want self pointer in handler])
+LB_LINUX_TRY_COMPILE([
+ #include <linux/mm.h>
+],[
+ struct shrinker tmp;
+ tmp.shrink(NULL, 0, 0);
+],[
+	AC_MSG_RESULT([yes])
+ AC_DEFINE(HAVE_SHRINKER_WANT_SHRINK_PTR, 1,
+ [shrinker want self pointer in handler])
+],[
+	AC_MSG_RESULT([no])
+])
+])
+
#
# LIBCFS_PROG_LINUX
#
# 2.6.23
LIBCFS_KMEM_CACHE_CREATE_DTOR
LIBCFS_NETLINK_CBMUTEX
+LC_REGISTER_SHRINKER
# 2.6.24
LIBCFS_SYSCTL_UNNUMBERED
LIBCFS_SCATTERLIST_SETPAGE
LIBCFS_SOCK_MAP_FD_2ARG
# 2.6.32
LIBCFS_STACKTRACE_OPS_HAVE_WALK_STACK
+LC_SHRINKER_WANT_SHRINK_PTR
# 2.6.34
LIBCFS_ADD_WAIT_QUEUE_EXCLUSIVE
])
/*
* Shrinker
*/
+#define cfs_shrinker shrinker
-#ifndef HAVE_REGISTER_SHRINKER
-/* Shrinker callback */
-typedef shrinker_t cfs_shrinker_t;
-#define cfs_set_shrinker(seeks, shrinker) set_shrinker(seeks, shrinker)
-#define cfs_remove_shrinker(shrinker) remove_shrinker(shrinker)
-#endif /* !HAVE_REGISTER_SHRINKER */
+#ifdef HAVE_SHRINKER_WANT_SHRINK_PTR
+#define SHRINKER_FIRST_ARG struct shrinker *shrinker,
+#else
+#define SHRINKER_FIRST_ARG
+#endif
+
+#ifdef HAVE_REGISTER_SHRINKER
+typedef int (*cfs_shrinker_t)(SHRINKER_FIRST_ARG int nr_to_scan, gfp_t gfp_mask);
+
+static inline
+struct cfs_shrinker *cfs_set_shrinker(int seek, cfs_shrinker_t func)
+{
+ struct shrinker *s;
+
+ s = kmalloc(sizeof(*s), GFP_KERNEL);
+ if (s == NULL)
+ return (NULL);
+
+ s->shrink = func;
+ s->seeks = seek;
+
+ register_shrinker(s);
-/* struct shrinker */
-#define cfs_shrinker shrinker
+ return s;
+}
+
+static inline
+void cfs_remove_shrinker(struct cfs_shrinker *shrinker)
+{
+ if (shrinker == NULL)
+ return;
+
+ unregister_shrinker(shrinker);
+ kfree(shrinker);
+}
+#else
+typedef shrinker_t cfs_shrinker_t;
+#define cfs_set_shrinker(s, f) set_shrinker(s, f)
+#define cfs_remove_shrinker(s) remove_shrinker(s)
+#endif
#define CFS_DEFAULT_SEEKS DEFAULT_SEEKS
#endif /* __LINUX_CFS_MEM_H__ */
])
])
-# 2.6.23 has new shrinker API
-AC_DEFUN([LC_REGISTER_SHRINKER],
-[LB_CHECK_SYMBOL_EXPORT([register_shrinker],
-[mm/vmscan.c],[
- AC_DEFINE(HAVE_REGISTER_SHRINKER, 1,
- [kernel exports register_shrinker])
-# 2.6.32 added another argument to struct shrinker->shrink
- AC_MSG_CHECKING([if passing shrinker as first argument])
- tmp_flags="$EXTRA_KCFLAGS"
- EXTRA_KCFLAGS="-Werror"
- LB_LINUX_TRY_COMPILE([
- #include <linux/mm.h>
- int test_shrink(struct shrinker *, int, gfp_t);
- ],[
- struct shrinker *shr = NULL;
- shr->shrink = test_shrink;
- ],[
- AC_MSG_RESULT([yes])
- AC_DEFINE(SHRINKER_FIRST_ARG, [struct shrinker *shrinker,],
- [kernel is passing shrinker as first argument])
- ],[
- AC_DEFINE(SHRINKER_FIRST_ARG, ,
- [kernel is not passing shrinker as first argument])
- ])
- EXTRA_KCFLAGS="$tmp_flags"
-],[
- AC_DEFINE(SHRINKER_FIRST_ARG, ,
- [kernel does not exports register_shrinker,
- so SHRINKER_FIRST_ARG is empty])
-])
-])
-
# 2.6.23 add code to wait other users to complete before removing procfs entry
AC_DEFUN([LC_PROCFS_USERS],
[AC_MSG_CHECKING([if kernel has pde_users member in procfs entry struct])
#
# 2.6.27 sles11 move the quotaio_v1{2}.h from include/linux to fs
# 2.6.32 move the quotaio_v1{2}.h from fs to fs/quota
-AC_DEFUN([LC_HAVE_QUOTAIO_V1_H],
-[LB_CHECK_FILE([$LINUX/include/linux/quotaio_v1.h],[
- AC_DEFINE(HAVE_QUOTAIO_V1_H, 1,
- [kernel has include/linux/quotaio_v1.h])
-],[LB_CHECK_FILE([$LINUX/fs/quotaio_v1.h],[
- AC_DEFINE(HAVE_FS_QUOTAIO_V1_H, 1,
+AC_DEFUN([LC_HAVE_QUOTAIO_H],
+[LB_CHECK_FILE([$LINUX/include/linux/quotaio_v2.h],[
+ AC_DEFINE(HAVE_QUOTAIO_H, 1,
+ [kernel has include/linux/quotaio_v2.h])
+],[LB_CHECK_FILE([$LINUX/fs/quotaio_v2.h],[
+ AC_DEFINE(HAVE_FS_QUOTAIO_H, 1,
-	  [kernel has fs/quotaio_v1.h])
+	  [kernel has fs/quotaio_v2.h])
-],[LB_CHECK_FILE([$LINUX/fs/quota/quotaio_v1.h],[
- AC_DEFINE(HAVE_FS_QUOTA_QUOTAIO_V1_H, 1,
- [kernel has fs/quota/quotaio_v1.h])
+],[LB_CHECK_FILE([$LINUX/fs/quota/quotaio_v2.h],[
+ AC_DEFINE(HAVE_FS_QUOTA_QUOTAIO_H, 1,
+ [kernel has fs/quota/quotaio_v2.h])
],[
AC_MSG_RESULT([no])
])
LB_LINUX_TRY_COMPILE([
#include <linux/kernel.h>
#include <linux/fs.h>
- #ifdef HAVE_QUOTAIO_V1_H
+ #ifdef HAVE_QUOTAIO_H
# include <linux/quotaio_v2.h>
int versions[] = V2_INITQVERSIONS_R1;
struct v2_disk_dqblk_r1 dqblk_r1;
- #elif defined(HAVE_FS_QUOTA_QUOTAIO_V1_H)
+ #elif defined(HAVE_FS_QUOTA_QUOTAIO_H)
# include <quota/quotaio_v2.h>
struct v2r1_disk_dqblk dqblk_r1;
- #elif defined(HAVE_FS_QUOTAIO_V1_H)
+ #elif defined(HAVE_FS_QUOTAIO_H)
# include <quotaio_v2.h>
struct v2r1_disk_dqblk dqblk_r1;
#else
#
AC_DEFUN([LC_PROG_LINUX],
[LC_LUSTRE_VERSION_H
- if test x$enable_server = xyes ; then
- AC_DEFINE(HAVE_SERVER_SUPPORT, 1, [support server])
- LC_FUNC_DEV_SET_RDONLY
- LC_STACK_SIZE
- LC_CONFIG_BACKINGFS
- fi
LC_CONFIG_PINGER
LC_CONFIG_CHECKSUM
LC_CONFIG_LIBLUSTRE_RECOVERY
LC_KERNEL_SENDFILE
LC_HAVE_EXPORTFS_H
LC_VM_OP_FAULT
- LC_REGISTER_SHRINKER
LC_PROCFS_USERS
LC_EXPORTFS_DECODE_FH
# 2.6.27.15-2 sles11
LC_BI_HW_SEGMENTS
- LC_HAVE_QUOTAIO_V1_H
+ LC_HAVE_QUOTAIO_H
LC_VFS_SYMLINK_5ARGS
LC_SB_ANY_QUOTA_ACTIVE
LC_SB_HAS_QUOTA_ACTIVE
#
if test x$enable_server = xyes ; then
- if test x$enable_quota_module = xyes ; then
- LC_QUOTA64 # must after LC_HAVE_QUOTAIO_V1_H
- fi
+ AC_DEFINE(HAVE_SERVER_SUPPORT, 1, [support server])
+ LC_FUNC_DEV_SET_RDONLY
+ LC_STACK_SIZE
+ LC_CONFIG_BACKINGFS
+ LC_QUOTA64
fi
])
#define cpu_to_node(cpu) 0
#endif
-#ifdef HAVE_REGISTER_SHRINKER
-typedef int (*cfs_shrinker_t)(SHRINKER_FIRST_ARG int nr_to_scan, gfp_t gfp_mask);
-
-static inline
-struct shrinker *cfs_set_shrinker(int seek, cfs_shrinker_t func)
-{
- struct shrinker *s;
-
- s = kmalloc(sizeof(*s), GFP_KERNEL);
- if (s == NULL)
- return (NULL);
-
- s->shrink = func;
- s->seeks = seek;
-
- register_shrinker(s);
-
- return s;
-}
-
-static inline
-void cfs_remove_shrinker(struct shrinker *shrinker)
-{
- if (shrinker == NULL)
- return;
-
- unregister_shrinker(shrinker);
- kfree(shrinker);
-}
-#endif
-
#ifdef HAVE_BIO_ENDIO_2ARG
#define cfs_bio_io_error(a,b) bio_io_error((a))
#define cfs_bio_endio(a,b,c) bio_endio((a),(c))
* than than full kernel source, so we provide them here for compatibility.
*/
#ifdef __KERNEL__
-# if !defined(HAVE_QUOTAIO_V1_H) && !defined(HAVE_FS_QUOTA_QUOTAIO_V1_H) && \
- !defined(HAVE_FS_QUOTAIO_V1_H)
+# if !defined(HAVE_QUOTAIO_H) && !defined(HAVE_FS_QUOTA_QUOTAIO_H) && \
+ !defined(HAVE_FS_QUOTAIO_H)
#include <linux/types.h>
#include <linux/quota.h>
===================================================================
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
-@@ -1026,7 +1026,7 @@ extern int blk_verify_command(unsigned c
+@@ -1026,6 +1026,6 @@ extern int blk_verify_command(unsigned c
enum blk_default_limits {
- BLK_MAX_SEGMENTS = 128,
+- BLK_MAX_SEGMENTS = 128,
++ BLK_MAX_SEGMENTS = 256,
BLK_SAFE_MAX_SECTORS = 255,
- BLK_DEF_MAX_SECTORS = 1024,
+ BLK_DEF_MAX_SECTORS = 2048,
BLK_MAX_SEGMENT_SIZE = 65536,
BLK_SEG_BOUNDARY_MASK = 0xFFFFFFFFUL,
};
+Index: linux-2.6.32.x86_64/include/scsi/scsi.h
+===================================================================
+--- linux-2.6.32.x86_64.orig/include/scsi/scsi.h 2010-10-26 21:45:52.000000000 +0300
++++ linux-2.6.32.x86_64/include/scsi/scsi.h 2010-10-29 11:16:35.000000000 +0300
+@@ -19,7 +19,7 @@ struct scsi_cmnd;
+ * to SG_MAX_SINGLE_ALLOC to pack correctly at the highest order. The
+ * minimum value is 32
+ */
+-#define SCSI_MAX_SG_SEGMENTS 128
++#define SCSI_MAX_SG_SEGMENTS 256
+
+ /*
+ * Like SCSI_MAX_SG_SEGMENTS, but for archs that have sg chaining. This limit
/*
* Check whether this bio extends beyond the end of the device.
*/
-@@ -1506,6 +1508,12 @@
+@@ -1506,6 +1508,23 @@
if (unlikely(test_bit(QUEUE_FLAG_DEAD, &q->queue_flags)))
goto end_io;
-+ /* This is Lustre's dev_rdonly check */
-+ if (bio_rw(bio) == WRITE && dev_check_rdonly(bio->bi_bdev)) {
-+ bio_endio(bio, 0);
-+ break;
-+ }
++ /* this is cfs's dev_rdonly check */
++ if (bio_rw(bio) == WRITE && dev_check_rdonly(bio->bi_bdev)) {
++ struct block_device *bdev = bio->bi_bdev;
++
++ printk(KERN_WARNING "Write to readonly device %s (%#x) "
++ "bi_flags: %lx, bi_vcnt: %d, bi_idx: %d, "
++ "bi->size: %d, bi_cnt: %d, bi_private: %p\n",
++ bdev->bd_disk ? bdev->bd_disk->disk_name : "",
++ bdev->bd_dev, bio->bi_flags, bio->bi_vcnt,
++ bio->bi_idx, bio->bi_size,
++ atomic_read(&bio->bi_cnt), bio->bi_private);
++ set_bit(BIO_RDONLY, &bio->bi_flags);
++ bio_endio(bio, 0);
++ clear_bit(BIO_RDONLY, &bio->bi_flags);
++ break;
++ }
+
if (should_fail_request(bio))
goto end_io;
extern int set_blocksize(struct block_device *, int);
extern int sb_set_blocksize(struct super_block *, int);
extern int sb_min_blocksize(struct super_block *, int);
+Index: linux+rh+chaos/include/linux/bio.h
+===================================================================
+--- linux+rh+chaos.orig/include/linux/bio.h
++++ linux+rh+chaos/include/linux/bio.h
+@@ -126,6 +126,7 @@ struct bio {
+ #define BIO_NULL_MAPPED 9 /* contains invalid user pages */
+ #define BIO_FS_INTEGRITY 10 /* fs owns integrity data, not block layer */
+ #define BIO_QUIET 11 /* Make BIO Quiet */
++#define BIO_RDONLY 31 /* device is readonly */
+ #define bio_flagged(bio, flag) ((bio)->bi_flags & (1 << (flag)))
+
+ /*
--- /dev/null
+Index: linux-2.6.32-71.18.1.el6/fs/dcache.c
+===================================================================
+--- linux-2.6.32-71.18.1.el6.orig/fs/dcache.c
++++ linux-2.6.32-71.18.1.el6/fs/dcache.c
+@@ -280,6 +280,13 @@ int d_invalidate(struct dentry * dentry)
+ spin_unlock(&dcache_lock);
+ return 0;
+ }
++
++ /* network invalidation by Lustre */
++ if (dentry->d_flags & DCACHE_LUSTRE_INVALID) {
++ spin_unlock(&dcache_lock);
++ return 0;
++ }
++
+ /*
+ * Check whether to do a partial shrink_dcache
+ * to get rid of unused child entries.
+Index: linux-2.6.32-71.18.1.el6/include/linux/dcache.h
+===================================================================
+--- linux-2.6.32-71.18.1.el6.orig/include/linux/dcache.h
++++ linux-2.6.32-71.18.1.el6/include/linux/dcache.h
+@@ -185,6 +185,7 @@ d_iput: no no no yes
+ #define DCACHE_COOKIE 0x0040 /* For use by dcookie subsystem */
+
+ #define DCACHE_FSNOTIFY_PARENT_WATCHED 0x0080 /* Parent inode is watched by some fsnotify listener */
++#define DCACHE_LUSTRE_INVALID 0x0100 /* Lustre invalidated */
+
+ extern spinlock_t dcache_lock;
+ extern seqlock_t rename_lock;
lustre_version.patch
mpt-fusion-max-sge-rhel6.patch
raid5-mmp-unplug-dev-rhel6.patch
+vfs_races-2.6.32-rhel6.patch
dev_read_only-2.6.32-rhel6.patch
blkdev_tunables-2.6-rhel6.patch
export-2.6.32-vanilla.patch
EXTRA_VERSION=${lnxrel}_lustre.@VERSION@
LUSTRE_VERSION=@VERSION@
+DEVEL_PATH_ARCH_DELIMETER="."
OFED_VERSION=inkernel
BASE_ARCHS="i686 x86_64 ia64 ppc64"
EXTRA_DIST = genlib.sh
-CLEANFILES := liblsupport.a liblustre.so
+CLEANFILES = liblsupport.a liblustre.so
#include <linux/version.h>
#include <linux/bitops.h>
#include <linux/quota.h>
-#ifdef HAVE_QUOTAIO_V1_H
-# include <linux/quotaio_v1.h>
+#ifdef HAVE_QUOTAIO_H
# include <linux/quotaio_v2.h>
-#elif defined(HAVE_FS_QUOTA_QUOTAIO_V1_H)
-# include <quota/quotaio_v1.h>
+#elif defined(HAVE_FS_QUOTA_QUOTAIO_H)
# include <quota/quotaio_v2.h>
# include <quota/quota_tree.h>
# define V2_DQTREEOFF QT_TREEOFF
-#elif defined(HAVE_FS_QUOTAIO_V1_H)
+#elif defined(HAVE_FS_QUOTAIO_H)
-# include <quotaio_v1.h>
# include <quotaio_v2.h>
# include <quota_tree.h>
# define V2_DQTREEOFF QT_TREEOFF
+# define V2_INITQVERSIONS_R1 V2_INITQVERSIONS
#endif
#ifdef QFMT_VFS_V1
static void mdd_lockdep_pd_acquire(struct mdd_object *obj,
enum mdd_object_role role)
{
+#ifdef HAVE_LOCK_MAP_ACQUIRE
+ lock_map_acquire(&obj->dep_map);
+#else
lock_acquire(&obj->dep_map, role, 0, 1, 2, RETIP);
+#endif
}
static void mdd_lockdep_pd_release(struct mdd_object *obj)
{
+#ifdef HAVE_LOCK_MAP_ACQUIRE
+ lock_map_release(&obj->dep_map);
+#else
lock_release(&obj->dep_map, 0, RETIP);
+#endif
}
#else /* !CONFIG_LOCKDEP */
if (unlikely(nr_to_scan != 0)) {
cfs_spin_lock(&page_pools.epp_lock);
nr_to_scan = min(nr_to_scan, (int) page_pools.epp_free_pages -
- PTLRPC_MAX_BRW_PAGES);
+ PTLRPC_MAX_BRW_PAGES);
if (nr_to_scan > 0) {
enc_pools_release_free_pages(nr_to_scan);
CDEBUG(D_SEC, "released %d pages, %ld left\n",