+++ /dev/null
-Index: linux-2.6.9-42.0.10.EL_lustre.1.4.10/fs/ext3/super.c
-===================================================================
---- linux-2.6.9-42.0.10.EL_lustre.1.4.10.orig/fs/ext3/super.c 2007-05-16 08:46:24.000000000 +0200
-+++ linux-2.6.9-42.0.10.EL_lustre.1.4.10/fs/ext3/super.c 2007-05-16 08:48:58.000000000 +0200
-@@ -123,6 +123,8 @@ void ext3_journal_abort_handle(const cha
- journal_abort_handle(handle);
- }
-
-+EXPORT_SYMBOL(ext3_journal_abort_handle);
-+
- /* Deal with the reporting of failure conditions on a filesystem such as
- * inconsistencies detected or read IO failures.
- *
-@@ -2064,6 +2066,8 @@ int ext3_force_commit(struct super_block
- return ret;
- }
-
-+EXPORT_SYMBOL(ext3_force_commit);
-+
- /*
- * Ext3 always journals updates to the superblock itself, so we don't
- * have to propagate any other updates to the superblock on disk at this
-@@ -2586,6 +2590,12 @@ int ext3_map_inode_page(struct inode *in
- unsigned long *blocks, int *created, int create);
- EXPORT_SYMBOL(ext3_map_inode_page);
-
-+EXPORT_SYMBOL(ext3_xattr_get);
-+EXPORT_SYMBOL(ext3_xattr_set_handle);
-+EXPORT_SYMBOL(ext3_bread);
-+EXPORT_SYMBOL(ext3_journal_start_sb);
-+EXPORT_SYMBOL(__ext3_journal_stop);
-+
- MODULE_AUTHOR("Remy Card, Stephen Tweedie, Andrew Morton, Andreas Dilger, Theodore Ts'o and others");
- MODULE_DESCRIPTION("Second Extended Filesystem with journaling extensions");
- MODULE_LICENSE("GPL");
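The hunks in this patch (and in its 2.6.5 twin below) only add EXPORT_SYMBOL() lines so that an out-of-tree module (in this series, Lustre's fsfilt/ldiskfs glue) can call the ext3 journal and metadata helpers directly. A minimal sketch of such a caller, using only symbols exported here plus the ext3_jbd.h wrappers; the function name fsfilt_demo_commit() is an illustrative assumption, not Lustre code:

#include <linux/module.h>
#include <linux/fs.h>
#include <linux/err.h>
#include <linux/ext3_fs.h>
#include <linux/ext3_jbd.h>

/* hypothetical consumer of the symbols exported above (not Lustre code) */
static int fsfilt_demo_commit(struct super_block *sb)
{
	handle_t *handle;
	int err;

	/* open a transaction on the ext3 journal */
	handle = ext3_journal_start_sb(sb, 1);
	if (IS_ERR(handle))
		return PTR_ERR(handle);

	/* ... journaled metadata updates (ext3_bread, xattr calls, ...) ... */

	err = ext3_journal_stop(handle);	/* wrapper around __ext3_journal_stop */
	if (err)
		return err;

	/* flush the committed transaction to disk before replying to a client */
	return ext3_force_commit(sb);
}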
+++ /dev/null
-Index: linux-2.6.5-7.283_lustre-1.4.10.1/fs/ext3/super.c
-===================================================================
---- linux-2.6.5-7.283_lustre-1.4.10.1.orig/fs/ext3/super.c 2007-05-30 08:48:29.000000000 +0200
-+++ linux-2.6.5-7.283_lustre-1.4.10.1/fs/ext3/super.c 2007-05-30 08:48:37.000000000 +0200
-@@ -116,6 +116,8 @@ void ext3_journal_abort_handle(const cha
- handle->h_err = err;
- }
-
-+EXPORT_SYMBOL(ext3_journal_abort_handle);
-+
- static char error_buf[1024];
-
- /* Deal with the reporting of failure conditions on a filesystem such as
-@@ -1895,6 +1897,8 @@ int ext3_force_commit(struct super_block
- return ret;
- }
-
-+EXPORT_SYMBOL(ext3_force_commit);
-+
- /*
- * Ext3 always journals updates to the superblock itself, so we don't
- * have to propagate any other updates to the superblock on disk at this
-@@ -2334,6 +2338,12 @@ int ext3_map_inode_page(struct inode *in
- unsigned long *blocks, int *created, int create);
- EXPORT_SYMBOL(ext3_map_inode_page);
-
-+EXPORT_SYMBOL(ext3_xattr_get);
-+EXPORT_SYMBOL(ext3_xattr_set_handle);
-+EXPORT_SYMBOL(ext3_bread);
-+EXPORT_SYMBOL(ext3_journal_start);
-+EXPORT_SYMBOL(__ext3_journal_stop);
-+
- MODULE_AUTHOR("Remy Card, Stephen Tweedie, Andrew Morton, Andreas Dilger, Theodore Ts'o and others");
- MODULE_DESCRIPTION("Second Extended Filesystem with journaling extensions");
- MODULE_LICENSE("GPL");
+++ /dev/null
-Index: linux-2.6.5-sles9/include/linux/ext3_fs_sb.h
-===================================================================
---- linux-2.6.5-sles9.orig/include/linux/ext3_fs_sb.h 2004-11-03 08:36:51.000000000 +0300
-+++ linux-2.6.5-sles9/include/linux/ext3_fs_sb.h 2004-11-09 02:20:51.598024096 +0300
-@@ -19,9 +19,12 @@
- #ifdef __KERNEL__
- #include <linux/timer.h>
- #include <linux/wait.h>
-+#ifndef EXT_INCLUDE
-+#define EXT_INCLUDE
- #include <linux/blockgroup_lock.h>
- #include <linux/percpu_counter.h>
- #endif
-+#endif
- #include <linux/rbtree.h>
-
- /*
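The EXT_INCLUDE guard added above lets a consumer that has already pulled in (or substituted) blockgroup_lock.h and percpu_counter.h define EXT_INCLUDE before including ext3_fs_sb.h, so the header does not drag them in a second time. A minimal sketch of such a consumer, where the ordering shown is an assumption for illustration:

/* hypothetical consumer translation unit */
#include <linux/blockgroup_lock.h>	/* consumer's own choice of these two */
#include <linux/percpu_counter.h>

#define EXT_INCLUDE			/* tell ext3_fs_sb.h they are already present */
#include <linux/ext3_fs_sb.h>		/* guard above now skips its own copies */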
+++ /dev/null
-Index: linux-2.6.5-7.201-full/include/linux/ext3_fs.h
-===================================================================
---- linux-2.6.5-7.201-full.orig/include/linux/ext3_fs.h 2006-08-09 17:59:34.000000000 +0400
-+++ linux-2.6.5-7.201-full/include/linux/ext3_fs.h 2006-08-22 12:35:55.000000000 +0400
-@@ -793,6 +793,7 @@ extern void ext3_put_super (struct super
- extern void ext3_write_super (struct super_block *);
- extern void ext3_write_super_lockfs (struct super_block *);
- extern void ext3_unlockfs (struct super_block *);
-+extern void ext3_commit_super (struct super_block *, struct ext3_super_block *, int);
- extern int ext3_remount (struct super_block *, int *, char *);
- extern int ext3_statfs (struct super_block *, struct kstatfs *);
-
-Index: linux-2.6.5-7.201-full/fs/ext3/super.c
-===================================================================
---- linux-2.6.5-7.201-full.orig/fs/ext3/super.c 2006-08-09 17:59:37.000000000 +0400
-+++ linux-2.6.5-7.201-full/fs/ext3/super.c 2006-08-09 17:59:37.000000000 +0400
-@@ -39,7 +39,7 @@
- static int ext3_load_journal(struct super_block *, struct ext3_super_block *);
- static int ext3_create_journal(struct super_block *, struct ext3_super_block *,
- int);
--static void ext3_commit_super (struct super_block * sb,
-+void ext3_commit_super (struct super_block * sb,
- struct ext3_super_block * es,
- int sync);
- static void ext3_mark_recovery_complete(struct super_block * sb,
-@@ -1781,7 +1781,7 @@ static int ext3_create_journal(struct su
- return 0;
- }
-
--static void ext3_commit_super (struct super_block * sb,
-+void ext3_commit_super (struct super_block * sb,
- struct ext3_super_block * es,
- int sync)
- {
-Index: linux-2.6.5-7.201-full/fs/ext3/namei.c
-===================================================================
---- linux-2.6.5-7.201-full.orig/fs/ext3/namei.c 2006-08-09 17:59:37.000000000 +0400
-+++ linux-2.6.5-7.201-full/fs/ext3/namei.c 2006-08-09 17:59:37.000000000 +0400
-@@ -1598,7 +1598,7 @@ static int ext3_delete_entry (handle_t *
- struct buffer_head * bh)
- {
- struct ext3_dir_entry_2 * de, * pde;
-- int i;
-+ int i, err;
-
- i = 0;
- pde = NULL;
-@@ -1608,7 +1608,9 @@ static int ext3_delete_entry (handle_t *
- return -EIO;
- if (de == de_del) {
- BUFFER_TRACE(bh, "get_write_access");
-- ext3_journal_get_write_access(handle, bh);
-+ err = ext3_journal_get_write_access(handle, bh);
-+ if (err)
-+ return err;
- if (pde)
- pde->rec_len =
- cpu_to_le16(le16_to_cpu(pde->rec_len) +
-Index: linux-2.6.5-7.201-full/fs/ext3/xattr.c
-===================================================================
---- linux-2.6.5-7.201-full.orig/fs/ext3/xattr.c 2006-07-14 01:53:23.000000000 +0400
-+++ linux-2.6.5-7.201-full/fs/ext3/xattr.c 2006-08-09 17:59:37.000000000 +0400
-@@ -107,7 +107,7 @@ ext3_xattr_register(int name_index, stru
- {
- int error = -EINVAL;
-
-- if (name_index > 0 && name_index <= EXT3_XATTR_INDEX_MAX) {
-+ if (name_index > 0 && name_index < EXT3_XATTR_INDEX_MAX) {
- write_lock(&ext3_handler_lock);
- if (!ext3_xattr_handlers[name_index-1]) {
- ext3_xattr_handlers[name_index-1] = handler;
-Index: linux-2.6.5-7.201-full/fs/ext3/inode.c
-===================================================================
---- linux-2.6.5-7.201-full.orig/fs/ext3/inode.c 2006-07-14 01:53:22.000000000 +0400
-+++ linux-2.6.5-7.201-full/fs/ext3/inode.c 2006-08-22 12:35:28.000000000 +0400
-@@ -1517,9 +1517,14 @@ out_stop:
- if (end > inode->i_size) {
- ei->i_disksize = end;
- i_size_write(inode, end);
-- err = ext3_mark_inode_dirty(handle, inode);
-- if (!ret)
-- ret = err;
-+ /*
-+ * We're going to return a positive `ret'
-+ * here due to non-zero-length I/O, so there's
-+ * no way of reporting error returns from
-+ * ext3_mark_inode_dirty() to userspace. So
-+ * ignore it.
-+ */
-+ ext3_mark_inode_dirty(handle, inode);
- }
- }
- err = ext3_journal_stop(handle);
-@@ -1811,8 +1816,18 @@ ext3_clear_blocks(handle_t *handle, stru
- ext3_mark_inode_dirty(handle, inode);
- ext3_journal_test_restart(handle, inode);
- if (bh) {
-+ int err;
- BUFFER_TRACE(bh, "retaking write access");
-- ext3_journal_get_write_access(handle, bh);
-+ err = ext3_journal_get_write_access(handle, bh);
-+ if (err) {
-+ struct super_block *sb = inode->i_sb;
-+ struct ext3_super_block *es = EXT3_SB(sb)->s_es;
-+ printk (KERN_CRIT"EXT3-fs: can't continue truncate\n");
-+ EXT3_SB(sb)->s_mount_state |= EXT3_ERROR_FS;
-+ es->s_state |= cpu_to_le16(EXT3_ERROR_FS);
-+ ext3_commit_super(sb, es, 1);
-+ return;
-+ }
- }
- }
-
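This patch and its 2.6.9 counterpart below apply the same pattern: each previously ignored ext3_journal_get_write_access() return value either becomes a returned error (ext3_delete_entry) or, where the caller has no way to return one (ext3_clear_blocks during truncate), marks the filesystem as errored and writes the superblock out synchronously through the newly non-static ext3_commit_super(). A condensed sketch of that fallback path; the helper name is hypothetical:

#include <linux/kernel.h>
#include <linux/fs.h>
#include <linux/ext3_fs.h>
#include <linux/ext3_fs_sb.h>

/* hypothetical helper showing the "cannot return an error" fallback above */
static void demo_mark_fs_error(struct super_block *sb)
{
	struct ext3_super_block *es = EXT3_SB(sb)->s_es;

	printk(KERN_CRIT "EXT3-fs: can't continue truncate\n");
	EXT3_SB(sb)->s_mount_state |= EXT3_ERROR_FS;	/* in-core flag */
	es->s_state |= cpu_to_le16(EXT3_ERROR_FS);	/* on-disk flag */
	ext3_commit_super(sb, es, 1);			/* synchronous superblock write */
}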
+++ /dev/null
-Index: linux-2.6.9-full/include/linux/ext3_fs.h
-===================================================================
---- linux-2.6.9-full.orig/include/linux/ext3_fs.h 2006-08-09 17:56:39.000000000 +0400
-+++ linux-2.6.9-full/include/linux/ext3_fs.h 2006-08-22 12:36:22.000000000 +0400
-@@ -826,6 +826,7 @@ extern void ext3_put_super (struct super
- extern void ext3_write_super (struct super_block *);
- extern void ext3_write_super_lockfs (struct super_block *);
- extern void ext3_unlockfs (struct super_block *);
-+extern void ext3_commit_super (struct super_block *, struct ext3_super_block *, int);
- extern int ext3_remount (struct super_block *, int *, char *);
- extern int ext3_statfs (struct super_block *, struct kstatfs *);
-
-Index: linux-2.6.9-full/fs/ext3/super.c
-===================================================================
---- linux-2.6.9-full.orig/fs/ext3/super.c 2006-08-09 17:56:40.000000000 +0400
-+++ linux-2.6.9-full/fs/ext3/super.c 2006-08-09 17:56:40.000000000 +0400
-@@ -43,7 +43,7 @@ static int ext3_load_journal(struct supe
- unsigned long journal_devnum);
- static int ext3_create_journal(struct super_block *, struct ext3_super_block *,
- int);
--static void ext3_commit_super (struct super_block * sb,
-+void ext3_commit_super (struct super_block * sb,
- struct ext3_super_block * es,
- int sync);
- static void ext3_mark_recovery_complete(struct super_block * sb,
-@@ -1991,7 +1991,7 @@ static int ext3_create_journal(struct su
- return 0;
- }
-
--static void ext3_commit_super (struct super_block * sb,
-+void ext3_commit_super (struct super_block * sb,
- struct ext3_super_block * es,
- int sync)
- {
-Index: linux-2.6.9-full/fs/ext3/namei.c
-===================================================================
---- linux-2.6.9-full.orig/fs/ext3/namei.c 2006-08-09 17:56:40.000000000 +0400
-+++ linux-2.6.9-full/fs/ext3/namei.c 2006-08-09 17:56:40.000000000 +0400
-@@ -1599,7 +1599,7 @@ static int ext3_delete_entry (handle_t *
- struct buffer_head * bh)
- {
- struct ext3_dir_entry_2 * de, * pde;
-- int i;
-+ int i, err;
-
- i = 0;
- pde = NULL;
-@@ -1609,7 +1609,9 @@ static int ext3_delete_entry (handle_t *
- return -EIO;
- if (de == de_del) {
- BUFFER_TRACE(bh, "get_write_access");
-- ext3_journal_get_write_access(handle, bh);
-+ err = ext3_journal_get_write_access(handle, bh);
-+ if (err)
-+ return err;
- if (pde)
- pde->rec_len =
- cpu_to_le16(le16_to_cpu(pde->rec_len) +
-Index: linux-2.6.9-full/fs/ext3/xattr.c
-===================================================================
---- linux-2.6.9-full.orig/fs/ext3/xattr.c 2006-06-01 14:58:48.000000000 +0400
-+++ linux-2.6.9-full/fs/ext3/xattr.c 2006-08-09 17:56:40.000000000 +0400
-@@ -132,7 +132,7 @@ ext3_xattr_handler(int name_index)
- {
- struct xattr_handler *handler = NULL;
-
-- if (name_index > 0 && name_index <= EXT3_XATTR_INDEX_MAX)
-+ if (name_index > 0 && name_index < EXT3_XATTR_INDEX_MAX)
- handler = ext3_xattr_handler_map[name_index];
- return handler;
- }
-Index: linux-2.6.9-full/fs/ext3/inode.c
-===================================================================
---- linux-2.6.9-full.orig/fs/ext3/inode.c 2006-06-02 23:37:38.000000000 +0400
-+++ linux-2.6.9-full/fs/ext3/inode.c 2006-08-22 12:34:28.000000000 +0400
-@@ -1513,9 +1513,14 @@ out_stop:
- if (end > inode->i_size) {
- ei->i_disksize = end;
- i_size_write(inode, end);
-- err = ext3_mark_inode_dirty(handle, inode);
-- if (!ret)
-- ret = err;
-+ /*
-+ * We're going to return a positive `ret'
-+ * here due to non-zero-length I/O, so there's
-+ * no way of reporting error returns from
-+ * ext3_mark_inode_dirty() to userspace. So
-+ * ignore it.
-+ */
-+ ext3_mark_inode_dirty(handle, inode);
- }
- }
- err = ext3_journal_stop(handle);
-@@ -1807,8 +1812,18 @@ ext3_clear_blocks(handle_t *handle, stru
- ext3_mark_inode_dirty(handle, inode);
- ext3_journal_test_restart(handle, inode);
- if (bh) {
-+ int err;
- BUFFER_TRACE(bh, "retaking write access");
-- ext3_journal_get_write_access(handle, bh);
-+ err = ext3_journal_get_write_access(handle, bh);
-+ if (err) {
-+ struct super_block *sb = inode->i_sb;
-+ struct ext3_super_block *es = EXT3_SB(sb)->s_es;
-+ printk (KERN_CRIT"EXT3-fs: can't continue truncate\n");
-+ EXT3_SB(sb)->s_mount_state |= EXT3_ERROR_FS;
-+ es->s_state |= cpu_to_le16(EXT3_ERROR_FS);
-+ ext3_commit_super(sb, es, 1);
-+ return;
-+ }
- }
- }
-
+++ /dev/null
----
- fs/ext3/super.c | 2 +-
- 1 file changed, 1 insertion(+), 1 deletion(-)
-
---- linux-2.6.16.21-0.8.orig/fs/ext3/super.c
-+++ linux-2.6.16.21-0.8/fs/ext3/super.c
-@@ -1425,7 +1425,7 @@ static int ext3_fill_super (struct super
- sbi->s_resgid = le16_to_cpu(es->s_def_resgid);
-
- /* enable barriers by default */
-- set_opt(sbi->s_mount_opt, BARRIER);
-+ /* set_opt(sbi->s_mount_opt, BARRIER); */
- set_opt(sbi->s_mount_opt, RESERVATION);
-
- if (!parse_options ((char *) data, sb, &journal_inum, &journal_devnum,
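The single-line change above stops ext3 from enabling write barriers by default in ext3_fill_super(); set_opt() is the ext3 macro that ORs EXT3_MOUNT_<opt> into s_mount_opt, so only the BARRIER default is dropped, and barriers can still be requested explicitly through the mount options if this kernel's parse_options() accepts them. A small sketch of what the surrounding defaults amount to; the function name is illustrative only:

#include <linux/ext3_fs.h>
#include <linux/ext3_fs_sb.h>

/* hypothetical illustration of the default-options block in ext3_fill_super() */
static void demo_default_mount_opts(struct ext3_sb_info *sbi)
{
	/* set_opt(sbi->s_mount_opt, BARRIER);	-- default removed by this patch */
	set_opt(sbi->s_mount_opt, RESERVATION);	/* block reservations stay on */
}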
+++ /dev/null
-Index: linux-stage/fs/ext3/ialloc.c
-===================================================================
---- linux-stage.orig/fs/ext3/ialloc.c 2005-10-04 16:53:24.000000000 -0600
-+++ linux-stage/fs/ext3/ialloc.c 2005-10-04 17:07:25.000000000 -0600
-@@ -629,6 +629,9 @@
- spin_unlock(&sbi->s_next_gen_lock);
-
- ei->i_state = EXT3_STATE_NEW;
-+ ei->i_extra_isize =
-+ (EXT3_INODE_SIZE(inode->i_sb) > EXT3_GOOD_OLD_INODE_SIZE) ?
-+ sizeof(struct ext3_inode) - EXT3_GOOD_OLD_INODE_SIZE : 0;
-
- ret = inode;
- if(DQUOT_ALLOC_INODE(inode)) {
-Index: linux-stage/fs/ext3/inode.c
-===================================================================
---- linux-stage.orig/fs/ext3/inode.c 2005-10-04 17:00:22.000000000 -0600
-+++ linux-stage/fs/ext3/inode.c 2005-10-04 17:07:25.000000000 -0600
-@@ -2274,7 +2274,7 @@
- * trying to determine the inode's location on-disk and no read need be
- * performed.
- */
--static int ext3_get_inode_loc(struct inode *inode,
-+int ext3_get_inode_loc(struct inode *inode,
- struct ext3_iloc *iloc, int in_mem)
- {
- unsigned long block;
-@@ -2484,6 +2484,11 @@ void ext3_read_inode(struct inode * inod
- ei->i_data[block] = raw_inode->i_block[block];
- INIT_LIST_HEAD(&ei->i_orphan);
-
-+ if (EXT3_INODE_SIZE(inode->i_sb) > EXT3_GOOD_OLD_INODE_SIZE)
-+ ei->i_extra_isize = le16_to_cpu(raw_inode->i_extra_isize);
-+ else
-+ ei->i_extra_isize = 0;
-+
- if (S_ISREG(inode->i_mode)) {
- inode->i_op = &ext3_file_inode_operations;
- inode->i_fop = &ext3_file_operations;
-@@ -2619,6 +2624,9 @@ static int ext3_do_update_inode(handle_t
- } else for (block = 0; block < EXT3_N_BLOCKS; block++)
- raw_inode->i_block[block] = ei->i_data[block];
-
-+ if (EXT3_INODE_SIZE(inode->i_sb) > EXT3_GOOD_OLD_INODE_SIZE)
-+ raw_inode->i_extra_isize = cpu_to_le16(ei->i_extra_isize);
-+
- BUFFER_TRACE(bh, "call ext3_journal_dirty_metadata");
- rc = ext3_journal_dirty_metadata(handle, bh);
- if (!err)
-@@ -2849,7 +2857,8 @@ ext3_reserve_inode_write(handle_t *handl
- {
- int err = 0;
- if (handle) {
-- err = ext3_get_inode_loc(inode, iloc, 1);
-+ err = ext3_get_inode_loc(inode, iloc, EXT3_I(inode)->i_state &
-+ EXT3_STATE_NEW);
- if (!err) {
- BUFFER_TRACE(iloc->bh, "get_write_access");
- err = ext3_journal_get_write_access(handle, iloc->bh);
-Index: linux-stage/fs/ext3/xattr.c
-===================================================================
---- linux-stage.orig/fs/ext3/xattr.c 2005-10-04 16:50:11.000000000 -0600
-+++ linux-stage/fs/ext3/xattr.c 2005-10-04 17:19:43.000000000 -0600
-@@ -149,17 +149,12 @@
- }
-
- /*
-- * ext3_xattr_get()
-- *
-- * Copy an extended attribute into the buffer
-- * provided, or compute the buffer size required.
-- * Buffer is NULL to compute the size of the buffer required.
-+ * ext3_xattr_block_get()
- *
-- * Returns a negative error number on failure, or the number of bytes
-- * used / required on success.
-+ * routine looks for the attribute in the EA block and returns its value and size
- */
- int
--ext3_xattr_get(struct inode *inode, int name_index, const char *name,
-+ext3_xattr_block_get(struct inode *inode, int name_index, const char *name,
- void *buffer, size_t buffer_size)
- {
- struct buffer_head *bh = NULL;
-@@ -173,7 +168,6 @@
-
- if (name == NULL)
- return -EINVAL;
-- down_read(&EXT3_I(inode)->xattr_sem);
- error = -ENODATA;
- if (!EXT3_I(inode)->i_file_acl)
- goto cleanup;
-@@ -246,15 +240,87 @@
-
- cleanup:
- brelse(bh);
-- up_read(&EXT3_I(inode)->xattr_sem);
-
- return error;
- }
-
- /*
-- * ext3_xattr_list()
-+ * ext3_xattr_ibody_get()
- *
-- * Copy a list of attribute names into the buffer
-+ * routine looks for the attribute in the inode body and returns its value and size
-+ */
-+int
-+ext3_xattr_ibody_get(struct inode *inode, int name_index, const char *name,
-+ void *buffer, size_t buffer_size)
-+{
-+ int size, name_len = strlen(name), storage_size;
-+ struct ext3_xattr_entry *last;
-+ struct ext3_inode *raw_inode;
-+ struct ext3_iloc iloc;
-+ char *start, *end;
-+ int ret = -ENOENT;
-+
-+ if (EXT3_SB(inode->i_sb)->s_inode_size <= EXT3_GOOD_OLD_INODE_SIZE)
-+ return -ENOENT;
-+
-+ ret = ext3_get_inode_loc(inode, &iloc, 0);
-+ if (ret)
-+ return ret;
-+ raw_inode = ext3_raw_inode(&iloc);
-+
-+ storage_size = EXT3_SB(inode->i_sb)->s_inode_size -
-+ EXT3_GOOD_OLD_INODE_SIZE -
-+ EXT3_I(inode)->i_extra_isize -
-+ sizeof(__u32);
-+ start = (char *) raw_inode + EXT3_GOOD_OLD_INODE_SIZE +
-+ EXT3_I(inode)->i_extra_isize;
-+ if (le32_to_cpu((*(__u32*) start)) != EXT3_XATTR_MAGIC) {
-+ brelse(iloc.bh);
-+ return -ENOENT;
-+ }
-+ start += sizeof(__u32);
-+ end = (char *) raw_inode + EXT3_SB(inode->i_sb)->s_inode_size;
-+
-+ last = (struct ext3_xattr_entry *) start;
-+ while (!IS_LAST_ENTRY(last)) {
-+ struct ext3_xattr_entry *next = EXT3_XATTR_NEXT(last);
-+ if (le32_to_cpu(last->e_value_size) > storage_size ||
-+ (char *) next >= end) {
-+ ext3_error(inode->i_sb, "ext3_xattr_ibody_get",
-+ "inode %ld", inode->i_ino);
-+ brelse(iloc.bh);
-+ return -EIO;
-+ }
-+ if (name_index == last->e_name_index &&
-+ name_len == last->e_name_len &&
-+ !memcmp(name, last->e_name, name_len))
-+ goto found;
-+ last = next;
-+ }
-+
-+ /* can't find EA */
-+ brelse(iloc.bh);
-+ return -ENOENT;
-+
-+found:
-+ size = le32_to_cpu(last->e_value_size);
-+ if (buffer) {
-+ ret = -ERANGE;
-+ if (buffer_size >= size) {
-+ memcpy(buffer, start + le16_to_cpu(last->e_value_offs),
-+ size);
-+ ret = size;
-+ }
-+ } else
-+ ret = size;
-+ brelse(iloc.bh);
-+ return ret;
-+}
-+
-+/*
-+ * ext3_xattr_get()
-+ *
-+ * Copy an extended attribute into the buffer
- * provided, or compute the buffer size required.
- * Buffer is NULL to compute the size of the buffer required.
- *
-@@ -262,7 +328,31 @@
- * used / required on success.
- */
- int
--ext3_xattr_list(struct inode *inode, char *buffer, size_t buffer_size)
-+ext3_xattr_get(struct inode *inode, int name_index, const char *name,
-+ void *buffer, size_t buffer_size)
-+{
-+ int err;
-+
-+ down_read(&EXT3_I(inode)->xattr_sem);
-+
-+ /* try to find attribute in inode body */
-+ err = ext3_xattr_ibody_get(inode, name_index, name,
-+ buffer, buffer_size);
-+ if (err < 0)
-+ /* search was unsuccessful, try to find EA in dedicated block */
-+ err = ext3_xattr_block_get(inode, name_index, name,
-+ buffer, buffer_size);
-+ up_read(&EXT3_I(inode)->xattr_sem);
-+
-+ return err;
-+}
-+
-+/* ext3_xattr_block_list()
-+ *
-+ * generate list of attributes stored in EA block
-+ */
-+int
-+ext3_xattr_block_list(struct inode *inode, char *buffer, size_t buffer_size)
- {
- struct buffer_head *bh = NULL;
- struct ext3_xattr_entry *entry;
-@@ -273,7 +363,6 @@
- ea_idebug(inode, "buffer=%p, buffer_size=%ld",
- buffer, (long)buffer_size);
-
-- down_read(&EXT3_I(inode)->xattr_sem);
- error = 0;
- if (!EXT3_I(inode)->i_file_acl)
- goto cleanup;
-@@ -330,11 +419,149 @@
-
- cleanup:
- brelse(bh);
-- up_read(&EXT3_I(inode)->xattr_sem);
-
- return error;
- }
-
-+/* ext3_xattr_ibody_list()
-+ *
-+ * generate list of attributes stored in inode body
-+ */
-+int
-+ext3_xattr_ibody_list(struct inode *inode, char *buffer, size_t buffer_size)
-+{
-+ struct ext3_xattr_entry *last;
-+ struct ext3_inode *raw_inode;
-+ char *start, *end, *buf;
-+ struct ext3_iloc iloc;
-+ int storage_size;
-+ size_t rest = buffer_size;
-+ int ret;
-+ int size = 0;
-+
-+ if (EXT3_SB(inode->i_sb)->s_inode_size <= EXT3_GOOD_OLD_INODE_SIZE)
-+ return 0;
-+
-+ ret = ext3_get_inode_loc(inode, &iloc, 0);
-+ if (ret)
-+ return ret;
-+ raw_inode = ext3_raw_inode(&iloc);
-+
-+ storage_size = EXT3_SB(inode->i_sb)->s_inode_size -
-+ EXT3_GOOD_OLD_INODE_SIZE -
-+ EXT3_I(inode)->i_extra_isize -
-+ sizeof(__u32);
-+ start = (char *) raw_inode + EXT3_GOOD_OLD_INODE_SIZE +
-+ EXT3_I(inode)->i_extra_isize;
-+ if (le32_to_cpu((*(__u32*) start)) != EXT3_XATTR_MAGIC) {
-+ brelse(iloc.bh);
-+ return 0;
-+ }
-+ start += sizeof(__u32);
-+ end = (char *) raw_inode + EXT3_SB(inode->i_sb)->s_inode_size;
-+
-+ last = (struct ext3_xattr_entry *) start;
-+ while (!IS_LAST_ENTRY(last)) {
-+ struct ext3_xattr_entry *next = EXT3_XATTR_NEXT(last);
-+ struct xattr_handler *handler;
-+ if (le32_to_cpu(last->e_value_size) > storage_size ||
-+ (char *) next >= end) {
-+ ext3_error(inode->i_sb, "ext3_xattr_ibody_list",
-+ "inode %ld", inode->i_ino);
-+ brelse(iloc.bh);
-+ return -EIO;
-+ }
-+ handler = ext3_xattr_handler(last->e_name_index);
-+ if (handler)
-+ size += handler->list(inode, NULL, 0, last->e_name,
-+ last->e_name_len);
-+ last = next;
-+ }
-+
-+ if (!buffer) {
-+ ret = size;
-+ goto cleanup;
-+ } else {
-+ ret = -ERANGE;
-+ if (size > buffer_size)
-+ goto cleanup;
-+ }
-+
-+ last = (struct ext3_xattr_entry *) start;
-+ buf = buffer;
-+ while (!IS_LAST_ENTRY(last)) {
-+ struct ext3_xattr_entry *next = EXT3_XATTR_NEXT(last);
-+ struct xattr_handler *handler;
-+ handler = ext3_xattr_handler(last->e_name_index);
-+ if (handler) {
-+ size_t size = handler->list(inode, buffer, rest,
-+ last->e_name,
-+ last->e_name_len);
-+ if (buffer) {
-+ if (size > rest) {
-+ ret = -ERANGE;
-+ goto cleanup;
-+ }
-+ buffer += size;
-+ }
-+ rest -= size;
-+ }
-+ last = next;
-+ }
-+ ret = size;
-+cleanup:
-+ brelse(iloc.bh);
-+ return ret;
-+}
-+
-+/*
-+ * ext3_xattr_list()
-+ *
-+ * Copy a list of attribute names into the buffer
-+ * provided, or compute the buffer size required.
-+ * Buffer is NULL to compute the size of the buffer required.
-+ *
-+ * Returns a negative error number on failure, or the number of bytes
-+ * used / required on success.
-+ */
-+int
-+ext3_xattr_list(struct inode *inode, char *buffer, size_t buffer_size)
-+{
-+ int error;
-+ int size = buffer_size;
-+
-+ down_read(&EXT3_I(inode)->xattr_sem);
-+
-+ /* get list of attributes stored in inode body */
-+ error = ext3_xattr_ibody_list(inode, buffer, buffer_size);
-+ if (error < 0) {
-+  /* some error occurred while collecting
-+ * attributes in inode body */
-+ size = 0;
-+ goto cleanup;
-+ }
-+ size = error;
-+
-+ /* get list of attributes stored in dedicated block */
-+ if (buffer) {
-+ buffer_size -= error;
-+ if (buffer_size <= 0) {
-+ buffer = NULL;
-+ buffer_size = 0;
-+ } else
-+ buffer += error;
-+ }
-+
-+ error = ext3_xattr_block_list(inode, buffer, buffer_size);
-+ if (error < 0)
-+  /* block listing failed, return only the error */
-+ size = 0;
-+
-+cleanup:
-+ up_read(&EXT3_I(inode)->xattr_sem);
-+ return error + size;
-+}
-+
- /*
- * If the EXT3_FEATURE_COMPAT_EXT_ATTR feature of this file system is
- * not set, set it.
-@@ -356,6 +583,279 @@
- }
-
- /*
-+ * ext3_xattr_ibody_find()
-+ *
-+ * search attribute and calculate free space in inode body
-+ * NOTE: free space includes the space our attribute already holds
-+ */
-+int
-+ext3_xattr_ibody_find(struct inode *inode, int name_index,
-+ const char *name, struct ext3_xattr_entry *rentry, int *free)
-+{
-+ struct ext3_xattr_entry *last;
-+ struct ext3_inode *raw_inode;
-+ int name_len = strlen(name);
-+ int err, storage_size;
-+ struct ext3_iloc iloc;
-+ char *start, *end;
-+ int ret = -ENOENT;
-+
-+ if (EXT3_SB(inode->i_sb)->s_inode_size <= EXT3_GOOD_OLD_INODE_SIZE)
-+ return ret;
-+
-+ err = ext3_get_inode_loc(inode, &iloc, 0);
-+ if (err)
-+ return -EIO;
-+ raw_inode = ext3_raw_inode(&iloc);
-+
-+ storage_size = EXT3_SB(inode->i_sb)->s_inode_size -
-+ EXT3_GOOD_OLD_INODE_SIZE -
-+ EXT3_I(inode)->i_extra_isize -
-+ sizeof(__u32);
-+ *free = storage_size - sizeof(__u32);
-+ start = (char *) raw_inode + EXT3_GOOD_OLD_INODE_SIZE +
-+ EXT3_I(inode)->i_extra_isize;
-+ if (le32_to_cpu((*(__u32*) start)) != EXT3_XATTR_MAGIC) {
-+ brelse(iloc.bh);
-+ return -ENOENT;
-+ }
-+ start += sizeof(__u32);
-+ end = (char *) raw_inode + EXT3_SB(inode->i_sb)->s_inode_size;
-+
-+ last = (struct ext3_xattr_entry *) start;
-+ while (!IS_LAST_ENTRY(last)) {
-+ struct ext3_xattr_entry *next = EXT3_XATTR_NEXT(last);
-+ if (le32_to_cpu(last->e_value_size) > storage_size ||
-+ (char *) next >= end) {
-+ ext3_error(inode->i_sb, "ext3_xattr_ibody_find",
-+ "inode %ld", inode->i_ino);
-+ brelse(iloc.bh);
-+ return -EIO;
-+ }
-+
-+ if (name_index == last->e_name_index &&
-+ name_len == last->e_name_len &&
-+ !memcmp(name, last->e_name, name_len)) {
-+ memcpy(rentry, last, sizeof(struct ext3_xattr_entry));
-+ ret = 0;
-+ } else {
-+ *free -= EXT3_XATTR_LEN(last->e_name_len);
-+ *free -= le32_to_cpu(last->e_value_size);
-+ }
-+ last = next;
-+ }
-+
-+ brelse(iloc.bh);
-+ return ret;
-+}
-+
-+/*
-+ * ext3_xattr_block_find()
-+ *
-+ * search attribute and calculate free space in EA block (if it is allocated)
-+ * NOTE: free space includes the space our attribute already holds
-+ */
-+int
-+ext3_xattr_block_find(struct inode *inode, int name_index, const char *name,
-+ struct ext3_xattr_entry *rentry, int *free)
-+{
-+ struct buffer_head *bh = NULL;
-+ struct ext3_xattr_entry *entry;
-+ char *end;
-+ int name_len, error = -ENOENT;
-+
-+ if (!EXT3_I(inode)->i_file_acl) {
-+ *free = inode->i_sb->s_blocksize -
-+ sizeof(struct ext3_xattr_header) -
-+ sizeof(__u32);
-+ return -ENOENT;
-+ }
-+ ea_idebug(inode, "reading block %d", EXT3_I(inode)->i_file_acl);
-+ bh = sb_bread(inode->i_sb, EXT3_I(inode)->i_file_acl);
-+ if (!bh)
-+ return -EIO;
-+ ea_bdebug(bh, "b_count=%d, refcount=%d",
-+ atomic_read(&(bh->b_count)), le32_to_cpu(HDR(bh)->h_refcount));
-+ end = bh->b_data + bh->b_size;
-+ if (HDR(bh)->h_magic != cpu_to_le32(EXT3_XATTR_MAGIC) ||
-+ HDR(bh)->h_blocks != cpu_to_le32(1)) {
-+bad_block: ext3_error(inode->i_sb, "ext3_xattr_get",
-+ "inode %ld: bad block %d", inode->i_ino,
-+ EXT3_I(inode)->i_file_acl);
-+ brelse(bh);
-+ return -EIO;
-+ }
-+ /* find named attribute */
-+ name_len = strlen(name);
-+ *free = bh->b_size - sizeof(__u32);
-+
-+ entry = FIRST_ENTRY(bh);
-+ while (!IS_LAST_ENTRY(entry)) {
-+ struct ext3_xattr_entry *next =
-+ EXT3_XATTR_NEXT(entry);
-+ if ((char *)next >= end)
-+ goto bad_block;
-+ if (name_index == entry->e_name_index &&
-+ name_len == entry->e_name_len &&
-+ memcmp(name, entry->e_name, name_len) == 0) {
-+ memcpy(rentry, entry, sizeof(struct ext3_xattr_entry));
-+ error = 0;
-+ } else {
-+ *free -= EXT3_XATTR_LEN(entry->e_name_len);
-+ *free -= le32_to_cpu(entry->e_value_size);
-+ }
-+ entry = next;
-+ }
-+ brelse(bh);
-+
-+ return error;
-+}
-+
-+/*
-+ * ext3_xattr_ibody_set()
-+ *
-+ * this routine adds/removes/replaces an attribute in the inode body
-+ */
-+int
-+ext3_xattr_ibody_set(handle_t *handle, struct inode *inode, int name_index,
-+ const char *name, const void *value, size_t value_len,
-+ int flags)
-+{
-+ struct ext3_xattr_entry *last, *next, *here = NULL;
-+ struct ext3_inode *raw_inode;
-+ int name_len = strlen(name);
-+ int esize = EXT3_XATTR_LEN(name_len);
-+ struct buffer_head *bh;
-+ int err, storage_size;
-+ struct ext3_iloc iloc;
-+ int free, min_offs;
-+ char *start, *end;
-+
-+ if (EXT3_SB(inode->i_sb)->s_inode_size <= EXT3_GOOD_OLD_INODE_SIZE)
-+ return -ENOSPC;
-+
-+ err = ext3_get_inode_loc(inode, &iloc, 0);
-+ if (err)
-+ return err;
-+ raw_inode = ext3_raw_inode(&iloc);
-+ bh = iloc.bh;
-+
-+ storage_size = EXT3_SB(inode->i_sb)->s_inode_size -
-+ EXT3_GOOD_OLD_INODE_SIZE -
-+ EXT3_I(inode)->i_extra_isize -
-+ sizeof(__u32);
-+ start = (char *) raw_inode + EXT3_GOOD_OLD_INODE_SIZE +
-+ EXT3_I(inode)->i_extra_isize;
-+ if ((*(__u32*) start) != EXT3_XATTR_MAGIC) {
-+ /* inode had no attributes before */
-+ *((__u32*) start) = cpu_to_le32(EXT3_XATTR_MAGIC);
-+ }
-+ start += sizeof(__u32);
-+ end = (char *) raw_inode + EXT3_SB(inode->i_sb)->s_inode_size;
-+ min_offs = storage_size;
-+ free = storage_size - sizeof(__u32);
-+
-+ last = (struct ext3_xattr_entry *) start;
-+ while (!IS_LAST_ENTRY(last)) {
-+ next = EXT3_XATTR_NEXT(last);
-+ if (le32_to_cpu(last->e_value_size) > storage_size ||
-+ (char *) next >= end) {
-+ ext3_error(inode->i_sb, "ext3_xattr_ibody_set",
-+ "inode %ld", inode->i_ino);
-+ brelse(bh);
-+ return -EIO;
-+ }
-+
-+ if (last->e_value_size) {
-+ int offs = le16_to_cpu(last->e_value_offs);
-+ if (offs < min_offs)
-+ min_offs = offs;
-+ }
-+ if (name_index == last->e_name_index &&
-+ name_len == last->e_name_len &&
-+ !memcmp(name, last->e_name, name_len))
-+ here = last;
-+ else {
-+ /* we calculate all but our attribute
-+ * because it will be removed before changing */
-+ free -= EXT3_XATTR_LEN(last->e_name_len);
-+ free -= le32_to_cpu(last->e_value_size);
-+ }
-+ last = next;
-+ }
-+
-+ if (value && (esize + value_len > free)) {
-+ brelse(bh);
-+ return -ENOSPC;
-+ }
-+
-+ err = ext3_reserve_inode_write(handle, inode, &iloc);
-+ if (err) {
-+ brelse(bh);
-+ return err;
-+ }
-+
-+ if (here) {
-+ /* time to remove old value */
-+ struct ext3_xattr_entry *e;
-+ int size = le32_to_cpu(here->e_value_size);
-+ int border = le16_to_cpu(here->e_value_offs);
-+ char *src;
-+
-+ /* move tail */
-+ memmove(start + min_offs + size, start + min_offs,
-+ border - min_offs);
-+
-+ /* recalculate offsets */
-+ e = (struct ext3_xattr_entry *) start;
-+ while (!IS_LAST_ENTRY(e)) {
-+ struct ext3_xattr_entry *next = EXT3_XATTR_NEXT(e);
-+ int offs = le16_to_cpu(e->e_value_offs);
-+ if (offs < border)
-+ e->e_value_offs =
-+ cpu_to_le16(offs + size);
-+ e = next;
-+ }
-+ min_offs += size;
-+
-+ /* remove entry */
-+ border = EXT3_XATTR_LEN(here->e_name_len);
-+ src = (char *) here + EXT3_XATTR_LEN(here->e_name_len);
-+ size = (char *) last - src;
-+ if ((char *) here + size > end)
-+ printk("ALERT at %s:%d: 0x%p + %d > 0x%p\n",
-+ __FILE__, __LINE__, here, size, end);
-+ memmove(here, src, size);
-+ last = (struct ext3_xattr_entry *) ((char *) last - border);
-+ *((__u32 *) last) = 0;
-+ }
-+
-+ if (value) {
-+ int offs = min_offs - value_len;
-+ /* use last to create new entry */
-+ last->e_name_len = strlen(name);
-+ last->e_name_index = name_index;
-+ last->e_value_offs = cpu_to_le16(offs);
-+ last->e_value_size = cpu_to_le32(value_len);
-+ last->e_hash = last->e_value_block = 0;
-+ memset(last->e_name, 0, esize);
-+ memcpy(last->e_name, name, last->e_name_len);
-+ if (start + offs + value_len > end)
-+ printk("ALERT at %s:%d: 0x%p + %d + %zd > 0x%p\n",
-+ __FILE__, __LINE__, start, offs,
-+ value_len, end);
-+ memcpy(start + offs, value, value_len);
-+ last = EXT3_XATTR_NEXT(last);
-+ *((__u32 *) last) = 0;
-+ }
-+
-+ ext3_mark_iloc_dirty(handle, inode, &iloc);
-+ brelse(bh);
-+
-+ return 0;
-+}
-+
-+/*
- * ext3_xattr_set_handle()
- *
- * Create, replace or remove an extended attribute for this inode. Buffer
-@@ -369,6 +869,104 @@
- */
- int
- ext3_xattr_set_handle(handle_t *handle, struct inode *inode, int name_index,
-+ const char *name, const void *value, size_t value_len,
-+ int flags)
-+{
-+ struct ext3_xattr_entry entry;
-+ int err, where = 0, found = 0, total;
-+ int free1 = -1, free2 = -1;
-+ int name_len;
-+
-+ ea_idebug(inode, "name=%d.%s, value=%p, value_len=%ld",
-+ name_index, name, value, (long)value_len);
-+
-+ if (IS_RDONLY(inode))
-+ return -EROFS;
-+ if (IS_IMMUTABLE(inode) || IS_APPEND(inode))
-+ return -EPERM;
-+ if (value == NULL)
-+ value_len = 0;
-+ if (name == NULL)
-+ return -EINVAL;
-+ name_len = strlen(name);
-+ if (name_len > 255 || value_len > inode->i_sb->s_blocksize)
-+ return -ERANGE;
-+ down_write(&EXT3_I(inode)->xattr_sem);
-+
-+ /* try to find attribute in inode body */
-+ err = ext3_xattr_ibody_find(inode, name_index, name, &entry, &free1);
-+ if (err == 0) {
-+ /* found EA in inode */
-+ found = 1;
-+ where = 0;
-+ } else if (err == -ENOENT) {
-+ /* there is no such attribute in inode body */
-+ /* try to find attribute in dedicated block */
-+ err = ext3_xattr_block_find(inode, name_index, name,
-+ &entry, &free2);
-+ if (err != 0 && err != -ENOENT) {
-+   /* error while searching the EA block */
-+ goto finish;
-+ } else if (err == 0) {
-+ /* found EA in block */
-+ where = 1;
-+ found = 1;
-+ }
-+ } else
-+ goto finish;
-+
-+ /* check flags: may replace? may create ? */
-+ if (found && (flags & XATTR_CREATE)) {
-+ err = -EEXIST;
-+ goto finish;
-+ } else if (!found && (flags & XATTR_REPLACE)) {
-+ err = -ENODATA;
-+ goto finish;
-+ }
-+
-+ /* check if we have enough space to store attribute */
-+ total = EXT3_XATTR_LEN(strlen(name)) + value_len;
-+ if (free1 >= 0 && total > free1 && free2 >= 0 && total > free2) {
-+  /* not enough space */
-+ err = -ENOSPC;
-+ goto finish;
-+ }
-+
-+ /* time to remove attribute */
-+ if (found) {
-+ if (where == 0) {
-+ /* EA is stored in inode body */
-+ ext3_xattr_ibody_set(handle, inode, name_index, name,
-+ NULL, 0, flags);
-+ } else {
-+   /* EA is stored in a separate block */
-+ ext3_xattr_block_set(handle, inode, name_index, name,
-+ NULL, 0, flags);
-+ }
-+ }
-+
-+ /* try to store EA in inode body */
-+ err = ext3_xattr_ibody_set(handle, inode, name_index, name,
-+ value, value_len, flags);
-+ if (err) {
-+ /* can't store EA in inode body */
-+ /* try to store in block */
-+ err = ext3_xattr_block_set(handle, inode, name_index,
-+ name, value, value_len, flags);
-+ }
-+
-+finish:
-+ up_write(&EXT3_I(inode)->xattr_sem);
-+ return err;
-+}
-+
-+/*
-+ * ext3_xattr_block_set()
-+ *
-+ * this routine adds/removes/replaces an attribute in the EA block
-+ */
-+int
-+ext3_xattr_block_set(handle_t *handle, struct inode *inode, int name_index,
- const char *name, const void *value, size_t value_len,
- int flags)
- {
-@@ -391,22 +989,7 @@
- * towards the end of the block).
- * end -- Points right after the block pointed to by header.
- */
--
-- ea_idebug(inode, "name=%d.%s, value=%p, value_len=%ld",
-- name_index, name, value, (long)value_len);
--
-- if (IS_RDONLY(inode))
-- return -EROFS;
-- if (IS_IMMUTABLE(inode) || IS_APPEND(inode))
-- return -EPERM;
-- if (value == NULL)
-- value_len = 0;
-- if (name == NULL)
-- return -EINVAL;
- name_len = strlen(name);
-- if (name_len > 255 || value_len > sb->s_blocksize)
-- return -ERANGE;
-- down_write(&EXT3_I(inode)->xattr_sem);
- if (EXT3_I(inode)->i_file_acl) {
- /* The inode already has an extended attribute block. */
- bh = sb_bread(sb, EXT3_I(inode)->i_file_acl);
-@@ -638,7 +1221,6 @@
- brelse(bh);
- if (!(bh && header == HDR(bh)))
- kfree(header);
-- up_write(&EXT3_I(inode)->xattr_sem);
-
- return error;
- }
-Index: linux-stage/fs/ext3/xattr.h
-===================================================================
---- linux-stage.orig/fs/ext3/xattr.h 2005-10-04 16:50:11.000000000 -0600
-+++ linux-stage/fs/ext3/xattr.h 2005-10-04 17:07:25.000000000 -0600
-@@ -67,7 +67,8 @@
- extern int ext3_xattr_get(struct inode *, int, const char *, void *, size_t);
- extern int ext3_xattr_list(struct inode *, char *, size_t);
- extern int ext3_xattr_set(struct inode *, int, const char *, const void *, size_t, int);
--extern int ext3_xattr_set_handle(handle_t *, struct inode *, int, const char *, const void *, size_t, int);
-+extern int ext3_xattr_set_handle(handle_t *, struct inode *, int, const char *,const void *,size_t,int);
-+extern int ext3_xattr_block_set(handle_t *, struct inode *, int, const char *,const void *,size_t,int);
-
- extern void ext3_xattr_delete_inode(handle_t *, struct inode *);
- extern void ext3_xattr_put_super(struct super_block *);
-Index: linux-stage/include/linux/ext3_fs.h
-===================================================================
---- linux-stage.orig/include/linux/ext3_fs.h 2005-10-04 16:53:29.000000000 -0600
-+++ linux-stage/include/linux/ext3_fs.h 2005-10-04 17:07:25.000000000 -0600
-@@ -293,6 +293,8 @@
- __u32 m_i_reserved2[2];
- } masix2;
- } osd2; /* OS dependent 2 */
-+ __u16 i_extra_isize;
-+ __u16 i_pad1;
- };
-
- #define i_size_high i_dir_acl
-@@ -757,6 +759,7 @@
- extern int ext3_forget(handle_t *, int, struct inode *, struct buffer_head *, int);
- extern struct buffer_head * ext3_getblk (handle_t *, struct inode *, long, int, int *);
- extern struct buffer_head * ext3_bread (handle_t *, struct inode *, int, int, int *);
-+int ext3_get_inode_loc(struct inode *inode, struct ext3_iloc *iloc, int in_mem);
-
- extern void ext3_read_inode (struct inode *);
- extern int ext3_write_inode (struct inode *, int);
-Index: linux-stage/include/linux/ext3_fs_i.h
-===================================================================
---- linux-stage.orig/include/linux/ext3_fs_i.h 2005-10-04 16:50:11.000000000 -0600
-+++ linux-stage/include/linux/ext3_fs_i.h 2005-10-04 17:07:25.000000000 -0600
-@@ -113,6 +113,9 @@
- */
- loff_t i_disksize;
-
-+ /* on-disk additional length */
-+ __u16 i_extra_isize;
-+
- /*
- * truncate_sem is for serialising ext3_truncate() against
- * ext3_getblock(). In the 2.4 ext2 design, great chunks of inode's
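Every new in-inode helper in the xattr.c hunks above (ibody_get, ibody_list, ibody_find, ibody_set) repeats the same arithmetic: the EA area starts after the 128-byte ext3_inode plus i_extra_isize, is introduced by a __u32 EXT3_XATTR_MAGIC, and whatever remains of s_inode_size is the usable storage. A condensed sketch of just that calculation; the helper name is hypothetical:

#include <linux/fs.h>
#include <linux/ext3_fs.h>
#include <linux/ext3_fs_i.h>
#include <linux/ext3_fs_sb.h>
#include "xattr.h"		/* EXT3_XATTR_MAGIC, as in fs/ext3/xattr.c */

/* hypothetical helper: locate the in-inode EA area described above */
static char *demo_ibody_ea_start(struct inode *inode,
				 struct ext3_inode *raw_inode,
				 int *storage_size)
{
	char *start;

	*storage_size = EXT3_SB(inode->i_sb)->s_inode_size -
			EXT3_GOOD_OLD_INODE_SIZE -
			EXT3_I(inode)->i_extra_isize -
			sizeof(__u32);
	start = (char *)raw_inode + EXT3_GOOD_OLD_INODE_SIZE +
		EXT3_I(inode)->i_extra_isize;

	if (le32_to_cpu(*(__u32 *)start) != EXT3_XATTR_MAGIC)
		return NULL;		/* no in-body EAs on this inode */

	return start + sizeof(__u32);	/* first struct ext3_xattr_entry */
}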
+++ /dev/null
-%patch
-Index: linux-2.6.0/fs/ext3/ialloc.c
-===================================================================
---- linux-2.6.0.orig/fs/ext3/ialloc.c 2004-01-14 18:54:11.000000000 +0300
-+++ linux-2.6.0/fs/ext3/ialloc.c 2004-01-14 18:54:12.000000000 +0300
-@@ -627,6 +627,9 @@
- inode->i_generation = EXT3_SB(sb)->s_next_generation++;
-
- ei->i_state = EXT3_STATE_NEW;
-+ ei->i_extra_isize =
-+ (EXT3_INODE_SIZE(inode->i_sb) > EXT3_GOOD_OLD_INODE_SIZE) ?
-+ sizeof(struct ext3_inode) - EXT3_GOOD_OLD_INODE_SIZE : 0;
-
- ret = inode;
- if(DQUOT_ALLOC_INODE(inode)) {
-Index: linux-2.6.0/fs/ext3/inode.c
-===================================================================
---- linux-2.6.0.orig/fs/ext3/inode.c 2004-01-14 18:54:12.000000000 +0300
-+++ linux-2.6.0/fs/ext3/inode.c 2004-01-14 19:09:46.000000000 +0300
-@@ -2339,7 +2339,7 @@
- * trying to determine the inode's location on-disk and no read need be
- * performed.
- */
--static int ext3_get_inode_loc(struct inode *inode,
-+int ext3_get_inode_loc(struct inode *inode,
- struct ext3_iloc *iloc, int in_mem)
- {
- unsigned long block;
-@@ -2547,6 +2547,11 @@
- ei->i_data[block] = raw_inode->i_block[block];
- INIT_LIST_HEAD(&ei->i_orphan);
-
-+ if (EXT3_INODE_SIZE(inode->i_sb) > EXT3_GOOD_OLD_INODE_SIZE)
-+ ei->i_extra_isize = le16_to_cpu(raw_inode->i_extra_isize);
-+ else
-+ ei->i_extra_isize = 0;
-+
- if (S_ISREG(inode->i_mode)) {
- inode->i_op = &ext3_file_inode_operations;
- inode->i_fop = &ext3_file_operations;
-@@ -2682,6 +2687,9 @@
- } else for (block = 0; block < EXT3_N_BLOCKS; block++)
- raw_inode->i_block[block] = ei->i_data[block];
-
-+ if (EXT3_INODE_SIZE(inode->i_sb) > EXT3_GOOD_OLD_INODE_SIZE)
-+ raw_inode->i_extra_isize = cpu_to_le16(ei->i_extra_isize);
-+
- BUFFER_TRACE(bh, "call ext3_journal_dirty_metadata");
- rc = ext3_journal_dirty_metadata(handle, bh);
- if (!err)
-@@ -2849,7 +2857,8 @@ ext3_reserve_inode_write(handle_t *handl
- {
- int err = 0;
- if (handle) {
-- err = ext3_get_inode_loc(inode, iloc, 1);
-+ err = ext3_get_inode_loc(inode, iloc, EXT3_I(inode)->i_state &
-+ EXT3_STATE_NEW);
- if (!err) {
- BUFFER_TRACE(iloc->bh, "get_write_access");
- err = ext3_journal_get_write_access(handle, iloc->bh);
-Index: linux-2.6.0/fs/ext3/xattr.c
-===================================================================
---- linux-2.6.0.orig/fs/ext3/xattr.c 2003-12-30 08:33:13.000000000 +0300
-+++ linux-2.6.0/fs/ext3/xattr.c 2004-01-14 18:54:12.000000000 +0300
-@@ -246,17 +246,12 @@
- }
-
- /*
-- * ext3_xattr_get()
-- *
-- * Copy an extended attribute into the buffer
-- * provided, or compute the buffer size required.
-- * Buffer is NULL to compute the size of the buffer required.
-+ * ext3_xattr_block_get()
- *
-- * Returns a negative error number on failure, or the number of bytes
-- * used / required on success.
-+ * routine looks for the attribute in the EA block and returns its value and size
- */
- int
--ext3_xattr_get(struct inode *inode, int name_index, const char *name,
-+ext3_xattr_block_get(struct inode *inode, int name_index, const char *name,
- void *buffer, size_t buffer_size)
- {
- struct buffer_head *bh = NULL;
-@@ -270,7 +265,6 @@
-
- if (name == NULL)
- return -EINVAL;
-- down_read(&EXT3_I(inode)->xattr_sem);
- error = -ENODATA;
- if (!EXT3_I(inode)->i_file_acl)
- goto cleanup;
-@@ -343,15 +337,87 @@
-
- cleanup:
- brelse(bh);
-- up_read(&EXT3_I(inode)->xattr_sem);
-
- return error;
- }
-
- /*
-- * ext3_xattr_list()
-+ * ext3_xattr_ibody_get()
- *
-- * Copy a list of attribute names into the buffer
-+ * routine looks for the attribute in the inode body and returns its value and size
-+ */
-+int
-+ext3_xattr_ibody_get(struct inode *inode, int name_index, const char *name,
-+ void *buffer, size_t buffer_size)
-+{
-+ int size, name_len = strlen(name), storage_size;
-+ struct ext3_xattr_entry *last;
-+ struct ext3_inode *raw_inode;
-+ struct ext3_iloc iloc;
-+ char *start, *end;
-+ int ret = -ENOENT;
-+
-+ if (EXT3_SB(inode->i_sb)->s_inode_size <= EXT3_GOOD_OLD_INODE_SIZE)
-+ return -ENOENT;
-+
-+ ret = ext3_get_inode_loc(inode, &iloc, 0);
-+ if (ret)
-+ return ret;
-+ raw_inode = ext3_raw_inode(&iloc);
-+
-+ storage_size = EXT3_SB(inode->i_sb)->s_inode_size -
-+ EXT3_GOOD_OLD_INODE_SIZE -
-+ EXT3_I(inode)->i_extra_isize -
-+ sizeof(__u32);
-+ start = (char *) raw_inode + EXT3_GOOD_OLD_INODE_SIZE +
-+ EXT3_I(inode)->i_extra_isize;
-+ if (le32_to_cpu((*(__u32*) start)) != EXT3_XATTR_MAGIC) {
-+ brelse(iloc.bh);
-+ return -ENOENT;
-+ }
-+ start += sizeof(__u32);
-+ end = (char *) raw_inode + EXT3_SB(inode->i_sb)->s_inode_size;
-+
-+ last = (struct ext3_xattr_entry *) start;
-+ while (!IS_LAST_ENTRY(last)) {
-+ struct ext3_xattr_entry *next = EXT3_XATTR_NEXT(last);
-+ if (le32_to_cpu(last->e_value_size) > storage_size ||
-+ (char *) next >= end) {
-+ ext3_error(inode->i_sb, "ext3_xattr_ibody_get",
-+ "inode %ld", inode->i_ino);
-+ brelse(iloc.bh);
-+ return -EIO;
-+ }
-+ if (name_index == last->e_name_index &&
-+ name_len == last->e_name_len &&
-+ !memcmp(name, last->e_name, name_len))
-+ goto found;
-+ last = next;
-+ }
-+
-+ /* can't find EA */
-+ brelse(iloc.bh);
-+ return -ENOENT;
-+
-+found:
-+ size = le32_to_cpu(last->e_value_size);
-+ if (buffer) {
-+ ret = -ERANGE;
-+ if (buffer_size >= size) {
-+ memcpy(buffer, start + le16_to_cpu(last->e_value_offs),
-+ size);
-+ ret = size;
-+ }
-+ } else
-+ ret = size;
-+ brelse(iloc.bh);
-+ return ret;
-+}
-+
-+/*
-+ * ext3_xattr_get()
-+ *
-+ * Copy an extended attribute into the buffer
- * provided, or compute the buffer size required.
- * Buffer is NULL to compute the size of the buffer required.
- *
-@@ -359,7 +425,31 @@
- * used / required on success.
- */
- int
--ext3_xattr_list(struct inode *inode, char *buffer, size_t buffer_size)
-+ext3_xattr_get(struct inode *inode, int name_index, const char *name,
-+ void *buffer, size_t buffer_size)
-+{
-+ int err;
-+
-+ down_read(&EXT3_I(inode)->xattr_sem);
-+
-+ /* try to find attribute in inode body */
-+ err = ext3_xattr_ibody_get(inode, name_index, name,
-+ buffer, buffer_size);
-+ if (err < 0)
-+ /* search was unsuccessful, try to find EA in dedicated block */
-+ err = ext3_xattr_block_get(inode, name_index, name,
-+ buffer, buffer_size);
-+ up_read(&EXT3_I(inode)->xattr_sem);
-+
-+ return err;
-+}
-+
-+/* ext3_xattr_block_list()
-+ *
-+ * generate list of attributes stored in EA block
-+ */
-+int
-+ext3_xattr_block_list(struct inode *inode, char *buffer, size_t buffer_size)
- {
- struct buffer_head *bh = NULL;
- struct ext3_xattr_entry *entry;
-@@ -370,7 +460,6 @@
- ea_idebug(inode, "buffer=%p, buffer_size=%ld",
- buffer, (long)buffer_size);
-
-- down_read(&EXT3_I(inode)->xattr_sem);
- error = 0;
- if (!EXT3_I(inode)->i_file_acl)
- goto cleanup;
-@@ -431,11 +520,138 @@
-
- cleanup:
- brelse(bh);
-- up_read(&EXT3_I(inode)->xattr_sem);
-
- return error;
- }
-
-+/* ext3_xattr_ibody_list()
-+ *
-+ * generate list of attributes stored in inode body
-+ */
-+int
-+ext3_xattr_ibody_list(struct inode *inode, char *buffer, size_t buffer_size)
-+{
-+ struct ext3_xattr_entry *last;
-+ struct ext3_inode *raw_inode;
-+ char *start, *end, *buf;
-+ struct ext3_iloc iloc;
-+ int storage_size;
-+ int ret;
-+ int size = 0;
-+
-+ if (EXT3_SB(inode->i_sb)->s_inode_size <= EXT3_GOOD_OLD_INODE_SIZE)
-+ return 0;
-+
-+ ret = ext3_get_inode_loc(inode, &iloc, 0);
-+ if (ret)
-+ return ret;
-+ raw_inode = ext3_raw_inode(&iloc);
-+
-+ storage_size = EXT3_SB(inode->i_sb)->s_inode_size -
-+ EXT3_GOOD_OLD_INODE_SIZE -
-+ EXT3_I(inode)->i_extra_isize -
-+ sizeof(__u32);
-+ start = (char *) raw_inode + EXT3_GOOD_OLD_INODE_SIZE +
-+ EXT3_I(inode)->i_extra_isize;
-+ if (le32_to_cpu((*(__u32*) start)) != EXT3_XATTR_MAGIC) {
-+ brelse(iloc.bh);
-+ return 0;
-+ }
-+ start += sizeof(__u32);
-+ end = (char *) raw_inode + EXT3_SB(inode->i_sb)->s_inode_size;
-+
-+ last = (struct ext3_xattr_entry *) start;
-+ while (!IS_LAST_ENTRY(last)) {
-+ struct ext3_xattr_entry *next = EXT3_XATTR_NEXT(last);
-+ struct ext3_xattr_handler *handler;
-+ if (le32_to_cpu(last->e_value_size) > storage_size ||
-+ (char *) next >= end) {
-+ ext3_error(inode->i_sb, "ext3_xattr_ibody_list",
-+ "inode %ld", inode->i_ino);
-+ brelse(iloc.bh);
-+ return -EIO;
-+ }
-+ handler = ext3_xattr_handler(last->e_name_index);
-+ if (handler)
-+ size += handler->list(NULL, inode, last->e_name,
-+ last->e_name_len);
-+ last = next;
-+ }
-+
-+ if (!buffer) {
-+ ret = size;
-+ goto cleanup;
-+ } else {
-+ ret = -ERANGE;
-+ if (size > buffer_size)
-+ goto cleanup;
-+ }
-+
-+ last = (struct ext3_xattr_entry *) start;
-+ buf = buffer;
-+ while (!IS_LAST_ENTRY(last)) {
-+ struct ext3_xattr_entry *next = EXT3_XATTR_NEXT(last);
-+ struct ext3_xattr_handler *handler;
-+ handler = ext3_xattr_handler(last->e_name_index);
-+ if (handler)
-+ buf += handler->list(buf, inode, last->e_name,
-+ last->e_name_len);
-+ last = next;
-+ }
-+ ret = size;
-+cleanup:
-+ brelse(iloc.bh);
-+ return ret;
-+}
-+
-+/*
-+ * ext3_xattr_list()
-+ *
-+ * Copy a list of attribute names into the buffer
-+ * provided, or compute the buffer size required.
-+ * Buffer is NULL to compute the size of the buffer required.
-+ *
-+ * Returns a negative error number on failure, or the number of bytes
-+ * used / required on success.
-+ */
-+int
-+ext3_xattr_list(struct inode *inode, char *buffer, size_t buffer_size)
-+{
-+ int error;
-+ int size = buffer_size;
-+
-+ down_read(&EXT3_I(inode)->xattr_sem);
-+
-+ /* get list of attributes stored in inode body */
-+ error = ext3_xattr_ibody_list(inode, buffer, buffer_size);
-+ if (error < 0) {
-+  /* some error occurred while collecting
-+ * attributes in inode body */
-+ size = 0;
-+ goto cleanup;
-+ }
-+ size = error;
-+
-+ /* get list of attributes stored in dedicated block */
-+ if (buffer) {
-+ buffer_size -= error;
-+ if (buffer_size <= 0) {
-+ buffer = NULL;
-+ buffer_size = 0;
-+ } else
-+ buffer += error;
-+ }
-+
-+ error = ext3_xattr_block_list(inode, buffer, buffer_size);
-+ if (error < 0)
-+  /* block listing failed, return only the error */
-+ size = 0;
-+
-+cleanup:
-+ up_read(&EXT3_I(inode)->xattr_sem);
-+ return error + size;
-+}
-+
- /*
- * If the EXT3_FEATURE_COMPAT_EXT_ATTR feature of this file system is
- * not set, set it.
-@@ -457,6 +673,279 @@
- }
-
- /*
-+ * ext3_xattr_ibody_find()
-+ *
-+ * search attribute and calculate free space in inode body
-+ * NOTE: free space includes the space our attribute already holds
-+ */
-+int
-+ext3_xattr_ibody_find(struct inode *inode, int name_index,
-+ const char *name, struct ext3_xattr_entry *rentry, int *free)
-+{
-+ struct ext3_xattr_entry *last;
-+ struct ext3_inode *raw_inode;
-+ int name_len = strlen(name);
-+ int err, storage_size;
-+ struct ext3_iloc iloc;
-+ char *start, *end;
-+ int ret = -ENOENT;
-+
-+ if (EXT3_SB(inode->i_sb)->s_inode_size <= EXT3_GOOD_OLD_INODE_SIZE)
-+ return ret;
-+
-+ err = ext3_get_inode_loc(inode, &iloc, 0);
-+ if (err)
-+ return -EIO;
-+ raw_inode = ext3_raw_inode(&iloc);
-+
-+ storage_size = EXT3_SB(inode->i_sb)->s_inode_size -
-+ EXT3_GOOD_OLD_INODE_SIZE -
-+ EXT3_I(inode)->i_extra_isize -
-+ sizeof(__u32);
-+ *free = storage_size - sizeof(__u32);
-+ start = (char *) raw_inode + EXT3_GOOD_OLD_INODE_SIZE +
-+ EXT3_I(inode)->i_extra_isize;
-+ if (le32_to_cpu((*(__u32*) start)) != EXT3_XATTR_MAGIC) {
-+ brelse(iloc.bh);
-+ return -ENOENT;
-+ }
-+ start += sizeof(__u32);
-+ end = (char *) raw_inode + EXT3_SB(inode->i_sb)->s_inode_size;
-+
-+ last = (struct ext3_xattr_entry *) start;
-+ while (!IS_LAST_ENTRY(last)) {
-+ struct ext3_xattr_entry *next = EXT3_XATTR_NEXT(last);
-+ if (le32_to_cpu(last->e_value_size) > storage_size ||
-+ (char *) next >= end) {
-+ ext3_error(inode->i_sb, "ext3_xattr_ibody_find",
-+ "inode %ld", inode->i_ino);
-+ brelse(iloc.bh);
-+ return -EIO;
-+ }
-+
-+ if (name_index == last->e_name_index &&
-+ name_len == last->e_name_len &&
-+ !memcmp(name, last->e_name, name_len)) {
-+ memcpy(rentry, last, sizeof(struct ext3_xattr_entry));
-+ ret = 0;
-+ } else {
-+ *free -= EXT3_XATTR_LEN(last->e_name_len);
-+ *free -= le32_to_cpu(last->e_value_size);
-+ }
-+ last = next;
-+ }
-+
-+ brelse(iloc.bh);
-+ return ret;
-+}
-+
-+/*
-+ * ext3_xattr_block_find()
-+ *
-+ * search attribute and calculate free space in EA block (if it is allocated)
-+ * NOTE: free space includes the space our attribute already holds
-+ */
-+int
-+ext3_xattr_block_find(struct inode *inode, int name_index, const char *name,
-+ struct ext3_xattr_entry *rentry, int *free)
-+{
-+ struct buffer_head *bh = NULL;
-+ struct ext3_xattr_entry *entry;
-+ char *end;
-+ int name_len, error = -ENOENT;
-+
-+ if (!EXT3_I(inode)->i_file_acl) {
-+ *free = inode->i_sb->s_blocksize -
-+ sizeof(struct ext3_xattr_header) -
-+ sizeof(__u32);
-+ return -ENOENT;
-+ }
-+ ea_idebug(inode, "reading block %d", EXT3_I(inode)->i_file_acl);
-+ bh = sb_bread(inode->i_sb, EXT3_I(inode)->i_file_acl);
-+ if (!bh)
-+ return -EIO;
-+ ea_bdebug(bh, "b_count=%d, refcount=%d",
-+ atomic_read(&(bh->b_count)), le32_to_cpu(HDR(bh)->h_refcount));
-+ end = bh->b_data + bh->b_size;
-+ if (HDR(bh)->h_magic != cpu_to_le32(EXT3_XATTR_MAGIC) ||
-+ HDR(bh)->h_blocks != cpu_to_le32(1)) {
-+bad_block: ext3_error(inode->i_sb, "ext3_xattr_get",
-+ "inode %ld: bad block %d", inode->i_ino,
-+ EXT3_I(inode)->i_file_acl);
-+ brelse(bh);
-+ return -EIO;
-+ }
-+ /* find named attribute */
-+ name_len = strlen(name);
-+ *free = bh->b_size - sizeof(__u32);
-+
-+ entry = FIRST_ENTRY(bh);
-+ while (!IS_LAST_ENTRY(entry)) {
-+ struct ext3_xattr_entry *next =
-+ EXT3_XATTR_NEXT(entry);
-+ if ((char *)next >= end)
-+ goto bad_block;
-+ if (name_index == entry->e_name_index &&
-+ name_len == entry->e_name_len &&
-+ memcmp(name, entry->e_name, name_len) == 0) {
-+ memcpy(rentry, entry, sizeof(struct ext3_xattr_entry));
-+ error = 0;
-+ } else {
-+ *free -= EXT3_XATTR_LEN(entry->e_name_len);
-+ *free -= le32_to_cpu(entry->e_value_size);
-+ }
-+ entry = next;
-+ }
-+ brelse(bh);
-+
-+ return error;
-+}
-+
-+/*
-+ * ext3_xattr_ibody_set()
-+ *
-+ * this routine adds/removes/replaces an attribute in the inode body
-+ */
-+int
-+ext3_xattr_ibody_set(handle_t *handle, struct inode *inode, int name_index,
-+ const char *name, const void *value, size_t value_len,
-+ int flags)
-+{
-+ struct ext3_xattr_entry *last, *next, *here = NULL;
-+ struct ext3_inode *raw_inode;
-+ int name_len = strlen(name);
-+ int esize = EXT3_XATTR_LEN(name_len);
-+ struct buffer_head *bh;
-+ int err, storage_size;
-+ struct ext3_iloc iloc;
-+ int free, min_offs;
-+ char *start, *end;
-+
-+ if (EXT3_SB(inode->i_sb)->s_inode_size <= EXT3_GOOD_OLD_INODE_SIZE)
-+ return -ENOSPC;
-+
-+ err = ext3_get_inode_loc(inode, &iloc, 0);
-+ if (err)
-+ return err;
-+ raw_inode = ext3_raw_inode(&iloc);
-+ bh = iloc.bh;
-+
-+ storage_size = EXT3_SB(inode->i_sb)->s_inode_size -
-+ EXT3_GOOD_OLD_INODE_SIZE -
-+ EXT3_I(inode)->i_extra_isize -
-+ sizeof(__u32);
-+ start = (char *) raw_inode + EXT3_GOOD_OLD_INODE_SIZE +
-+ EXT3_I(inode)->i_extra_isize;
-+ if ((*(__u32*) start) != EXT3_XATTR_MAGIC) {
-+ /* inode had no attributes before */
-+ *((__u32*) start) = cpu_to_le32(EXT3_XATTR_MAGIC);
-+ }
-+ start += sizeof(__u32);
-+ end = (char *) raw_inode + EXT3_SB(inode->i_sb)->s_inode_size;
-+ min_offs = storage_size;
-+ free = storage_size - sizeof(__u32);
-+
-+ last = (struct ext3_xattr_entry *) start;
-+ while (!IS_LAST_ENTRY(last)) {
-+ next = EXT3_XATTR_NEXT(last);
-+ if (le32_to_cpu(last->e_value_size) > storage_size ||
-+ (char *) next >= end) {
-+ ext3_error(inode->i_sb, "ext3_xattr_ibody_set",
-+ "inode %ld", inode->i_ino);
-+ brelse(bh);
-+ return -EIO;
-+ }
-+
-+ if (last->e_value_size) {
-+ int offs = le16_to_cpu(last->e_value_offs);
-+ if (offs < min_offs)
-+ min_offs = offs;
-+ }
-+ if (name_index == last->e_name_index &&
-+ name_len == last->e_name_len &&
-+ !memcmp(name, last->e_name, name_len))
-+ here = last;
-+ else {
-+ /* we calculate all but our attribute
-+ * because it will be removed before changing */
-+ free -= EXT3_XATTR_LEN(last->e_name_len);
-+ free -= le32_to_cpu(last->e_value_size);
-+ }
-+ last = next;
-+ }
-+
-+ if (value && (esize + value_len > free)) {
-+ brelse(bh);
-+ return -ENOSPC;
-+ }
-+
-+ err = ext3_reserve_inode_write(handle, inode, &iloc);
-+ if (err) {
-+ brelse(bh);
-+ return err;
-+ }
-+
-+ if (here) {
-+ /* time to remove old value */
-+ struct ext3_xattr_entry *e;
-+ int size = le32_to_cpu(here->e_value_size);
-+ int border = le16_to_cpu(here->e_value_offs);
-+ char *src;
-+
-+ /* move tail */
-+ memmove(start + min_offs + size, start + min_offs,
-+ border - min_offs);
-+
-+ /* recalculate offsets */
-+ e = (struct ext3_xattr_entry *) start;
-+ while (!IS_LAST_ENTRY(e)) {
-+ struct ext3_xattr_entry *next = EXT3_XATTR_NEXT(e);
-+ int offs = le16_to_cpu(e->e_value_offs);
-+ if (offs < border)
-+ e->e_value_offs =
-+ cpu_to_le16(offs + size);
-+ e = next;
-+ }
-+ min_offs += size;
-+
-+ /* remove entry */
-+ border = EXT3_XATTR_LEN(here->e_name_len);
-+ src = (char *) here + EXT3_XATTR_LEN(here->e_name_len);
-+ size = (char *) last - src;
-+ if ((char *) here + size > end)
-+ printk("ALERT at %s:%d: 0x%p + %d > 0x%p\n",
-+ __FILE__, __LINE__, here, size, end);
-+ memmove(here, src, size);
-+ last = (struct ext3_xattr_entry *) ((char *) last - border);
-+ *((__u32 *) last) = 0;
-+ }
-+
-+ if (value) {
-+ int offs = min_offs - value_len;
-+ /* use last to create new entry */
-+ last->e_name_len = strlen(name);
-+ last->e_name_index = name_index;
-+ last->e_value_offs = cpu_to_le16(offs);
-+ last->e_value_size = cpu_to_le32(value_len);
-+ last->e_hash = last->e_value_block = 0;
-+ memset(last->e_name, 0, esize);
-+ memcpy(last->e_name, name, last->e_name_len);
-+ if (start + offs + value_len > end)
-+ printk("ALERT at %s:%d: 0x%p + %d + %zd > 0x%p\n",
-+ __FILE__, __LINE__, start, offs,
-+ value_len, end);
-+ memcpy(start + offs, value, value_len);
-+ last = EXT3_XATTR_NEXT(last);
-+ *((__u32 *) last) = 0;
-+ }
-+
-+ ext3_mark_iloc_dirty(handle, inode, &iloc);
-+ brelse(bh);
-+
-+ return 0;
-+}
-+
-+/*
- * ext3_xattr_set_handle()
- *
- * Create, replace or remove an extended attribute for this inode. Buffer
-@@ -470,6 +959,104 @@
- */
- int
- ext3_xattr_set_handle(handle_t *handle, struct inode *inode, int name_index,
-+ const char *name, const void *value, size_t value_len,
-+ int flags)
-+{
-+ struct ext3_xattr_entry entry;
-+ int err, where = 0, found = 0, total;
-+ int free1 = -1, free2 = -1;
-+ int name_len;
-+
-+ ea_idebug(inode, "name=%d.%s, value=%p, value_len=%ld",
-+ name_index, name, value, (long)value_len);
-+
-+ if (IS_RDONLY(inode))
-+ return -EROFS;
-+ if (IS_IMMUTABLE(inode) || IS_APPEND(inode))
-+ return -EPERM;
-+ if (value == NULL)
-+ value_len = 0;
-+ if (name == NULL)
-+ return -EINVAL;
-+ name_len = strlen(name);
-+ if (name_len > 255 || value_len > inode->i_sb->s_blocksize)
-+ return -ERANGE;
-+ down_write(&EXT3_I(inode)->xattr_sem);
-+
-+ /* try to find attribute in inode body */
-+ err = ext3_xattr_ibody_find(inode, name_index, name, &entry, &free1);
-+ if (err == 0) {
-+ /* found EA in inode */
-+ found = 1;
-+ where = 0;
-+ } else if (err == -ENOENT) {
-+ /* there is no such attribute in inode body */
-+ /* try to find attribute in dedicated block */
-+ err = ext3_xattr_block_find(inode, name_index, name,
-+ &entry, &free2);
-+ if (err != 0 && err != -ENOENT) {
-+   /* error while searching the EA block */
-+ goto finish;
-+ } else if (err == 0) {
-+ /* found EA in block */
-+ where = 1;
-+ found = 1;
-+ }
-+ } else
-+ goto finish;
-+
-+ /* check flags: may replace? may create ? */
-+ if (found && (flags & XATTR_CREATE)) {
-+ err = -EEXIST;
-+ goto finish;
-+ } else if (!found && (flags & XATTR_REPLACE)) {
-+ err = -ENODATA;
-+ goto finish;
-+ }
-+
-+ /* check if we have enough space to store attribute */
-+ total = EXT3_XATTR_LEN(strlen(name)) + value_len;
-+ if (free1 >= 0 && total > free1 && free2 >= 0 && total > free2) {
-+ /* not enough space */
-+ err = -ENOSPC;
-+ goto finish;
-+ }
-+
-+ /* time to remove attribute */
-+ if (found) {
-+ if (where == 0) {
-+ /* EA is stored in inode body */
-+ ext3_xattr_ibody_set(handle, inode, name_index, name,
-+ NULL, 0, flags);
-+ } else {
-+ /* EA is stored in a separate block */
-+ ext3_xattr_block_set(handle, inode, name_index, name,
-+ NULL, 0, flags);
-+ }
-+ }
-+
-+ /* try to store EA in inode body */
-+ err = ext3_xattr_ibody_set(handle, inode, name_index, name,
-+ value, value_len, flags);
-+ if (err) {
-+ /* can't store EA in inode body */
-+ /* try to store in block */
-+ err = ext3_xattr_block_set(handle, inode, name_index,
-+ name, value, value_len, flags);
-+ }
-+
-+finish:
-+ up_write(&EXT3_I(inode)->xattr_sem);
-+ return err;
-+}
-+
-+/*
-+ * ext3_xattr_block_set()
-+ *
-+ * this routine adds/removes/replaces an attribute in the EA block
-+ */
-+int
-+ext3_xattr_block_set(handle_t *handle, struct inode *inode, int name_index,
- const char *name, const void *value, size_t value_len,
- int flags)
- {
-@@ -492,22 +1078,7 @@
- * towards the end of the block).
- * end -- Points right after the block pointed to by header.
- */
--
-- ea_idebug(inode, "name=%d.%s, value=%p, value_len=%ld",
-- name_index, name, value, (long)value_len);
--
-- if (IS_RDONLY(inode))
-- return -EROFS;
-- if (IS_IMMUTABLE(inode) || IS_APPEND(inode))
-- return -EPERM;
-- if (value == NULL)
-- value_len = 0;
-- if (name == NULL)
-- return -EINVAL;
- name_len = strlen(name);
-- if (name_len > 255 || value_len > sb->s_blocksize)
-- return -ERANGE;
-- down_write(&EXT3_I(inode)->xattr_sem);
- if (EXT3_I(inode)->i_file_acl) {
- /* The inode already has an extended attribute block. */
- bh = sb_bread(sb, EXT3_I(inode)->i_file_acl);
-@@ -733,7 +1304,6 @@
- brelse(bh);
- if (!(bh && header == HDR(bh)))
- kfree(header);
-- up_write(&EXT3_I(inode)->xattr_sem);
-
- return error;
- }
-Index: linux-2.6.0/fs/ext3/xattr.h
-===================================================================
---- linux-2.6.0.orig/fs/ext3/xattr.h 2003-06-24 18:04:43.000000000 +0400
-+++ linux-2.6.0/fs/ext3/xattr.h 2004-01-14 18:54:12.000000000 +0300
-@@ -77,7 +77,8 @@
- extern int ext3_xattr_get(struct inode *, int, const char *, void *, size_t);
- extern int ext3_xattr_list(struct inode *, char *, size_t);
- extern int ext3_xattr_set(struct inode *, int, const char *, const void *, size_t, int);
--extern int ext3_xattr_set_handle(handle_t *, struct inode *, int, const char *, const void *, size_t, int);
-+extern int ext3_xattr_set_handle(handle_t *, struct inode *, int, const char *,const void *,size_t,int);
-+extern int ext3_xattr_block_set(handle_t *, struct inode *, int, const char *,const void *,size_t,int);
-
- extern void ext3_xattr_delete_inode(handle_t *, struct inode *);
- extern void ext3_xattr_put_super(struct super_block *);
-Index: linux-2.6.0/include/linux/ext3_fs.h
-===================================================================
---- linux-2.6.0.orig/include/linux/ext3_fs.h 2004-01-14 18:54:11.000000000 +0300
-+++ linux-2.6.0/include/linux/ext3_fs.h 2004-01-14 18:54:12.000000000 +0300
-@@ -265,6 +265,8 @@
- __u32 m_i_reserved2[2];
- } masix2;
- } osd2; /* OS dependent 2 */
-+ __u16 i_extra_isize;
-+ __u16 i_pad1;
- };
-
- #define i_size_high i_dir_acl
-@@ -721,6 +723,7 @@
- extern int ext3_forget(handle_t *, int, struct inode *, struct buffer_head *, int);
- extern struct buffer_head * ext3_getblk (handle_t *, struct inode *, long, int, int *);
- extern struct buffer_head * ext3_bread (handle_t *, struct inode *, int, int, int *);
-+int ext3_get_inode_loc(struct inode *inode, struct ext3_iloc *iloc, int in_mem);
-
- extern void ext3_read_inode (struct inode *);
- extern void ext3_write_inode (struct inode *, int);
-Index: linux-2.6.0/include/linux/ext3_fs_i.h
-===================================================================
---- linux-2.6.0.orig/include/linux/ext3_fs_i.h 2003-12-30 08:32:44.000000000 +0300
-+++ linux-2.6.0/include/linux/ext3_fs_i.h 2004-01-14 18:54:12.000000000 +0300
-@@ -96,6 +96,9 @@
- */
- loff_t i_disksize;
-
-+ /* on-disk additional length */
-+ __u16 i_extra_isize;
-+
- /*
- * truncate_sem is for serialising ext3_truncate() against
- * ext3_getblock(). In the 2.4 ext2 design, great chunks of inode's
-
-%diffstat
- fs/ext3/ialloc.c | 5
- fs/ext3/inode.c | 10
- fs/ext3/xattr.c | 634 +++++++++++++++++++++++++++++++++++++++++++---
- fs/ext3/xattr.h | 3
- include/linux/ext3_fs.h | 2
- include/linux/ext3_fs_i.h | 3
- 6 files changed, 623 insertions(+), 34 deletions(-)
-
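For orientation, here is a hedged, illustrative sketch of how an in-kernel caller could exercise the split inode-body/EA-block set path added by the fast-EA patch above. The function name example_set_trusted_ea, the EXT3_XATTR_INDEX_TRUSTED index and the journal credit count of 8 are assumptions for illustration; the patch itself defines no such caller.

/* Hedged sketch, not part of the patch: set a trusted.* EA through the
 * path above.  ext3_xattr_set_handle() tries the inode body first via
 * ext3_xattr_ibody_set() and falls back to ext3_xattr_block_set(). */
static int example_set_trusted_ea(struct inode *inode, const char *name,
				  const void *value, size_t len)
{
	handle_t *handle = ext3_journal_start(inode, 8);	/* assumed credits */
	int err;

	if (IS_ERR(handle))
		return PTR_ERR(handle);
	err = ext3_xattr_set_handle(handle, inode, EXT3_XATTR_INDEX_TRUSTED,
				    name, value, len, 0);
	ext3_journal_stop(handle);
	return err;
}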
+++ /dev/null
-Index: linux-2.6.12-rc6/fs/ext3/extents.c
-===================================================================
---- linux-2.6.12-rc6.orig/fs/ext3/extents.c 2005-06-14 16:31:25.756503133 +0200
-+++ linux-2.6.12-rc6/fs/ext3/extents.c 2005-06-14 16:31:25.836581257 +0200
-@@ -0,0 +1,2359 @@
-+/*
-+ * Copyright(c) 2003, 2004, 2005, Cluster File Systems, Inc, info@clusterfs.com
-+ * Written by Alex Tomas <alex@clusterfs.com>
-+ *
-+ * This program is free software; you can redistribute it and/or modify
-+ * it under the terms of the GNU General Public License version 2 as
-+ * published by the Free Software Foundation.
-+ *
-+ * This program is distributed in the hope that it will be useful,
-+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
-+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-+ * GNU General Public License for more details.
-+ *
-+ * You should have received a copy of the GNU General Public License
-+ * along with this program; if not, write to the Free Software
-+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-
-+ */
-+
-+/*
-+ * Extents support for EXT3
-+ *
-+ * TODO:
-+ * - ext3_ext_walk_space() should not use ext3_ext_find_extent()
-+ * - ext3_ext_calc_credits() could take 'mergable' into account
-+ * - ext3*_error() should be used in some situations
-+ * - find_goal() [to be tested and improved]
-+ * - smart tree reduction
-+ * - arch-independence
-+ * common on-disk format for big/little-endian arch
-+ */
-+
-+#include <linux/module.h>
-+#include <linux/fs.h>
-+#include <linux/time.h>
-+#include <linux/ext3_jbd.h>
-+#include <linux/jbd.h>
-+#include <linux/smp_lock.h>
-+#include <linux/highuid.h>
-+#include <linux/pagemap.h>
-+#include <linux/quotaops.h>
-+#include <linux/string.h>
-+#include <linux/slab.h>
-+#include <linux/ext3_extents.h>
-+#include <asm/uaccess.h>
-+
-+
-+static inline int ext3_ext_check_header(struct ext3_extent_header *eh)
-+{
-+ if (eh->eh_magic != EXT3_EXT_MAGIC) {
-+ printk(KERN_ERR "EXT3-fs: invalid magic = 0x%x\n",
-+ (unsigned)eh->eh_magic);
-+ return -EIO;
-+ }
-+ if (eh->eh_max == 0) {
-+ printk(KERN_ERR "EXT3-fs: invalid eh_max = %u\n",
-+ (unsigned)eh->eh_max);
-+ return -EIO;
-+ }
-+ if (eh->eh_entries > eh->eh_max) {
-+ printk(KERN_ERR "EXT3-fs: invalid eh_entries = %u\n",
-+ (unsigned)eh->eh_entries);
-+ return -EIO;
-+ }
-+ return 0;
-+}
-+
-+static handle_t *ext3_ext_journal_restart(handle_t *handle, int needed)
-+{
-+ int err;
-+
-+ if (handle->h_buffer_credits > needed)
-+ return handle;
-+ if (!ext3_journal_extend(handle, needed))
-+ return handle;
-+ err = ext3_journal_restart(handle, needed);
-+
-+ return handle;
-+}
-+
-+static int inline
-+ext3_ext_get_access_for_root(handle_t *h, struct ext3_extents_tree *tree)
-+{
-+ if (tree->ops->get_write_access)
-+ return tree->ops->get_write_access(h,tree->buffer);
-+ else
-+ return 0;
-+}
-+
-+static int inline
-+ext3_ext_mark_root_dirty(handle_t *h, struct ext3_extents_tree *tree)
-+{
-+ if (tree->ops->mark_buffer_dirty)
-+ return tree->ops->mark_buffer_dirty(h,tree->buffer);
-+ else
-+ return 0;
-+}
-+
-+/*
-+ * could return:
-+ * - EROFS
-+ * - ENOMEM
-+ */
-+static int ext3_ext_get_access(handle_t *handle,
-+ struct ext3_extents_tree *tree,
-+ struct ext3_ext_path *path)
-+{
-+ int err;
-+
-+ if (path->p_bh) {
-+ /* path points to block */
-+ err = ext3_journal_get_write_access(handle, path->p_bh);
-+ } else {
-+ /* path points to leaf/index in inode body */
-+ err = ext3_ext_get_access_for_root(handle, tree);
-+ }
-+ return err;
-+}
-+
-+/*
-+ * could return:
-+ * - EROFS
-+ * - ENOMEM
-+ * - EIO
-+ */
-+static int ext3_ext_dirty(handle_t *handle, struct ext3_extents_tree *tree,
-+ struct ext3_ext_path *path)
-+{
-+ int err;
-+ if (path->p_bh) {
-+ /* path points to block */
-+ err =ext3_journal_dirty_metadata(handle, path->p_bh);
-+ } else {
-+ /* path points to leaf/index in inode body */
-+ err = ext3_ext_mark_root_dirty(handle, tree);
-+ }
-+ return err;
-+}
-+
-+static int inline
-+ext3_ext_new_block(handle_t *handle, struct ext3_extents_tree *tree,
-+ struct ext3_ext_path *path, struct ext3_extent *ex,
-+ int *err)
-+{
-+ int goal, depth, newblock;
-+ struct inode *inode;
-+
-+ EXT_ASSERT(tree);
-+ if (tree->ops->new_block)
-+ return tree->ops->new_block(handle, tree, path, ex, err);
-+
-+ inode = tree->inode;
-+ depth = EXT_DEPTH(tree);
-+ if (path && depth > 0) {
-+ goal = path[depth-1].p_block;
-+ } else {
-+ struct ext3_inode_info *ei = EXT3_I(inode);
-+ unsigned long bg_start;
-+ unsigned long colour;
-+
-+ bg_start = (ei->i_block_group *
-+ EXT3_BLOCKS_PER_GROUP(inode->i_sb)) +
-+ le32_to_cpu(EXT3_SB(inode->i_sb)->s_es->s_first_data_block);
-+ colour = (current->pid % 16) *
-+ (EXT3_BLOCKS_PER_GROUP(inode->i_sb) / 16);
-+ goal = bg_start + colour;
-+ }
-+
-+ newblock = ext3_new_block(handle, inode, goal, err);
-+ return newblock;
-+}
-+
-+static inline void ext3_ext_tree_changed(struct ext3_extents_tree *tree)
-+{
-+ struct ext3_extent_header *neh = EXT_ROOT_HDR(tree);
-+ neh->eh_generation = ((EXT_FLAGS(neh) & ~EXT_FLAGS_CLR_UNKNOWN) << 24) |
-+ (EXT_HDR_GEN(neh) + 1);
-+}
-+
-+static inline int ext3_ext_space_block(struct ext3_extents_tree *tree)
-+{
-+ int size;
-+
-+ size = (tree->inode->i_sb->s_blocksize -
-+ sizeof(struct ext3_extent_header)) /
-+ sizeof(struct ext3_extent);
-+#ifdef AGRESSIVE_TEST
-+ size = 6;
-+#endif
-+ return size;
-+}
-+
-+static inline int ext3_ext_space_block_idx(struct ext3_extents_tree *tree)
-+{
-+ int size;
-+
-+ size = (tree->inode->i_sb->s_blocksize -
-+ sizeof(struct ext3_extent_header)) /
-+ sizeof(struct ext3_extent_idx);
-+#ifdef AGRESSIVE_TEST
-+ size = 5;
-+#endif
-+ return size;
-+}
-+
-+static inline int ext3_ext_space_root(struct ext3_extents_tree *tree)
-+{
-+ int size;
-+
-+ size = (tree->buffer_len - sizeof(struct ext3_extent_header)) /
-+ sizeof(struct ext3_extent);
-+#ifdef AGRESSIVE_TEST
-+ size = 3;
-+#endif
-+ return size;
-+}
-+
-+static inline int ext3_ext_space_root_idx(struct ext3_extents_tree *tree)
-+{
-+ int size;
-+
-+ size = (tree->buffer_len - sizeof(struct ext3_extent_header)) /
-+ sizeof(struct ext3_extent_idx);
-+#ifdef AGRESSIVE_TEST
-+ size = 4;
-+#endif
-+ return size;
-+}
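For a sense of scale (a hedged calculation, assuming the 12-byte on-disk extent, index and header structures and AGRESSIVE_TEST disabled): with a 4 KiB block size, ext3_ext_space_block() and ext3_ext_space_block_idx() both evaluate to (4096 - 12) / 12 = 340 entries per block, while for the 60-byte i_data root used by the block-map helpers further down, ext3_ext_space_root() and ext3_ext_space_root_idx() give (60 - 12) / 12 = 4 entries.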
-+
-+static void ext3_ext_show_path(struct ext3_extents_tree *tree,
-+ struct ext3_ext_path *path)
-+{
-+#ifdef EXT_DEBUG
-+ int k, l = path->p_depth;
-+
-+ ext_debug(tree, "path:");
-+ for (k = 0; k <= l; k++, path++) {
-+ if (path->p_idx) {
-+ ext_debug(tree, " %d->%d", path->p_idx->ei_block,
-+ path->p_idx->ei_leaf);
-+ } else if (path->p_ext) {
-+ ext_debug(tree, " %d:%d:%d",
-+ path->p_ext->ee_block,
-+ path->p_ext->ee_len,
-+ path->p_ext->ee_start);
-+ } else
-+ ext_debug(tree, " []");
-+ }
-+ ext_debug(tree, "\n");
-+#endif
-+}
-+
-+static void ext3_ext_show_leaf(struct ext3_extents_tree *tree,
-+ struct ext3_ext_path *path)
-+{
-+#ifdef EXT_DEBUG
-+ int depth = EXT_DEPTH(tree);
-+ struct ext3_extent_header *eh;
-+ struct ext3_extent *ex;
-+ int i;
-+
-+ if (!path)
-+ return;
-+
-+ eh = path[depth].p_hdr;
-+ ex = EXT_FIRST_EXTENT(eh);
-+
-+ for (i = 0; i < eh->eh_entries; i++, ex++) {
-+ ext_debug(tree, "%d:%d:%d ",
-+ ex->ee_block, ex->ee_len, ex->ee_start);
-+ }
-+ ext_debug(tree, "\n");
-+#endif
-+}
-+
-+static void ext3_ext_drop_refs(struct ext3_ext_path *path)
-+{
-+ int depth = path->p_depth;
-+ int i;
-+
-+ for (i = 0; i <= depth; i++, path++) {
-+ if (path->p_bh) {
-+ brelse(path->p_bh);
-+ path->p_bh = NULL;
-+ }
-+ }
-+}
-+
-+/*
-+ * binary search for the closest index for a given block
-+ */
-+static inline void
-+ext3_ext_binsearch_idx(struct ext3_extents_tree *tree,
-+ struct ext3_ext_path *path, int block)
-+{
-+ struct ext3_extent_header *eh = path->p_hdr;
-+ struct ext3_extent_idx *ix;
-+ int l = 0, k, r;
-+
-+ EXT_ASSERT(eh->eh_magic == EXT3_EXT_MAGIC);
-+ EXT_ASSERT(eh->eh_entries <= eh->eh_max);
-+ EXT_ASSERT(eh->eh_entries > 0);
-+
-+ ext_debug(tree, "binsearch for %d(idx): ", block);
-+
-+ path->p_idx = ix = EXT_FIRST_INDEX(eh);
-+
-+ r = k = eh->eh_entries;
-+ while (k > 1) {
-+ k = (r - l) / 2;
-+ if (block < ix[l + k].ei_block)
-+ r -= k;
-+ else
-+ l += k;
-+ ext_debug(tree, "%d:%d:%d ", k, l, r);
-+ }
-+
-+ ix += l;
-+ path->p_idx = ix;
-+ ext_debug(tree," -> %d->%d ",path->p_idx->ei_block,path->p_idx->ei_leaf);
-+
-+ while (l++ < r) {
-+ if (block < ix->ei_block)
-+ break;
-+ path->p_idx = ix++;
-+ }
-+ ext_debug(tree, " -> %d->%d\n", path->p_idx->ei_block,
-+ path->p_idx->ei_leaf);
-+
-+#ifdef CHECK_BINSEARCH
-+ {
-+ struct ext3_extent_idx *chix;
-+
-+ chix = ix = EXT_FIRST_INDEX(eh);
-+ for (k = 0; k < eh->eh_entries; k++, ix++) {
-+ if (k != 0 && ix->ei_block <= ix[-1].ei_block) {
-+ printk("k=%d, ix=0x%p, first=0x%p\n", k,
-+ ix, EXT_FIRST_INDEX(eh));
-+ printk("%u <= %u\n",
-+ ix->ei_block,ix[-1].ei_block);
-+ }
-+ EXT_ASSERT(k == 0 || ix->ei_block > ix[-1].ei_block);
-+ if (block < ix->ei_block)
-+ break;
-+ chix = ix;
-+ }
-+ EXT_ASSERT(chix == path->p_idx);
-+ }
-+#endif
-+}
-+
-+/*
-+ * binary search for the closest extent for a given block
-+ */
-+static inline void
-+ext3_ext_binsearch(struct ext3_extents_tree *tree,
-+ struct ext3_ext_path *path, int block)
-+{
-+ struct ext3_extent_header *eh = path->p_hdr;
-+ struct ext3_extent *ex;
-+ int l = 0, k, r;
-+
-+ EXT_ASSERT(eh->eh_magic == EXT3_EXT_MAGIC);
-+ EXT_ASSERT(eh->eh_entries <= eh->eh_max);
-+
-+ if (eh->eh_entries == 0) {
-+ /*
-+ * this leaf is still empty:
-+ * we get such a leaf in the split/add case
-+ */
-+ return;
-+ }
-+
-+ ext_debug(tree, "binsearch for %d: ", block);
-+
-+ path->p_ext = ex = EXT_FIRST_EXTENT(eh);
-+
-+ r = k = eh->eh_entries;
-+ while (k > 1) {
-+ k = (r - l) / 2;
-+ if (block < ex[l + k].ee_block)
-+ r -= k;
-+ else
-+ l += k;
-+ ext_debug(tree, "%d:%d:%d ", k, l, r);
-+ }
-+
-+ ex += l;
-+ path->p_ext = ex;
-+ ext_debug(tree, " -> %d:%d:%d ", path->p_ext->ee_block,
-+ path->p_ext->ee_start, path->p_ext->ee_len);
-+
-+ while (l++ < r) {
-+ if (block < ex->ee_block)
-+ break;
-+ path->p_ext = ex++;
-+ }
-+ ext_debug(tree, " -> %d:%d:%d\n", path->p_ext->ee_block,
-+ path->p_ext->ee_start, path->p_ext->ee_len);
-+
-+#ifdef CHECK_BINSEARCH
-+ {
-+ struct ext3_extent *chex;
-+
-+ chex = ex = EXT_FIRST_EXTENT(eh);
-+ for (k = 0; k < eh->eh_entries; k++, ex++) {
-+ EXT_ASSERT(k == 0 || ex->ee_block > ex[-1].ee_block);
-+ if (block < ex->ee_block)
-+ break;
-+ chex = ex;
-+ }
-+ EXT_ASSERT(chex == path->p_ext);
-+ }
-+#endif
-+}
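A concrete illustration of the lookup above: for a leaf whose extents start at logical blocks 0, 10, 20 and 50, a search for block 25 leaves p_ext pointing at the entry whose ee_block is 20 (the last extent starting at or before 25), and a search for block 5 selects the entry starting at 0; the CHECK_BINSEARCH code re-derives the same answer with a linear scan.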
-+
-+int ext3_extent_tree_init(handle_t *handle, struct ext3_extents_tree *tree)
-+{
-+ struct ext3_extent_header *eh;
-+
-+ BUG_ON(tree->buffer_len == 0);
-+ ext3_ext_get_access_for_root(handle, tree);
-+ eh = EXT_ROOT_HDR(tree);
-+ eh->eh_depth = 0;
-+ eh->eh_entries = 0;
-+ eh->eh_magic = EXT3_EXT_MAGIC;
-+ eh->eh_max = ext3_ext_space_root(tree);
-+ ext3_ext_mark_root_dirty(handle, tree);
-+ ext3_ext_invalidate_cache(tree);
-+ return 0;
-+}
-+
-+struct ext3_ext_path *
-+ext3_ext_find_extent(struct ext3_extents_tree *tree, int block,
-+ struct ext3_ext_path *path)
-+{
-+ struct ext3_extent_header *eh;
-+ struct buffer_head *bh;
-+ int depth, i, ppos = 0;
-+
-+ EXT_ASSERT(tree);
-+ EXT_ASSERT(tree->inode);
-+ EXT_ASSERT(tree->root);
-+
-+ eh = EXT_ROOT_HDR(tree);
-+ EXT_ASSERT(eh);
-+ if (ext3_ext_check_header(eh)) {
-+ /* don't free previously allocated path
-+ * -- caller should take care */
-+ path = NULL;
-+ goto err;
-+ }
-+
-+ i = depth = EXT_DEPTH(tree);
-+ EXT_ASSERT(eh->eh_max);
-+ EXT_ASSERT(eh->eh_magic == EXT3_EXT_MAGIC);
-+
-+ /* account possible depth increase */
-+ if (!path) {
-+ path = kmalloc(sizeof(struct ext3_ext_path) * (depth + 2),
-+ GFP_NOFS);
-+ if (!path)
-+ return ERR_PTR(-ENOMEM);
-+ }
-+ memset(path, 0, sizeof(struct ext3_ext_path) * (depth + 1));
-+ path[0].p_hdr = eh;
-+
-+ /* walk through the tree */
-+ while (i) {
-+ ext_debug(tree, "depth %d: num %d, max %d\n",
-+ ppos, eh->eh_entries, eh->eh_max);
-+ ext3_ext_binsearch_idx(tree, path + ppos, block);
-+ path[ppos].p_block = path[ppos].p_idx->ei_leaf;
-+ path[ppos].p_depth = i;
-+ path[ppos].p_ext = NULL;
-+
-+ bh = sb_bread(tree->inode->i_sb, path[ppos].p_block);
-+ if (!bh)
-+ goto err;
-+
-+ eh = EXT_BLOCK_HDR(bh);
-+ ppos++;
-+ EXT_ASSERT(ppos <= depth);
-+ path[ppos].p_bh = bh;
-+ path[ppos].p_hdr = eh;
-+ i--;
-+
-+ if (ext3_ext_check_header(eh))
-+ goto err;
-+ }
-+
-+ path[ppos].p_depth = i;
-+ path[ppos].p_hdr = eh;
-+ path[ppos].p_ext = NULL;
-+ path[ppos].p_idx = NULL;
-+
-+ if (ext3_ext_check_header(eh))
-+ goto err;
-+
-+ /* find extent */
-+ ext3_ext_binsearch(tree, path + ppos, block);
-+
-+ ext3_ext_show_path(tree, path);
-+
-+ return path;
-+
-+err:
-+ printk(KERN_ERR "EXT3-fs: header is corrupted!\n");
-+ if (path) {
-+ ext3_ext_drop_refs(path);
-+ kfree(path);
-+ }
-+ return ERR_PTR(-EIO);
-+}
-+
-+/*
-+ * insert new index [logical;ptr] into the block at curp
-+ * it checks where to insert: before curp or after curp
-+ */
-+static int ext3_ext_insert_index(handle_t *handle,
-+ struct ext3_extents_tree *tree,
-+ struct ext3_ext_path *curp,
-+ int logical, int ptr)
-+{
-+ struct ext3_extent_idx *ix;
-+ int len, err;
-+
-+ if ((err = ext3_ext_get_access(handle, tree, curp)))
-+ return err;
-+
-+ EXT_ASSERT(logical != curp->p_idx->ei_block);
-+ len = EXT_MAX_INDEX(curp->p_hdr) - curp->p_idx;
-+ if (logical > curp->p_idx->ei_block) {
-+ /* insert after */
-+ if (curp->p_idx != EXT_LAST_INDEX(curp->p_hdr)) {
-+ len = (len - 1) * sizeof(struct ext3_extent_idx);
-+ len = len < 0 ? 0 : len;
-+ ext_debug(tree, "insert new index %d after: %d. "
-+ "move %d from 0x%p to 0x%p\n",
-+ logical, ptr, len,
-+ (curp->p_idx + 1), (curp->p_idx + 2));
-+ memmove(curp->p_idx + 2, curp->p_idx + 1, len);
-+ }
-+ ix = curp->p_idx + 1;
-+ } else {
-+ /* insert before */
-+ len = len * sizeof(struct ext3_extent_idx);
-+ len = len < 0 ? 0 : len;
-+ ext_debug(tree, "insert new index %d before: %d. "
-+ "move %d from 0x%p to 0x%p\n",
-+ logical, ptr, len,
-+ curp->p_idx, (curp->p_idx + 1));
-+ memmove(curp->p_idx + 1, curp->p_idx, len);
-+ ix = curp->p_idx;
-+ }
-+
-+ ix->ei_block = logical;
-+ ix->ei_leaf = ptr;
-+ ix->ei_leaf_hi = ix->ei_unused = 0;
-+ curp->p_hdr->eh_entries++;
-+
-+ EXT_ASSERT(curp->p_hdr->eh_entries <= curp->p_hdr->eh_max);
-+ EXT_ASSERT(ix <= EXT_LAST_INDEX(curp->p_hdr));
-+
-+ err = ext3_ext_dirty(handle, tree, curp);
-+ ext3_std_error(tree->inode->i_sb, err);
-+
-+ return err;
-+}
-+
-+/*
-+ * routine inserts new subtree into the path, using free index entry
-+ * at depth 'at':
-+ * - allocates all needed blocks (new leaf and all intermediate index blocks)
-+ * - makes a decision where to split
-+ * - moves remaining extents and index entries (to the right of the split point)
-+ * into the newly allocated blocks
-+ * - initializes the subtree
-+ */
-+static int ext3_ext_split(handle_t *handle, struct ext3_extents_tree *tree,
-+ struct ext3_ext_path *path,
-+ struct ext3_extent *newext, int at)
-+{
-+ struct buffer_head *bh = NULL;
-+ int depth = EXT_DEPTH(tree);
-+ struct ext3_extent_header *neh;
-+ struct ext3_extent_idx *fidx;
-+ struct ext3_extent *ex;
-+ int i = at, k, m, a;
-+ unsigned long newblock, oldblock, border;
-+ int *ablocks = NULL; /* array of allocated blocks */
-+ int err = 0;
-+
-+ /* make decision: where to split? */
-+ /* FIXME: now decision is simplest: at current extent */
-+
-+ /* if current leaf will be split, then we should use
-+ * border from split point */
-+ EXT_ASSERT(path[depth].p_ext <= EXT_MAX_EXTENT(path[depth].p_hdr));
-+ if (path[depth].p_ext != EXT_MAX_EXTENT(path[depth].p_hdr)) {
-+ border = path[depth].p_ext[1].ee_block;
-+ ext_debug(tree, "leaf will be split."
-+ " next leaf starts at %d\n",
-+ (int)border);
-+ } else {
-+ border = newext->ee_block;
-+ ext_debug(tree, "leaf will be added."
-+ " next leaf starts at %d\n",
-+ (int)border);
-+ }
-+
-+ /*
-+ * if an error occurs, then we break processing
-+ * and turn the filesystem read-only. so, the index won't
-+ * be inserted and the tree will be in a consistent
-+ * state. the next mount will repair the buffers too
-+ */
-+
-+ /*
-+ * get array to track all allocated blocks
-+ * we need this to handle errors and free the blocks
-+ * on failure
-+ */
-+ ablocks = kmalloc(sizeof(unsigned long) * depth, GFP_NOFS);
-+ if (!ablocks)
-+ return -ENOMEM;
-+ memset(ablocks, 0, sizeof(unsigned long) * depth);
-+
-+ /* allocate all needed blocks */
-+ ext_debug(tree, "allocate %d blocks for indexes/leaf\n", depth - at);
-+ for (a = 0; a < depth - at; a++) {
-+ newblock = ext3_ext_new_block(handle, tree, path, newext, &err);
-+ if (newblock == 0)
-+ goto cleanup;
-+ ablocks[a] = newblock;
-+ }
-+
-+ /* initialize new leaf */
-+ newblock = ablocks[--a];
-+ EXT_ASSERT(newblock);
-+ bh = sb_getblk(tree->inode->i_sb, newblock);
-+ if (!bh) {
-+ err = -EIO;
-+ goto cleanup;
-+ }
-+ lock_buffer(bh);
-+
-+ if ((err = ext3_journal_get_create_access(handle, bh)))
-+ goto cleanup;
-+
-+ neh = EXT_BLOCK_HDR(bh);
-+ neh->eh_entries = 0;
-+ neh->eh_max = ext3_ext_space_block(tree);
-+ neh->eh_magic = EXT3_EXT_MAGIC;
-+ neh->eh_depth = 0;
-+ ex = EXT_FIRST_EXTENT(neh);
-+
-+ /* move the remainder of path[depth] to the new leaf */
-+ EXT_ASSERT(path[depth].p_hdr->eh_entries == path[depth].p_hdr->eh_max);
-+ /* start copy from next extent */
-+ /* TODO: we could do it by single memmove */
-+ m = 0;
-+ path[depth].p_ext++;
-+ while (path[depth].p_ext <=
-+ EXT_MAX_EXTENT(path[depth].p_hdr)) {
-+ ext_debug(tree, "move %d:%d:%d in new leaf %lu\n",
-+ path[depth].p_ext->ee_block,
-+ path[depth].p_ext->ee_start,
-+ path[depth].p_ext->ee_len,
-+ newblock);
-+ memmove(ex++, path[depth].p_ext++, sizeof(struct ext3_extent));
-+ neh->eh_entries++;
-+ m++;
-+ }
-+ set_buffer_uptodate(bh);
-+ unlock_buffer(bh);
-+
-+ if ((err = ext3_journal_dirty_metadata(handle, bh)))
-+ goto cleanup;
-+ brelse(bh);
-+ bh = NULL;
-+
-+ /* correct old leaf */
-+ if (m) {
-+ if ((err = ext3_ext_get_access(handle, tree, path + depth)))
-+ goto cleanup;
-+ path[depth].p_hdr->eh_entries -= m;
-+ if ((err = ext3_ext_dirty(handle, tree, path + depth)))
-+ goto cleanup;
-+
-+ }
-+
-+ /* create intermediate indexes */
-+ k = depth - at - 1;
-+ EXT_ASSERT(k >= 0);
-+ if (k)
-+ ext_debug(tree, "create %d intermediate indices\n", k);
-+ /* insert new index into current index block */
-+ /* current depth stored in i var */
-+ i = depth - 1;
-+ while (k--) {
-+ oldblock = newblock;
-+ newblock = ablocks[--a];
-+ bh = sb_getblk(tree->inode->i_sb, newblock);
-+ if (!bh) {
-+ err = -EIO;
-+ goto cleanup;
-+ }
-+ lock_buffer(bh);
-+
-+ if ((err = ext3_journal_get_create_access(handle, bh)))
-+ goto cleanup;
-+
-+ neh = EXT_BLOCK_HDR(bh);
-+ neh->eh_entries = 1;
-+ neh->eh_magic = EXT3_EXT_MAGIC;
-+ neh->eh_max = ext3_ext_space_block_idx(tree);
-+ neh->eh_depth = depth - i;
-+ fidx = EXT_FIRST_INDEX(neh);
-+ fidx->ei_block = border;
-+ fidx->ei_leaf = oldblock;
-+ fidx->ei_leaf_hi = fidx->ei_unused = 0;
-+
-+ ext_debug(tree, "int.index at %d (block %lu): %lu -> %lu\n",
-+ i, newblock, border, oldblock);
-+ /* copy indexes */
-+ m = 0;
-+ path[i].p_idx++;
-+
-+ ext_debug(tree, "cur 0x%p, last 0x%p\n", path[i].p_idx,
-+ EXT_MAX_INDEX(path[i].p_hdr));
-+ EXT_ASSERT(EXT_MAX_INDEX(path[i].p_hdr) ==
-+ EXT_LAST_INDEX(path[i].p_hdr));
-+ while (path[i].p_idx <= EXT_MAX_INDEX(path[i].p_hdr)) {
-+ ext_debug(tree, "%d: move %d:%d in new index %lu\n",
-+ i, path[i].p_idx->ei_block,
-+ path[i].p_idx->ei_leaf, newblock);
-+ memmove(++fidx, path[i].p_idx++,
-+ sizeof(struct ext3_extent_idx));
-+ neh->eh_entries++;
-+ EXT_ASSERT(neh->eh_entries <= neh->eh_max);
-+ m++;
-+ }
-+ set_buffer_uptodate(bh);
-+ unlock_buffer(bh);
-+
-+ if ((err = ext3_journal_dirty_metadata(handle, bh)))
-+ goto cleanup;
-+ brelse(bh);
-+ bh = NULL;
-+
-+ /* correct old index */
-+ if (m) {
-+ err = ext3_ext_get_access(handle, tree, path + i);
-+ if (err)
-+ goto cleanup;
-+ path[i].p_hdr->eh_entries -= m;
-+ err = ext3_ext_dirty(handle, tree, path + i);
-+ if (err)
-+ goto cleanup;
-+ }
-+
-+ i--;
-+ }
-+
-+ /* insert new index */
-+ if (!err)
-+ err = ext3_ext_insert_index(handle, tree, path + at,
-+ border, newblock);
-+
-+cleanup:
-+ if (bh) {
-+ if (buffer_locked(bh))
-+ unlock_buffer(bh);
-+ brelse(bh);
-+ }
-+
-+ if (err) {
-+ /* free all allocated blocks in error case */
-+ for (i = 0; i < depth; i++) {
-+ if (!ablocks[i])
-+ continue;
-+ ext3_free_blocks(handle, tree->inode, ablocks[i], 1);
-+ }
-+ }
-+ kfree(ablocks);
-+
-+ return err;
-+}
-+
-+/*
-+ * routine implements tree growing procedure:
-+ * - allocates a new block
-+ * - moves top-level data (index block or leaf) into the new block
-+ * - initializes the new top level, creating an index that points to the
-+ * just-created block
-+ */
-+static int ext3_ext_grow_indepth(handle_t *handle,
-+ struct ext3_extents_tree *tree,
-+ struct ext3_ext_path *path,
-+ struct ext3_extent *newext)
-+{
-+ struct ext3_ext_path *curp = path;
-+ struct ext3_extent_header *neh;
-+ struct ext3_extent_idx *fidx;
-+ struct buffer_head *bh;
-+ unsigned long newblock;
-+ int err = 0;
-+
-+ newblock = ext3_ext_new_block(handle, tree, path, newext, &err);
-+ if (newblock == 0)
-+ return err;
-+
-+ bh = sb_getblk(tree->inode->i_sb, newblock);
-+ if (!bh) {
-+ err = -EIO;
-+ ext3_std_error(tree->inode->i_sb, err);
-+ return err;
-+ }
-+ lock_buffer(bh);
-+
-+ if ((err = ext3_journal_get_create_access(handle, bh))) {
-+ unlock_buffer(bh);
-+ goto out;
-+ }
-+
-+ /* move top-level index/leaf into new block */
-+ memmove(bh->b_data, curp->p_hdr, tree->buffer_len);
-+
-+ /* set size of new block */
-+ neh = EXT_BLOCK_HDR(bh);
-+ /* old root could have indexes or leaves
-+ * so calculate eh_max the right way */
-+ if (EXT_DEPTH(tree))
-+ neh->eh_max = ext3_ext_space_block_idx(tree);
-+ else
-+ neh->eh_max = ext3_ext_space_block(tree);
-+ neh->eh_magic = EXT3_EXT_MAGIC;
-+ set_buffer_uptodate(bh);
-+ unlock_buffer(bh);
-+
-+ if ((err = ext3_journal_dirty_metadata(handle, bh)))
-+ goto out;
-+
-+ /* create index in new top-level index: num,max,pointer */
-+ if ((err = ext3_ext_get_access(handle, tree, curp)))
-+ goto out;
-+
-+ curp->p_hdr->eh_magic = EXT3_EXT_MAGIC;
-+ curp->p_hdr->eh_max = ext3_ext_space_root_idx(tree);
-+ curp->p_hdr->eh_entries = 1;
-+ curp->p_idx = EXT_FIRST_INDEX(curp->p_hdr);
-+ /* FIXME: it works, but actually path[0] can be index */
-+ curp->p_idx->ei_block = EXT_FIRST_EXTENT(path[0].p_hdr)->ee_block;
-+ curp->p_idx->ei_leaf = newblock;
-+ curp->p_idx->ei_leaf_hi = curp->p_idx->ei_unused = 0;
-+
-+ neh = EXT_ROOT_HDR(tree);
-+ fidx = EXT_FIRST_INDEX(neh);
-+ ext_debug(tree, "new root: num %d(%d), lblock %d, ptr %d\n",
-+ neh->eh_entries, neh->eh_max, fidx->ei_block, fidx->ei_leaf);
-+
-+ neh->eh_depth = path->p_depth + 1;
-+ err = ext3_ext_dirty(handle, tree, curp);
-+out:
-+ brelse(bh);
-+
-+ return err;
-+}
-+
-+/*
-+ * routine finds an empty index and adds a new leaf. if no free index is
-+ * found, then it requests in-depth growing of the tree
-+ */
-+static int ext3_ext_create_new_leaf(handle_t *handle,
-+ struct ext3_extents_tree *tree,
-+ struct ext3_ext_path *path,
-+ struct ext3_extent *newext)
-+{
-+ struct ext3_ext_path *curp;
-+ int depth, i, err = 0;
-+
-+repeat:
-+ i = depth = EXT_DEPTH(tree);
-+
-+ /* walk up the tree and look for a free index entry */
-+ curp = path + depth;
-+ while (i > 0 && !EXT_HAS_FREE_INDEX(curp)) {
-+ i--;
-+ curp--;
-+ }
-+
-+ /* we use already allocated block for index block
-+ * so, subsequent data blocks should be contiguous */
-+ if (EXT_HAS_FREE_INDEX(curp)) {
-+ /* if we found index with free entry, then use that
-+ * entry: create all needed subtree and add new leaf */
-+ err = ext3_ext_split(handle, tree, path, newext, i);
-+
-+ /* refill path */
-+ ext3_ext_drop_refs(path);
-+ path = ext3_ext_find_extent(tree, newext->ee_block, path);
-+ if (IS_ERR(path))
-+ err = PTR_ERR(path);
-+ } else {
-+ /* tree is full, time to grow in depth */
-+ err = ext3_ext_grow_indepth(handle, tree, path, newext);
-+
-+ /* refill path */
-+ ext3_ext_drop_refs(path);
-+ path = ext3_ext_find_extent(tree, newext->ee_block, path);
-+ if (IS_ERR(path))
-+ err = PTR_ERR(path);
-+
-+ /*
-+ * only first (depth 0 -> 1) produces free space
-+ * in all other cases we have to split the grown tree
-+ */
-+ depth = EXT_DEPTH(tree);
-+ if (path[depth].p_hdr->eh_entries == path[depth].p_hdr->eh_max) {
-+ /* now we need to split */
-+ goto repeat;
-+ }
-+ }
-+
-+ if (err)
-+ return err;
-+
-+ return 0;
-+}
-+
-+/*
-+ * returns allocated block in subsequent extent or EXT_MAX_BLOCK
-+ * NOTE: it considers the block number from an index entry as
-+ * an allocated block. thus, index entries have to be consistent
-+ * with the leaves
-+ */
-+static unsigned long
-+ext3_ext_next_allocated_block(struct ext3_ext_path *path)
-+{
-+ int depth;
-+
-+ EXT_ASSERT(path != NULL);
-+ depth = path->p_depth;
-+
-+ if (depth == 0 && path->p_ext == NULL)
-+ return EXT_MAX_BLOCK;
-+
-+ /* FIXME: what if index isn't full ?! */
-+ while (depth >= 0) {
-+ if (depth == path->p_depth) {
-+ /* leaf */
-+ if (path[depth].p_ext !=
-+ EXT_LAST_EXTENT(path[depth].p_hdr))
-+ return path[depth].p_ext[1].ee_block;
-+ } else {
-+ /* index */
-+ if (path[depth].p_idx !=
-+ EXT_LAST_INDEX(path[depth].p_hdr))
-+ return path[depth].p_idx[1].ei_block;
-+ }
-+ depth--;
-+ }
-+
-+ return EXT_MAX_BLOCK;
-+}
-+
-+/*
-+ * returns first allocated block from next leaf or EXT_MAX_BLOCK
-+ */
-+static unsigned ext3_ext_next_leaf_block(struct ext3_extents_tree *tree,
-+ struct ext3_ext_path *path)
-+{
-+ int depth;
-+
-+ EXT_ASSERT(path != NULL);
-+ depth = path->p_depth;
-+
-+ /* zero-tree has no leaf blocks at all */
-+ if (depth == 0)
-+ return EXT_MAX_BLOCK;
-+
-+ /* go to index block */
-+ depth--;
-+
-+ while (depth >= 0) {
-+ if (path[depth].p_idx !=
-+ EXT_LAST_INDEX(path[depth].p_hdr))
-+ return path[depth].p_idx[1].ei_block;
-+ depth--;
-+ }
-+
-+ return EXT_MAX_BLOCK;
-+}
-+
-+/*
-+ * if a leaf gets modified and the modified extent is the first in the leaf,
-+ * then we have to correct all indexes above
-+ * TODO: do we need to correct tree in all cases?
-+ */
-+int ext3_ext_correct_indexes(handle_t *handle, struct ext3_extents_tree *tree,
-+ struct ext3_ext_path *path)
-+{
-+ struct ext3_extent_header *eh;
-+ int depth = EXT_DEPTH(tree);
-+ struct ext3_extent *ex;
-+ unsigned long border;
-+ int k, err = 0;
-+
-+ eh = path[depth].p_hdr;
-+ ex = path[depth].p_ext;
-+ EXT_ASSERT(ex);
-+ EXT_ASSERT(eh);
-+
-+ if (depth == 0) {
-+ /* there is no tree at all */
-+ return 0;
-+ }
-+
-+ if (ex != EXT_FIRST_EXTENT(eh)) {
-+ /* we correct the tree only if the first leaf was modified */
-+ return 0;
-+ }
-+
-+ /*
-+ * TODO: we need correction if border is smaller than the current one
-+ */
-+ k = depth - 1;
-+ border = path[depth].p_ext->ee_block;
-+ if ((err = ext3_ext_get_access(handle, tree, path + k)))
-+ return err;
-+ path[k].p_idx->ei_block = border;
-+ if ((err = ext3_ext_dirty(handle, tree, path + k)))
-+ return err;
-+
-+ while (k--) {
-+ /* change all left-side indexes */
-+ if (path[k+1].p_idx != EXT_FIRST_INDEX(path[k+1].p_hdr))
-+ break;
-+ if ((err = ext3_ext_get_access(handle, tree, path + k)))
-+ break;
-+ path[k].p_idx->ei_block = border;
-+ if ((err = ext3_ext_dirty(handle, tree, path + k)))
-+ break;
-+ }
-+
-+ return err;
-+}
-+
-+static int inline
-+ext3_can_extents_be_merged(struct ext3_extents_tree *tree,
-+ struct ext3_extent *ex1,
-+ struct ext3_extent *ex2)
-+{
-+ if (ex1->ee_block + ex1->ee_len != ex2->ee_block)
-+ return 0;
-+
-+#ifdef AGRESSIVE_TEST
-+ if (ex1->ee_len >= 4)
-+ return 0;
-+#endif
-+
-+ if (!tree->ops->mergable)
-+ return 1;
-+
-+ return tree->ops->mergable(ex1, ex2);
-+}
-+
-+/*
-+ * this routine tries to merge the requested extent into an existing
-+ * extent or inserts the requested extent as a new one into the tree,
-+ * creating a new leaf in the no-space case
-+ */
-+int ext3_ext_insert_extent(handle_t *handle, struct ext3_extents_tree *tree,
-+ struct ext3_ext_path *path,
-+ struct ext3_extent *newext)
-+{
-+ struct ext3_extent_header * eh;
-+ struct ext3_extent *ex, *fex;
-+ struct ext3_extent *nearex; /* nearest extent */
-+ struct ext3_ext_path *npath = NULL;
-+ int depth, len, err, next;
-+
-+ EXT_ASSERT(newext->ee_len > 0);
-+ depth = EXT_DEPTH(tree);
-+ ex = path[depth].p_ext;
-+ EXT_ASSERT(path[depth].p_hdr);
-+
-+ /* try to insert block into found extent and return */
-+ if (ex && ext3_can_extents_be_merged(tree, ex, newext)) {
-+ ext_debug(tree, "append %d block to %d:%d (from %d)\n",
-+ newext->ee_len, ex->ee_block, ex->ee_len,
-+ ex->ee_start);
-+ if ((err = ext3_ext_get_access(handle, tree, path + depth)))
-+ return err;
-+ ex->ee_len += newext->ee_len;
-+ eh = path[depth].p_hdr;
-+ nearex = ex;
-+ goto merge;
-+ }
-+
-+repeat:
-+ depth = EXT_DEPTH(tree);
-+ eh = path[depth].p_hdr;
-+ if (eh->eh_entries < eh->eh_max)
-+ goto has_space;
-+
-+ /* probably next leaf has space for us? */
-+ fex = EXT_LAST_EXTENT(eh);
-+ next = ext3_ext_next_leaf_block(tree, path);
-+ if (newext->ee_block > fex->ee_block && next != EXT_MAX_BLOCK) {
-+ ext_debug(tree, "next leaf block - %d\n", next);
-+ EXT_ASSERT(!npath);
-+ npath = ext3_ext_find_extent(tree, next, NULL);
-+ if (IS_ERR(npath))
-+ return PTR_ERR(npath);
-+ EXT_ASSERT(npath->p_depth == path->p_depth);
-+ eh = npath[depth].p_hdr;
-+ if (eh->eh_entries < eh->eh_max) {
-+ ext_debug(tree, "next leaf isn't full(%d)\n",
-+ eh->eh_entries);
-+ path = npath;
-+ goto repeat;
-+ }
-+ ext_debug(tree, "next leaf has no free space(%d,%d)\n",
-+ eh->eh_entries, eh->eh_max);
-+ }
-+
-+ /*
-+ * there is no free space in the found leaf,
-+ * so we're going to add a new leaf to the tree
-+ */
-+ err = ext3_ext_create_new_leaf(handle, tree, path, newext);
-+ if (err)
-+ goto cleanup;
-+ depth = EXT_DEPTH(tree);
-+ eh = path[depth].p_hdr;
-+
-+has_space:
-+ nearex = path[depth].p_ext;
-+
-+ if ((err = ext3_ext_get_access(handle, tree, path + depth)))
-+ goto cleanup;
-+
-+ if (!nearex) {
-+ /* there is no extent in this leaf, create first one */
-+ ext_debug(tree, "first extent in the leaf: %d:%d:%d\n",
-+ newext->ee_block, newext->ee_start,
-+ newext->ee_len);
-+ path[depth].p_ext = EXT_FIRST_EXTENT(eh);
-+ } else if (newext->ee_block > nearex->ee_block) {
-+ EXT_ASSERT(newext->ee_block != nearex->ee_block);
-+ if (nearex != EXT_LAST_EXTENT(eh)) {
-+ len = EXT_MAX_EXTENT(eh) - nearex;
-+ len = (len - 1) * sizeof(struct ext3_extent);
-+ len = len < 0 ? 0 : len;
-+ ext_debug(tree, "insert %d:%d:%d after: nearest 0x%p, "
-+ "move %d from 0x%p to 0x%p\n",
-+ newext->ee_block, newext->ee_start,
-+ newext->ee_len,
-+ nearex, len, nearex + 1, nearex + 2);
-+ memmove(nearex + 2, nearex + 1, len);
-+ }
-+ path[depth].p_ext = nearex + 1;
-+ } else {
-+ EXT_ASSERT(newext->ee_block != nearex->ee_block);
-+ len = (EXT_MAX_EXTENT(eh) - nearex) * sizeof(struct ext3_extent);
-+ len = len < 0 ? 0 : len;
-+ ext_debug(tree, "insert %d:%d:%d before: nearest 0x%p, "
-+ "move %d from 0x%p to 0x%p\n",
-+ newext->ee_block, newext->ee_start, newext->ee_len,
-+ nearex, len, nearex + 1, nearex + 2);
-+ memmove(nearex + 1, nearex, len);
-+ path[depth].p_ext = nearex;
-+ }
-+
-+ eh->eh_entries++;
-+ nearex = path[depth].p_ext;
-+ nearex->ee_block = newext->ee_block;
-+ nearex->ee_start = newext->ee_start;
-+ nearex->ee_len = newext->ee_len;
-+ /* FIXME: support for large fs */
-+ nearex->ee_start_hi = 0;
-+
-+merge:
-+ /* try to merge extents to the right */
-+ while (nearex < EXT_LAST_EXTENT(eh)) {
-+ if (!ext3_can_extents_be_merged(tree, nearex, nearex + 1))
-+ break;
-+ /* merge with next extent! */
-+ nearex->ee_len += nearex[1].ee_len;
-+ if (nearex + 1 < EXT_LAST_EXTENT(eh)) {
-+ len = (EXT_LAST_EXTENT(eh) - nearex - 1) *
-+ sizeof(struct ext3_extent);
-+ memmove(nearex + 1, nearex + 2, len);
-+ }
-+ eh->eh_entries--;
-+ EXT_ASSERT(eh->eh_entries > 0);
-+ }
-+
-+ /* try to merge extents to the left */
-+
-+ /* time to correct all indexes above */
-+ err = ext3_ext_correct_indexes(handle, tree, path);
-+ if (err)
-+ goto cleanup;
-+
-+ err = ext3_ext_dirty(handle, tree, path + depth);
-+
-+cleanup:
-+ if (npath) {
-+ ext3_ext_drop_refs(npath);
-+ kfree(npath);
-+ }
-+ ext3_ext_tree_changed(tree);
-+ ext3_ext_invalidate_cache(tree);
-+ return err;
-+}
-+
-+int ext3_ext_walk_space(struct ext3_extents_tree *tree, unsigned long block,
-+ unsigned long num, ext_prepare_callback func)
-+{
-+ struct ext3_ext_path *path = NULL;
-+ struct ext3_ext_cache cbex;
-+ struct ext3_extent *ex;
-+ unsigned long next, start = 0, end = 0;
-+ unsigned long last = block + num;
-+ int depth, exists, err = 0;
-+
-+ EXT_ASSERT(tree);
-+ EXT_ASSERT(func);
-+ EXT_ASSERT(tree->inode);
-+ EXT_ASSERT(tree->root);
-+
-+ while (block < last && block != EXT_MAX_BLOCK) {
-+ num = last - block;
-+ /* find extent for this block */
-+ path = ext3_ext_find_extent(tree, block, path);
-+ if (IS_ERR(path)) {
-+ err = PTR_ERR(path);
-+ path = NULL;
-+ break;
-+ }
-+
-+ depth = EXT_DEPTH(tree);
-+ EXT_ASSERT(path[depth].p_hdr);
-+ ex = path[depth].p_ext;
-+ next = ext3_ext_next_allocated_block(path);
-+
-+ exists = 0;
-+ if (!ex) {
-+ /* there is no extent yet, so try to allocate
-+ * all requested space */
-+ start = block;
-+ end = block + num;
-+ } else if (ex->ee_block > block) {
-+ /* need to allocate space before found extent */
-+ start = block;
-+ end = ex->ee_block;
-+ if (block + num < end)
-+ end = block + num;
-+ } else if (block >= ex->ee_block + ex->ee_len) {
-+ /* need to allocate space after found extent */
-+ start = block;
-+ end = block + num;
-+ if (end >= next)
-+ end = next;
-+ } else if (block >= ex->ee_block) {
-+ /*
-+ * some part of requested space is covered
-+ * by found extent
-+ */
-+ start = block;
-+ end = ex->ee_block + ex->ee_len;
-+ if (block + num < end)
-+ end = block + num;
-+ exists = 1;
-+ } else {
-+ BUG();
-+ }
-+ EXT_ASSERT(end > start);
-+
-+ if (!exists) {
-+ cbex.ec_block = start;
-+ cbex.ec_len = end - start;
-+ cbex.ec_start = 0;
-+ cbex.ec_type = EXT3_EXT_CACHE_GAP;
-+ } else {
-+ cbex.ec_block = ex->ee_block;
-+ cbex.ec_len = ex->ee_len;
-+ cbex.ec_start = ex->ee_start;
-+ cbex.ec_type = EXT3_EXT_CACHE_EXTENT;
-+ }
-+
-+ EXT_ASSERT(cbex.ec_len > 0);
-+ EXT_ASSERT(path[depth].p_hdr);
-+ err = func(tree, path, &cbex);
-+ ext3_ext_drop_refs(path);
-+
-+ if (err < 0)
-+ break;
-+ if (err == EXT_REPEAT)
-+ continue;
-+ else if (err == EXT_BREAK) {
-+ err = 0;
-+ break;
-+ }
-+
-+ if (EXT_DEPTH(tree) != depth) {
-+ /* depth was changed. we have to realloc path */
-+ kfree(path);
-+ path = NULL;
-+ }
-+
-+ block = cbex.ec_block + cbex.ec_len;
-+ }
-+
-+ if (path) {
-+ ext3_ext_drop_refs(path);
-+ kfree(path);
-+ }
-+
-+ return err;
-+}
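Below is a hedged sketch of an ext_prepare_callback consumer, relying only on the contract visible in the loop above (a negative return aborts the walk, EXT_BREAK stops it cleanly, EXT_REPEAT re-examines the range); the function name and the debug output are illustrative assumptions, not part of the patch.

/* Illustrative only: log every gap and mapped extent that
 * ext3_ext_walk_space() reports for the scanned range. */
static int example_walk_cb(struct ext3_extents_tree *tree,
			   struct ext3_ext_path *path,
			   struct ext3_ext_cache *cex)
{
	if (cex->ec_type == EXT3_EXT_CACHE_GAP)
		printk(KERN_DEBUG "gap at %u, %u blocks\n",
		       cex->ec_block, cex->ec_len);
	else
		printk(KERN_DEBUG "extent %u -> %u, %u blocks\n",
		       cex->ec_block, cex->ec_start, cex->ec_len);
	return 0;	/* assumed to mean "continue the walk" */
}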
-+
-+static inline void
-+ext3_ext_put_in_cache(struct ext3_extents_tree *tree, __u32 block,
-+ __u32 len, __u32 start, int type)
-+{
-+ EXT_ASSERT(len > 0);
-+ if (tree->cex) {
-+ tree->cex->ec_type = type;
-+ tree->cex->ec_block = block;
-+ tree->cex->ec_len = len;
-+ tree->cex->ec_start = start;
-+ }
-+}
-+
-+/*
-+ * this routine calculates the boundaries of the gap the requested block fits into
-+ * and caches this gap
-+ */
-+static inline void
-+ext3_ext_put_gap_in_cache(struct ext3_extents_tree *tree,
-+ struct ext3_ext_path *path,
-+ unsigned long block)
-+{
-+ int depth = EXT_DEPTH(tree);
-+ unsigned long lblock, len;
-+ struct ext3_extent *ex;
-+
-+ if (!tree->cex)
-+ return;
-+
-+ ex = path[depth].p_ext;
-+ if (ex == NULL) {
-+ /* there is no extent yet, so gap is [0;-] */
-+ lblock = 0;
-+ len = EXT_MAX_BLOCK;
-+ ext_debug(tree, "cache gap(whole file):");
-+ } else if (block < ex->ee_block) {
-+ lblock = block;
-+ len = ex->ee_block - block;
-+ ext_debug(tree, "cache gap(before): %lu [%lu:%lu]",
-+ (unsigned long) block,
-+ (unsigned long) ex->ee_block,
-+ (unsigned long) ex->ee_len);
-+ } else if (block >= ex->ee_block + ex->ee_len) {
-+ lblock = ex->ee_block + ex->ee_len;
-+ len = ext3_ext_next_allocated_block(path);
-+ ext_debug(tree, "cache gap(after): [%lu:%lu] %lu",
-+ (unsigned long) ex->ee_block,
-+ (unsigned long) ex->ee_len,
-+ (unsigned long) block);
-+ EXT_ASSERT(len > lblock);
-+ len = len - lblock;
-+ } else {
-+ lblock = len = 0;
-+ BUG();
-+ }
-+
-+ ext_debug(tree, " -> %lu:%lu\n", (unsigned long) lblock, len);
-+ ext3_ext_put_in_cache(tree, lblock, len, 0, EXT3_EXT_CACHE_GAP);
-+}
-+
-+static inline int
-+ext3_ext_in_cache(struct ext3_extents_tree *tree, unsigned long block,
-+ struct ext3_extent *ex)
-+{
-+ struct ext3_ext_cache *cex = tree->cex;
-+
-+ /* is there cache storage at all? */
-+ if (!cex)
-+ return EXT3_EXT_CACHE_NO;
-+
-+ /* has cache valid data? */
-+ if (cex->ec_type == EXT3_EXT_CACHE_NO)
-+ return EXT3_EXT_CACHE_NO;
-+
-+ EXT_ASSERT(cex->ec_type == EXT3_EXT_CACHE_GAP ||
-+ cex->ec_type == EXT3_EXT_CACHE_EXTENT);
-+ if (block >= cex->ec_block && block < cex->ec_block + cex->ec_len) {
-+ ex->ee_block = cex->ec_block;
-+ ex->ee_start = cex->ec_start;
-+ ex->ee_start_hi = 0;
-+ ex->ee_len = cex->ec_len;
-+ ext_debug(tree, "%lu cached by %lu:%lu:%lu\n",
-+ (unsigned long) block,
-+ (unsigned long) ex->ee_block,
-+ (unsigned long) ex->ee_len,
-+ (unsigned long) ex->ee_start);
-+ return cex->ec_type;
-+ }
-+
-+ /* not in cache */
-+ return EXT3_EXT_CACHE_NO;
-+}
-+
-+/*
-+ * routine removes the index from the index block.
-+ * it's used in the truncate case only, thus all requests are for
-+ * the last index in the block only
-+ */
-+int ext3_ext_rm_idx(handle_t *handle, struct ext3_extents_tree *tree,
-+ struct ext3_ext_path *path)
-+{
-+ struct buffer_head *bh;
-+ int err;
-+
-+ /* free index block */
-+ path--;
-+ EXT_ASSERT(path->p_hdr->eh_entries);
-+ if ((err = ext3_ext_get_access(handle, tree, path)))
-+ return err;
-+ path->p_hdr->eh_entries--;
-+ if ((err = ext3_ext_dirty(handle, tree, path)))
-+ return err;
-+ ext_debug(tree, "index is empty, remove it, free block %d\n",
-+ path->p_idx->ei_leaf);
-+ bh = sb_find_get_block(tree->inode->i_sb, path->p_idx->ei_leaf);
-+ ext3_forget(handle, 1, tree->inode, bh, path->p_idx->ei_leaf);
-+ ext3_free_blocks(handle, tree->inode, path->p_idx->ei_leaf, 1);
-+ return err;
-+}
-+
-+int ext3_ext_calc_credits_for_insert(struct ext3_extents_tree *tree,
-+ struct ext3_ext_path *path)
-+{
-+ int depth = EXT_DEPTH(tree);
-+ int needed;
-+
-+ if (path) {
-+ /* probably there is space in leaf? */
-+ if (path[depth].p_hdr->eh_entries < path[depth].p_hdr->eh_max)
-+ return 1;
-+ }
-+
-+ /*
-+ * the worst case we're expecting is creation of the
-+ * new root (growing in depth) with index splitting.
-+ * for splitting we have to consider depth + 1 because
-+ * previous growing could have increased it
-+ */
-+ depth = depth + 1;
-+
-+ /*
-+ * growing in depth:
-+ * block allocation + new root + old root
-+ */
-+ needed = EXT3_ALLOC_NEEDED + 2;
-+
-+ /* index split. we may need to:
-+ * allocate intermediate indexes and a new leaf,
-+ * change two blocks at each level but the root, and
-+ * modify the root block (inode)
-+ */
-+ needed += (depth * EXT3_ALLOC_NEEDED) + (2 * depth) + 1;
-+
-+ return needed;
-+}
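A worked example of the estimate above, assuming mainline's EXT3_ALLOC_NEEDED value of 3 (bitmap, group descriptor and superblock): for a tree of depth 1 with no room left in the target leaf, depth is first bumped to 2, giving 3 + 2 = 5 credits for a possible grow-in-depth plus 2*3 + 2*2 + 1 = 11 for the index split, i.e. 16 journal credits in total.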
-+
-+static int
-+ext3_ext_split_for_rm(handle_t *handle, struct ext3_extents_tree *tree,
-+ struct ext3_ext_path *path, unsigned long start,
-+ unsigned long end)
-+{
-+ struct ext3_extent *ex, tex;
-+ struct ext3_ext_path *npath;
-+ int depth, creds, err;
-+
-+ depth = EXT_DEPTH(tree);
-+ ex = path[depth].p_ext;
-+ EXT_ASSERT(ex);
-+ EXT_ASSERT(end < ex->ee_block + ex->ee_len - 1);
-+ EXT_ASSERT(ex->ee_block < start);
-+
-+ /* calculate tail extent */
-+ tex.ee_block = end + 1;
-+ EXT_ASSERT(tex.ee_block < ex->ee_block + ex->ee_len);
-+ tex.ee_len = ex->ee_block + ex->ee_len - tex.ee_block;
-+
-+ creds = ext3_ext_calc_credits_for_insert(tree, path);
-+ handle = ext3_ext_journal_restart(handle, creds);
-+ if (IS_ERR(handle))
-+ return PTR_ERR(handle);
-+
-+ /* calculate head extent. use primary extent */
-+ err = ext3_ext_get_access(handle, tree, path + depth);
-+ if (err)
-+ return err;
-+ ex->ee_len = start - ex->ee_block;
-+ err = ext3_ext_dirty(handle, tree, path + depth);
-+ if (err)
-+ return err;
-+
-+ /* FIXME: some callback to free underlying resource
-+ * and correct ee_start? */
-+ ext_debug(tree, "split extent: head %u:%u, tail %u:%u\n",
-+ ex->ee_block, ex->ee_len, tex.ee_block, tex.ee_len);
-+
-+ npath = ext3_ext_find_extent(tree, ex->ee_block, NULL);
-+ if (IS_ERR(npath))
-+ return PTR_ERR(npath);
-+ depth = EXT_DEPTH(tree);
-+ EXT_ASSERT(npath[depth].p_ext->ee_block == ex->ee_block);
-+ EXT_ASSERT(npath[depth].p_ext->ee_len == ex->ee_len);
-+
-+ err = ext3_ext_insert_extent(handle, tree, npath, &tex);
-+ ext3_ext_drop_refs(npath);
-+ kfree(npath);
-+
-+ return err;
-+}
-+
-+static int
-+ext3_ext_rm_leaf(handle_t *handle, struct ext3_extents_tree *tree,
-+ struct ext3_ext_path *path, unsigned long start,
-+ unsigned long end)
-+{
-+ struct ext3_extent *ex, *fu = NULL, *lu, *le;
-+ int err = 0, correct_index = 0;
-+ int depth = EXT_DEPTH(tree), credits;
-+ struct ext3_extent_header *eh;
-+ unsigned a, b, block, num;
-+
-+ ext_debug(tree, "remove [%lu:%lu] in leaf\n", start, end);
-+ if (!path[depth].p_hdr)
-+ path[depth].p_hdr = EXT_BLOCK_HDR(path[depth].p_bh);
-+ eh = path[depth].p_hdr;
-+ EXT_ASSERT(eh);
-+ EXT_ASSERT(eh->eh_entries <= eh->eh_max);
-+ EXT_ASSERT(eh->eh_magic == EXT3_EXT_MAGIC);
-+
-+ /* find where to start removing */
-+ le = ex = EXT_LAST_EXTENT(eh);
-+ while (ex != EXT_FIRST_EXTENT(eh)) {
-+ if (ex->ee_block <= end)
-+ break;
-+ ex--;
-+ }
-+
-+ if (start > ex->ee_block && end < ex->ee_block + ex->ee_len - 1) {
-+ /* removal of an internal part of the extent was requested;
-+ * tail and head must be placed in different extents,
-+ * so we have to insert one more extent */
-+ path[depth].p_ext = ex;
-+ return ext3_ext_split_for_rm(handle, tree, path, start, end);
-+ }
-+
-+ lu = ex;
-+ while (ex >= EXT_FIRST_EXTENT(eh) && ex->ee_block + ex->ee_len > start) {
-+ ext_debug(tree, "remove ext %u:%u\n", ex->ee_block, ex->ee_len);
-+ path[depth].p_ext = ex;
-+
-+ a = ex->ee_block > start ? ex->ee_block : start;
-+ b = ex->ee_block + ex->ee_len - 1 < end ?
-+ ex->ee_block + ex->ee_len - 1 : end;
-+
-+ ext_debug(tree, " border %u:%u\n", a, b);
-+
-+ if (a != ex->ee_block && b != ex->ee_block + ex->ee_len - 1) {
-+ block = 0;
-+ num = 0;
-+ BUG();
-+ } else if (a != ex->ee_block) {
-+ /* remove tail of the extent */
-+ block = ex->ee_block;
-+ num = a - block;
-+ } else if (b != ex->ee_block + ex->ee_len - 1) {
-+ /* remove head of the extent */
-+ block = a;
-+ num = b - a;
-+ } else {
-+ /* remove whole extent: excellent! */
-+ block = ex->ee_block;
-+ num = 0;
-+ EXT_ASSERT(a == ex->ee_block &&
-+ b == ex->ee_block + ex->ee_len - 1);
-+ }
-+
-+ if (ex == EXT_FIRST_EXTENT(eh))
-+ correct_index = 1;
-+
-+ credits = 1;
-+ if (correct_index)
-+ credits += (EXT_DEPTH(tree) * EXT3_ALLOC_NEEDED) + 1;
-+ if (tree->ops->remove_extent_credits)
-+ credits+=tree->ops->remove_extent_credits(tree,ex,a,b);
-+
-+ handle = ext3_ext_journal_restart(handle, credits);
-+ if (IS_ERR(handle)) {
-+ err = PTR_ERR(handle);
-+ goto out;
-+ }
-+
-+ err = ext3_ext_get_access(handle, tree, path + depth);
-+ if (err)
-+ goto out;
-+
-+ if (tree->ops->remove_extent)
-+ err = tree->ops->remove_extent(tree, ex, a, b);
-+ if (err)
-+ goto out;
-+
-+ if (num == 0) {
-+ /* this extent is removed entirely; mark the slot unused */
-+ ex->ee_start = ex->ee_start_hi = 0;
-+ eh->eh_entries--;
-+ fu = ex;
-+ }
-+
-+ ex->ee_block = block;
-+ ex->ee_len = num;
-+
-+ err = ext3_ext_dirty(handle, tree, path + depth);
-+ if (err)
-+ goto out;
-+
-+ ext_debug(tree, "new extent: %u:%u:%u\n",
-+ ex->ee_block, ex->ee_len, ex->ee_start);
-+ ex--;
-+ }
-+
-+ if (fu) {
-+ /* reuse unused slots */
-+ while (lu < le) {
-+ if (lu->ee_start) {
-+ *fu = *lu;
-+ lu->ee_start = lu->ee_start_hi = 0;
-+ fu++;
-+ }
-+ lu++;
-+ }
-+ }
-+
-+ if (correct_index && eh->eh_entries)
-+ err = ext3_ext_correct_indexes(handle, tree, path);
-+
-+ /* if this leaf is free, then we should
-+ * remove it from index block above */
-+ if (err == 0 && eh->eh_entries == 0 && path[depth].p_bh != NULL)
-+ err = ext3_ext_rm_idx(handle, tree, path + depth);
-+
-+out:
-+ return err;
-+}
-+
-+
-+static struct ext3_extent_idx *
-+ext3_ext_last_covered(struct ext3_extent_header *hdr, unsigned long block)
-+{
-+ struct ext3_extent_idx *ix;
-+
-+ ix = EXT_LAST_INDEX(hdr);
-+ while (ix != EXT_FIRST_INDEX(hdr)) {
-+ if (ix->ei_block <= block)
-+ break;
-+ ix--;
-+ }
-+ return ix;
-+}
-+
-+/*
-+ * returns 1 if the current index has to be freed (even partially)
-+ */
-+static int inline
-+ext3_ext_more_to_rm(struct ext3_ext_path *path)
-+{
-+ EXT_ASSERT(path->p_idx);
-+
-+ if (path->p_idx < EXT_FIRST_INDEX(path->p_hdr))
-+ return 0;
-+
-+ /*
-+ * if truncate on a deeper level happened, it wasn't partial,
-+ * so we have to consider the current index for truncation
-+ */
-+ if (path->p_hdr->eh_entries == path->p_block)
-+ return 0;
-+ return 1;
-+}
-+
-+int ext3_ext_remove_space(struct ext3_extents_tree *tree,
-+ unsigned long start, unsigned long end)
-+{
-+ struct inode *inode = tree->inode;
-+ struct super_block *sb = inode->i_sb;
-+ int depth = EXT_DEPTH(tree);
-+ struct ext3_ext_path *path;
-+ handle_t *handle;
-+ int i = 0, err = 0;
-+
-+ ext_debug(tree, "space to be removed: %lu:%lu\n", start, end);
-+
-+ /* probably the first extent we're going to free will be the last one in the block */
-+ handle = ext3_journal_start(inode, depth + 1);
-+ if (IS_ERR(handle))
-+ return PTR_ERR(handle);
-+
-+ ext3_ext_invalidate_cache(tree);
-+
-+ /*
-+ * we start scanning from the right side, freeing all the blocks
-+ * after i_size and walking down the tree
-+ */
-+ path = kmalloc(sizeof(struct ext3_ext_path) * (depth + 1), GFP_KERNEL);
-+ if (IS_ERR(path)) {
-+ ext3_error(sb, __FUNCTION__, "Can't allocate path array");
-+ ext3_journal_stop(handle);
-+ return -ENOMEM;
-+ }
-+ memset(path, 0, sizeof(struct ext3_ext_path) * (depth + 1));
-+ path[i].p_hdr = EXT_ROOT_HDR(tree);
-+
-+ while (i >= 0 && err == 0) {
-+ if (i == depth) {
-+ /* this is leaf block */
-+ err = ext3_ext_rm_leaf(handle, tree, path, start, end);
-+ /* root level has p_bh == NULL, brelse() eats this */
-+ brelse(path[i].p_bh);
-+ i--;
-+ continue;
-+ }
-+
-+ /* this is index block */
-+ if (!path[i].p_hdr) {
-+ ext_debug(tree, "initialize header\n");
-+ path[i].p_hdr = EXT_BLOCK_HDR(path[i].p_bh);
-+ }
-+
-+ EXT_ASSERT(path[i].p_hdr->eh_entries <= path[i].p_hdr->eh_max);
-+ EXT_ASSERT(path[i].p_hdr->eh_magic == EXT3_EXT_MAGIC);
-+
-+ if (!path[i].p_idx) {
-+ /* this level hasn't been touched yet */
-+ path[i].p_idx =
-+ ext3_ext_last_covered(path[i].p_hdr, end);
-+ path[i].p_block = path[i].p_hdr->eh_entries + 1;
-+ ext_debug(tree, "init index ptr: hdr 0x%p, num %d\n",
-+ path[i].p_hdr, path[i].p_hdr->eh_entries);
-+ } else {
-+ /* we've already been here, look at the next index */
-+ path[i].p_idx--;
-+ }
-+
-+ ext_debug(tree, "level %d - index, first 0x%p, cur 0x%p\n",
-+ i, EXT_FIRST_INDEX(path[i].p_hdr),
-+ path[i].p_idx);
-+ if (ext3_ext_more_to_rm(path + i)) {
-+ /* go to the next level */
-+ ext_debug(tree, "move to level %d (block %d)\n",
-+ i + 1, path[i].p_idx->ei_leaf);
-+ memset(path + i + 1, 0, sizeof(*path));
-+ path[i+1].p_bh = sb_bread(sb, path[i].p_idx->ei_leaf);
-+ if (!path[i+1].p_bh) {
-+ /* should we reset i_size? */
-+ err = -EIO;
-+ break;
-+ }
-+ /* record the actual number of indexes so we can tell whether
-+ * this number changed at the next iteration */
-+ path[i].p_block = path[i].p_hdr->eh_entries;
-+ i++;
-+ } else {
-+ /* we finish processing this index, go up */
-+ if (path[i].p_hdr->eh_entries == 0 && i > 0) {
-+ /* index is empty, remove it
-+ * handle must be already prepared by the
-+ * truncatei_leaf() */
-+ err = ext3_ext_rm_idx(handle, tree, path + i);
-+ }
-+ /* root level has p_bh == NULL, brelse() eats this */
-+ brelse(path[i].p_bh);
-+ i--;
-+ ext_debug(tree, "return to level %d\n", i);
-+ }
-+ }
-+
-+ /* TODO: flexible tree reduction should be here */
-+ if (path->p_hdr->eh_entries == 0) {
-+ /*
-+ * truncate to zero freed the whole tree,
-+ * so we need to correct eh_depth
-+ */
-+ err = ext3_ext_get_access(handle, tree, path);
-+ if (err == 0) {
-+ EXT_ROOT_HDR(tree)->eh_depth = 0;
-+ EXT_ROOT_HDR(tree)->eh_max = ext3_ext_space_root(tree);
-+ err = ext3_ext_dirty(handle, tree, path);
-+ }
-+ }
-+ ext3_ext_tree_changed(tree);
-+
-+ kfree(path);
-+ ext3_journal_stop(handle);
-+
-+ return err;
-+}
-+
-+int ext3_ext_calc_metadata_amount(struct ext3_extents_tree *tree, int blocks)
-+{
-+ int lcap, icap, rcap, leafs, idxs, num;
-+
-+ rcap = ext3_ext_space_root(tree);
-+ if (blocks <= rcap) {
-+ /* all extents fit to the root */
-+ return 0;
-+ }
-+
-+ rcap = ext3_ext_space_root_idx(tree);
-+ lcap = ext3_ext_space_block(tree);
-+ icap = ext3_ext_space_block_idx(tree);
-+
-+ num = leafs = (blocks + lcap - 1) / lcap;
-+ if (leafs <= rcap) {
-+ /* all pointers to leafs fit to the root */
-+ return leafs;
-+ }
-+
-+ /* ok. we need separate index block(s) to link all leaf blocks */
-+ idxs = (leafs + icap - 1) / icap;
-+ do {
-+ num += idxs;
-+ idxs = (idxs + icap - 1) / icap;
-+ } while (idxs > rcap);
-+
-+ return num;
-+}
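Continuing the hedged capacities worked out after the space helpers (340 entries per 4 KiB block, 4 in the i_data root): a request for 1000 blocks gives leafs = (1000 + 339) / 340 = 3, which still fits under the root's 4 index slots, so the function returns 3; only once more than 4 leaves are needed does the do/while loop start adding intermediate index blocks to the estimate.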
-+
-+/*
-+ * called at mount time
-+ */
-+void ext3_ext_init(struct super_block *sb)
-+{
-+ /*
-+ * possible initialization would be here
-+ */
-+
-+ if (test_opt(sb, EXTENTS)) {
-+ printk("EXT3-fs: file extents enabled");
-+#ifdef AGRESSIVE_TEST
-+ printk(", agressive tests");
-+#endif
-+#ifdef CHECK_BINSEARCH
-+ printk(", check binsearch");
-+#endif
-+ printk("\n");
-+ }
-+}
-+
-+/*
-+ * called at umount time
-+ */
-+void ext3_ext_release(struct super_block *sb)
-+{
-+}
-+
-+/************************************************************************
-+ * VFS related routines
-+ ************************************************************************/
-+
-+static int ext3_get_inode_write_access(handle_t *handle, void *buffer)
-+{
-+ /* we use in-core data, not bh */
-+ return 0;
-+}
-+
-+static int ext3_mark_buffer_dirty(handle_t *handle, void *buffer)
-+{
-+ struct inode *inode = buffer;
-+ return ext3_mark_inode_dirty(handle, inode);
-+}
-+
-+static int ext3_ext_mergable(struct ext3_extent *ex1,
-+ struct ext3_extent *ex2)
-+{
-+ /* FIXME: support for large fs */
-+ if (ex1->ee_start + ex1->ee_len == ex2->ee_start)
-+ return 1;
-+ return 0;
-+}
-+
-+static int
-+ext3_remove_blocks_credits(struct ext3_extents_tree *tree,
-+ struct ext3_extent *ex,
-+ unsigned long from, unsigned long to)
-+{
-+ int needed;
-+
-+ /* at present, an extent can't cross a block group */
-+ needed = 4; /* bitmap + group desc + sb + inode */
-+
-+#ifdef CONFIG_QUOTA
-+ needed += 2 * EXT3_SINGLEDATA_TRANS_BLOCKS;
-+#endif
-+ return needed;
-+}
-+
-+static int
-+ext3_remove_blocks(struct ext3_extents_tree *tree,
-+ struct ext3_extent *ex,
-+ unsigned long from, unsigned long to)
-+{
-+ int needed = ext3_remove_blocks_credits(tree, ex, from, to);
-+ handle_t *handle = ext3_journal_start(tree->inode, needed);
-+ struct buffer_head *bh;
-+ int i;
-+
-+ if (IS_ERR(handle))
-+ return PTR_ERR(handle);
-+ if (from >= ex->ee_block && to == ex->ee_block + ex->ee_len - 1) {
-+ /* tail removal */
-+ unsigned long num, start;
-+ num = ex->ee_block + ex->ee_len - from;
-+ start = ex->ee_start + ex->ee_len - num;
-+ ext_debug(tree, "free last %lu blocks starting %lu\n",
-+ num, start);
-+ for (i = 0; i < num; i++) {
-+ bh = sb_find_get_block(tree->inode->i_sb, start + i);
-+ ext3_forget(handle, 0, tree->inode, bh, start + i);
-+ }
-+ ext3_free_blocks(handle, tree->inode, start, num);
-+ } else if (from == ex->ee_block && to <= ex->ee_block + ex->ee_len - 1) {
-+ printk("strange request: removal %lu-%lu from %u:%u\n",
-+ from, to, ex->ee_block, ex->ee_len);
-+ } else {
-+ printk("strange request: removal(2) %lu-%lu from %u:%u\n",
-+ from, to, ex->ee_block, ex->ee_len);
-+ }
-+ ext3_journal_stop(handle);
-+ return 0;
-+}
-+
-+static int ext3_ext_find_goal(struct inode *inode,
-+ struct ext3_ext_path *path, unsigned long block)
-+{
-+ struct ext3_inode_info *ei = EXT3_I(inode);
-+ unsigned long bg_start;
-+ unsigned long colour;
-+ int depth;
-+
-+ if (path) {
-+ struct ext3_extent *ex;
-+ depth = path->p_depth;
-+
-+ /* try to predict block placement */
-+ if ((ex = path[depth].p_ext))
-+ return ex->ee_start + (block - ex->ee_block);
-+
-+ /* it looks like the index is empty;
-+ * try to find a goal starting from the index block itself */
-+ if (path[depth].p_bh)
-+ return path[depth].p_bh->b_blocknr;
-+ }
-+
-+ /* OK. use inode's group */
-+ bg_start = (ei->i_block_group * EXT3_BLOCKS_PER_GROUP(inode->i_sb)) +
-+ le32_to_cpu(EXT3_SB(inode->i_sb)->s_es->s_first_data_block);
-+ colour = (current->pid % 16) *
-+ (EXT3_BLOCKS_PER_GROUP(inode->i_sb) / 16);
-+ return bg_start + colour + block;
-+}
-+
-+static int ext3_new_block_cb(handle_t *handle, struct ext3_extents_tree *tree,
-+ struct ext3_ext_path *path,
-+ struct ext3_extent *ex, int *err)
-+{
-+ struct inode *inode = tree->inode;
-+ int newblock, goal;
-+
-+ EXT_ASSERT(path);
-+ EXT_ASSERT(ex);
-+ EXT_ASSERT(ex->ee_start);
-+ EXT_ASSERT(ex->ee_len);
-+
-+ /* reuse block from the extent to order data/metadata */
-+ newblock = ex->ee_start++;
-+ ex->ee_len--;
-+ if (ex->ee_len == 0) {
-+ ex->ee_len = 1;
-+ /* allocate new block for the extent */
-+ goal = ext3_ext_find_goal(inode, path, ex->ee_block);
-+ ex->ee_start = ext3_new_block(handle, inode, goal, err);
-+ ex->ee_start_hi = 0;
-+ if (ex->ee_start == 0) {
-+ /* error occurred: restore the old extent */
-+ ex->ee_start = newblock;
-+ return 0;
-+ }
-+ }
-+ return newblock;
-+}
-+
-+static struct ext3_extents_helpers ext3_blockmap_helpers = {
-+ .get_write_access = ext3_get_inode_write_access,
-+ .mark_buffer_dirty = ext3_mark_buffer_dirty,
-+ .mergable = ext3_ext_mergable,
-+ .new_block = ext3_new_block_cb,
-+ .remove_extent = ext3_remove_blocks,
-+ .remove_extent_credits = ext3_remove_blocks_credits,
-+};
-+
-+void ext3_init_tree_desc(struct ext3_extents_tree *tree,
-+ struct inode *inode)
-+{
-+ tree->inode = inode;
-+ tree->root = (void *) EXT3_I(inode)->i_data;
-+ tree->buffer = (void *) inode;
-+ tree->buffer_len = sizeof(EXT3_I(inode)->i_data);
-+ tree->cex = (struct ext3_ext_cache *) &EXT3_I(inode)->i_cached_extent;
-+ tree->ops = &ext3_blockmap_helpers;
-+}
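-+/*
-+ * Note on the setup above: with the 12-byte ext3_extent_header and 12-byte
-+ * extent/index entries, the 60-byte i_data root used here holds at most
-+ * (60 - 12) / 12 = 4 entries before the tree has to grow into separate
-+ * blocks (fewer if AGRESSIVE_TEST shrinks the capacities for testing).
-+ */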
-+
-+int ext3_ext_get_block(handle_t *handle, struct inode *inode,
-+ long iblock, struct buffer_head *bh_result,
-+ int create, int extend_disksize)
-+{
-+ struct ext3_ext_path *path = NULL;
-+ struct ext3_extent newex;
-+ struct ext3_extent *ex;
-+ int goal, newblock, err = 0, depth;
-+ struct ext3_extents_tree tree;
-+
-+ clear_buffer_new(bh_result);
-+ ext3_init_tree_desc(&tree, inode);
-+ ext_debug(&tree, "block %d requested for inode %u\n",
-+ (int) iblock, (unsigned) inode->i_ino);
-+ down(&EXT3_I(inode)->truncate_sem);
-+
-+ /* check in cache */
-+ if ((goal = ext3_ext_in_cache(&tree, iblock, &newex))) {
-+ if (goal == EXT3_EXT_CACHE_GAP) {
-+ if (!create) {
-+ /* the block isn't allocated yet and
-+ * the caller doesn't want to allocate it */
-+ goto out2;
-+ }
-+ /* we should allocate requested block */
-+ } else if (goal == EXT3_EXT_CACHE_EXTENT) {
-+ /* block is already allocated */
-+ newblock = iblock - newex.ee_block + newex.ee_start;
-+ goto out;
-+ } else {
-+ EXT_ASSERT(0);
-+ }
-+ }
-+
-+ /* find extent for this block */
-+ path = ext3_ext_find_extent(&tree, iblock, NULL);
-+ if (IS_ERR(path)) {
-+ err = PTR_ERR(path);
-+ path = NULL;
-+ goto out2;
-+ }
-+
-+ depth = EXT_DEPTH(&tree);
-+
-+ /*
-+ * a consistent leaf must not be empty;
-+ * this situation is possible, though, _during_ tree modification;
-+ * this is why the assert can't be put in ext3_ext_find_extent()
-+ */
-+ EXT_ASSERT(path[depth].p_ext != NULL || depth == 0);
-+
-+ if ((ex = path[depth].p_ext)) {
-+ /* if the found extent covers the block, simply return it */
-+ if (iblock >= ex->ee_block && iblock < ex->ee_block + ex->ee_len) {
-+ newblock = iblock - ex->ee_block + ex->ee_start;
-+ ext_debug(&tree, "%d fit into %d:%d -> %d\n",
-+ (int) iblock, ex->ee_block, ex->ee_len,
-+ newblock);
-+ ext3_ext_put_in_cache(&tree, ex->ee_block,
-+ ex->ee_len, ex->ee_start,
-+ EXT3_EXT_CACHE_EXTENT);
-+ goto out;
-+ }
-+ }
-+
-+ /*
-+ * the requested block isn't allocated yet;
-+ * we must not create the block if the create flag is zero
-+ */
-+ if (!create) {
-+ /* put the just-found gap into the cache to speed up subsequent requests */
-+ ext3_ext_put_gap_in_cache(&tree, path, iblock);
-+ goto out2;
-+ }
-+
-+ /* allocate new block */
-+ goal = ext3_ext_find_goal(inode, path, iblock);
-+ newblock = ext3_new_block(handle, inode, goal, &err);
-+ if (!newblock)
-+ goto out2;
-+ ext_debug(&tree, "allocate new block: goal %d, found %d\n",
-+ goal, newblock);
-+
-+ /* try to insert new extent into found leaf and return */
-+ newex.ee_block = iblock;
-+ newex.ee_start = newblock;
-+ newex.ee_start_hi = 0;
-+ newex.ee_len = 1;
-+ err = ext3_ext_insert_extent(handle, &tree, path, &newex);
-+ if (err)
-+ goto out2;
-+
-+ if (extend_disksize && inode->i_size > EXT3_I(inode)->i_disksize)
-+ EXT3_I(inode)->i_disksize = inode->i_size;
-+
-+ /* the previous routine could have used the block we allocated */
-+ newblock = newex.ee_start;
-+ set_buffer_new(bh_result);
-+
-+ ext3_ext_put_in_cache(&tree, newex.ee_block, newex.ee_len,
-+ newex.ee_start, EXT3_EXT_CACHE_EXTENT);
-+out:
-+ ext3_ext_show_leaf(&tree, path);
-+ map_bh(bh_result, inode->i_sb, newblock);
-+out2:
-+ if (path) {
-+ ext3_ext_drop_refs(path);
-+ kfree(path);
-+ }
-+ up(&EXT3_I(inode)->truncate_sem);
-+
-+ return err;
-+}
-+
-+void ext3_ext_truncate(struct inode * inode, struct page *page)
-+{
-+ struct address_space *mapping = inode->i_mapping;
-+ struct super_block *sb = inode->i_sb;
-+ struct ext3_extents_tree tree;
-+ unsigned long last_block;
-+ handle_t *handle;
-+ int err = 0;
-+
-+ ext3_init_tree_desc(&tree, inode);
-+
-+ /*
-+ * the first extent we free will probably be the last one in its block
-+ */
-+ err = ext3_writepage_trans_blocks(inode) + 3;
-+ handle = ext3_journal_start(inode, err);
-+ if (IS_ERR(handle)) {
-+ if (page) {
-+ clear_highpage(page);
-+ flush_dcache_page(page);
-+ unlock_page(page);
-+ page_cache_release(page);
-+ }
-+ return;
-+ }
-+
-+ if (page)
-+ ext3_block_truncate_page(handle, page, mapping, inode->i_size);
-+
-+ down(&EXT3_I(inode)->truncate_sem);
-+ ext3_ext_invalidate_cache(&tree);
-+
-+ /*
-+ * TODO: optimization is possible here;
-+ * we probably don't need any scanning at all,
-+ * because the page truncation is enough
-+ */
-+ if (ext3_orphan_add(handle, inode))
-+ goto out_stop;
-+
-+ /* we have to know where to truncate from in crash case */
-+ EXT3_I(inode)->i_disksize = inode->i_size;
-+ ext3_mark_inode_dirty(handle, inode);
-+
-+ last_block = (inode->i_size + sb->s_blocksize - 1) >>
-+ EXT3_BLOCK_SIZE_BITS(sb);
-+ err = ext3_ext_remove_space(&tree, last_block, EXT_MAX_BLOCK);
-+
-+ /* In a multi-transaction truncate, we only make the final
-+ * transaction synchronous */
-+ if (IS_SYNC(inode))
-+ handle->h_sync = 1;
-+
-+out_stop:
-+ /*
-+ * If this was a simple ftruncate(), and the file will remain alive
-+ * then we need to clear up the orphan record which we created above.
-+ * However, if this was a real unlink then we were called by
-+ * ext3_delete_inode(), and we allow that function to clean up the
-+ * orphan info for us.
-+ */
-+ if (inode->i_nlink)
-+ ext3_orphan_del(handle, inode);
-+
-+ up(&EXT3_I(inode)->truncate_sem);
-+ ext3_journal_stop(handle);
-+}
-+
-+/*
-+ * this routine calculates the max number of blocks we could modify
-+ * in order to allocate a new block for an inode
-+ */
-+int ext3_ext_writepage_trans_blocks(struct inode *inode, int num)
-+{
-+ struct ext3_extents_tree tree;
-+ int needed;
-+
-+ ext3_init_tree_desc(&tree, inode);
-+
-+ needed = ext3_ext_calc_credits_for_insert(&tree, NULL);
-+
-+ /* the caller wants to allocate 'num' blocks */
-+ needed *= num;
-+
-+#ifdef CONFIG_QUOTA
-+ /*
-+ * FIXME: the real calculation should be here;
-+ * it depends on the blockmap format of the quota file
-+ */
-+ needed += 2 * EXT3_SINGLEDATA_TRANS_BLOCKS;
-+#endif
-+
-+ return needed;
-+}
-+
-+void ext3_extents_initialize_blockmap(handle_t *handle, struct inode *inode)
-+{
-+ struct ext3_extents_tree tree;
-+
-+ ext3_init_tree_desc(&tree, inode);
-+ ext3_extent_tree_init(handle, &tree);
-+}
-+
-+int ext3_ext_calc_blockmap_metadata(struct inode *inode, int blocks)
-+{
-+ struct ext3_extents_tree tree;
-+
-+ ext3_init_tree_desc(&tree, inode);
-+ return ext3_ext_calc_metadata_amount(&tree, blocks);
-+}
-+
-+static int
-+ext3_ext_store_extent_cb(struct ext3_extents_tree *tree,
-+ struct ext3_ext_path *path,
-+ struct ext3_ext_cache *newex)
-+{
-+ struct ext3_extent_buf *buf = (struct ext3_extent_buf *) tree->private;
-+
-+ if (newex->ec_type != EXT3_EXT_CACHE_EXTENT)
-+ return EXT_CONTINUE;
-+
-+ if (buf->err < 0)
-+ return EXT_BREAK;
-+ if (buf->cur - buf->buffer + sizeof(*newex) > buf->buflen)
-+ return EXT_BREAK;
-+
-+ if (!copy_to_user(buf->cur, newex, sizeof(*newex))) {
-+ buf->err++;
-+ buf->cur += sizeof(*newex);
-+ } else {
-+ buf->err = -EFAULT;
-+ return EXT_BREAK;
-+ }
-+ return EXT_CONTINUE;
-+}
-+
-+static int
-+ext3_ext_collect_stats_cb(struct ext3_extents_tree *tree,
-+ struct ext3_ext_path *path,
-+ struct ext3_ext_cache *ex)
-+{
-+ struct ext3_extent_tree_stats *buf =
-+ (struct ext3_extent_tree_stats *) tree->private;
-+ int depth;
-+
-+ if (ex->ec_type != EXT3_EXT_CACHE_EXTENT)
-+ return EXT_CONTINUE;
-+
-+ depth = EXT_DEPTH(tree);
-+ buf->extents_num++;
-+ if (path[depth].p_ext == EXT_FIRST_EXTENT(path[depth].p_hdr))
-+ buf->leaf_num++;
-+ return EXT_CONTINUE;
-+}
-+
-+int ext3_ext_ioctl(struct inode *inode, struct file *filp, unsigned int cmd,
-+ unsigned long arg)
-+{
-+ int err = 0;
-+
-+ if (!(EXT3_I(inode)->i_flags & EXT3_EXTENTS_FL))
-+ return -EINVAL;
-+
-+ if (cmd == EXT3_IOC_GET_EXTENTS) {
-+ struct ext3_extent_buf buf;
-+ struct ext3_extents_tree tree;
-+
-+ if (copy_from_user(&buf, (void *) arg, sizeof(buf)))
-+ return -EFAULT;
-+
-+ ext3_init_tree_desc(&tree, inode);
-+ buf.cur = buf.buffer;
-+ buf.err = 0;
-+ tree.private = &buf;
-+ down(&EXT3_I(inode)->truncate_sem);
-+ err = ext3_ext_walk_space(&tree, buf.start, EXT_MAX_BLOCK,
-+ ext3_ext_store_extent_cb);
-+ up(&EXT3_I(inode)->truncate_sem);
-+ if (err == 0)
-+ err = buf.err;
-+ } else if (cmd == EXT3_IOC_GET_TREE_STATS) {
-+ struct ext3_extent_tree_stats buf;
-+ struct ext3_extents_tree tree;
-+
-+ ext3_init_tree_desc(&tree, inode);
-+ down(&EXT3_I(inode)->truncate_sem);
-+ buf.depth = EXT_DEPTH(&tree);
-+ buf.extents_num = 0;
-+ buf.leaf_num = 0;
-+ tree.private = &buf;
-+ err = ext3_ext_walk_space(&tree, 0, EXT_MAX_BLOCK,
-+ ext3_ext_collect_stats_cb);
-+ up(&EXT3_I(inode)->truncate_sem);
-+ if (!err)
-+ err = copy_to_user((void *) arg, &buf, sizeof(buf));
-+ } else if (cmd == EXT3_IOC_GET_TREE_DEPTH) {
-+ struct ext3_extents_tree tree;
-+ ext3_init_tree_desc(&tree, inode);
-+ down(&EXT3_I(inode)->truncate_sem);
-+ err = EXT_DEPTH(&tree);
-+ up(&EXT3_I(inode)->truncate_sem);
-+ }
-+
-+ return err;
-+}
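-+/*
-+ * Hypothetical userspace usage of the ioctls handled above (illustration
-+ * only; the file and mount point names are made up):
-+ *
-+ *	int fd = open("/mnt/ext3/file", O_RDONLY);
-+ *	int depth = ioctl(fd, EXT3_IOC_GET_TREE_DEPTH, 0);
-+ *
-+ * ioctl() returns the tree depth on success; for a non-extent inode the
-+ * code above returns -EINVAL, which userspace sees as -1 with errno EINVAL.
-+ */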
-+
-+EXPORT_SYMBOL(ext3_init_tree_desc);
-+EXPORT_SYMBOL(ext3_mark_inode_dirty);
-+EXPORT_SYMBOL(ext3_ext_invalidate_cache);
-+EXPORT_SYMBOL(ext3_ext_insert_extent);
-+EXPORT_SYMBOL(ext3_ext_walk_space);
-+EXPORT_SYMBOL(ext3_ext_find_goal);
-+EXPORT_SYMBOL(ext3_ext_calc_credits_for_insert);
-Index: linux-2.6.12-rc6/fs/ext3/ialloc.c
-===================================================================
---- linux-2.6.12-rc6.orig/fs/ext3/ialloc.c 2005-06-14 16:31:08.634433030 +0200
-+++ linux-2.6.12-rc6/fs/ext3/ialloc.c 2005-06-14 16:31:25.846346882 +0200
-@@ -598,7 +598,7 @@
- ei->i_dir_start_lookup = 0;
- ei->i_disksize = 0;
-
-- ei->i_flags = EXT3_I(dir)->i_flags & ~EXT3_INDEX_FL;
-+ ei->i_flags = EXT3_I(dir)->i_flags & ~(EXT3_INDEX_FL|EXT3_EXTENTS_FL);
- if (S_ISLNK(mode))
- ei->i_flags &= ~(EXT3_IMMUTABLE_FL|EXT3_APPEND_FL);
- /* dirsync only applies to directories */
-@@ -639,6 +639,18 @@
- DQUOT_FREE_INODE(inode);
- goto fail2;
- }
-+ if (test_opt(sb, EXTENTS) && S_ISREG(inode->i_mode)) {
-+ EXT3_I(inode)->i_flags |= EXT3_EXTENTS_FL;
-+ ext3_extents_initialize_blockmap(handle, inode);
-+ if (!EXT3_HAS_INCOMPAT_FEATURE(sb, EXT3_FEATURE_INCOMPAT_EXTENTS)) {
-+ err = ext3_journal_get_write_access(handle, EXT3_SB(sb)->s_sbh);
-+ if (err) goto fail;
-+ EXT3_SET_INCOMPAT_FEATURE(sb, EXT3_FEATURE_INCOMPAT_EXTENTS);
-+ BUFFER_TRACE(EXT3_SB(sb)->s_sbh, "call ext3_journal_dirty_metadata");
-+ err = ext3_journal_dirty_metadata(handle, EXT3_SB(sb)->s_sbh);
-+ }
-+ }
-+
- err = ext3_mark_inode_dirty(handle, inode);
- if (err) {
- ext3_std_error(sb, err);
-Index: linux-2.6.12-rc6/fs/ext3/inode.c
-===================================================================
---- linux-2.6.12-rc6.orig/fs/ext3/inode.c 2005-06-14 16:31:09.701815830 +0200
-+++ linux-2.6.12-rc6/fs/ext3/inode.c 2005-06-14 16:31:25.861971882 +0200
-@@ -40,7 +40,7 @@
- #include "iopen.h"
- #include "acl.h"
-
--static int ext3_writepage_trans_blocks(struct inode *inode);
-+int ext3_writepage_trans_blocks(struct inode *inode);
-
- /*
- * Test whether an inode is a fast symlink.
-@@ -784,6 +784,17 @@
- return err;
- }
-
-+static inline int
-+ext3_get_block_wrap(handle_t *handle, struct inode *inode, long block,
-+ struct buffer_head *bh, int create, int extend_disksize)
-+{
-+ if (EXT3_I(inode)->i_flags & EXT3_EXTENTS_FL)
-+ return ext3_ext_get_block(handle, inode, block, bh, create,
-+ extend_disksize);
-+ return ext3_get_block_handle(handle, inode, block, bh, create,
-+ extend_disksize);
-+}
-+
- static int ext3_get_block(struct inode *inode, sector_t iblock,
- struct buffer_head *bh_result, int create)
- {
-@@ -794,8 +805,8 @@
- handle = ext3_journal_current_handle();
- J_ASSERT(handle != 0);
- }
-- ret = ext3_get_block_handle(handle, inode, iblock,
-- bh_result, create, 1);
-+ ret = ext3_get_block_wrap(handle, inode, iblock,
-+ bh_result, create, 1);
- return ret;
- }
-
-@@ -839,7 +850,7 @@
-
- get_block:
- if (ret == 0)
-- ret = ext3_get_block_handle(handle, inode, iblock,
-+ ret = ext3_get_block_wrap(handle, inode, iblock,
- bh_result, create, 0);
- bh_result->b_size = (1 << inode->i_blkbits);
- return ret;
-@@ -859,7 +870,7 @@
- dummy.b_state = 0;
- dummy.b_blocknr = -1000;
- buffer_trace_init(&dummy.b_history);
-- *errp = ext3_get_block_handle(handle, inode, block, &dummy, create, 1);
-+ *errp = ext3_get_block_wrap(handle, inode, block, &dummy, create, 1);
- if (!*errp && buffer_mapped(&dummy)) {
- struct buffer_head *bh;
- bh = sb_getblk(inode->i_sb, dummy.b_blocknr);
-@@ -1593,7 +1604,7 @@
- * This required during truncate. We need to physically zero the tail end
- * of that block so it doesn't yield old data if the file is later grown.
- */
--static int ext3_block_truncate_page(handle_t *handle, struct page *page,
-+int ext3_block_truncate_page(handle_t *handle, struct page *page,
- struct address_space *mapping, loff_t from)
- {
- unsigned long index = from >> PAGE_CACHE_SHIFT;
-@@ -2104,6 +2115,9 @@
- return;
- }
-
-+ if (EXT3_I(inode)->i_flags & EXT3_EXTENTS_FL)
-+ return ext3_ext_truncate(inode, page);
-+
- handle = start_transaction(inode);
- if (IS_ERR(handle)) {
- if (page) {
-@@ -2850,12 +2864,15 @@
- * block and work out the exact number of indirects which are touched. Pah.
- */
-
--static int ext3_writepage_trans_blocks(struct inode *inode)
-+int ext3_writepage_trans_blocks(struct inode *inode)
- {
- int bpp = ext3_journal_blocks_per_page(inode);
- int indirects = (EXT3_NDIR_BLOCKS % bpp) ? 5 : 3;
- int ret;
-
-+ if (EXT3_I(inode)->i_flags & EXT3_EXTENTS_FL)
-+ return ext3_ext_writepage_trans_blocks(inode, bpp);
-+
- if (ext3_should_journal_data(inode))
- ret = 3 * (bpp + indirects) + 2;
- else
-Index: linux-2.6.12-rc6/fs/ext3/Makefile
-===================================================================
---- linux-2.6.12-rc6.orig/fs/ext3/Makefile 2005-06-14 16:31:09.179354899 +0200
-+++ linux-2.6.12-rc6/fs/ext3/Makefile 2005-06-14 16:31:25.872714069 +0200
-@@ -5,7 +5,8 @@
- obj-$(CONFIG_EXT3_FS) += ext3.o
-
- ext3-y := balloc.o bitmap.o dir.o file.o fsync.o ialloc.o inode.o iopen.o \
-- ioctl.o namei.o super.o symlink.o hash.o resize.o
-+ ioctl.o namei.o super.o symlink.o hash.o resize.o \
-+ extents.o
-
- ext3-$(CONFIG_EXT3_FS_XATTR) += xattr.o xattr_user.o xattr_trusted.o
- ext3-$(CONFIG_EXT3_FS_POSIX_ACL) += acl.o
-Index: linux-2.6.12-rc6/fs/ext3/super.c
-===================================================================
---- linux-2.6.12-rc6.orig/fs/ext3/super.c 2005-06-14 16:31:09.950839264 +0200
-+++ linux-2.6.12-rc6/fs/ext3/super.c 2005-06-14 16:31:25.886385944 +0200
-@@ -387,6 +387,7 @@
- struct ext3_super_block *es = sbi->s_es;
- int i;
-
-+ ext3_ext_release(sb);
- ext3_xattr_put_super(sb);
- journal_destroy(sbi->s_journal);
- if (!(sb->s_flags & MS_RDONLY)) {
-@@ -451,6 +452,8 @@
- #endif
- ei->i_block_alloc_info = NULL;
- ei->vfs_inode.i_version = 1;
-+
-+ memset(&ei->i_cached_extent, 0, sizeof(ei->i_cached_extent));
- return &ei->vfs_inode;
- }
-
-@@ -593,6 +596,7 @@
- Opt_jqfmt_vfsold, Opt_jqfmt_vfsv0,
- Opt_ignore, Opt_barrier, Opt_err, Opt_resize,
- Opt_iopen, Opt_noiopen, Opt_iopen_nopriv,
-+ Opt_extents, Opt_noextents, Opt_extdebug,
- };
-
- static match_table_t tokens = {
-@@ -644,6 +647,9 @@
- {Opt_iopen, "iopen"},
- {Opt_noiopen, "noiopen"},
- {Opt_iopen_nopriv, "iopen_nopriv"},
-+ {Opt_extents, "extents"},
-+ {Opt_noextents, "noextents"},
-+ {Opt_extdebug, "extdebug"},
- {Opt_barrier, "barrier=%u"},
- {Opt_err, NULL},
- {Opt_resize, "resize"},
-@@ -953,6 +958,15 @@
- case Opt_nobh:
- set_opt(sbi->s_mount_opt, NOBH);
- break;
-+ case Opt_extents:
-+ set_opt (sbi->s_mount_opt, EXTENTS);
-+ break;
-+ case Opt_noextents:
-+ clear_opt (sbi->s_mount_opt, EXTENTS);
-+ break;
-+ case Opt_extdebug:
-+ set_opt (sbi->s_mount_opt, EXTDEBUG);
-+ break;
- default:
- printk (KERN_ERR
- "EXT3-fs: Unrecognized mount option \"%s\" "
-@@ -1668,6 +1681,7 @@
- percpu_counter_mod(&sbi->s_dirs_counter,
- ext3_count_dirs(sb));
-
-+ ext3_ext_init(sb);
- lock_kernel();
- return 0;
-
-Index: linux-2.6.12-rc6/fs/ext3/ioctl.c
-===================================================================
---- linux-2.6.12-rc6.orig/fs/ext3/ioctl.c 2005-06-14 16:31:08.646151780 +0200
-+++ linux-2.6.12-rc6/fs/ext3/ioctl.c 2005-06-14 16:31:25.897128131 +0200
-@@ -124,6 +124,10 @@
- err = ext3_change_inode_journal_flag(inode, jflag);
- return err;
- }
-+ case EXT3_IOC_GET_EXTENTS:
-+ case EXT3_IOC_GET_TREE_STATS:
-+ case EXT3_IOC_GET_TREE_DEPTH:
-+ return ext3_ext_ioctl(inode, filp, cmd, arg);
- case EXT3_IOC_GETVERSION:
- case EXT3_IOC_GETVERSION_OLD:
- return put_user(inode->i_generation, (int __user *) arg);
-Index: linux-2.6.12-rc6/include/linux/ext3_fs.h
-===================================================================
---- linux-2.6.12-rc6.orig/include/linux/ext3_fs.h 2005-06-14 16:31:10.185214261 +0200
-+++ linux-2.6.12-rc6/include/linux/ext3_fs.h 2005-06-14 16:31:52.859041864 +0200
-@@ -186,8 +186,9 @@
- #define EXT3_NOTAIL_FL 0x00008000 /* don't merge file tail */
- #define EXT3_DIRSYNC_FL 0x00010000 /* dirsync behaviour (directories only) */
- #define EXT3_TOPDIR_FL 0x00020000 /* Top of directory hierarchies*/
-+#define EXT3_EXTENTS_FL 0x00080000 /* Inode uses extents */
- #define EXT3_RESERVED_FL 0x80000000 /* reserved for ext3 lib */
-
--#define EXT3_FL_USER_VISIBLE 0x0003DFFF /* User visible flags */
-+#define EXT3_FL_USER_VISIBLE 0x000BDFFF /* User visible flags */
- #define EXT3_FL_USER_MODIFIABLE 0x000380FF /* User modifiable flags */
-
-@@ -237,6 +238,9 @@
- #endif
- #define EXT3_IOC_GETRSVSZ _IOR('f', 5, long)
- #define EXT3_IOC_SETRSVSZ _IOW('f', 6, long)
-+#define EXT3_IOC_GET_EXTENTS _IOR('f', 7, long)
-+#define EXT3_IOC_GET_TREE_DEPTH _IOR('f', 8, long)
-+#define EXT3_IOC_GET_TREE_STATS _IOR('f', 9, long)
-
- /*
- * Structure of an inode on the disk
-@@ -360,6 +364,8 @@
- #define EXT3_MOUNT_NOBH 0x40000 /* No bufferheads */
- #define EXT3_MOUNT_IOPEN 0x80000 /* Allow access via iopen */
- #define EXT3_MOUNT_IOPEN_NOPRIV 0x100000/* Make iopen world-readable */
-+#define EXT3_MOUNT_EXTENTS 0x200000/* Extents support */
-+#define EXT3_MOUNT_EXTDEBUG 0x400000/* Extents debug */
-
- /* Compatibility, for having both ext2_fs.h and ext3_fs.h included at once */
- #ifndef clear_opt
-@@ -548,11 +554,13 @@
- #define EXT3_FEATURE_INCOMPAT_RECOVER 0x0004 /* Needs recovery */
- #define EXT3_FEATURE_INCOMPAT_JOURNAL_DEV 0x0008 /* Journal device */
- #define EXT3_FEATURE_INCOMPAT_META_BG 0x0010
-+#define EXT3_FEATURE_INCOMPAT_EXTENTS 0x0040 /* extents support */
-
- #define EXT3_FEATURE_COMPAT_SUPP EXT2_FEATURE_COMPAT_EXT_ATTR
- #define EXT3_FEATURE_INCOMPAT_SUPP (EXT3_FEATURE_INCOMPAT_FILETYPE| \
- EXT3_FEATURE_INCOMPAT_RECOVER| \
-- EXT3_FEATURE_INCOMPAT_META_BG)
-+ EXT3_FEATURE_INCOMPAT_META_BG| \
-+ EXT3_FEATURE_INCOMPAT_EXTENTS)
- #define EXT3_FEATURE_RO_COMPAT_SUPP (EXT3_FEATURE_RO_COMPAT_SPARSE_SUPER| \
- EXT3_FEATURE_RO_COMPAT_LARGE_FILE| \
- EXT3_FEATURE_RO_COMPAT_BTREE_DIR)
-@@ -759,6 +767,9 @@
-
-
- /* inode.c */
-+extern int ext3_block_truncate_page(handle_t *, struct page *,
-+ struct address_space *, loff_t);
-+extern int ext3_writepage_trans_blocks(struct inode *inode);
- extern int ext3_forget(handle_t *, int, struct inode *, struct buffer_head *, int);
- extern struct buffer_head * ext3_getblk (handle_t *, struct inode *, long, int, int *);
- extern struct buffer_head * ext3_bread (handle_t *, struct inode *, int, int, int *);
-@@ -828,6 +837,16 @@
- extern struct inode_operations ext3_symlink_inode_operations;
- extern struct inode_operations ext3_fast_symlink_inode_operations;
-
-+/* extents.c */
-+extern int ext3_ext_writepage_trans_blocks(struct inode *, int);
-+extern int ext3_ext_get_block(handle_t *, struct inode *, long,
-+ struct buffer_head *, int, int);
-+extern void ext3_ext_truncate(struct inode *, struct page *);
-+extern void ext3_ext_init(struct super_block *);
-+extern void ext3_ext_release(struct super_block *);
-+extern void ext3_extents_initialize_blockmap(handle_t *, struct inode *);
-+extern int ext3_ext_ioctl(struct inode *inode, struct file *filp,
-+ unsigned int cmd, unsigned long arg);
-
- #endif /* __KERNEL__ */
-
-Index: linux-2.6.12-rc6/include/linux/ext3_extents.h
-===================================================================
---- linux-2.6.12-rc6.orig/include/linux/ext3_extents.h 2005-06-14 16:31:25.780917195 +0200
-+++ linux-2.6.12-rc6/include/linux/ext3_extents.h 2005-06-14 16:31:25.932284381 +0200
-@@ -0,0 +1,262 @@
-+/*
-+ * Copyright (c) 2003, Cluster File Systems, Inc, info@clusterfs.com
-+ * Written by Alex Tomas <alex@clusterfs.com>
-+ *
-+ * This program is free software; you can redistribute it and/or modify
-+ * it under the terms of the GNU General Public License version 2 as
-+ * published by the Free Software Foundation.
-+ *
-+ * This program is distributed in the hope that it will be useful,
-+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
-+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-+ * GNU General Public License for more details.
-+ *
-+ * You should have received a copy of the GNU General Public License
-+ * along with this program; if not, write to the Free Software
-+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-
-+ */
-+
-+#ifndef _LINUX_EXT3_EXTENTS
-+#define _LINUX_EXT3_EXTENTS
-+
-+/*
-+ * with AGRESSIVE_TEST defined, the capacity of index/leaf blocks
-+ * becomes very small, so index splits, in-depth growing and
-+ * other hard changes happen much more often;
-+ * this is for debug purposes only
-+ */
-+#define AGRESSIVE_TEST_
-+
-+/*
-+ * if CHECK_BINSEARCH is defined, then the results of the binary search
-+ * will be checked by a linear search
-+ */
-+#define CHECK_BINSEARCH_
-+
-+/*
-+ * if EXT_DEBUG is defined, you can use the 'extdebug' mount option
-+ * to get lots of info about what's going on
-+ */
-+#define EXT_DEBUG_
-+#ifdef EXT_DEBUG
-+#define ext_debug(tree,fmt,a...) \
-+do { \
-+ if (test_opt((tree)->inode->i_sb, EXTDEBUG)) \
-+ printk(fmt, ##a); \
-+} while (0)
-+#else
-+#define ext_debug(tree,fmt,a...)
-+#endif
-+
-+/*
-+ * if EXT_STATS is defined then stats numbers are collected;
-+ * these numbers will be displayed at umount time
-+ */
-+#define EXT_STATS_
-+
-+
-+#define EXT3_ALLOC_NEEDED 3 /* block bitmap + group desc. + sb */
-+
-+/*
-+ * ext3_inode has i_block array (total 60 bytes)
-+ * first 4 bytes are used to store:
-+ * - tree depth (0 means there is no tree yet; all extents are in the inode)
-+ * - number of alive extents in the inode
-+ */
-+
-+/*
-+ * this is the extent on-disk structure;
-+ * it's used at the bottom of the tree
-+ */
-+struct ext3_extent {
-+ __u32 ee_block; /* first logical block extent covers */
-+ __u16 ee_len; /* number of blocks covered by extent */
-+ __u16 ee_start_hi; /* high 16 bits of physical block */
-+ __u32 ee_start; /* low 32 bits of physical block */
-+};
-+
-+/*
-+ * this is the index on-disk structure;
-+ * it's used at all levels except the bottom
-+ */
-+struct ext3_extent_idx {
-+ __u32 ei_block; /* index covers logical blocks from 'block' */
-+ __u32 ei_leaf; /* pointer to the physical block of the next *
-+ * level; a leaf or the next index could be here */
-+ __u16 ei_leaf_hi; /* high 16 bits of physical block */
-+ __u16 ei_unused;
-+};
-+
-+/*
-+ * each block (leaves and indexes), even the inode-stored one, has a header
-+ */
-+struct ext3_extent_header {
-+ __u16 eh_magic; /* probably will support different formats */
-+ __u16 eh_entries; /* number of valid entries */
-+ __u16 eh_max; /* capacity of store in entries */
-+ __u16 eh_depth; /* does the tree have real underlying blocks? */
-+ __u32 eh_generation; /* flags(8 bits) | generation of the tree */
-+};
-+
-+#define EXT3_EXT_MAGIC 0xf30a
-+
-+/*
-+ * an array of ext3_ext_path contains the path to some extent;
-+ * creation/lookup routines use it for traversal/splitting/etc.;
-+ * truncate uses it to simulate recursive walking
-+ */
-+struct ext3_ext_path {
-+ __u32 p_block;
-+ __u16 p_depth;
-+ struct ext3_extent *p_ext;
-+ struct ext3_extent_idx *p_idx;
-+ struct ext3_extent_header *p_hdr;
-+ struct buffer_head *p_bh;
-+};
-+
-+/*
-+ * structure for external API
-+ */
-+
-+/*
-+ * storage for cached extent
-+ */
-+struct ext3_ext_cache {
-+ __u32 ec_start;
-+ __u32 ec_block;
-+ __u32 ec_len;
-+ __u32 ec_type;
-+};
-+
-+#define EXT3_EXT_CACHE_NO 0
-+#define EXT3_EXT_CACHE_GAP 1
-+#define EXT3_EXT_CACHE_EXTENT 2
-+
-+/*
-+ * ext3_extents_tree is used to pass initial information
-+ * to top-level extents API
-+ */
-+struct ext3_extents_helpers;
-+struct ext3_extents_tree {
-+ struct inode *inode; /* inode which tree belongs to */
-+ void *root; /* ptr to the data where the top of the tree resides */
-+ void *buffer; /* will be passed as arg to ^^ routines */
-+ int buffer_len;
-+ void *private;
-+ struct ext3_ext_cache *cex;/* last found extent */
-+ struct ext3_extents_helpers *ops;
-+};
-+
-+struct ext3_extents_helpers {
-+ int (*get_write_access)(handle_t *h, void *buffer);
-+ int (*mark_buffer_dirty)(handle_t *h, void *buffer);
-+ int (*mergable)(struct ext3_extent *ex1, struct ext3_extent *ex2);
-+ int (*remove_extent_credits)(struct ext3_extents_tree *,
-+ struct ext3_extent *, unsigned long,
-+ unsigned long);
-+ int (*remove_extent)(struct ext3_extents_tree *,
-+ struct ext3_extent *, unsigned long,
-+ unsigned long);
-+ int (*new_block)(handle_t *, struct ext3_extents_tree *,
-+ struct ext3_ext_path *, struct ext3_extent *,
-+ int *);
-+};
-+
-+/*
-+ * to be called by ext3_ext_walk_space()
-+ * negative retcode - error
-+ * positive retcode - signal for ext3_ext_walk_space(), see below
-+ * callback must return valid extent (passed or newly created)
-+ */
-+typedef int (*ext_prepare_callback)(struct ext3_extents_tree *,
-+ struct ext3_ext_path *,
-+ struct ext3_ext_cache *);
-+
-+#define EXT_CONTINUE 0
-+#define EXT_BREAK 1
-+#define EXT_REPEAT 2
-+
-+
-+#define EXT_MAX_BLOCK 0xffffffff
-+
-+
-+#define EXT_FIRST_EXTENT(__hdr__) \
-+ ((struct ext3_extent *) (((char *) (__hdr__)) + \
-+ sizeof(struct ext3_extent_header)))
-+#define EXT_FIRST_INDEX(__hdr__) \
-+ ((struct ext3_extent_idx *) (((char *) (__hdr__)) + \
-+ sizeof(struct ext3_extent_header)))
-+#define EXT_HAS_FREE_INDEX(__path__) \
-+ ((__path__)->p_hdr->eh_entries < (__path__)->p_hdr->eh_max)
-+#define EXT_LAST_EXTENT(__hdr__) \
-+ (EXT_FIRST_EXTENT((__hdr__)) + (__hdr__)->eh_entries - 1)
-+#define EXT_LAST_INDEX(__hdr__) \
-+ (EXT_FIRST_INDEX((__hdr__)) + (__hdr__)->eh_entries - 1)
-+#define EXT_MAX_EXTENT(__hdr__) \
-+ (EXT_FIRST_EXTENT((__hdr__)) + (__hdr__)->eh_max - 1)
-+#define EXT_MAX_INDEX(__hdr__) \
-+ (EXT_FIRST_INDEX((__hdr__)) + (__hdr__)->eh_max - 1)
-+#define EXT_HDR_GEN(__hdr__) ((__hdr__)->eh_generation & 0x00ffffff)
-+#define EXT_FLAGS(__hdr__) ((__hdr__)->eh_generation >> 24)
-+#define EXT_FLAGS_CLR_UNKNOWN 0x7 /* Flags cleared on modification */
-+
-+#define EXT_BLOCK_HDR(__bh__) ((struct ext3_extent_header *)(__bh__)->b_data)
-+#define EXT_ROOT_HDR(__tree__) ((struct ext3_extent_header *)(__tree__)->root)
-+#define EXT_DEPTH(__tree__) (EXT_ROOT_HDR(__tree__)->eh_depth)
-+#define EXT_GENERATION(__tree__) EXT_HDR_GEN(EXT_ROOT_HDR(__tree__))
-+
-+#define EXT_ASSERT(__x__) do { if (!(__x__)) BUG(); } while (0)
-+
-+#define EXT_CHECK_PATH(tree,path) \
-+{ \
-+ int depth = EXT_DEPTH(tree); \
-+ BUG_ON((unsigned long) (path) < __PAGE_OFFSET); \
-+ BUG_ON((unsigned long) (path)[depth].p_idx < \
-+ __PAGE_OFFSET && (path)[depth].p_idx != NULL); \
-+ BUG_ON((unsigned long) (path)[depth].p_ext < \
-+ __PAGE_OFFSET && (path)[depth].p_ext != NULL); \
-+ BUG_ON((unsigned long) (path)[depth].p_hdr < __PAGE_OFFSET); \
-+ BUG_ON((unsigned long) (path)[depth].p_bh < __PAGE_OFFSET \
-+ && depth != 0); \
-+ BUG_ON((path)[0].p_depth != depth); \
-+}
-+
-+
-+/*
-+ * this structure is used to gather extents from the tree via ioctl
-+ */
-+struct ext3_extent_buf {
-+ unsigned long start;
-+ int buflen;
-+ void *buffer;
-+ void *cur;
-+ int err;
-+};
-+
-+/*
-+ * this structure is used to collect stats info about the tree
-+ */
-+struct ext3_extent_tree_stats {
-+ int depth;
-+ int extents_num;
-+ int leaf_num;
-+};
-+
-+extern void ext3_init_tree_desc(struct ext3_extents_tree *, struct inode *);
-+extern int ext3_extent_tree_init(handle_t *, struct ext3_extents_tree *);
-+extern int ext3_ext_calc_credits_for_insert(struct ext3_extents_tree *, struct ext3_ext_path *);
-+extern int ext3_ext_insert_extent(handle_t *, struct ext3_extents_tree *, struct ext3_ext_path *, struct ext3_extent *);
-+extern int ext3_ext_walk_space(struct ext3_extents_tree *, unsigned long, unsigned long, ext_prepare_callback);
-+extern int ext3_ext_remove_space(struct ext3_extents_tree *, unsigned long, unsigned long);
-+extern struct ext3_ext_path * ext3_ext_find_extent(struct ext3_extents_tree *, int, struct ext3_ext_path *);
-+extern int ext3_ext_calc_blockmap_metadata(struct inode *, int);
-+
-+static inline void
-+ext3_ext_invalidate_cache(struct ext3_extents_tree *tree)
-+{
-+ if (tree->cex)
-+ tree->cex->ec_type = EXT3_EXT_CACHE_NO;
-+}
-+
-+
-+#endif /* _LINUX_EXT3_EXTENTS */
-Index: linux-2.6.12-rc6/include/linux/ext3_fs_i.h
-===================================================================
---- linux-2.6.12-rc6.orig/include/linux/ext3_fs_i.h 2005-06-06 17:22:29.000000000 +0200
-+++ linux-2.6.12-rc6/include/linux/ext3_fs_i.h 2005-06-14 16:31:25.941073443 +0200
-@@ -133,6 +133,8 @@
- */
- struct semaphore truncate_sem;
- struct inode vfs_inode;
-+
-+ __u32 i_cached_extent[4];
- };
-
- #endif /* _LINUX_EXT3_FS_I */
+++ /dev/null
-Index: linux-2.6.16.21-0.8/fs/ext3/extents.c
-===================================================================
---- /dev/null
-+++ linux-2.6.16.21-0.8/fs/ext3/extents.c
-@@ -0,0 +1,2359 @@
-+/*
-+ * Copyright(c) 2003, 2004, 2005, Cluster File Systems, Inc, info@clusterfs.com
-+ * Written by Alex Tomas <alex@clusterfs.com>
-+ *
-+ * This program is free software; you can redistribute it and/or modify
-+ * it under the terms of the GNU General Public License version 2 as
-+ * published by the Free Software Foundation.
-+ *
-+ * This program is distributed in the hope that it will be useful,
-+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
-+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-+ * GNU General Public License for more details.
-+ *
-+ * You should have received a copy of the GNU General Public License
-+ * along with this program; if not, write to the Free Software
-+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-
-+ */
-+
-+/*
-+ * Extents support for EXT3
-+ *
-+ * TODO:
-+ * - ext3_ext_walk_space() should not use ext3_ext_find_extent()
-+ * - ext3_ext_calc_credits() could take 'mergable' into account
-+ * - ext3*_error() should be used in some situations
-+ * - find_goal() [to be tested and improved]
-+ * - smart tree reduction
-+ * - arch-independence
-+ * common on-disk format for big/little-endian arch
-+ */
-+
-+#include <linux/module.h>
-+#include <linux/fs.h>
-+#include <linux/time.h>
-+#include <linux/ext3_jbd.h>
-+#include <linux/jbd.h>
-+#include <linux/smp_lock.h>
-+#include <linux/highuid.h>
-+#include <linux/pagemap.h>
-+#include <linux/quotaops.h>
-+#include <linux/string.h>
-+#include <linux/slab.h>
-+#include <linux/ext3_extents.h>
-+#include <asm/uaccess.h>
-+
-+
-+static inline int ext3_ext_check_header(struct ext3_extent_header *eh)
-+{
-+ if (eh->eh_magic != EXT3_EXT_MAGIC) {
-+ printk(KERN_ERR "EXT3-fs: invalid magic = 0x%x\n",
-+ (unsigned)eh->eh_magic);
-+ return -EIO;
-+ }
-+ if (eh->eh_max == 0) {
-+ printk(KERN_ERR "EXT3-fs: invalid eh_max = %u\n",
-+ (unsigned)eh->eh_max);
-+ return -EIO;
-+ }
-+ if (eh->eh_entries > eh->eh_max) {
-+ printk(KERN_ERR "EXT3-fs: invalid eh_entries = %u\n",
-+ (unsigned)eh->eh_entries);
-+ return -EIO;
-+ }
-+ return 0;
-+}
-+
-+static handle_t *ext3_ext_journal_restart(handle_t *handle, int needed)
-+{
-+ int err;
-+
-+ if (handle->h_buffer_credits > needed)
-+ return handle;
-+ if (!ext3_journal_extend(handle, needed))
-+ return handle;
-+ err = ext3_journal_restart(handle, needed);
-+
-+ return handle;
-+}
-+
-+static int inline
-+ext3_ext_get_access_for_root(handle_t *h, struct ext3_extents_tree *tree)
-+{
-+ if (tree->ops->get_write_access)
-+ return tree->ops->get_write_access(h,tree->buffer);
-+ else
-+ return 0;
-+}
-+
-+static int inline
-+ext3_ext_mark_root_dirty(handle_t *h, struct ext3_extents_tree *tree)
-+{
-+ if (tree->ops->mark_buffer_dirty)
-+ return tree->ops->mark_buffer_dirty(h,tree->buffer);
-+ else
-+ return 0;
-+}
-+
-+/*
-+ * could return:
-+ * - EROFS
-+ * - ENOMEM
-+ */
-+static int ext3_ext_get_access(handle_t *handle,
-+ struct ext3_extents_tree *tree,
-+ struct ext3_ext_path *path)
-+{
-+ int err;
-+
-+ if (path->p_bh) {
-+ /* path points to block */
-+ err = ext3_journal_get_write_access(handle, path->p_bh);
-+ } else {
-+ /* path points to leaf/index in inode body */
-+ err = ext3_ext_get_access_for_root(handle, tree);
-+ }
-+ return err;
-+}
-+
-+/*
-+ * could return:
-+ * - EROFS
-+ * - ENOMEM
-+ * - EIO
-+ */
-+static int ext3_ext_dirty(handle_t *handle, struct ext3_extents_tree *tree,
-+ struct ext3_ext_path *path)
-+{
-+ int err;
-+ if (path->p_bh) {
-+ /* path points to block */
-+ err = ext3_journal_dirty_metadata(handle, path->p_bh);
-+ } else {
-+ /* path points to leaf/index in inode body */
-+ err = ext3_ext_mark_root_dirty(handle, tree);
-+ }
-+ return err;
-+}
-+
-+static int inline
-+ext3_ext_new_block(handle_t *handle, struct ext3_extents_tree *tree,
-+ struct ext3_ext_path *path, struct ext3_extent *ex,
-+ int *err)
-+{
-+ int goal, depth, newblock;
-+ struct inode *inode;
-+
-+ EXT_ASSERT(tree);
-+ if (tree->ops->new_block)
-+ return tree->ops->new_block(handle, tree, path, ex, err);
-+
-+ inode = tree->inode;
-+ depth = EXT_DEPTH(tree);
-+ if (path && depth > 0) {
-+ goal = path[depth-1].p_block;
-+ } else {
-+ struct ext3_inode_info *ei = EXT3_I(inode);
-+ unsigned long bg_start;
-+ unsigned long colour;
-+
-+ bg_start = (ei->i_block_group *
-+ EXT3_BLOCKS_PER_GROUP(inode->i_sb)) +
-+ le32_to_cpu(EXT3_SB(inode->i_sb)->s_es->s_first_data_block);
-+ colour = (current->pid % 16) *
-+ (EXT3_BLOCKS_PER_GROUP(inode->i_sb) / 16);
-+ goal = bg_start + colour;
-+ }
-+
-+ newblock = ext3_new_block(handle, inode, goal, err);
-+ return newblock;
-+}
-+
-+static inline void ext3_ext_tree_changed(struct ext3_extents_tree *tree)
-+{
-+ struct ext3_extent_header *neh = EXT_ROOT_HDR(tree);
-+ neh->eh_generation = ((EXT_FLAGS(neh) & ~EXT_FLAGS_CLR_UNKNOWN) << 24) |
-+ (EXT_HDR_GEN(neh) + 1);
-+}
-+
-+static inline int ext3_ext_space_block(struct ext3_extents_tree *tree)
-+{
-+ int size;
-+
-+ size = (tree->inode->i_sb->s_blocksize -
-+ sizeof(struct ext3_extent_header)) /
-+ sizeof(struct ext3_extent);
-+#ifdef AGRESSIVE_TEST
-+ size = 6;
-+#endif
-+ return size;
-+}
-+
-+static inline int ext3_ext_space_block_idx(struct ext3_extents_tree *tree)
-+{
-+ int size;
-+
-+ size = (tree->inode->i_sb->s_blocksize -
-+ sizeof(struct ext3_extent_header)) /
-+ sizeof(struct ext3_extent_idx);
-+#ifdef AGRESSIVE_TEST
-+ size = 5;
-+#endif
-+ return size;
-+}
-+
-+static inline int ext3_ext_space_root(struct ext3_extents_tree *tree)
-+{
-+ int size;
-+
-+ size = (tree->buffer_len - sizeof(struct ext3_extent_header)) /
-+ sizeof(struct ext3_extent);
-+#ifdef AGRESSIVE_TEST
-+ size = 3;
-+#endif
-+ return size;
-+}
-+
-+static inline int ext3_ext_space_root_idx(struct ext3_extents_tree *tree)
-+{
-+ int size;
-+
-+ size = (tree->buffer_len - sizeof(struct ext3_extent_header)) /
-+ sizeof(struct ext3_extent_idx);
-+#ifdef AGRESSIVE_TEST
-+ size = 4;
-+#endif
-+ return size;
-+}
-+
-+static void ext3_ext_show_path(struct ext3_extents_tree *tree,
-+ struct ext3_ext_path *path)
-+{
-+#ifdef EXT_DEBUG
-+ int k, l = path->p_depth;
-+
-+ ext_debug(tree, "path:");
-+ for (k = 0; k <= l; k++, path++) {
-+ if (path->p_idx) {
-+ ext_debug(tree, " %d->%d", path->p_idx->ei_block,
-+ path->p_idx->ei_leaf);
-+ } else if (path->p_ext) {
-+ ext_debug(tree, " %d:%d:%d",
-+ path->p_ext->ee_block,
-+ path->p_ext->ee_len,
-+ path->p_ext->ee_start);
-+ } else
-+ ext_debug(tree, " []");
-+ }
-+ ext_debug(tree, "\n");
-+#endif
-+}
-+
-+static void ext3_ext_show_leaf(struct ext3_extents_tree *tree,
-+ struct ext3_ext_path *path)
-+{
-+#ifdef EXT_DEBUG
-+ int depth = EXT_DEPTH(tree);
-+ struct ext3_extent_header *eh;
-+ struct ext3_extent *ex;
-+ int i;
-+
-+ if (!path)
-+ return;
-+
-+ eh = path[depth].p_hdr;
-+ ex = EXT_FIRST_EXTENT(eh);
-+
-+ for (i = 0; i < eh->eh_entries; i++, ex++) {
-+ ext_debug(tree, "%d:%d:%d ",
-+ ex->ee_block, ex->ee_len, ex->ee_start);
-+ }
-+ ext_debug(tree, "\n");
-+#endif
-+}
-+
-+static void ext3_ext_drop_refs(struct ext3_ext_path *path)
-+{
-+ int depth = path->p_depth;
-+ int i;
-+
-+ for (i = 0; i <= depth; i++, path++) {
-+ if (path->p_bh) {
-+ brelse(path->p_bh);
-+ path->p_bh = NULL;
-+ }
-+ }
-+}
-+
-+/*
-+ * binary search for closest index by given block
-+ */
-+static inline void
-+ext3_ext_binsearch_idx(struct ext3_extents_tree *tree,
-+ struct ext3_ext_path *path, int block)
-+{
-+ struct ext3_extent_header *eh = path->p_hdr;
-+ struct ext3_extent_idx *ix;
-+ int l = 0, k, r;
-+
-+ EXT_ASSERT(eh->eh_magic == EXT3_EXT_MAGIC);
-+ EXT_ASSERT(eh->eh_entries <= eh->eh_max);
-+ EXT_ASSERT(eh->eh_entries > 0);
-+
-+ ext_debug(tree, "binsearch for %d(idx): ", block);
-+
-+ path->p_idx = ix = EXT_FIRST_INDEX(eh);
-+
-+ r = k = eh->eh_entries;
-+ while (k > 1) {
-+ k = (r - l) / 2;
-+ if (block < ix[l + k].ei_block)
-+ r -= k;
-+ else
-+ l += k;
-+ ext_debug(tree, "%d:%d:%d ", k, l, r);
-+ }
-+
-+ ix += l;
-+ path->p_idx = ix;
-+ ext_debug(tree, " -> %d->%d ", path->p_idx->ei_block, path->p_idx->ei_leaf);
-+
-+ while (l++ < r) {
-+ if (block < ix->ei_block)
-+ break;
-+ path->p_idx = ix++;
-+ }
-+ ext_debug(tree, " -> %d->%d\n", path->p_idx->ei_block,
-+ path->p_idx->ei_leaf);
-+
-+#ifdef CHECK_BINSEARCH
-+ {
-+ struct ext3_extent_idx *chix;
-+
-+ chix = ix = EXT_FIRST_INDEX(eh);
-+ for (k = 0; k < eh->eh_entries; k++, ix++) {
-+ if (k != 0 && ix->ei_block <= ix[-1].ei_block) {
-+ printk("k=%d, ix=0x%p, first=0x%p\n", k,
-+ ix, EXT_FIRST_INDEX(eh));
-+ printk("%u <= %u\n",
-+ ix->ei_block,ix[-1].ei_block);
-+ }
-+ EXT_ASSERT(k == 0 || ix->ei_block > ix[-1].ei_block);
-+ if (block < ix->ei_block)
-+ break;
-+ chix = ix;
-+ }
-+ EXT_ASSERT(chix == path->p_idx);
-+ }
-+#endif
-+}
-+
-+/*
-+ * binary search for closest extent by given block
-+ */
-+static inline void
-+ext3_ext_binsearch(struct ext3_extents_tree *tree,
-+ struct ext3_ext_path *path, int block)
-+{
-+ struct ext3_extent_header *eh = path->p_hdr;
-+ struct ext3_extent *ex;
-+ int l = 0, k, r;
-+
-+ EXT_ASSERT(eh->eh_magic == EXT3_EXT_MAGIC);
-+ EXT_ASSERT(eh->eh_entries <= eh->eh_max);
-+
-+ if (eh->eh_entries == 0) {
-+ /*
-+ * this leaf is still empty:
-+ * we get such a leaf in the split/add case
-+ */
-+ return;
-+ }
-+
-+ ext_debug(tree, "binsearch for %d: ", block);
-+
-+ path->p_ext = ex = EXT_FIRST_EXTENT(eh);
-+
-+ r = k = eh->eh_entries;
-+ while (k > 1) {
-+ k = (r - l) / 2;
-+ if (block < ex[l + k].ee_block)
-+ r -= k;
-+ else
-+ l += k;
-+ ext_debug(tree, "%d:%d:%d ", k, l, r);
-+ }
-+
-+ ex += l;
-+ path->p_ext = ex;
-+ ext_debug(tree, " -> %d:%d:%d ", path->p_ext->ee_block,
-+ path->p_ext->ee_start, path->p_ext->ee_len);
-+
-+ while (l++ < r) {
-+ if (block < ex->ee_block)
-+ break;
-+ path->p_ext = ex++;
-+ }
-+ ext_debug(tree, " -> %d:%d:%d\n", path->p_ext->ee_block,
-+ path->p_ext->ee_start, path->p_ext->ee_len);
-+
-+#ifdef CHECK_BINSEARCH
-+ {
-+ struct ext3_extent *chex;
-+
-+ chex = ex = EXT_FIRST_EXTENT(eh);
-+ for (k = 0; k < eh->eh_entries; k++, ex++) {
-+ EXT_ASSERT(k == 0 || ex->ee_block > ex[-1].ee_block);
-+ if (block < ex->ee_block)
-+ break;
-+ chex = ex;
-+ }
-+ EXT_ASSERT(chex == path->p_ext);
-+ }
-+#endif
-+}
-+
-+int ext3_extent_tree_init(handle_t *handle, struct ext3_extents_tree *tree)
-+{
-+ struct ext3_extent_header *eh;
-+
-+ BUG_ON(tree->buffer_len == 0);
-+ ext3_ext_get_access_for_root(handle, tree);
-+ eh = EXT_ROOT_HDR(tree);
-+ eh->eh_depth = 0;
-+ eh->eh_entries = 0;
-+ eh->eh_magic = EXT3_EXT_MAGIC;
-+ eh->eh_max = ext3_ext_space_root(tree);
-+ ext3_ext_mark_root_dirty(handle, tree);
-+ ext3_ext_invalidate_cache(tree);
-+ return 0;
-+}
-+
-+struct ext3_ext_path *
-+ext3_ext_find_extent(struct ext3_extents_tree *tree, int block,
-+ struct ext3_ext_path *path)
-+{
-+ struct ext3_extent_header *eh;
-+ struct buffer_head *bh;
-+ int depth, i, ppos = 0;
-+
-+ EXT_ASSERT(tree);
-+ EXT_ASSERT(tree->inode);
-+ EXT_ASSERT(tree->root);
-+
-+ eh = EXT_ROOT_HDR(tree);
-+ EXT_ASSERT(eh);
-+ if (ext3_ext_check_header(eh)) {
-+ /* don't free previously allocated path
-+ * -- caller should take care */
-+ path = NULL;
-+ goto err;
-+ }
-+
-+ i = depth = EXT_DEPTH(tree);
-+ EXT_ASSERT(eh->eh_max);
-+ EXT_ASSERT(eh->eh_magic == EXT3_EXT_MAGIC);
-+
-+ /* account possible depth increase */
-+ if (!path) {
-+ path = kmalloc(sizeof(struct ext3_ext_path) * (depth + 2),
-+ GFP_NOFS);
-+ if (!path)
-+ return ERR_PTR(-ENOMEM);
-+ }
-+ memset(path, 0, sizeof(struct ext3_ext_path) * (depth + 1));
-+ path[0].p_hdr = eh;
-+
-+ /* walk through the tree */
-+ while (i) {
-+ ext_debug(tree, "depth %d: num %d, max %d\n",
-+ ppos, eh->eh_entries, eh->eh_max);
-+ ext3_ext_binsearch_idx(tree, path + ppos, block);
-+ path[ppos].p_block = path[ppos].p_idx->ei_leaf;
-+ path[ppos].p_depth = i;
-+ path[ppos].p_ext = NULL;
-+
-+ bh = sb_bread(tree->inode->i_sb, path[ppos].p_block);
-+ if (!bh)
-+ goto err;
-+
-+ eh = EXT_BLOCK_HDR(bh);
-+ ppos++;
-+ EXT_ASSERT(ppos <= depth);
-+ path[ppos].p_bh = bh;
-+ path[ppos].p_hdr = eh;
-+ i--;
-+
-+ if (ext3_ext_check_header(eh))
-+ goto err;
-+ }
-+
-+ path[ppos].p_depth = i;
-+ path[ppos].p_hdr = eh;
-+ path[ppos].p_ext = NULL;
-+ path[ppos].p_idx = NULL;
-+
-+ if (ext3_ext_check_header(eh))
-+ goto err;
-+
-+ /* find extent */
-+ ext3_ext_binsearch(tree, path + ppos, block);
-+
-+ ext3_ext_show_path(tree, path);
-+
-+ return path;
-+
-+err:
-+ printk(KERN_ERR "EXT3-fs: header is corrupted!\n");
-+ if (path) {
-+ ext3_ext_drop_refs(path);
-+ kfree(path);
-+ }
-+ return ERR_PTR(-EIO);
-+}
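-+/*
-+ * For illustration (derived from the walk above): after a successful lookup
-+ * in a depth-2 tree, path[0] holds the root header in the inode plus the
-+ * chosen index entry, path[1] the intermediate index block, and path[2] the
-+ * leaf block with p_ext pointing at the extent closest to 'block'.
-+ */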
-+
-+/*
-+ * insert a new index [logical;ptr] into the block at curp;
-+ * it checks where to insert: before curp or after curp
-+ */
-+static int ext3_ext_insert_index(handle_t *handle,
-+ struct ext3_extents_tree *tree,
-+ struct ext3_ext_path *curp,
-+ int logical, int ptr)
-+{
-+ struct ext3_extent_idx *ix;
-+ int len, err;
-+
-+ if ((err = ext3_ext_get_access(handle, tree, curp)))
-+ return err;
-+
-+ EXT_ASSERT(logical != curp->p_idx->ei_block);
-+ len = EXT_MAX_INDEX(curp->p_hdr) - curp->p_idx;
-+ if (logical > curp->p_idx->ei_block) {
-+ /* insert after */
-+ if (curp->p_idx != EXT_LAST_INDEX(curp->p_hdr)) {
-+ len = (len - 1) * sizeof(struct ext3_extent_idx);
-+ len = len < 0 ? 0 : len;
-+ ext_debug(tree, "insert new index %d after: %d. "
-+ "move %d from 0x%p to 0x%p\n",
-+ logical, ptr, len,
-+ (curp->p_idx + 1), (curp->p_idx + 2));
-+ memmove(curp->p_idx + 2, curp->p_idx + 1, len);
-+ }
-+ ix = curp->p_idx + 1;
-+ } else {
-+ /* insert before */
-+ len = len * sizeof(struct ext3_extent_idx);
-+ len = len < 0 ? 0 : len;
-+ ext_debug(tree, "insert new index %d before: %d. "
-+ "move %d from 0x%p to 0x%p\n",
-+ logical, ptr, len,
-+ curp->p_idx, (curp->p_idx + 1));
-+ memmove(curp->p_idx + 1, curp->p_idx, len);
-+ ix = curp->p_idx;
-+ }
-+
-+ ix->ei_block = logical;
-+ ix->ei_leaf = ptr;
-+ ix->ei_leaf_hi = ix->ei_unused = 0;
-+ curp->p_hdr->eh_entries++;
-+
-+ EXT_ASSERT(curp->p_hdr->eh_entries <= curp->p_hdr->eh_max);
-+ EXT_ASSERT(ix <= EXT_LAST_INDEX(curp->p_hdr));
-+
-+ err = ext3_ext_dirty(handle, tree, curp);
-+ ext3_std_error(tree->inode->i_sb, err);
-+
-+ return err;
-+}
-+
-+/*
-+ * routine inserts a new subtree into the path, using the free index entry
-+ * at depth 'at':
-+ * - allocates all needed blocks (new leaf and all intermediate index blocks)
-+ * - makes a decision where to split
-+ * - moves the remaining extents and index entries (right of the split point)
-+ * into the newly allocated blocks
-+ * - initializes the subtree
-+ */
-+static int ext3_ext_split(handle_t *handle, struct ext3_extents_tree *tree,
-+ struct ext3_ext_path *path,
-+ struct ext3_extent *newext, int at)
-+{
-+ struct buffer_head *bh = NULL;
-+ int depth = EXT_DEPTH(tree);
-+ struct ext3_extent_header *neh;
-+ struct ext3_extent_idx *fidx;
-+ struct ext3_extent *ex;
-+ int i = at, k, m, a;
-+ unsigned long newblock, oldblock, border;
-+ int *ablocks = NULL; /* array of allocated blocks */
-+ int err = 0;
-+
-+ /* make a decision: where to split? */
-+ /* FIXME: for now the decision is the simplest: at the current extent */
-+
-+ /* if the current leaf will be split, then we should use
-+ * the border from the split point */
-+ EXT_ASSERT(path[depth].p_ext <= EXT_MAX_EXTENT(path[depth].p_hdr));
-+ if (path[depth].p_ext != EXT_MAX_EXTENT(path[depth].p_hdr)) {
-+ border = path[depth].p_ext[1].ee_block;
-+ ext_debug(tree, "leaf will be split."
-+ " next leaf starts at %d\n",
-+ (int)border);
-+ } else {
-+ border = newext->ee_block;
-+ ext_debug(tree, "leaf will be added."
-+ " next leaf starts at %d\n",
-+ (int)border);
-+ }
-+
-+ /*
-+ * if an error occurs, then we break processing
-+ * and turn the filesystem read-only, so the index won't
-+ * be inserted and the tree will be in a consistent
-+ * state; the next mount will repair the buffers too
-+ */
-+
-+ /*
-+ * get an array to track all allocated blocks;
-+ * we need this to handle errors and free the
-+ * blocks on failure
-+ */
-+ ablocks = kmalloc(sizeof(unsigned long) * depth, GFP_NOFS);
-+ if (!ablocks)
-+ return -ENOMEM;
-+ memset(ablocks, 0, sizeof(unsigned long) * depth);
-+
-+ /* allocate all needed blocks */
-+ ext_debug(tree, "allocate %d blocks for indexes/leaf\n", depth - at);
-+ for (a = 0; a < depth - at; a++) {
-+ newblock = ext3_ext_new_block(handle, tree, path, newext, &err);
-+ if (newblock == 0)
-+ goto cleanup;
-+ ablocks[a] = newblock;
-+ }
-+
-+ /* initialize new leaf */
-+ newblock = ablocks[--a];
-+ EXT_ASSERT(newblock);
-+ bh = sb_getblk(tree->inode->i_sb, newblock);
-+ if (!bh) {
-+ err = -EIO;
-+ goto cleanup;
-+ }
-+ lock_buffer(bh);
-+
-+ if ((err = ext3_journal_get_create_access(handle, bh)))
-+ goto cleanup;
-+
-+ neh = EXT_BLOCK_HDR(bh);
-+ neh->eh_entries = 0;
-+ neh->eh_max = ext3_ext_space_block(tree);
-+ neh->eh_magic = EXT3_EXT_MAGIC;
-+ neh->eh_depth = 0;
-+ ex = EXT_FIRST_EXTENT(neh);
-+
-+ /* move the remainder of path[depth] to the new leaf */
-+ EXT_ASSERT(path[depth].p_hdr->eh_entries == path[depth].p_hdr->eh_max);
-+ /* start copy from next extent */
-+ /* TODO: we could do it by single memmove */
-+ m = 0;
-+ path[depth].p_ext++;
-+ while (path[depth].p_ext <=
-+ EXT_MAX_EXTENT(path[depth].p_hdr)) {
-+ ext_debug(tree, "move %d:%d:%d in new leaf %lu\n",
-+ path[depth].p_ext->ee_block,
-+ path[depth].p_ext->ee_start,
-+ path[depth].p_ext->ee_len,
-+ newblock);
-+ memmove(ex++, path[depth].p_ext++, sizeof(struct ext3_extent));
-+ neh->eh_entries++;
-+ m++;
-+ }
-+ set_buffer_uptodate(bh);
-+ unlock_buffer(bh);
-+
-+ if ((err = ext3_journal_dirty_metadata(handle, bh)))
-+ goto cleanup;
-+ brelse(bh);
-+ bh = NULL;
-+
-+ /* correct old leaf */
-+ if (m) {
-+ if ((err = ext3_ext_get_access(handle, tree, path + depth)))
-+ goto cleanup;
-+ path[depth].p_hdr->eh_entries -= m;
-+ if ((err = ext3_ext_dirty(handle, tree, path + depth)))
-+ goto cleanup;
-+
-+ }
-+
-+ /* create intermediate indexes */
-+ k = depth - at - 1;
-+ EXT_ASSERT(k >= 0);
-+ if (k)
-+ ext_debug(tree, "create %d intermediate indices\n", k);
-+ /* insert new index into current index block */
-+ /* current depth stored in i var */
-+ i = depth - 1;
-+ while (k--) {
-+ oldblock = newblock;
-+ newblock = ablocks[--a];
-+ bh = sb_getblk(tree->inode->i_sb, newblock);
-+ if (!bh) {
-+ err = -EIO;
-+ goto cleanup;
-+ }
-+ lock_buffer(bh);
-+
-+ if ((err = ext3_journal_get_create_access(handle, bh)))
-+ goto cleanup;
-+
-+ neh = EXT_BLOCK_HDR(bh);
-+ neh->eh_entries = 1;
-+ neh->eh_magic = EXT3_EXT_MAGIC;
-+ neh->eh_max = ext3_ext_space_block_idx(tree);
-+ neh->eh_depth = depth - i;
-+ fidx = EXT_FIRST_INDEX(neh);
-+ fidx->ei_block = border;
-+ fidx->ei_leaf = oldblock;
-+ fidx->ei_leaf_hi = fidx->ei_unused = 0;
-+
-+ ext_debug(tree, "int.index at %d (block %lu): %lu -> %lu\n",
-+ i, newblock, border, oldblock);
-+ /* copy indexes */
-+ m = 0;
-+ path[i].p_idx++;
-+
-+ ext_debug(tree, "cur 0x%p, last 0x%p\n", path[i].p_idx,
-+ EXT_MAX_INDEX(path[i].p_hdr));
-+ EXT_ASSERT(EXT_MAX_INDEX(path[i].p_hdr) ==
-+ EXT_LAST_INDEX(path[i].p_hdr));
-+ while (path[i].p_idx <= EXT_MAX_INDEX(path[i].p_hdr)) {
-+ ext_debug(tree, "%d: move %d:%d in new index %lu\n",
-+ i, path[i].p_idx->ei_block,
-+ path[i].p_idx->ei_leaf, newblock);
-+ memmove(++fidx, path[i].p_idx++,
-+ sizeof(struct ext3_extent_idx));
-+ neh->eh_entries++;
-+ EXT_ASSERT(neh->eh_entries <= neh->eh_max);
-+ m++;
-+ }
-+ set_buffer_uptodate(bh);
-+ unlock_buffer(bh);
-+
-+ if ((err = ext3_journal_dirty_metadata(handle, bh)))
-+ goto cleanup;
-+ brelse(bh);
-+ bh = NULL;
-+
-+ /* correct old index */
-+ if (m) {
-+ err = ext3_ext_get_access(handle, tree, path + i);
-+ if (err)
-+ goto cleanup;
-+ path[i].p_hdr->eh_entries -= m;
-+ err = ext3_ext_dirty(handle, tree, path + i);
-+ if (err)
-+ goto cleanup;
-+ }
-+
-+ i--;
-+ }
-+
-+ /* insert new index */
-+ if (!err)
-+ err = ext3_ext_insert_index(handle, tree, path + at,
-+ border, newblock);
-+
-+cleanup:
-+ if (bh) {
-+ if (buffer_locked(bh))
-+ unlock_buffer(bh);
-+ brelse(bh);
-+ }
-+
-+ if (err) {
-+ /* free all allocated blocks in error case */
-+ for (i = 0; i < depth; i++) {
-+ if (!ablocks[i])
-+ continue;
-+ ext3_free_blocks(handle, tree->inode, ablocks[i], 1);
-+ }
-+ }
-+ kfree(ablocks);
-+
-+ return err;
-+}
-+
-+/*
-+ * routine implements tree growing procedure:
-+ * - allocates new block
-+ * - moves top-level data (index block or leaf) into the new block
-+ * - initializes the new top-level, creating an index that points to the
-+ * just-created block
-+ */
-+static int ext3_ext_grow_indepth(handle_t *handle,
-+ struct ext3_extents_tree *tree,
-+ struct ext3_ext_path *path,
-+ struct ext3_extent *newext)
-+{
-+ struct ext3_ext_path *curp = path;
-+ struct ext3_extent_header *neh;
-+ struct ext3_extent_idx *fidx;
-+ struct buffer_head *bh;
-+ unsigned long newblock;
-+ int err = 0;
-+
-+ newblock = ext3_ext_new_block(handle, tree, path, newext, &err);
-+ if (newblock == 0)
-+ return err;
-+
-+ bh = sb_getblk(tree->inode->i_sb, newblock);
-+ if (!bh) {
-+ err = -EIO;
-+ ext3_std_error(tree->inode->i_sb, err);
-+ return err;
-+ }
-+ lock_buffer(bh);
-+
-+ if ((err = ext3_journal_get_create_access(handle, bh))) {
-+ unlock_buffer(bh);
-+ goto out;
-+ }
-+
-+ /* move top-level index/leaf into new block */
-+ memmove(bh->b_data, curp->p_hdr, tree->buffer_len);
-+
-+ /* set size of new block */
-+ neh = EXT_BLOCK_HDR(bh);
-+ /* the old root could have indexes or leaves,
-+ * so calculate eh_max the right way */
-+ if (EXT_DEPTH(tree))
-+ neh->eh_max = ext3_ext_space_block_idx(tree);
-+ else
-+ neh->eh_max = ext3_ext_space_block(tree);
-+ neh->eh_magic = EXT3_EXT_MAGIC;
-+ set_buffer_uptodate(bh);
-+ unlock_buffer(bh);
-+
-+ if ((err = ext3_journal_dirty_metadata(handle, bh)))
-+ goto out;
-+
-+ /* create index in new top-level index: num,max,pointer */
-+ if ((err = ext3_ext_get_access(handle, tree, curp)))
-+ goto out;
-+
-+ curp->p_hdr->eh_magic = EXT3_EXT_MAGIC;
-+ curp->p_hdr->eh_max = ext3_ext_space_root_idx(tree);
-+ curp->p_hdr->eh_entries = 1;
-+ curp->p_idx = EXT_FIRST_INDEX(curp->p_hdr);
-+ /* FIXME: it works, but actually path[0] can be index */
-+ curp->p_idx->ei_block = EXT_FIRST_EXTENT(path[0].p_hdr)->ee_block;
-+ curp->p_idx->ei_leaf = newblock;
-+ curp->p_idx->ei_leaf_hi = curp->p_idx->ei_unused = 0;
-+
-+ neh = EXT_ROOT_HDR(tree);
-+ fidx = EXT_FIRST_INDEX(neh);
-+ ext_debug(tree, "new root: num %d(%d), lblock %d, ptr %d\n",
-+ neh->eh_entries, neh->eh_max, fidx->ei_block, fidx->ei_leaf);
-+
-+ neh->eh_depth = path->p_depth + 1;
-+ err = ext3_ext_dirty(handle, tree, curp);
-+out:
-+ brelse(bh);
-+
-+ return err;
-+}
-+
-+/*
-+ * routine finds an empty index and adds a new leaf; if no free index is
-+ * found, then it requests in-depth growing
-+ */
-+static int ext3_ext_create_new_leaf(handle_t *handle,
-+ struct ext3_extents_tree *tree,
-+ struct ext3_ext_path *path,
-+ struct ext3_extent *newext)
-+{
-+ struct ext3_ext_path *curp;
-+ int depth, i, err = 0;
-+
-+repeat:
-+ i = depth = EXT_DEPTH(tree);
-+
-+ /* walk up to the tree and look for free index entry */
-+ curp = path + depth;
-+ while (i > 0 && !EXT_HAS_FREE_INDEX(curp)) {
-+ i--;
-+ curp--;
-+ }
-+
-+ /* we use an already allocated block for the index block,
-+ * so subsequent data blocks should be contiguous */
-+ if (EXT_HAS_FREE_INDEX(curp)) {
-+ /* if we found an index with a free entry, then use that
-+ * entry: create the needed subtree and add the new leaf */
-+ err = ext3_ext_split(handle, tree, path, newext, i);
-+
-+ /* refill path */
-+ ext3_ext_drop_refs(path);
-+ path = ext3_ext_find_extent(tree, newext->ee_block, path);
-+ if (IS_ERR(path))
-+ err = PTR_ERR(path);
-+ } else {
-+ /* tree is full, time to grow in depth */
-+ err = ext3_ext_grow_indepth(handle, tree, path, newext);
-+
-+ /* refill path */
-+ ext3_ext_drop_refs(path);
-+ path = ext3_ext_find_extent(tree, newext->ee_block, path);
-+ if (IS_ERR(path))
-+ err = PTR_ERR(path);
-+
-+ /*
-+ * only the first grow (depth 0 -> 1) produces free space;
-+ * in all other cases we have to split the grown tree
-+ */
-+ depth = EXT_DEPTH(tree);
-+ if (path[depth].p_hdr->eh_entries == path[depth].p_hdr->eh_max) {
-+ /* now we need split */
-+ goto repeat;
-+ }
-+ }
-+
-+ if (err)
-+ return err;
-+
-+ return 0;
-+}
-+
-+/*
-+ * returns the allocated block in the subsequent extent, or EXT_MAX_BLOCK
-+ * NOTE: it considers the block number from an index entry as an
-+ * allocated block; thus, index entries have to be consistent
-+ * with the leaves
-+ */
-+static unsigned long
-+ext3_ext_next_allocated_block(struct ext3_ext_path *path)
-+{
-+ int depth;
-+
-+ EXT_ASSERT(path != NULL);
-+ depth = path->p_depth;
-+
-+ if (depth == 0 && path->p_ext == NULL)
-+ return EXT_MAX_BLOCK;
-+
-+ /* FIXME: what if index isn't full ?! */
-+ while (depth >= 0) {
-+ if (depth == path->p_depth) {
-+ /* leaf */
-+ if (path[depth].p_ext !=
-+ EXT_LAST_EXTENT(path[depth].p_hdr))
-+ return path[depth].p_ext[1].ee_block;
-+ } else {
-+ /* index */
-+ if (path[depth].p_idx !=
-+ EXT_LAST_INDEX(path[depth].p_hdr))
-+ return path[depth].p_idx[1].ei_block;
-+ }
-+ depth--;
-+ }
-+
-+ return EXT_MAX_BLOCK;
-+}
-+
-+/*
-+ * returns first allocated block from next leaf or EXT_MAX_BLOCK
-+ */
-+static unsigned ext3_ext_next_leaf_block(struct ext3_extents_tree *tree,
-+ struct ext3_ext_path *path)
-+{
-+ int depth;
-+
-+ EXT_ASSERT(path != NULL);
-+ depth = path->p_depth;
-+
-+ /* a zero-depth tree has no separate leaf blocks at all */
-+ if (depth == 0)
-+ return EXT_MAX_BLOCK;
-+
-+ /* go to index block */
-+ depth--;
-+
-+ while (depth >= 0) {
-+ if (path[depth].p_idx !=
-+ EXT_LAST_INDEX(path[depth].p_hdr))
-+ return path[depth].p_idx[1].ei_block;
-+ depth--;
-+ }
-+
-+ return EXT_MAX_BLOCK;
-+}
-+
-+/*
-+ * if a leaf gets modified and the modified extent is the first in the leaf,
-+ * then we have to correct all the indexes above it
-+ * TODO: do we need to correct the tree in all cases?
-+ */
-+int ext3_ext_correct_indexes(handle_t *handle, struct ext3_extents_tree *tree,
-+ struct ext3_ext_path *path)
-+{
-+ struct ext3_extent_header *eh;
-+ int depth = EXT_DEPTH(tree);
-+ struct ext3_extent *ex;
-+ unsigned long border;
-+ int k, err = 0;
-+
-+ eh = path[depth].p_hdr;
-+ ex = path[depth].p_ext;
-+ EXT_ASSERT(ex);
-+ EXT_ASSERT(eh);
-+
-+ if (depth == 0) {
-+ /* there is no tree at all */
-+ return 0;
-+ }
-+
-+ if (ex != EXT_FIRST_EXTENT(eh)) {
-+ /* we correct tree if first leaf got modified only */
-+ return 0;
-+ }
-+
-+ /*
-+ * TODO: we need correction if the border is smaller than the current one
-+ */
-+ k = depth - 1;
-+ border = path[depth].p_ext->ee_block;
-+ if ((err = ext3_ext_get_access(handle, tree, path + k)))
-+ return err;
-+ path[k].p_idx->ei_block = border;
-+ if ((err = ext3_ext_dirty(handle, tree, path + k)))
-+ return err;
-+
-+ while (k--) {
-+ /* change all left-side indexes */
-+ if (path[k+1].p_idx != EXT_FIRST_INDEX(path[k+1].p_hdr))
-+ break;
-+ if ((err = ext3_ext_get_access(handle, tree, path + k)))
-+ break;
-+ path[k].p_idx->ei_block = border;
-+ if ((err = ext3_ext_dirty(handle, tree, path + k)))
-+ break;
-+ }
-+
-+ return err;
-+}
-+
-+static inline int
-+ext3_can_extents_be_merged(struct ext3_extents_tree *tree,
-+ struct ext3_extent *ex1,
-+ struct ext3_extent *ex2)
-+{
-+ if (ex1->ee_block + ex1->ee_len != ex2->ee_block)
-+ return 0;
-+
-+#ifdef AGRESSIVE_TEST
-+ if (ex1->ee_len >= 4)
-+ return 0;
-+#endif
-+
-+ if (!tree->ops->mergable)
-+ return 1;
-+
-+ return tree->ops->mergable(ex1, ex2);
-+}
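
For the plain block-mapping tree the test above boils down to logical plus physical contiguity (the physical side is supplied further down by ext3_ext_mergable() through tree->ops->mergable). A minimal user-space sketch of that combined check, with the extent reduced to plain integers and the AGRESSIVE_TEST cap left out; illustrative only, not kernel code:

#include <stdio.h>

struct toy_extent {
        unsigned int ee_block;  /* first logical block covered  */
        unsigned int ee_len;    /* number of blocks covered     */
        unsigned int ee_start;  /* first physical block         */
};

/* extents merge when ex2 starts right where ex1 ends, logically and physically */
static int toy_extents_mergeable(const struct toy_extent *ex1,
                                 const struct toy_extent *ex2)
{
        return ex1->ee_block + ex1->ee_len == ex2->ee_block &&
               ex1->ee_start + ex1->ee_len == ex2->ee_start;
}

int main(void)
{
        struct toy_extent a = { 100, 8, 5000 };
        struct toy_extent b = { 108, 4, 5008 };  /* contiguous with a            */
        struct toy_extent c = { 108, 4, 6000 };  /* logically adjacent, physically not */

        printf("a+b mergeable: %d\n", toy_extents_mergeable(&a, &b)); /* 1 */
        printf("a+c mergeable: %d\n", toy_extents_mergeable(&a, &c)); /* 0 */
        return 0;
}
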
-+
-+/*
-+ * this routine tries to merge the requested extent into the existing
-+ * extent or inserts requested extent as new one into the tree,
-+ * creating new leaf in no-space case
-+ */
-+int ext3_ext_insert_extent(handle_t *handle, struct ext3_extents_tree *tree,
-+ struct ext3_ext_path *path,
-+ struct ext3_extent *newext)
-+{
-+ struct ext3_extent_header * eh;
-+ struct ext3_extent *ex, *fex;
-+ struct ext3_extent *nearex; /* nearest extent */
-+ struct ext3_ext_path *npath = NULL;
-+ int depth, len, err, next;
-+
-+ EXT_ASSERT(newext->ee_len > 0);
-+ depth = EXT_DEPTH(tree);
-+ ex = path[depth].p_ext;
-+ EXT_ASSERT(path[depth].p_hdr);
-+
-+ /* try to insert block into found extent and return */
-+ if (ex && ext3_can_extents_be_merged(tree, ex, newext)) {
-+ ext_debug(tree, "append %d block to %d:%d (from %d)\n",
-+ newext->ee_len, ex->ee_block, ex->ee_len,
-+ ex->ee_start);
-+ if ((err = ext3_ext_get_access(handle, tree, path + depth)))
-+ return err;
-+ ex->ee_len += newext->ee_len;
-+ eh = path[depth].p_hdr;
-+ nearex = ex;
-+ goto merge;
-+ }
-+
-+repeat:
-+ depth = EXT_DEPTH(tree);
-+ eh = path[depth].p_hdr;
-+ if (eh->eh_entries < eh->eh_max)
-+ goto has_space;
-+
-+ /* probably next leaf has space for us? */
-+ fex = EXT_LAST_EXTENT(eh);
-+ next = ext3_ext_next_leaf_block(tree, path);
-+ if (newext->ee_block > fex->ee_block && next != EXT_MAX_BLOCK) {
-+ ext_debug(tree, "next leaf block - %d\n", next);
-+ EXT_ASSERT(!npath);
-+ npath = ext3_ext_find_extent(tree, next, NULL);
-+ if (IS_ERR(npath))
-+ return PTR_ERR(npath);
-+ EXT_ASSERT(npath->p_depth == path->p_depth);
-+ eh = npath[depth].p_hdr;
-+ if (eh->eh_entries < eh->eh_max) {
-+ ext_debug(tree, "next leaf isnt full(%d)\n",
-+ eh->eh_entries);
-+ path = npath;
-+ goto repeat;
-+ }
-+ ext_debug(tree, "next leaf hasno free space(%d,%d)\n",
-+ eh->eh_entries, eh->eh_max);
-+ }
-+
-+ /*
-+ * there is no free space in the found leaf;
-+ * we are going to add a new leaf to the tree
-+ */
-+ err = ext3_ext_create_new_leaf(handle, tree, path, newext);
-+ if (err)
-+ goto cleanup;
-+ depth = EXT_DEPTH(tree);
-+ eh = path[depth].p_hdr;
-+
-+has_space:
-+ nearex = path[depth].p_ext;
-+
-+ if ((err = ext3_ext_get_access(handle, tree, path + depth)))
-+ goto cleanup;
-+
-+ if (!nearex) {
-+ /* there is no extent in this leaf, create first one */
-+ ext_debug(tree, "first extent in the leaf: %d:%d:%d\n",
-+ newext->ee_block, newext->ee_start,
-+ newext->ee_len);
-+ path[depth].p_ext = EXT_FIRST_EXTENT(eh);
-+ } else if (newext->ee_block > nearex->ee_block) {
-+ EXT_ASSERT(newext->ee_block != nearex->ee_block);
-+ if (nearex != EXT_LAST_EXTENT(eh)) {
-+ len = EXT_MAX_EXTENT(eh) - nearex;
-+ len = (len - 1) * sizeof(struct ext3_extent);
-+ len = len < 0 ? 0 : len;
-+ ext_debug(tree, "insert %d:%d:%d after: nearest 0x%p, "
-+ "move %d from 0x%p to 0x%p\n",
-+ newext->ee_block, newext->ee_start,
-+ newext->ee_len,
-+ nearex, len, nearex + 1, nearex + 2);
-+ memmove(nearex + 2, nearex + 1, len);
-+ }
-+ path[depth].p_ext = nearex + 1;
-+ } else {
-+ EXT_ASSERT(newext->ee_block != nearex->ee_block);
-+ len = (EXT_MAX_EXTENT(eh) - nearex) * sizeof(struct ext3_extent);
-+ len = len < 0 ? 0 : len;
-+ ext_debug(tree, "insert %d:%d:%d before: nearest 0x%p, "
-+ "move %d from 0x%p to 0x%p\n",
-+ newext->ee_block, newext->ee_start, newext->ee_len,
-+ nearex, len, nearex + 1, nearex + 2);
-+ memmove(nearex + 1, nearex, len);
-+ path[depth].p_ext = nearex;
-+ }
-+
-+ eh->eh_entries++;
-+ nearex = path[depth].p_ext;
-+ nearex->ee_block = newext->ee_block;
-+ nearex->ee_start = newext->ee_start;
-+ nearex->ee_len = newext->ee_len;
-+ /* FIXME: support for large fs */
-+ nearex->ee_start_hi = 0;
-+
-+merge:
-+ /* try to merge extents to the right */
-+ while (nearex < EXT_LAST_EXTENT(eh)) {
-+ if (!ext3_can_extents_be_merged(tree, nearex, nearex + 1))
-+ break;
-+ /* merge with next extent! */
-+ nearex->ee_len += nearex[1].ee_len;
-+ if (nearex + 1 < EXT_LAST_EXTENT(eh)) {
-+ len = (EXT_LAST_EXTENT(eh) - nearex - 1) *
-+ sizeof(struct ext3_extent);
-+ memmove(nearex + 1, nearex + 2, len);
-+ }
-+ eh->eh_entries--;
-+ EXT_ASSERT(eh->eh_entries > 0);
-+ }
-+
-+ /* try to merge extents to the left */
-+
-+ /* time to correct all indexes above */
-+ err = ext3_ext_correct_indexes(handle, tree, path);
-+ if (err)
-+ goto cleanup;
-+
-+ err = ext3_ext_dirty(handle, tree, path + depth);
-+
-+cleanup:
-+ if (npath) {
-+ ext3_ext_drop_refs(npath);
-+ kfree(npath);
-+ }
-+ ext3_ext_tree_changed(tree);
-+ ext3_ext_invalidate_cache(tree);
-+ return err;
-+}
-+
-+int ext3_ext_walk_space(struct ext3_extents_tree *tree, unsigned long block,
-+ unsigned long num, ext_prepare_callback func)
-+{
-+ struct ext3_ext_path *path = NULL;
-+ struct ext3_ext_cache cbex;
-+ struct ext3_extent *ex;
-+ unsigned long next, start = 0, end = 0;
-+ unsigned long last = block + num;
-+ int depth, exists, err = 0;
-+
-+ EXT_ASSERT(tree);
-+ EXT_ASSERT(func);
-+ EXT_ASSERT(tree->inode);
-+ EXT_ASSERT(tree->root);
-+
-+ while (block < last && block != EXT_MAX_BLOCK) {
-+ num = last - block;
-+ /* find extent for this block */
-+ path = ext3_ext_find_extent(tree, block, path);
-+ if (IS_ERR(path)) {
-+ err = PTR_ERR(path);
-+ path = NULL;
-+ break;
-+ }
-+
-+ depth = EXT_DEPTH(tree);
-+ EXT_ASSERT(path[depth].p_hdr);
-+ ex = path[depth].p_ext;
-+ next = ext3_ext_next_allocated_block(path);
-+
-+ exists = 0;
-+ if (!ex) {
-+ /* there is no extent yet, so try to allocate
-+ * all requested space */
-+ start = block;
-+ end = block + num;
-+ } else if (ex->ee_block > block) {
-+ /* need to allocate space before found extent */
-+ start = block;
-+ end = ex->ee_block;
-+ if (block + num < end)
-+ end = block + num;
-+ } else if (block >= ex->ee_block + ex->ee_len) {
-+ /* need to allocate space after found extent */
-+ start = block;
-+ end = block + num;
-+ if (end >= next)
-+ end = next;
-+ } else if (block >= ex->ee_block) {
-+ /*
-+ * some part of requested space is covered
-+ * by found extent
-+ */
-+ start = block;
-+ end = ex->ee_block + ex->ee_len;
-+ if (block + num < end)
-+ end = block + num;
-+ exists = 1;
-+ } else {
-+ BUG();
-+ }
-+ EXT_ASSERT(end > start);
-+
-+ if (!exists) {
-+ cbex.ec_block = start;
-+ cbex.ec_len = end - start;
-+ cbex.ec_start = 0;
-+ cbex.ec_type = EXT3_EXT_CACHE_GAP;
-+ } else {
-+ cbex.ec_block = ex->ee_block;
-+ cbex.ec_len = ex->ee_len;
-+ cbex.ec_start = ex->ee_start;
-+ cbex.ec_type = EXT3_EXT_CACHE_EXTENT;
-+ }
-+
-+ EXT_ASSERT(cbex.ec_len > 0);
-+ EXT_ASSERT(path[depth].p_hdr);
-+ err = func(tree, path, &cbex);
-+ ext3_ext_drop_refs(path);
-+
-+ if (err < 0)
-+ break;
-+ if (err == EXT_REPEAT)
-+ continue;
-+ else if (err == EXT_BREAK) {
-+ err = 0;
-+ break;
-+ }
-+
-+ if (EXT_DEPTH(tree) != depth) {
-+ /* depth was changed. we have to realloc path */
-+ kfree(path);
-+ path = NULL;
-+ }
-+
-+ block = cbex.ec_block + cbex.ec_len;
-+ }
-+
-+ if (path) {
-+ ext3_ext_drop_refs(path);
-+ kfree(path);
-+ }
-+
-+ return err;
-+}
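
The gap/extent classification inside the walk loop above can be exercised on its own. A stand-alone sketch under simplified assumptions: plain integers, 'next' standing in for ext3_ext_next_allocated_block(), and the "no extent at all" case modeled as elen == 0:

#include <stdio.h>

/* classify the range starting at 'block' (num blocks long) against the
 * found extent [eblock, eblock+elen); returns 1 if covered, 0 for a gap,
 * and reports the chunk [start, end) the callback would see */
static int toy_classify(unsigned long block, unsigned long num,
                        unsigned long eblock, unsigned long elen,
                        unsigned long next,
                        unsigned long *start, unsigned long *end)
{
        if (elen == 0) {                        /* no extent at all */
                *start = block;
                *end = block + num;
                return 0;
        } else if (eblock > block) {            /* gap before the extent */
                *start = block;
                *end = eblock < block + num ? eblock : block + num;
                return 0;
        } else if (block >= eblock + elen) {    /* gap after the extent */
                *start = block;
                *end = block + num < next ? block + num : next;
                return 0;
        }
        *start = block;                         /* block lies inside the extent */
        *end = eblock + elen < block + num ? eblock + elen : block + num;
        return 1;
}

int main(void)
{
        unsigned long s, e;
        int covered;

        /* extent [100,110), next allocation at 200, query blocks 90..129 */
        covered = toy_classify(90, 40, 100, 10, 200, &s, &e);
        printf("covered=%d chunk [%lu,%lu)\n", covered, s, e);  /* 0 [90,100)  */
        covered = toy_classify(100, 30, 100, 10, 200, &s, &e);
        printf("covered=%d chunk [%lu,%lu)\n", covered, s, e);  /* 1 [100,110) */
        covered = toy_classify(110, 20, 100, 10, 200, &s, &e);
        printf("covered=%d chunk [%lu,%lu)\n", covered, s, e);  /* 0 [110,130) */
        return 0;
}
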
-+
-+static inline void
-+ext3_ext_put_in_cache(struct ext3_extents_tree *tree, __u32 block,
-+ __u32 len, __u32 start, int type)
-+{
-+ EXT_ASSERT(len > 0);
-+ if (tree->cex) {
-+ tree->cex->ec_type = type;
-+ tree->cex->ec_block = block;
-+ tree->cex->ec_len = len;
-+ tree->cex->ec_start = start;
-+ }
-+}
-+
-+/*
-+ * this routine calculates the boundaries of the gap the requested block
-+ * fits into and caches this gap
-+ */
-+static inline void
-+ext3_ext_put_gap_in_cache(struct ext3_extents_tree *tree,
-+ struct ext3_ext_path *path,
-+ unsigned long block)
-+{
-+ int depth = EXT_DEPTH(tree);
-+ unsigned long lblock, len;
-+ struct ext3_extent *ex;
-+
-+ if (!tree->cex)
-+ return;
-+
-+ ex = path[depth].p_ext;
-+ if (ex == NULL) {
-+ /* there is no extent yet, so gap is [0;-] */
-+ lblock = 0;
-+ len = EXT_MAX_BLOCK;
-+ ext_debug(tree, "cache gap(whole file):");
-+ } else if (block < ex->ee_block) {
-+ lblock = block;
-+ len = ex->ee_block - block;
-+ ext_debug(tree, "cache gap(before): %lu [%lu:%lu]",
-+ (unsigned long) block,
-+ (unsigned long) ex->ee_block,
-+ (unsigned long) ex->ee_len);
-+ } else if (block >= ex->ee_block + ex->ee_len) {
-+ lblock = ex->ee_block + ex->ee_len;
-+ len = ext3_ext_next_allocated_block(path);
-+ ext_debug(tree, "cache gap(after): [%lu:%lu] %lu",
-+ (unsigned long) ex->ee_block,
-+ (unsigned long) ex->ee_len,
-+ (unsigned long) block);
-+ EXT_ASSERT(len > lblock);
-+ len = len - lblock;
-+ } else {
-+ lblock = len = 0;
-+ BUG();
-+ }
-+
-+ ext_debug(tree, " -> %lu:%lu\n", (unsigned long) lblock, len);
-+ ext3_ext_put_in_cache(tree, lblock, len, 0, EXT3_EXT_CACHE_GAP);
-+}
-+
-+static inline int
-+ext3_ext_in_cache(struct ext3_extents_tree *tree, unsigned long block,
-+ struct ext3_extent *ex)
-+{
-+ struct ext3_ext_cache *cex = tree->cex;
-+
-+ /* is there cache storage at all? */
-+ if (!cex)
-+ return EXT3_EXT_CACHE_NO;
-+
-+ /* has cache valid data? */
-+ if (cex->ec_type == EXT3_EXT_CACHE_NO)
-+ return EXT3_EXT_CACHE_NO;
-+
-+ EXT_ASSERT(cex->ec_type == EXT3_EXT_CACHE_GAP ||
-+ cex->ec_type == EXT3_EXT_CACHE_EXTENT);
-+ if (block >= cex->ec_block && block < cex->ec_block + cex->ec_len) {
-+ ex->ee_block = cex->ec_block;
-+ ex->ee_start = cex->ec_start;
-+ ex->ee_start_hi = 0;
-+ ex->ee_len = cex->ec_len;
-+ ext_debug(tree, "%lu cached by %lu:%lu:%lu\n",
-+ (unsigned long) block,
-+ (unsigned long) ex->ee_block,
-+ (unsigned long) ex->ee_len,
-+ (unsigned long) ex->ee_start);
-+ return cex->ec_type;
-+ }
-+
-+ /* not in cache */
-+ return EXT3_EXT_CACHE_NO;
-+}
-+
-+/*
-+ * routine removes index from the index block
-+ * it's used in truncate case only. thus all requests are for
-+ * last index in the block only
-+ */
-+int ext3_ext_rm_idx(handle_t *handle, struct ext3_extents_tree *tree,
-+ struct ext3_ext_path *path)
-+{
-+ struct buffer_head *bh;
-+ int err;
-+
-+ /* free index block */
-+ path--;
-+ EXT_ASSERT(path->p_hdr->eh_entries);
-+ if ((err = ext3_ext_get_access(handle, tree, path)))
-+ return err;
-+ path->p_hdr->eh_entries--;
-+ if ((err = ext3_ext_dirty(handle, tree, path)))
-+ return err;
-+ ext_debug(tree, "index is empty, remove it, free block %d\n",
-+ path->p_idx->ei_leaf);
-+ bh = sb_find_get_block(tree->inode->i_sb, path->p_idx->ei_leaf);
-+ ext3_forget(handle, 1, tree->inode, bh, path->p_idx->ei_leaf);
-+ ext3_free_blocks(handle, tree->inode, path->p_idx->ei_leaf, 1);
-+ return err;
-+}
-+
-+int ext3_ext_calc_credits_for_insert(struct ext3_extents_tree *tree,
-+ struct ext3_ext_path *path)
-+{
-+ int depth = EXT_DEPTH(tree);
-+ int needed;
-+
-+ if (path) {
-+ /* probably there is space in leaf? */
-+ if (path[depth].p_hdr->eh_entries < path[depth].p_hdr->eh_max)
-+ return 1;
-+ }
-+
-+ /*
-+ * the worst case we're expecting is creation of the
-+ * new root (growing in depth) with index splitting;
-+ * for splitting we have to consider depth + 1 because
-+ * a previous grow could have increased it
-+ */
-+ depth = depth + 1;
-+
-+ /*
-+ * growing in depth:
-+ * block allocation + new root + old root
-+ */
-+ needed = EXT3_ALLOC_NEEDED + 2;
-+
-+ /* index split. we may need to:
-+ * allocate intermediate indexes and a new leaf,
-+ * change two blocks at each level except the root,
-+ * modify the root block (inode)
-+ */
-+ needed += (depth * EXT3_ALLOC_NEEDED) + (2 * depth) + 1;
-+
-+ return needed;
-+}
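
To get a concrete feel for the worst-case estimate, here is the same arithmetic as a stand-alone program, with EXT3_ALLOC_NEEDED fixed at 3 (block bitmap + group descriptor + superblock, as defined in ext3_extents.h); a sketch, not kernel code:

#include <stdio.h>

#define TOY_ALLOC_NEEDED 3   /* block bitmap + group descriptor + superblock */

/* worst-case journal credits to insert one extent into a tree of given depth */
static int toy_credits_for_insert(int depth)
{
        int needed;

        depth = depth + 1;                   /* a previous grow may add a level      */
        needed = TOY_ALLOC_NEEDED + 2;       /* grow in depth: alloc + new + old root */
        needed += depth * TOY_ALLOC_NEEDED   /* allocate intermediate indexes + leaf  */
                  + 2 * depth                /* touch two blocks at each level        */
                  + 1;                       /* root block (the inode itself)         */
        return needed;
}

int main(void)
{
        int d;

        for (d = 0; d <= 3; d++)
                printf("depth %d -> %d credits\n", d, toy_credits_for_insert(d));
        /* depth 0 -> 11, depth 1 -> 16, depth 2 -> 21, depth 3 -> 26 */
        return 0;
}
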
-+
-+static int
-+ext3_ext_split_for_rm(handle_t *handle, struct ext3_extents_tree *tree,
-+ struct ext3_ext_path *path, unsigned long start,
-+ unsigned long end)
-+{
-+ struct ext3_extent *ex, tex;
-+ struct ext3_ext_path *npath;
-+ int depth, creds, err;
-+
-+ depth = EXT_DEPTH(tree);
-+ ex = path[depth].p_ext;
-+ EXT_ASSERT(ex);
-+ EXT_ASSERT(end < ex->ee_block + ex->ee_len - 1);
-+ EXT_ASSERT(ex->ee_block < start);
-+
-+ /* calculate tail extent */
-+ tex.ee_block = end + 1;
-+ EXT_ASSERT(tex.ee_block < ex->ee_block + ex->ee_len);
-+ tex.ee_len = ex->ee_block + ex->ee_len - tex.ee_block;
-+
-+ creds = ext3_ext_calc_credits_for_insert(tree, path);
-+ handle = ext3_ext_journal_restart(handle, creds);
-+ if (IS_ERR(handle))
-+ return PTR_ERR(handle);
-+
-+ /* calculate head extent. use primary extent */
-+ err = ext3_ext_get_access(handle, tree, path + depth);
-+ if (err)
-+ return err;
-+ ex->ee_len = start - ex->ee_block;
-+ err = ext3_ext_dirty(handle, tree, path + depth);
-+ if (err)
-+ return err;
-+
-+ /* FIXME: some callback to free underlying resource
-+ * and correct ee_start? */
-+ ext_debug(tree, "split extent: head %u:%u, tail %u:%u\n",
-+ ex->ee_block, ex->ee_len, tex.ee_block, tex.ee_len);
-+
-+ npath = ext3_ext_find_extent(tree, ex->ee_block, NULL);
-+ if (IS_ERR(npath))
-+ return PTR_ERR(npath);
-+ depth = EXT_DEPTH(tree);
-+ EXT_ASSERT(npath[depth].p_ext->ee_block == ex->ee_block);
-+ EXT_ASSERT(npath[depth].p_ext->ee_len == ex->ee_len);
-+
-+ err = ext3_ext_insert_extent(handle, tree, npath, &tex);
-+ ext3_ext_drop_refs(npath);
-+ kfree(npath);
-+
-+ return err;
-+}
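
The head/tail arithmetic used above is easy to sanity-check in isolation. A hypothetical stand-alone sketch with journaling and physical-block handling left out; the values are made up for illustration:

#include <stdio.h>

/* split the extent [eblock, eblock+elen) around the removed range [start, end]:
 * the head keeps [eblock, start), the tail keeps [end+1, eblock+elen) */
static void toy_split_for_rm(unsigned long eblock, unsigned long elen,
                             unsigned long start, unsigned long end,
                             unsigned long *head_len,
                             unsigned long *tail_block, unsigned long *tail_len)
{
        *tail_block = end + 1;
        *tail_len = eblock + elen - *tail_block;
        *head_len = start - eblock;
}

int main(void)
{
        unsigned long head_len, tail_block, tail_len;

        /* extent [100,200), remove blocks 120..149 */
        toy_split_for_rm(100, 100, 120, 149, &head_len, &tail_block, &tail_len);
        printf("head 100:%lu, tail %lu:%lu\n", head_len, tail_block, tail_len);
        /* prints: head 100:20, tail 150:50 */
        return 0;
}
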
-+
-+static int
-+ext3_ext_rm_leaf(handle_t *handle, struct ext3_extents_tree *tree,
-+ struct ext3_ext_path *path, unsigned long start,
-+ unsigned long end)
-+{
-+ struct ext3_extent *ex, *fu = NULL, *lu, *le;
-+ int err = 0, correct_index = 0;
-+ int depth = EXT_DEPTH(tree), credits;
-+ struct ext3_extent_header *eh;
-+ unsigned a, b, block, num;
-+
-+ ext_debug(tree, "remove [%lu:%lu] in leaf\n", start, end);
-+ if (!path[depth].p_hdr)
-+ path[depth].p_hdr = EXT_BLOCK_HDR(path[depth].p_bh);
-+ eh = path[depth].p_hdr;
-+ EXT_ASSERT(eh);
-+ EXT_ASSERT(eh->eh_entries <= eh->eh_max);
-+ EXT_ASSERT(eh->eh_magic == EXT3_EXT_MAGIC);
-+
-+ /* find where to start removing */
-+ le = ex = EXT_LAST_EXTENT(eh);
-+ while (ex != EXT_FIRST_EXTENT(eh)) {
-+ if (ex->ee_block <= end)
-+ break;
-+ ex--;
-+ }
-+
-+ if (start > ex->ee_block && end < ex->ee_block + ex->ee_len - 1) {
-+ /* removal of an internal part of the extent was requested;
-+ * tail and head must be placed in different extents,
-+ * so we have to insert one more extent */
-+ path[depth].p_ext = ex;
-+ return ext3_ext_split_for_rm(handle, tree, path, start, end);
-+ }
-+
-+ lu = ex;
-+ while (ex >= EXT_FIRST_EXTENT(eh) && ex->ee_block + ex->ee_len > start) {
-+ ext_debug(tree, "remove ext %u:%u\n", ex->ee_block, ex->ee_len);
-+ path[depth].p_ext = ex;
-+
-+ a = ex->ee_block > start ? ex->ee_block : start;
-+ b = ex->ee_block + ex->ee_len - 1 < end ?
-+ ex->ee_block + ex->ee_len - 1 : end;
-+
-+ ext_debug(tree, " border %u:%u\n", a, b);
-+
-+ if (a != ex->ee_block && b != ex->ee_block + ex->ee_len - 1) {
-+ block = 0;
-+ num = 0;
-+ BUG();
-+ } else if (a != ex->ee_block) {
-+ /* remove tail of the extent */
-+ block = ex->ee_block;
-+ num = a - block;
-+ } else if (b != ex->ee_block + ex->ee_len - 1) {
-+ /* remove head of the extent */
-+ block = a;
-+ num = b - a;
-+ } else {
-+ /* remove the whole extent: excellent! */
-+ block = ex->ee_block;
-+ num = 0;
-+ EXT_ASSERT(a == ex->ee_block &&
-+ b == ex->ee_block + ex->ee_len - 1);
-+ }
-+
-+ if (ex == EXT_FIRST_EXTENT(eh))
-+ correct_index = 1;
-+
-+ credits = 1;
-+ if (correct_index)
-+ credits += (EXT_DEPTH(tree) * EXT3_ALLOC_NEEDED) + 1;
-+ if (tree->ops->remove_extent_credits)
-+ credits+=tree->ops->remove_extent_credits(tree,ex,a,b);
-+
-+ handle = ext3_ext_journal_restart(handle, credits);
-+ if (IS_ERR(handle)) {
-+ err = PTR_ERR(handle);
-+ goto out;
-+ }
-+
-+ err = ext3_ext_get_access(handle, tree, path + depth);
-+ if (err)
-+ goto out;
-+
-+ if (tree->ops->remove_extent)
-+ err = tree->ops->remove_extent(tree, ex, a, b);
-+ if (err)
-+ goto out;
-+
-+ if (num == 0) {
-+ /* this extent is removed entirely; mark the slot unused */
-+ ex->ee_start = ex->ee_start_hi = 0;
-+ eh->eh_entries--;
-+ fu = ex;
-+ }
-+
-+ ex->ee_block = block;
-+ ex->ee_len = num;
-+
-+ err = ext3_ext_dirty(handle, tree, path + depth);
-+ if (err)
-+ goto out;
-+
-+ ext_debug(tree, "new extent: %u:%u:%u\n",
-+ ex->ee_block, ex->ee_len, ex->ee_start);
-+ ex--;
-+ }
-+
-+ if (fu) {
-+ /* reuse unused slots */
-+ while (lu < le) {
-+ if (lu->ee_start) {
-+ *fu = *lu;
-+ lu->ee_start = lu->ee_start_hi = 0;
-+ fu++;
-+ }
-+ lu++;
-+ }
-+ }
-+
-+ if (correct_index && eh->eh_entries)
-+ err = ext3_ext_correct_indexes(handle, tree, path);
-+
-+ /* if this leaf is free, then we should
-+ * remove it from index block above */
-+ if (err == 0 && eh->eh_entries == 0 && path[depth].p_bh != NULL)
-+ err = ext3_ext_rm_idx(handle, tree, path + depth);
-+
-+out:
-+ return err;
-+}
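
The border clamping above (a/b and the tail/head/whole decision) can be checked with a stand-alone sketch; the values are illustrative, and journaling, slot reuse and index correction are left out:

#include <stdio.h>

/* clamp the removal range [start, end] to the extent
 * [eblock, eblock + elen - 1] and report which case applies */
static const char *toy_rm_case(unsigned long eblock, unsigned long elen,
                               unsigned long start, unsigned long end,
                               unsigned long *a, unsigned long *b)
{
        *a = eblock > start ? eblock : start;                    /* clamped left border  */
        *b = eblock + elen - 1 < end ? eblock + elen - 1 : end;  /* clamped right border */

        if (*a == eblock && *b == eblock + elen - 1)
                return "whole extent removed";
        if (*a != eblock)
                return "tail of the extent removed";
        return "head of the extent removed";
}

int main(void)
{
        unsigned long a, b;
        const char *what;

        /* extent covers logical blocks 100..119 */
        what = toy_rm_case(100, 20, 110, 300, &a, &b);
        printf("%s, border %lu:%lu\n", what, a, b);   /* tail,  border 110:119 */
        what = toy_rm_case(100, 20, 0, 300, &a, &b);
        printf("%s, border %lu:%lu\n", what, a, b);   /* whole, border 100:119 */
        return 0;
}
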
-+
-+
-+static struct ext3_extent_idx *
-+ext3_ext_last_covered(struct ext3_extent_header *hdr, unsigned long block)
-+{
-+ struct ext3_extent_idx *ix;
-+
-+ ix = EXT_LAST_INDEX(hdr);
-+ while (ix != EXT_FIRST_INDEX(hdr)) {
-+ if (ix->ei_block <= block)
-+ break;
-+ ix--;
-+ }
-+ return ix;
-+}
-+
-+/*
-+ * returns 1 if the current index has to be freed (even partially)
-+ */
-+static inline int
-+ext3_ext_more_to_rm(struct ext3_ext_path *path)
-+{
-+ EXT_ASSERT(path->p_idx);
-+
-+ if (path->p_idx < EXT_FIRST_INDEX(path->p_hdr))
-+ return 0;
-+
-+ /*
-+ * if truncation on a deeper level happened, it wasn't partial,
-+ * so we have to consider the current index for truncation
-+ */
-+ if (path->p_hdr->eh_entries == path->p_block)
-+ return 0;
-+ return 1;
-+}
-+
-+int ext3_ext_remove_space(struct ext3_extents_tree *tree,
-+ unsigned long start, unsigned long end)
-+{
-+ struct inode *inode = tree->inode;
-+ struct super_block *sb = inode->i_sb;
-+ int depth = EXT_DEPTH(tree);
-+ struct ext3_ext_path *path;
-+ handle_t *handle;
-+ int i = 0, err = 0;
-+
-+ ext_debug(tree, "space to be removed: %lu:%lu\n", start, end);
-+
-+ /* probably first extent we're gonna free will be last in block */
-+ handle = ext3_journal_start(inode, depth + 1);
-+ if (IS_ERR(handle))
-+ return PTR_ERR(handle);
-+
-+ ext3_ext_invalidate_cache(tree);
-+
-+ /*
-+ * we start scanning from the right side, freeing all the blocks
-+ * after i_size and walking down into the tree
-+ */
-+ path = kmalloc(sizeof(struct ext3_ext_path) * (depth + 1), GFP_KERNEL);
-+ if (path == NULL) {
-+ ext3_error(sb, __FUNCTION__, "Can't allocate path array");
-+ ext3_journal_stop(handle);
-+ return -ENOMEM;
-+ }
-+ memset(path, 0, sizeof(struct ext3_ext_path) * (depth + 1));
-+ path[i].p_hdr = EXT_ROOT_HDR(tree);
-+
-+ while (i >= 0 && err == 0) {
-+ if (i == depth) {
-+ /* this is leaf block */
-+ err = ext3_ext_rm_leaf(handle, tree, path, start, end);
-+ /* the root level has p_bh == NULL; brelse() handles this */
-+ brelse(path[i].p_bh);
-+ i--;
-+ continue;
-+ }
-+
-+ /* this is index block */
-+ if (!path[i].p_hdr) {
-+ ext_debug(tree, "initialize header\n");
-+ path[i].p_hdr = EXT_BLOCK_HDR(path[i].p_bh);
-+ }
-+
-+ EXT_ASSERT(path[i].p_hdr->eh_entries <= path[i].p_hdr->eh_max);
-+ EXT_ASSERT(path[i].p_hdr->eh_magic == EXT3_EXT_MAGIC);
-+
-+ if (!path[i].p_idx) {
-+ /* this level hasn't been touched yet */
-+ path[i].p_idx =
-+ ext3_ext_last_covered(path[i].p_hdr, end);
-+ path[i].p_block = path[i].p_hdr->eh_entries + 1;
-+ ext_debug(tree, "init index ptr: hdr 0x%p, num %d\n",
-+ path[i].p_hdr, path[i].p_hdr->eh_entries);
-+ } else {
-+ /* we've already been here; look at the next index */
-+ path[i].p_idx--;
-+ }
-+
-+ ext_debug(tree, "level %d - index, first 0x%p, cur 0x%p\n",
-+ i, EXT_FIRST_INDEX(path[i].p_hdr),
-+ path[i].p_idx);
-+ if (ext3_ext_more_to_rm(path + i)) {
-+ /* go to the next level */
-+ ext_debug(tree, "move to level %d (block %d)\n",
-+ i + 1, path[i].p_idx->ei_leaf);
-+ memset(path + i + 1, 0, sizeof(*path));
-+ path[i+1].p_bh = sb_bread(sb, path[i].p_idx->ei_leaf);
-+ if (!path[i+1].p_bh) {
-+ /* should we reset i_size? */
-+ err = -EIO;
-+ break;
-+ }
-+ /* store the actual number of indexes so we can tell
-+ * whether it changes at the next iteration */
-+ path[i].p_block = path[i].p_hdr->eh_entries;
-+ i++;
-+ } else {
-+ /* we finish processing this index, go up */
-+ if (path[i].p_hdr->eh_entries == 0 && i > 0) {
-+ /* index is empty, remove it;
-+ * the handle must already be prepared by
-+ * truncatei_leaf() */
-+ err = ext3_ext_rm_idx(handle, tree, path + i);
-+ }
-+ /* the root level has p_bh == NULL; brelse() handles this */
-+ brelse(path[i].p_bh);
-+ i--;
-+ ext_debug(tree, "return to level %d\n", i);
-+ }
-+ }
-+
-+ /* TODO: flexible tree reduction should be here */
-+ if (path->p_hdr->eh_entries == 0) {
-+ /*
-+ * truncation to zero freed the whole tree,
-+ * so we need to correct eh_depth
-+ */
-+ err = ext3_ext_get_access(handle, tree, path);
-+ if (err == 0) {
-+ EXT_ROOT_HDR(tree)->eh_depth = 0;
-+ EXT_ROOT_HDR(tree)->eh_max = ext3_ext_space_root(tree);
-+ err = ext3_ext_dirty(handle, tree, path);
-+ }
-+ }
-+ ext3_ext_tree_changed(tree);
-+
-+ kfree(path);
-+ ext3_journal_stop(handle);
-+
-+ return err;
-+}
-+
-+int ext3_ext_calc_metadata_amount(struct ext3_extents_tree *tree, int blocks)
-+{
-+ int lcap, icap, rcap, leafs, idxs, num;
-+
-+ rcap = ext3_ext_space_root(tree);
-+ if (blocks <= rcap) {
-+ /* all extents fit to the root */
-+ return 0;
-+ }
-+
-+ rcap = ext3_ext_space_root_idx(tree);
-+ lcap = ext3_ext_space_block(tree);
-+ icap = ext3_ext_space_block_idx(tree);
-+
-+ num = leafs = (blocks + lcap - 1) / lcap;
-+ if (leafs <= rcap) {
-+ /* all pointers to leafs fit to the root */
-+ return leafs;
-+ }
-+
-+ /* ok. we need separate index block(s) to link all leaf blocks */
-+ idxs = (leafs + icap - 1) / icap;
-+ do {
-+ num += idxs;
-+ idxs = (idxs + icap - 1) / icap;
-+ } while (idxs > rcap);
-+
-+ return num;
-+}
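
With the on-disk sizes declared in ext3_extents.h (12-byte header, 12-byte extent and index entries), and assuming a 4096-byte block plus the 60-byte i_data root, the capacities work out to 4 entries in the root and 340 per block. The worst-case count above can then be reproduced with this stand-alone sketch; the capacities are assumptions plugged in by hand, not read from a live filesystem:

#include <stdio.h>

/* assumed capacities for a 4 KiB block size:
 * root (inode i_data, 60 bytes minus the 12-byte header) holds 4 entries,
 * a full block ((4096 - 12) / 12) holds 340 entries */
#define ROOT_CAP  4
#define LEAF_CAP  340
#define IDX_CAP   340

/* worst-case number of metadata blocks needed to map 'blocks' data blocks */
static int toy_metadata_amount(int blocks)
{
        int leafs, idxs, num;

        if (blocks <= ROOT_CAP)
                return 0;                        /* all extents fit in the inode    */

        num = leafs = (blocks + LEAF_CAP - 1) / LEAF_CAP;
        if (leafs <= ROOT_CAP)
                return leafs;                    /* root points straight at leaves  */

        idxs = (leafs + IDX_CAP - 1) / IDX_CAP;  /* index blocks linking the leaves */
        do {
                num += idxs;
                idxs = (idxs + IDX_CAP - 1) / IDX_CAP;
        } while (idxs > ROOT_CAP);

        return num;
}

int main(void)
{
        printf("%d\n", toy_metadata_amount(3));        /* 0    - fits in the inode           */
        printf("%d\n", toy_metadata_amount(1000));     /* 3    - three leaf blocks           */
        printf("%d\n", toy_metadata_amount(1000000));  /* 2951 - 2942 leaves + 9 index blocks */
        return 0;
}
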
-+
-+/*
-+ * called at mount time
-+ */
-+void ext3_ext_init(struct super_block *sb)
-+{
-+ /*
-+ * possible initialization would be here
-+ */
-+
-+ if (test_opt(sb, EXTENTS)) {
-+ printk("EXT3-fs: file extents enabled");
-+#ifdef AGRESSIVE_TEST
-+ printk(", agressive tests");
-+#endif
-+#ifdef CHECK_BINSEARCH
-+ printk(", check binsearch");
-+#endif
-+ printk("\n");
-+ }
-+}
-+
-+/*
-+ * called at umount time
-+ */
-+void ext3_ext_release(struct super_block *sb)
-+{
-+}
-+
-+/************************************************************************
-+ * VFS related routines
-+ ************************************************************************/
-+
-+static int ext3_get_inode_write_access(handle_t *handle, void *buffer)
-+{
-+ /* we use in-core data, not bh */
-+ return 0;
-+}
-+
-+static int ext3_mark_buffer_dirty(handle_t *handle, void *buffer)
-+{
-+ struct inode *inode = buffer;
-+ return ext3_mark_inode_dirty(handle, inode);
-+}
-+
-+static int ext3_ext_mergable(struct ext3_extent *ex1,
-+ struct ext3_extent *ex2)
-+{
-+ /* FIXME: support for large fs */
-+ if (ex1->ee_start + ex1->ee_len == ex2->ee_start)
-+ return 1;
-+ return 0;
-+}
-+
-+static int
-+ext3_remove_blocks_credits(struct ext3_extents_tree *tree,
-+ struct ext3_extent *ex,
-+ unsigned long from, unsigned long to)
-+{
-+ int needed;
-+
-+ /* at present, an extent can't cross a block group boundary */
-+ needed = 4; /* bitmap + group desc + sb + inode */
-+
-+#ifdef CONFIG_QUOTA
-+ needed += 2 * EXT3_SINGLEDATA_TRANS_BLOCKS;
-+#endif
-+ return needed;
-+}
-+
-+static int
-+ext3_remove_blocks(struct ext3_extents_tree *tree,
-+ struct ext3_extent *ex,
-+ unsigned long from, unsigned long to)
-+{
-+ int needed = ext3_remove_blocks_credits(tree, ex, from, to);
-+ handle_t *handle = ext3_journal_start(tree->inode, needed);
-+ struct buffer_head *bh;
-+ int i;
-+
-+ if (IS_ERR(handle))
-+ return PTR_ERR(handle);
-+ if (from >= ex->ee_block && to == ex->ee_block + ex->ee_len - 1) {
-+ /* tail removal */
-+ unsigned long num, start;
-+ num = ex->ee_block + ex->ee_len - from;
-+ start = ex->ee_start + ex->ee_len - num;
-+ ext_debug(tree, "free last %lu blocks starting %lu\n",
-+ num, start);
-+ for (i = 0; i < num; i++) {
-+ bh = sb_find_get_block(tree->inode->i_sb, start + i);
-+ ext3_forget(handle, 0, tree->inode, bh, start + i);
-+ }
-+ ext3_free_blocks(handle, tree->inode, start, num);
-+ } else if (from == ex->ee_block && to <= ex->ee_block + ex->ee_len - 1) {
-+ printk("strange request: removal %lu-%lu from %u:%u\n",
-+ from, to, ex->ee_block, ex->ee_len);
-+ } else {
-+ printk("strange request: removal(2) %lu-%lu from %u:%u\n",
-+ from, to, ex->ee_block, ex->ee_len);
-+ }
-+ ext3_journal_stop(handle);
-+ return 0;
-+}
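
The tail-removal arithmetic above translates directly into a small sketch; the extent values are made up for illustration:

#include <stdio.h>

int main(void)
{
        /* extent: logical [100,120) mapped to physical [5000,5020) */
        unsigned long ee_block = 100, ee_len = 20, ee_start = 5000;
        unsigned long from = 112;   /* free the logical tail 112..119 */

        unsigned long num = ee_block + ee_len - from;   /* 8 blocks to free      */
        unsigned long start = ee_start + ee_len - num;  /* first physical: 5012  */

        printf("free last %lu blocks starting %lu\n", num, start);
        return 0;
}
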
-+
-+static int ext3_ext_find_goal(struct inode *inode,
-+ struct ext3_ext_path *path, unsigned long block)
-+{
-+ struct ext3_inode_info *ei = EXT3_I(inode);
-+ unsigned long bg_start;
-+ unsigned long colour;
-+ int depth;
-+
-+ if (path) {
-+ struct ext3_extent *ex;
-+ depth = path->p_depth;
-+
-+ /* try to predict block placement */
-+ if ((ex = path[depth].p_ext))
-+ return ex->ee_start + (block - ex->ee_block);
-+
-+ /* it looks like the index is empty;
-+ * try a goal starting from the index block itself */
-+ if (path[depth].p_bh)
-+ return path[depth].p_bh->b_blocknr;
-+ }
-+
-+ /* OK. use inode's group */
-+ bg_start = (ei->i_block_group * EXT3_BLOCKS_PER_GROUP(inode->i_sb)) +
-+ le32_to_cpu(EXT3_SB(inode->i_sb)->s_es->s_first_data_block);
-+ colour = (current->pid % 16) *
-+ (EXT3_BLOCKS_PER_GROUP(inode->i_sb) / 16);
-+ return bg_start + colour + block;
-+}
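
A worked example of the fallback goal, assuming 32768 blocks per group, first data block 0, an inode in block group 5 and pid 1234 (all illustrative values):

#include <stdio.h>

int main(void)
{
        unsigned long blocks_per_group = 32768, first_data_block = 0;
        unsigned long block_group = 5;      /* group the inode lives in      */
        unsigned long pid = 1234;           /* 1234 % 16 == 2                */
        unsigned long block = 7;            /* logical block being allocated */

        unsigned long bg_start = block_group * blocks_per_group + first_data_block;
        unsigned long colour = (pid % 16) * (blocks_per_group / 16);
        unsigned long goal = bg_start + colour + block;

        printf("goal = %lu\n", goal);   /* 163840 + 4096 + 7 = 167943 */
        return 0;
}
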
-+
-+static int ext3_new_block_cb(handle_t *handle, struct ext3_extents_tree *tree,
-+ struct ext3_ext_path *path,
-+ struct ext3_extent *ex, int *err)
-+{
-+ struct inode *inode = tree->inode;
-+ int newblock, goal;
-+
-+ EXT_ASSERT(path);
-+ EXT_ASSERT(ex);
-+ EXT_ASSERT(ex->ee_start);
-+ EXT_ASSERT(ex->ee_len);
-+
-+ /* reuse block from the extent to order data/metadata */
-+ newblock = ex->ee_start++;
-+ ex->ee_len--;
-+ if (ex->ee_len == 0) {
-+ ex->ee_len = 1;
-+ /* allocate new block for the extent */
-+ goal = ext3_ext_find_goal(inode, path, ex->ee_block);
-+ ex->ee_start = ext3_new_block(handle, inode, goal, err);
-+ ex->ee_start_hi = 0;
-+ if (ex->ee_start == 0) {
-+ /* error occurred: restore the old extent */
-+ ex->ee_start = newblock;
-+ return 0;
-+ }
-+ }
-+ return newblock;
-+}
-+
-+static struct ext3_extents_helpers ext3_blockmap_helpers = {
-+ .get_write_access = ext3_get_inode_write_access,
-+ .mark_buffer_dirty = ext3_mark_buffer_dirty,
-+ .mergable = ext3_ext_mergable,
-+ .new_block = ext3_new_block_cb,
-+ .remove_extent = ext3_remove_blocks,
-+ .remove_extent_credits = ext3_remove_blocks_credits,
-+};
-+
-+void ext3_init_tree_desc(struct ext3_extents_tree *tree,
-+ struct inode *inode)
-+{
-+ tree->inode = inode;
-+ tree->root = (void *) EXT3_I(inode)->i_data;
-+ tree->buffer = (void *) inode;
-+ tree->buffer_len = sizeof(EXT3_I(inode)->i_data);
-+ tree->cex = (struct ext3_ext_cache *) &EXT3_I(inode)->i_cached_extent;
-+ tree->ops = &ext3_blockmap_helpers;
-+}
-+
-+int ext3_ext_get_block(handle_t *handle, struct inode *inode,
-+ long iblock, struct buffer_head *bh_result,
-+ int create, int extend_disksize)
-+{
-+ struct ext3_ext_path *path = NULL;
-+ struct ext3_extent newex;
-+ struct ext3_extent *ex;
-+ int goal, newblock, err = 0, depth;
-+ struct ext3_extents_tree tree;
-+
-+ clear_buffer_new(bh_result);
-+ ext3_init_tree_desc(&tree, inode);
-+ ext_debug(&tree, "block %d requested for inode %u\n",
-+ (int) iblock, (unsigned) inode->i_ino);
-+ down(&EXT3_I(inode)->truncate_sem);
-+
-+ /* check in cache */
-+ if ((goal = ext3_ext_in_cache(&tree, iblock, &newex))) {
-+ if (goal == EXT3_EXT_CACHE_GAP) {
-+ if (!create) {
-+ /* block isn't allocated yet and
-+ * the user doesn't want to allocate it */
-+ goto out2;
-+ }
-+ /* we should allocate requested block */
-+ } else if (goal == EXT3_EXT_CACHE_EXTENT) {
-+ /* block is already allocated */
-+ newblock = iblock - newex.ee_block + newex.ee_start;
-+ goto out;
-+ } else {
-+ EXT_ASSERT(0);
-+ }
-+ }
-+
-+ /* find extent for this block */
-+ path = ext3_ext_find_extent(&tree, iblock, NULL);
-+ if (IS_ERR(path)) {
-+ err = PTR_ERR(path);
-+ path = NULL;
-+ goto out2;
-+ }
-+
-+ depth = EXT_DEPTH(&tree);
-+
-+ /*
-+ * a consistent leaf must not be empty;
-+ * this situation is possible, though, _during_ tree modification;
-+ * this is why the assert can't be put in ext3_ext_find_extent()
-+ */
-+ EXT_ASSERT(path[depth].p_ext != NULL || depth == 0);
-+
-+ if ((ex = path[depth].p_ext)) {
-+ /* if the found extent covers the block, simply return it */
-+ if (iblock >= ex->ee_block && iblock < ex->ee_block + ex->ee_len) {
-+ newblock = iblock - ex->ee_block + ex->ee_start;
-+ ext_debug(&tree, "%d fit into %d:%d -> %d\n",
-+ (int) iblock, ex->ee_block, ex->ee_len,
-+ newblock);
-+ ext3_ext_put_in_cache(&tree, ex->ee_block,
-+ ex->ee_len, ex->ee_start,
-+ EXT3_EXT_CACHE_EXTENT);
-+ goto out;
-+ }
-+ }
-+
-+ /*
-+ * the requested block isn't allocated yet;
-+ * we must not try to create a block if the create flag is zero
-+ */
-+ if (!create) {
-+ /* put the just-found gap into the cache to speed up subsequent requests */
-+ ext3_ext_put_gap_in_cache(&tree, path, iblock);
-+ goto out2;
-+ }
-+
-+ /* allocate new block */
-+ goal = ext3_ext_find_goal(inode, path, iblock);
-+ newblock = ext3_new_block(handle, inode, goal, &err);
-+ if (!newblock)
-+ goto out2;
-+ ext_debug(&tree, "allocate new block: goal %d, found %d\n",
-+ goal, newblock);
-+
-+ /* try to insert new extent into found leaf and return */
-+ newex.ee_block = iblock;
-+ newex.ee_start = newblock;
-+ newex.ee_start_hi = 0;
-+ newex.ee_len = 1;
-+ err = ext3_ext_insert_extent(handle, &tree, path, &newex);
-+ if (err)
-+ goto out2;
-+
-+ if (extend_disksize && inode->i_size > EXT3_I(inode)->i_disksize)
-+ EXT3_I(inode)->i_disksize = inode->i_size;
-+
-+ /* previous routine could use block we allocated */
-+ newblock = newex.ee_start;
-+ set_buffer_new(bh_result);
-+
-+ ext3_ext_put_in_cache(&tree, newex.ee_block, newex.ee_len,
-+ newex.ee_start, EXT3_EXT_CACHE_EXTENT);
-+out:
-+ ext3_ext_show_leaf(&tree, path);
-+ map_bh(bh_result, inode->i_sb, newblock);
-+out2:
-+ if (path) {
-+ ext3_ext_drop_refs(path);
-+ kfree(path);
-+ }
-+ up(&EXT3_I(inode)->truncate_sem);
-+
-+ return err;
-+}
-+
-+void ext3_ext_truncate(struct inode * inode, struct page *page)
-+{
-+ struct address_space *mapping = inode->i_mapping;
-+ struct super_block *sb = inode->i_sb;
-+ struct ext3_extents_tree tree;
-+ unsigned long last_block;
-+ handle_t *handle;
-+ int err = 0;
-+
-+ ext3_init_tree_desc(&tree, inode);
-+
-+ /*
-+ * probably first extent we're gonna free will be last in block
-+ */
-+ err = ext3_writepage_trans_blocks(inode) + 3;
-+ handle = ext3_journal_start(inode, err);
-+ if (IS_ERR(handle)) {
-+ if (page) {
-+ clear_highpage(page);
-+ flush_dcache_page(page);
-+ unlock_page(page);
-+ page_cache_release(page);
-+ }
-+ return;
-+ }
-+
-+ if (page)
-+ ext3_block_truncate_page(handle, page, mapping, inode->i_size);
-+
-+ down(&EXT3_I(inode)->truncate_sem);
-+ ext3_ext_invalidate_cache(&tree);
-+
-+ /*
-+ * TODO: optimization is possible here;
-+ * we probably need no scanning at all,
-+ * because page truncation is enough
-+ */
-+ if (ext3_orphan_add(handle, inode))
-+ goto out_stop;
-+
-+ /* we have to know where to truncate from in crash case */
-+ EXT3_I(inode)->i_disksize = inode->i_size;
-+ ext3_mark_inode_dirty(handle, inode);
-+
-+ last_block = (inode->i_size + sb->s_blocksize - 1) >>
-+ EXT3_BLOCK_SIZE_BITS(sb);
-+ err = ext3_ext_remove_space(&tree, last_block, EXT_MAX_BLOCK);
-+
-+ /* In a multi-transaction truncate, we only make the final
-+ * transaction synchronous */
-+ if (IS_SYNC(inode))
-+ handle->h_sync = 1;
-+
-+out_stop:
-+ /*
-+ * If this was a simple ftruncate(), and the file will remain alive
-+ * then we need to clear up the orphan record which we created above.
-+ * However, if this was a real unlink then we were called by
-+ * ext3_delete_inode(), and we allow that function to clean up the
-+ * orphan info for us.
-+ */
-+ if (inode->i_nlink)
-+ ext3_orphan_del(handle, inode);
-+
-+ up(&EXT3_I(inode)->truncate_sem);
-+ ext3_journal_stop(handle);
-+}
-+
-+/*
-+ * this routine calculates the max number of blocks we could modify
-+ * in order to allocate a new block for an inode
-+ */
-+int ext3_ext_writepage_trans_blocks(struct inode *inode, int num)
-+{
-+ struct ext3_extents_tree tree;
-+ int needed;
-+
-+ ext3_init_tree_desc(&tree, inode);
-+
-+ needed = ext3_ext_calc_credits_for_insert(&tree, NULL);
-+
-+ /* the caller wants to allocate num blocks */
-+ needed *= num;
-+
-+#ifdef CONFIG_QUOTA
-+ /*
-+ * FIXME: the real calculation should be here;
-+ * it depends on the blockmap format of the quota file
-+ */
-+ needed += 2 * EXT3_SINGLEDATA_TRANS_BLOCKS;
-+#endif
-+
-+ return needed;
-+}
-+
-+void ext3_extents_initialize_blockmap(handle_t *handle, struct inode *inode)
-+{
-+ struct ext3_extents_tree tree;
-+
-+ ext3_init_tree_desc(&tree, inode);
-+ ext3_extent_tree_init(handle, &tree);
-+}
-+
-+int ext3_ext_calc_blockmap_metadata(struct inode *inode, int blocks)
-+{
-+ struct ext3_extents_tree tree;
-+
-+ ext3_init_tree_desc(&tree, inode);
-+ return ext3_ext_calc_metadata_amount(&tree, blocks);
-+}
-+
-+static int
-+ext3_ext_store_extent_cb(struct ext3_extents_tree *tree,
-+ struct ext3_ext_path *path,
-+ struct ext3_ext_cache *newex)
-+{
-+ struct ext3_extent_buf *buf = (struct ext3_extent_buf *) tree->private;
-+
-+ if (newex->ec_type != EXT3_EXT_CACHE_EXTENT)
-+ return EXT_CONTINUE;
-+
-+ if (buf->err < 0)
-+ return EXT_BREAK;
-+ if (buf->cur - buf->buffer + sizeof(*newex) > buf->buflen)
-+ return EXT_BREAK;
-+
-+ if (!copy_to_user(buf->cur, newex, sizeof(*newex))) {
-+ buf->err++;
-+ buf->cur += sizeof(*newex);
-+ } else {
-+ buf->err = -EFAULT;
-+ return EXT_BREAK;
-+ }
-+ return EXT_CONTINUE;
-+}
-+
-+static int
-+ext3_ext_collect_stats_cb(struct ext3_extents_tree *tree,
-+ struct ext3_ext_path *path,
-+ struct ext3_ext_cache *ex)
-+{
-+ struct ext3_extent_tree_stats *buf =
-+ (struct ext3_extent_tree_stats *) tree->private;
-+ int depth;
-+
-+ if (ex->ec_type != EXT3_EXT_CACHE_EXTENT)
-+ return EXT_CONTINUE;
-+
-+ depth = EXT_DEPTH(tree);
-+ buf->extents_num++;
-+ if (path[depth].p_ext == EXT_FIRST_EXTENT(path[depth].p_hdr))
-+ buf->leaf_num++;
-+ return EXT_CONTINUE;
-+}
-+
-+int ext3_ext_ioctl(struct inode *inode, struct file *filp, unsigned int cmd,
-+ unsigned long arg)
-+{
-+ int err = 0;
-+
-+ if (!(EXT3_I(inode)->i_flags & EXT3_EXTENTS_FL))
-+ return -EINVAL;
-+
-+ if (cmd == EXT3_IOC_GET_EXTENTS) {
-+ struct ext3_extent_buf buf;
-+ struct ext3_extents_tree tree;
-+
-+ if (copy_from_user(&buf, (void *) arg, sizeof(buf)))
-+ return -EFAULT;
-+
-+ ext3_init_tree_desc(&tree, inode);
-+ buf.cur = buf.buffer;
-+ buf.err = 0;
-+ tree.private = &buf;
-+ down(&EXT3_I(inode)->truncate_sem);
-+ err = ext3_ext_walk_space(&tree, buf.start, EXT_MAX_BLOCK,
-+ ext3_ext_store_extent_cb);
-+ up(&EXT3_I(inode)->truncate_sem);
-+ if (err == 0)
-+ err = buf.err;
-+ } else if (cmd == EXT3_IOC_GET_TREE_STATS) {
-+ struct ext3_extent_tree_stats buf;
-+ struct ext3_extents_tree tree;
-+
-+ ext3_init_tree_desc(&tree, inode);
-+ down(&EXT3_I(inode)->truncate_sem);
-+ buf.depth = EXT_DEPTH(&tree);
-+ buf.extents_num = 0;
-+ buf.leaf_num = 0;
-+ tree.private = &buf;
-+ err = ext3_ext_walk_space(&tree, 0, EXT_MAX_BLOCK,
-+ ext3_ext_collect_stats_cb);
-+ up(&EXT3_I(inode)->truncate_sem);
-+ if (!err)
-+ err = copy_to_user((void *) arg, &buf, sizeof(buf));
-+ } else if (cmd == EXT3_IOC_GET_TREE_DEPTH) {
-+ struct ext3_extents_tree tree;
-+ ext3_init_tree_desc(&tree, inode);
-+ down(&EXT3_I(inode)->truncate_sem);
-+ err = EXT_DEPTH(&tree);
-+ up(&EXT3_I(inode)->truncate_sem);
-+ }
-+
-+ return err;
-+}
-+
-+EXPORT_SYMBOL(ext3_init_tree_desc);
-+EXPORT_SYMBOL(ext3_mark_inode_dirty);
-+EXPORT_SYMBOL(ext3_ext_invalidate_cache);
-+EXPORT_SYMBOL(ext3_ext_insert_extent);
-+EXPORT_SYMBOL(ext3_ext_walk_space);
-+EXPORT_SYMBOL(ext3_ext_find_goal);
-+EXPORT_SYMBOL(ext3_ext_calc_credits_for_insert);
-Index: linux-2.6.16.21-0.8/fs/ext3/ialloc.c
-===================================================================
---- linux-2.6.16.21-0.8.orig/fs/ext3/ialloc.c
-+++ linux-2.6.16.21-0.8/fs/ext3/ialloc.c
-@@ -598,7 +598,7 @@ got:
- ei->i_dir_start_lookup = 0;
- ei->i_disksize = 0;
-
-- ei->i_flags = EXT3_I(dir)->i_flags & ~EXT3_INDEX_FL;
-+ ei->i_flags = EXT3_I(dir)->i_flags & ~(EXT3_INDEX_FL|EXT3_EXTENTS_FL);
- if (S_ISLNK(mode))
- ei->i_flags &= ~(EXT3_IMMUTABLE_FL|EXT3_APPEND_FL);
- /* dirsync only applies to directories */
-@@ -642,6 +642,18 @@ got:
- if (err)
- goto fail_free_drop;
-
-+ if (test_opt(sb, EXTENTS) && S_ISREG(inode->i_mode)) {
-+ EXT3_I(inode)->i_flags |= EXT3_EXTENTS_FL;
-+ ext3_extents_initialize_blockmap(handle, inode);
-+ if (!EXT3_HAS_INCOMPAT_FEATURE(sb, EXT3_FEATURE_INCOMPAT_EXTENTS)) {
-+ err = ext3_journal_get_write_access(handle, EXT3_SB(sb)->s_sbh);
-+ if (err) goto fail;
-+ EXT3_SET_INCOMPAT_FEATURE(sb, EXT3_FEATURE_INCOMPAT_EXTENTS);
-+ BUFFER_TRACE(EXT3_SB(sb)->s_sbh, "call ext3_journal_dirty_metadata");
-+ err = ext3_journal_dirty_metadata(handle, EXT3_SB(sb)->s_sbh);
-+ }
-+ }
-+
- err = ext3_mark_inode_dirty(handle, inode);
- if (err) {
- ext3_std_error(sb, err);
-Index: linux-2.6.16.21-0.8/fs/ext3/inode.c
-===================================================================
---- linux-2.6.16.21-0.8.orig/fs/ext3/inode.c
-+++ linux-2.6.16.21-0.8/fs/ext3/inode.c
-@@ -40,7 +40,7 @@
- #include "iopen.h"
- #include "acl.h"
-
--static int ext3_writepage_trans_blocks(struct inode *inode);
-+int ext3_writepage_trans_blocks(struct inode *inode);
-
- /*
- * Test whether an inode is a fast symlink.
-@@ -788,6 +788,17 @@ out:
- return err;
- }
-
-+static inline int
-+ext3_get_block_wrap(handle_t *handle, struct inode *inode, long block,
-+ struct buffer_head *bh, int create, int extend_disksize)
-+{
-+ if (EXT3_I(inode)->i_flags & EXT3_EXTENTS_FL)
-+ return ext3_ext_get_block(handle, inode, block, bh, create,
-+ extend_disksize);
-+ return ext3_get_block_handle(handle, inode, block, bh, create,
-+ extend_disksize);
-+}
-+
- static int ext3_get_block(struct inode *inode, sector_t iblock,
- struct buffer_head *bh_result, int create)
- {
-@@ -798,8 +809,8 @@ static int ext3_get_block(struct inode *
- handle = ext3_journal_current_handle();
- J_ASSERT(handle != 0);
- }
-- ret = ext3_get_block_handle(handle, inode, iblock,
-- bh_result, create, 1);
-+ ret = ext3_get_block_wrap(handle, inode, iblock,
-+ bh_result, create, 1);
- return ret;
- }
-
-@@ -843,7 +854,7 @@ ext3_direct_io_get_blocks(struct inode *
-
- get_block:
- if (ret == 0)
-- ret = ext3_get_block_handle(handle, inode, iblock,
-+ ret = ext3_get_block_wrap(handle, inode, iblock,
- bh_result, create, 0);
- bh_result->b_size = (1 << inode->i_blkbits);
- return ret;
-@@ -863,7 +874,7 @@ struct buffer_head *ext3_getblk(handle_t
- dummy.b_state = 0;
- dummy.b_blocknr = -1000;
- buffer_trace_init(&dummy.b_history);
-- *errp = ext3_get_block_handle(handle, inode, block, &dummy, create, 1);
-+ *errp = ext3_get_block_wrap(handle, inode, block, &dummy, create, 1);
- if (!*errp && buffer_mapped(&dummy)) {
- struct buffer_head *bh;
- bh = sb_getblk(inode->i_sb, dummy.b_blocknr);
-@@ -1606,7 +1617,7 @@ void ext3_set_aops(struct inode *inode)
- * This required during truncate. We need to physically zero the tail end
- * of that block so it doesn't yield old data if the file is later grown.
- */
--static int ext3_block_truncate_page(handle_t *handle, struct page *page,
-+int ext3_block_truncate_page(handle_t *handle, struct page *page,
- struct address_space *mapping, loff_t from)
- {
- unsigned long index = from >> PAGE_CACHE_SHIFT;
-@@ -2116,6 +2127,9 @@ void ext3_truncate(struct inode * inode)
- return;
- }
-
-+ if (EXT3_I(inode)->i_flags & EXT3_EXTENTS_FL)
-+ return ext3_ext_truncate(inode, page);
-+
- handle = start_transaction(inode);
- if (IS_ERR(handle)) {
- if (page) {
-@@ -2863,12 +2877,15 @@ err_out:
- * block and work out the exact number of indirects which are touched. Pah.
- */
-
--static int ext3_writepage_trans_blocks(struct inode *inode)
-+int ext3_writepage_trans_blocks(struct inode *inode)
- {
- int bpp = ext3_journal_blocks_per_page(inode);
- int indirects = (EXT3_NDIR_BLOCKS % bpp) ? 5 : 3;
- int ret;
-
-+ if (EXT3_I(inode)->i_flags & EXT3_EXTENTS_FL)
-+ return ext3_ext_writepage_trans_blocks(inode, bpp);
-+
- if (ext3_should_journal_data(inode))
- ret = 3 * (bpp + indirects) + 2;
- else
-Index: linux-2.6.16.21-0.8/fs/ext3/Makefile
-===================================================================
---- linux-2.6.16.21-0.8.orig/fs/ext3/Makefile
-+++ linux-2.6.16.21-0.8/fs/ext3/Makefile
-@@ -5,7 +5,8 @@
- obj-$(CONFIG_EXT3_FS) += ext3.o
-
- ext3-y := balloc.o bitmap.o dir.o file.o fsync.o ialloc.o inode.o iopen.o \
-- ioctl.o namei.o super.o symlink.o hash.o resize.o
-+ ioctl.o namei.o super.o symlink.o hash.o resize.o \
-+ extents.o
-
- ext3-$(CONFIG_EXT3_FS_XATTR) += xattr.o xattr_user.o xattr_trusted.o
- ext3-$(CONFIG_EXT3_FS_POSIX_ACL) += acl.o
-Index: linux-2.6.16.21-0.8/fs/ext3/super.c
-===================================================================
---- linux-2.6.16.21-0.8.orig/fs/ext3/super.c
-+++ linux-2.6.16.21-0.8/fs/ext3/super.c
-@@ -392,6 +392,7 @@ static void ext3_put_super (struct super
- struct ext3_super_block *es = sbi->s_es;
- int i;
-
-+ ext3_ext_release(sb);
- ext3_xattr_put_super(sb);
- journal_destroy(sbi->s_journal);
- if (!(sb->s_flags & MS_RDONLY)) {
-@@ -456,6 +457,8 @@ static struct inode *ext3_alloc_inode(st
- #endif
- ei->i_block_alloc_info = NULL;
- ei->vfs_inode.i_version = 1;
-+
-+ memset(&ei->i_cached_extent, 0, sizeof(ei->i_cached_extent));
- return &ei->vfs_inode;
- }
-
-@@ -638,6 +641,7 @@ enum {
- Opt_jqfmt_vfsold, Opt_jqfmt_vfsv0, Opt_quota, Opt_noquota,
- Opt_ignore, Opt_barrier, Opt_err, Opt_resize, Opt_usrquota,
- Opt_iopen, Opt_noiopen, Opt_iopen_nopriv,
-+ Opt_extents, Opt_noextents, Opt_extdebug,
- Opt_grpquota
- };
-
-@@ -689,6 +693,9 @@ static match_table_t tokens = {
- {Opt_iopen, "iopen"},
- {Opt_noiopen, "noiopen"},
- {Opt_iopen_nopriv, "iopen_nopriv"},
-+ {Opt_extents, "extents"},
-+ {Opt_noextents, "noextents"},
-+ {Opt_extdebug, "extdebug"},
- {Opt_barrier, "barrier=%u"},
- {Opt_err, NULL},
- {Opt_resize, "resize"},
-@@ -1030,6 +1036,15 @@ clear_qf_name:
- case Opt_nobh:
- set_opt(sbi->s_mount_opt, NOBH);
- break;
-+ case Opt_extents:
-+ set_opt (sbi->s_mount_opt, EXTENTS);
-+ break;
-+ case Opt_noextents:
-+ clear_opt (sbi->s_mount_opt, EXTENTS);
-+ break;
-+ case Opt_extdebug:
-+ set_opt (sbi->s_mount_opt, EXTDEBUG);
-+ break;
- default:
- printk (KERN_ERR
- "EXT3-fs: Unrecognized mount option \"%s\" "
-@@ -1756,6 +1768,7 @@ static int ext3_fill_super (struct super
- percpu_counter_mod(&sbi->s_dirs_counter,
- ext3_count_dirs(sb));
-
-+ ext3_ext_init(sb);
- lock_kernel();
- return 0;
-
-Index: linux-2.6.16.21-0.8/fs/ext3/ioctl.c
-===================================================================
---- linux-2.6.16.21-0.8.orig/fs/ext3/ioctl.c
-+++ linux-2.6.16.21-0.8/fs/ext3/ioctl.c
-@@ -125,6 +125,10 @@ flags_err:
- err = ext3_change_inode_journal_flag(inode, jflag);
- return err;
- }
-+ case EXT3_IOC_GET_EXTENTS:
-+ case EXT3_IOC_GET_TREE_STATS:
-+ case EXT3_IOC_GET_TREE_DEPTH:
-+ return ext3_ext_ioctl(inode, filp, cmd, arg);
- case EXT3_IOC_GETVERSION:
- case EXT3_IOC_GETVERSION_OLD:
- return put_user(inode->i_generation, (int __user *) arg);
-Index: linux-2.6.16.21-0.8/include/linux/ext3_fs.h
-===================================================================
---- linux-2.6.16.21-0.8.orig/include/linux/ext3_fs.h
-+++ linux-2.6.16.21-0.8/include/linux/ext3_fs.h
-@@ -185,9 +185,10 @@ struct ext3_group_desc
- #define EXT3_NOTAIL_FL 0x00008000 /* file tail should not be merged */
- #define EXT3_DIRSYNC_FL 0x00010000 /* dirsync behaviour (directories only) */
- #define EXT3_TOPDIR_FL 0x00020000 /* Top of directory hierarchies*/
-+#define EXT3_EXTENTS_FL 0x00080000 /* Inode uses extents */
- #define EXT3_RESERVED_FL 0x80000000 /* reserved for ext3 lib */
-
--#define EXT3_FL_USER_VISIBLE 0x0003DFFF /* User visible flags */
-+#define EXT3_FL_USER_VISIBLE 0x000BDFFF /* User visible flags */
- #define EXT3_FL_USER_MODIFIABLE 0x000380FF /* User modifiable flags */
-
- /*
-@@ -237,6 +238,9 @@ struct ext3_new_group_data {
- #endif
- #define EXT3_IOC_GETRSVSZ _IOR('f', 5, long)
- #define EXT3_IOC_SETRSVSZ _IOW('f', 6, long)
-+#define EXT3_IOC_GET_EXTENTS _IOR('f', 7, long)
-+#define EXT3_IOC_GET_TREE_DEPTH _IOR('f', 8, long)
-+#define EXT3_IOC_GET_TREE_STATS _IOR('f', 9, long)
-
- /*
- * Mount options
-@@ -377,6 +381,8 @@ struct ext3_inode {
- #define EXT3_MOUNT_GRPQUOTA 0x200000 /* "old" group quota */
- #define EXT3_MOUNT_IOPEN 0x400000 /* Allow access via iopen */
- #define EXT3_MOUNT_IOPEN_NOPRIV 0x800000/* Make iopen world-readable */
-+#define EXT3_MOUNT_EXTENTS 0x1000000/* Extents support */
-+#define EXT3_MOUNT_EXTDEBUG 0x2000000/* Extents debug */
-
- /* Compatibility, for having both ext2_fs.h and ext3_fs.h included at once */
- #ifndef clear_opt
-@@ -565,11 +571,13 @@ static inline struct ext3_inode_info *EX
- #define EXT3_FEATURE_INCOMPAT_RECOVER 0x0004 /* Needs recovery */
- #define EXT3_FEATURE_INCOMPAT_JOURNAL_DEV 0x0008 /* Journal device */
- #define EXT3_FEATURE_INCOMPAT_META_BG 0x0010
-+#define EXT3_FEATURE_INCOMPAT_EXTENTS 0x0040 /* extents support */
-
- #define EXT3_FEATURE_COMPAT_SUPP EXT2_FEATURE_COMPAT_EXT_ATTR
- #define EXT3_FEATURE_INCOMPAT_SUPP (EXT3_FEATURE_INCOMPAT_FILETYPE| \
- EXT3_FEATURE_INCOMPAT_RECOVER| \
-- EXT3_FEATURE_INCOMPAT_META_BG)
-+ EXT3_FEATURE_INCOMPAT_META_BG| \
-+ EXT3_FEATURE_INCOMPAT_EXTENTS)
- #define EXT3_FEATURE_RO_COMPAT_SUPP (EXT3_FEATURE_RO_COMPAT_SPARSE_SUPER| \
- EXT3_FEATURE_RO_COMPAT_LARGE_FILE| \
- EXT3_FEATURE_RO_COMPAT_BTREE_DIR)
-@@ -776,6 +784,7 @@ extern unsigned long ext3_count_free (st
-
-
- /* inode.c */
-+extern int ext3_block_truncate_page(handle_t *, struct page *, struct address_space *, loff_t);
- extern int ext3_forget(handle_t *, int, struct inode *, struct buffer_head *, int);
- extern struct buffer_head * ext3_getblk (handle_t *, struct inode *, long, int, int *);
- extern struct buffer_head * ext3_bread (handle_t *, struct inode *, int, int, int *);
-@@ -792,6 +801,7 @@ extern int ext3_get_inode_loc(struct ino
- extern void ext3_truncate (struct inode *);
- extern void ext3_set_inode_flags(struct inode *);
- extern void ext3_set_aops(struct inode *inode);
-+extern int ext3_writepage_trans_blocks(struct inode *inode);
-
- /* ioctl.c */
- extern int ext3_ioctl (struct inode *, struct file *, unsigned int,
-@@ -845,6 +855,16 @@ extern struct inode_operations ext3_spec
- extern struct inode_operations ext3_symlink_inode_operations;
- extern struct inode_operations ext3_fast_symlink_inode_operations;
-
-+/* extents.c */
-+extern int ext3_ext_writepage_trans_blocks(struct inode *, int);
-+extern int ext3_ext_get_block(handle_t *, struct inode *, long,
-+ struct buffer_head *, int, int);
-+extern void ext3_ext_truncate(struct inode *, struct page *);
-+extern void ext3_ext_init(struct super_block *);
-+extern void ext3_ext_release(struct super_block *);
-+extern void ext3_extents_initialize_blockmap(handle_t *, struct inode *);
-+extern int ext3_ext_ioctl(struct inode *inode, struct file *filp,
-+ unsigned int cmd, unsigned long arg);
-
- #endif /* __KERNEL__ */
-
-Index: linux-2.6.16.21-0.8/include/linux/ext3_extents.h
-===================================================================
---- /dev/null
-+++ linux-2.6.16.21-0.8/include/linux/ext3_extents.h
-@@ -0,0 +1,262 @@
-+/*
-+ * Copyright (c) 2003, Cluster File Systems, Inc, info@clusterfs.com
-+ * Written by Alex Tomas <alex@clusterfs.com>
-+ *
-+ * This program is free software; you can redistribute it and/or modify
-+ * it under the terms of the GNU General Public License version 2 as
-+ * published by the Free Software Foundation.
-+ *
-+ * This program is distributed in the hope that it will be useful,
-+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
-+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-+ * GNU General Public License for more details.
-+ *
-+ * You should have received a copy of the GNU General Public Licens
-+ * along with this program; if not, write to the Free Software
-+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-
-+ */
-+
-+#ifndef _LINUX_EXT3_EXTENTS
-+#define _LINUX_EXT3_EXTENTS
-+
-+/*
-+ * with AGRESSIVE_TEST defined, the capacity of index/leaf blocks
-+ * becomes very small, so index splits, in-depth growing and
-+ * other hard changes happen much more often;
-+ * this is for debug purposes only
-+ */
-+#define AGRESSIVE_TEST_
-+
-+/*
-+ * if CHECK_BINSEARCH defined, then results of binary search
-+ * will be checked by linear search
-+ */
-+#define CHECK_BINSEARCH_
-+
-+/*
-+ * if EXT_DEBUG is defined you can use 'extdebug' mount option
-+ * to get lots of info about what's going on
-+ */
-+#define EXT_DEBUG_
-+#ifdef EXT_DEBUG
-+#define ext_debug(tree,fmt,a...) \
-+do { \
-+ if (test_opt((tree)->inode->i_sb, EXTDEBUG)) \
-+ printk(fmt, ##a); \
-+} while (0);
-+#else
-+#define ext_debug(tree,fmt,a...)
-+#endif
-+
-+/*
-+ * if EXT_STATS is defined then stats numbers are collected
-+ * these numbers will be displayed at umount time
-+ */
-+#define EXT_STATS_
-+
-+
-+#define EXT3_ALLOC_NEEDED 3 /* block bitmap + group desc. + sb */
-+
-+/*
-+ * ext3_inode has i_block array (total 60 bytes)
-+ * first 4 bytes are used to store:
-+ * - tree depth (0 means there is no tree yet; all extents are in the inode)
-+ * - number of alive extents in the inode
-+ */
-+
-+/*
-+ * this is extent on-disk structure
-+ * it's used at the bottom of the tree
-+ */
-+struct ext3_extent {
-+ __u32 ee_block; /* first logical block extent covers */
-+ __u16 ee_len; /* number of blocks covered by extent */
-+ __u16 ee_start_hi; /* high 16 bits of physical block */
-+ __u32 ee_start; /* low 32 bits of physical block */
-+};
-+
-+/*
-+ * this is index on-disk structure
-+ * it's used at all levels except the bottom
-+ */
-+struct ext3_extent_idx {
-+ __u32 ei_block; /* index covers logical blocks from 'block' */
-+ __u32 ei_leaf; /* pointer to the physical block of the next *
-+ * level. a leaf or the next index could be here */
-+ __u16 ei_leaf_hi; /* high 16 bits of physical block */
-+ __u16 ei_unused;
-+};
-+
-+/*
-+ * each block (leaf and index), even the inode-stored one, has a header
-+ */
-+struct ext3_extent_header {
-+ __u16 eh_magic; /* probably will support different formats */
-+ __u16 eh_entries; /* number of valid entries */
-+ __u16 eh_max; /* capacity of store in entries */
-+ __u16 eh_depth; /* has the tree real underlying blocks? */
-+ __u32 eh_generation; /* flags(8 bits) | generation of the tree */
-+};
-+
-+#define EXT3_EXT_MAGIC 0xf30a
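
A quick way to confirm the geometry these structures imply: the extent, the index entry and the header are each 12 bytes, so a 4096-byte block holds 340 entries and the 60-byte inode root holds 4. The toy_* mirrors below are illustrative copies, not the kernel definitions, and the 4096-byte block size is an assumption:

#include <stdio.h>

struct toy_extent {            /* mirrors struct ext3_extent: 12 bytes */
        unsigned int   ee_block;
        unsigned short ee_len;
        unsigned short ee_start_hi;
        unsigned int   ee_start;
};

struct toy_extent_header {     /* mirrors struct ext3_extent_header: 12 bytes */
        unsigned short eh_magic;
        unsigned short eh_entries;
        unsigned short eh_max;
        unsigned short eh_depth;
        unsigned int   eh_generation;
};

int main(void)
{
        unsigned long block_size = 4096;   /* assumed filesystem block size */
        unsigned long root_size = 60;      /* i_block array in the inode    */

        printf("extent entry: %lu bytes, header: %lu bytes\n",
               (unsigned long) sizeof(struct toy_extent),
               (unsigned long) sizeof(struct toy_extent_header));
        printf("entries in the inode root: %lu\n",
               (root_size - sizeof(struct toy_extent_header)) /
               sizeof(struct toy_extent));                     /* 4   */
        printf("entries per %lu-byte block: %lu\n", block_size,
               (block_size - sizeof(struct toy_extent_header)) /
               sizeof(struct toy_extent));                     /* 340 */
        return 0;
}
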
-+
-+/*
-+ * array of ext3_ext_path contains path to some extent
-+ * creation/lookup routines use it for traversal/splitting/etc
-+ * truncate uses it to simulate recursive walking
-+ */
-+struct ext3_ext_path {
-+ __u32 p_block;
-+ __u16 p_depth;
-+ struct ext3_extent *p_ext;
-+ struct ext3_extent_idx *p_idx;
-+ struct ext3_extent_header *p_hdr;
-+ struct buffer_head *p_bh;
-+};
-+
-+/*
-+ * structure for external API
-+ */
-+
-+/*
-+ * storage for cached extent
-+ */
-+struct ext3_ext_cache {
-+ __u32 ec_start;
-+ __u32 ec_block;
-+ __u32 ec_len;
-+ __u32 ec_type;
-+};
-+
-+#define EXT3_EXT_CACHE_NO 0
-+#define EXT3_EXT_CACHE_GAP 1
-+#define EXT3_EXT_CACHE_EXTENT 2
-+
-+/*
-+ * ext3_extents_tree is used to pass initial information
-+ * to top-level extents API
-+ */
-+struct ext3_extents_helpers;
-+struct ext3_extents_tree {
-+ struct inode *inode; /* inode which tree belongs to */
-+ void *root; /* ptr to data top of tree resides at */
-+ void *buffer; /* will be passed as arg to ^^ routines */
-+ int buffer_len;
-+ void *private;
-+ struct ext3_ext_cache *cex;/* last found extent */
-+ struct ext3_extents_helpers *ops;
-+};
-+
-+struct ext3_extents_helpers {
-+ int (*get_write_access)(handle_t *h, void *buffer);
-+ int (*mark_buffer_dirty)(handle_t *h, void *buffer);
-+ int (*mergable)(struct ext3_extent *ex1, struct ext3_extent *ex2);
-+ int (*remove_extent_credits)(struct ext3_extents_tree *,
-+ struct ext3_extent *, unsigned long,
-+ unsigned long);
-+ int (*remove_extent)(struct ext3_extents_tree *,
-+ struct ext3_extent *, unsigned long,
-+ unsigned long);
-+ int (*new_block)(handle_t *, struct ext3_extents_tree *,
-+ struct ext3_ext_path *, struct ext3_extent *,
-+ int *);
-+};
-+
-+/*
-+ * to be called by ext3_ext_walk_space()
-+ * negative retcode - error
-+ * positive retcode - signal for ext3_ext_walk_space(), see below
-+ * callback must return valid extent (passed or newly created)
-+ */
-+typedef int (*ext_prepare_callback)(struct ext3_extents_tree *,
-+ struct ext3_ext_path *,
-+ struct ext3_ext_cache *);
-+
-+#define EXT_CONTINUE 0
-+#define EXT_BREAK 1
-+#define EXT_REPEAT 2
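
A hedged sketch of how a walker and its callback use these three return codes; the toy_* names and the fixed 10-block chunking are made up for illustration and do not correspond to ext3_ext_walk_space() internals:

#include <stdio.h>

#define TOY_CONTINUE 0
#define TOY_BREAK    1
#define TOY_REPEAT   2

/* a callback in the style of ext_prepare_callback: stop after 3 chunks */
static int count_cb(unsigned long block, unsigned long len, void *private)
{
        int *seen = private;

        printf("chunk %lu:%lu\n", block, len);
        if (++(*seen) >= 3)
                return TOY_BREAK;        /* tell the walker to stop   */
        return TOY_CONTINUE;             /* keep walking              */
}

/* a minimal walker that honours the three return codes */
static int toy_walk(unsigned long start, unsigned long end,
                    int (*cb)(unsigned long, unsigned long, void *), void *private)
{
        unsigned long block = start, chunk = 10;
        int ret;

        while (block < end) {
                ret = cb(block, chunk, private);
                if (ret < 0)
                        return ret;      /* error from the callback    */
                if (ret == TOY_BREAK)
                        break;           /* callback asked to stop     */
                if (ret == TOY_REPEAT)
                        continue;        /* re-run for the same block  */
                block += chunk;
        }
        return 0;
}

int main(void)
{
        int seen = 0;

        return toy_walk(0, 1000, count_cb, &seen);
}
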
-+
-+
-+#define EXT_MAX_BLOCK 0xffffffff
-+
-+
-+#define EXT_FIRST_EXTENT(__hdr__) \
-+ ((struct ext3_extent *) (((char *) (__hdr__)) + \
-+ sizeof(struct ext3_extent_header)))
-+#define EXT_FIRST_INDEX(__hdr__) \
-+ ((struct ext3_extent_idx *) (((char *) (__hdr__)) + \
-+ sizeof(struct ext3_extent_header)))
-+#define EXT_HAS_FREE_INDEX(__path__) \
-+ ((__path__)->p_hdr->eh_entries < (__path__)->p_hdr->eh_max)
-+#define EXT_LAST_EXTENT(__hdr__) \
-+ (EXT_FIRST_EXTENT((__hdr__)) + (__hdr__)->eh_entries - 1)
-+#define EXT_LAST_INDEX(__hdr__) \
-+ (EXT_FIRST_INDEX((__hdr__)) + (__hdr__)->eh_entries - 1)
-+#define EXT_MAX_EXTENT(__hdr__) \
-+ (EXT_FIRST_EXTENT((__hdr__)) + (__hdr__)->eh_max - 1)
-+#define EXT_MAX_INDEX(__hdr__) \
-+ (EXT_FIRST_INDEX((__hdr__)) + (__hdr__)->eh_max - 1)
-+#define EXT_HDR_GEN(__hdr__) ((__hdr__)->eh_generation & 0x00ffffff)
-+#define EXT_FLAGS(__hdr__) ((__hdr__)->eh_generation >> 24)
-+#define EXT_FLAGS_CLR_UNKNOWN 0x7 /* Flags cleared on modification */
-+
-+#define EXT_BLOCK_HDR(__bh__) ((struct ext3_extent_header *)(__bh__)->b_data)
-+#define EXT_ROOT_HDR(__tree__) ((struct ext3_extent_header *)(__tree__)->root)
-+#define EXT_DEPTH(__tree__) (EXT_ROOT_HDR(__tree__)->eh_depth)
-+#define EXT_GENERATION(__tree__) EXT_HDR_GEN(EXT_ROOT_HDR(__tree__))
-+
-+#define EXT_ASSERT(__x__) if (!(__x__)) BUG();
-+
-+#define EXT_CHECK_PATH(tree,path) \
-+{ \
-+ int depth = EXT_DEPTH(tree); \
-+ BUG_ON((unsigned long) (path) < __PAGE_OFFSET); \
-+ BUG_ON((unsigned long) (path)[depth].p_idx < \
-+ __PAGE_OFFSET && (path)[depth].p_idx != NULL); \
-+ BUG_ON((unsigned long) (path)[depth].p_ext < \
-+ __PAGE_OFFSET && (path)[depth].p_ext != NULL); \
-+ BUG_ON((unsigned long) (path)[depth].p_hdr < __PAGE_OFFSET); \
-+ BUG_ON((unsigned long) (path)[depth].p_bh < __PAGE_OFFSET \
-+ && depth != 0); \
-+ BUG_ON((path)[0].p_depth != depth); \
-+}
-+
-+
-+/*
-+ * this structure is used to gather extents from the tree via ioctl
-+ */
-+struct ext3_extent_buf {
-+ unsigned long start;
-+ int buflen;
-+ void *buffer;
-+ void *cur;
-+ int err;
-+};
-+
-+/*
-+ * this structure is used to collect stats info about the tree
-+ */
-+struct ext3_extent_tree_stats {
-+ int depth;
-+ int extents_num;
-+ int leaf_num;
-+};
-+
-+extern void ext3_init_tree_desc(struct ext3_extents_tree *, struct inode *);
-+extern int ext3_extent_tree_init(handle_t *, struct ext3_extents_tree *);
-+extern int ext3_ext_calc_credits_for_insert(struct ext3_extents_tree *, struct ext3_ext_path *);
-+extern int ext3_ext_insert_extent(handle_t *, struct ext3_extents_tree *, struct ext3_ext_path *, struct ext3_extent *);
-+extern int ext3_ext_walk_space(struct ext3_extents_tree *, unsigned long, unsigned long, ext_prepare_callback);
-+extern int ext3_ext_remove_space(struct ext3_extents_tree *, unsigned long, unsigned long);
-+extern struct ext3_ext_path * ext3_ext_find_extent(struct ext3_extents_tree *, int, struct ext3_ext_path *);
-+extern int ext3_ext_calc_blockmap_metadata(struct inode *, int);
-+
-+static inline void
-+ext3_ext_invalidate_cache(struct ext3_extents_tree *tree)
-+{
-+ if (tree->cex)
-+ tree->cex->ec_type = EXT3_EXT_CACHE_NO;
-+}
-+
-+
-+#endif /* _LINUX_EXT3_EXTENTS */
-Index: linux-2.6.16.21-0.8/include/linux/ext3_fs_i.h
-===================================================================
---- linux-2.6.16.21-0.8.orig/include/linux/ext3_fs_i.h
-+++ linux-2.6.16.21-0.8/include/linux/ext3_fs_i.h
-@@ -133,6 +133,8 @@ struct ext3_inode_info {
- */
- struct semaphore truncate_sem;
- struct inode vfs_inode;
-+
-+ __u32 i_cached_extent[4];
- };
-
- #endif /* _LINUX_EXT3_FS_I */
+++ /dev/null
-Index: linux-2.6.16.27-0.9/fs/ext3/extents.c
-===================================================================
---- /dev/null
-+++ linux-2.6.16.27-0.9/fs/ext3/extents.c
-@@ -0,0 +1,2359 @@
-+/*
-+ * Copyright(c) 2003, 2004, 2005, Cluster File Systems, Inc, info@clusterfs.com
-+ * Written by Alex Tomas <alex@clusterfs.com>
-+ *
-+ * This program is free software; you can redistribute it and/or modify
-+ * it under the terms of the GNU General Public License version 2 as
-+ * published by the Free Software Foundation.
-+ *
-+ * This program is distributed in the hope that it will be useful,
-+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
-+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-+ * GNU General Public License for more details.
-+ *
-+ * You should have received a copy of the GNU General Public License
-+ * along with this program; if not, write to the Free Software
-+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
-+ */
-+
-+/*
-+ * Extents support for EXT3
-+ *
-+ * TODO:
-+ * - ext3_ext_walk_space() should not use ext3_ext_find_extent()
-+ * - ext3_ext_calc_credits() could take 'mergable' into account
-+ * - ext3*_error() should be used in some situations
-+ * - find_goal() [to be tested and improved]
-+ * - smart tree reduction
-+ * - arch-independence
-+ * common on-disk format for big/little-endian arch
-+ */
-+
-+#include <linux/module.h>
-+#include <linux/fs.h>
-+#include <linux/time.h>
-+#include <linux/ext3_jbd.h>
-+#include <linux/jbd.h>
-+#include <linux/smp_lock.h>
-+#include <linux/highuid.h>
-+#include <linux/pagemap.h>
-+#include <linux/quotaops.h>
-+#include <linux/string.h>
-+#include <linux/slab.h>
-+#include <linux/ext3_extents.h>
-+#include <asm/uaccess.h>
-+
-+
-+static inline int ext3_ext_check_header(struct ext3_extent_header *eh)
-+{
-+ if (eh->eh_magic != EXT3_EXT_MAGIC) {
-+ printk(KERN_ERR "EXT3-fs: invalid magic = 0x%x\n",
-+ (unsigned)eh->eh_magic);
-+ return -EIO;
-+ }
-+ if (eh->eh_max == 0) {
-+ printk(KERN_ERR "EXT3-fs: invalid eh_max = %u\n",
-+ (unsigned)eh->eh_max);
-+ return -EIO;
-+ }
-+ if (eh->eh_entries > eh->eh_max) {
-+ printk(KERN_ERR "EXT3-fs: invalid eh_entries = %u\n",
-+ (unsigned)eh->eh_entries);
-+ return -EIO;
-+ }
-+ return 0;
-+}
-+
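-+/*
-+ * make sure the running handle has at least 'needed' credits:
-+ * reuse it if it already does, try to extend it in place, and
-+ * otherwise restart it (note that the restart error code is
-+ * ignored here and the same handle is returned)
-+ */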
-+static handle_t *ext3_ext_journal_restart(handle_t *handle, int needed)
-+{
-+ int err;
-+
-+ if (handle->h_buffer_credits > needed)
-+ return handle;
-+ if (!ext3_journal_extend(handle, needed))
-+ return handle;
-+ err = ext3_journal_restart(handle, needed);
-+
-+ return handle;
-+}
-+
-+static int inline
-+ext3_ext_get_access_for_root(handle_t *h, struct ext3_extents_tree *tree)
-+{
-+ if (tree->ops->get_write_access)
-+ return tree->ops->get_write_access(h,tree->buffer);
-+ else
-+ return 0;
-+}
-+
-+static int inline
-+ext3_ext_mark_root_dirty(handle_t *h, struct ext3_extents_tree *tree)
-+{
-+ if (tree->ops->mark_buffer_dirty)
-+ return tree->ops->mark_buffer_dirty(h,tree->buffer);
-+ else
-+ return 0;
-+}
-+
-+/*
-+ * could return:
-+ * - EROFS
-+ * - ENOMEM
-+ */
-+static int ext3_ext_get_access(handle_t *handle,
-+ struct ext3_extents_tree *tree,
-+ struct ext3_ext_path *path)
-+{
-+ int err;
-+
-+ if (path->p_bh) {
-+ /* path points to block */
-+ err = ext3_journal_get_write_access(handle, path->p_bh);
-+ } else {
-+ /* path points to leaf/index in inode body */
-+ err = ext3_ext_get_access_for_root(handle, tree);
-+ }
-+ return err;
-+}
-+
-+/*
-+ * could return:
-+ * - EROFS
-+ * - ENOMEM
-+ * - EIO
-+ */
-+static int ext3_ext_dirty(handle_t *handle, struct ext3_extents_tree *tree,
-+ struct ext3_ext_path *path)
-+{
-+ int err;
-+ if (path->p_bh) {
-+ /* path points to block */
-+ err =ext3_journal_dirty_metadata(handle, path->p_bh);
-+ } else {
-+ /* path points to leaf/index in inode body */
-+ err = ext3_ext_mark_root_dirty(handle, tree);
-+ }
-+ return err;
-+}
-+
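-+/*
-+ * allocate a block for a new tree node: delegate to the tree owner's
-+ * new_block callback if one is set, otherwise pick a goal near the
-+ * parent index block (or near the inode's block group when there is
-+ * no useful path) and call ext3_new_block()
-+ */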
-+static int inline
-+ext3_ext_new_block(handle_t *handle, struct ext3_extents_tree *tree,
-+ struct ext3_ext_path *path, struct ext3_extent *ex,
-+ int *err)
-+{
-+ int goal, depth, newblock;
-+ struct inode *inode;
-+
-+ EXT_ASSERT(tree);
-+ if (tree->ops->new_block)
-+ return tree->ops->new_block(handle, tree, path, ex, err);
-+
-+ inode = tree->inode;
-+ depth = EXT_DEPTH(tree);
-+ if (path && depth > 0) {
-+ goal = path[depth-1].p_block;
-+ } else {
-+ struct ext3_inode_info *ei = EXT3_I(inode);
-+ unsigned long bg_start;
-+ unsigned long colour;
-+
-+ bg_start = (ei->i_block_group *
-+ EXT3_BLOCKS_PER_GROUP(inode->i_sb)) +
-+ le32_to_cpu(EXT3_SB(inode->i_sb)->s_es->s_first_data_block);
-+ colour = (current->pid % 16) *
-+ (EXT3_BLOCKS_PER_GROUP(inode->i_sb) / 16);
-+ goal = bg_start + colour;
-+ }
-+
-+ newblock = ext3_new_block(handle, inode, goal, err);
-+ return newblock;
-+}
-+
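-+/*
-+ * record a tree modification in the root header: bump the 24-bit
-+ * generation counter and drop the EXT_FLAGS_CLR_UNKNOWN flag bits,
-+ * which are defined to be cleared on modification
-+ */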
-+static inline void ext3_ext_tree_changed(struct ext3_extents_tree *tree)
-+{
-+ struct ext3_extent_header *neh = EXT_ROOT_HDR(tree);
-+ neh->eh_generation = ((EXT_FLAGS(neh) & ~EXT_FLAGS_CLR_UNKNOWN) << 24) |
-+ (EXT_HDR_GEN(neh) + 1);
-+}
-+
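-+/*
-+ * the four helpers below compute how many extent or index entries fit
-+ * into one tree node: either a full disk block or the in-inode root
-+ * (tree->buffer_len bytes), minus the node header. AGRESSIVE_TEST
-+ * shrinks the capacities, presumably to force deep trees in testing.
-+ */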
-+static inline int ext3_ext_space_block(struct ext3_extents_tree *tree)
-+{
-+ int size;
-+
-+ size = (tree->inode->i_sb->s_blocksize -
-+ sizeof(struct ext3_extent_header)) /
-+ sizeof(struct ext3_extent);
-+#ifdef AGRESSIVE_TEST
-+ size = 6;
-+#endif
-+ return size;
-+}
-+
-+static inline int ext3_ext_space_block_idx(struct ext3_extents_tree *tree)
-+{
-+ int size;
-+
-+ size = (tree->inode->i_sb->s_blocksize -
-+ sizeof(struct ext3_extent_header)) /
-+ sizeof(struct ext3_extent_idx);
-+#ifdef AGRESSIVE_TEST
-+ size = 5;
-+#endif
-+ return size;
-+}
-+
-+static inline int ext3_ext_space_root(struct ext3_extents_tree *tree)
-+{
-+ int size;
-+
-+ size = (tree->buffer_len - sizeof(struct ext3_extent_header)) /
-+ sizeof(struct ext3_extent);
-+#ifdef AGRESSIVE_TEST
-+ size = 3;
-+#endif
-+ return size;
-+}
-+
-+static inline int ext3_ext_space_root_idx(struct ext3_extents_tree *tree)
-+{
-+ int size;
-+
-+ size = (tree->buffer_len - sizeof(struct ext3_extent_header)) /
-+ sizeof(struct ext3_extent_idx);
-+#ifdef AGRESSIVE_TEST
-+ size = 4;
-+#endif
-+ return size;
-+}
-+
-+static void ext3_ext_show_path(struct ext3_extents_tree *tree,
-+ struct ext3_ext_path *path)
-+{
-+#ifdef EXT_DEBUG
-+ int k, l = path->p_depth;
-+
-+ ext_debug(tree, "path:");
-+ for (k = 0; k <= l; k++, path++) {
-+ if (path->p_idx) {
-+ ext_debug(tree, " %d->%d", path->p_idx->ei_block,
-+ path->p_idx->ei_leaf);
-+ } else if (path->p_ext) {
-+ ext_debug(tree, " %d:%d:%d",
-+ path->p_ext->ee_block,
-+ path->p_ext->ee_len,
-+ path->p_ext->ee_start);
-+ } else
-+ ext_debug(tree, " []");
-+ }
-+ ext_debug(tree, "\n");
-+#endif
-+}
-+
-+static void ext3_ext_show_leaf(struct ext3_extents_tree *tree,
-+ struct ext3_ext_path *path)
-+{
-+#ifdef EXT_DEBUG
-+ int depth = EXT_DEPTH(tree);
-+ struct ext3_extent_header *eh;
-+ struct ext3_extent *ex;
-+ int i;
-+
-+ if (!path)
-+ return;
-+
-+ eh = path[depth].p_hdr;
-+ ex = EXT_FIRST_EXTENT(eh);
-+
-+ for (i = 0; i < eh->eh_entries; i++, ex++) {
-+ ext_debug(tree, "%d:%d:%d ",
-+ ex->ee_block, ex->ee_len, ex->ee_start);
-+ }
-+ ext_debug(tree, "\n");
-+#endif
-+}
-+
-+static void ext3_ext_drop_refs(struct ext3_ext_path *path)
-+{
-+ int depth = path->p_depth;
-+ int i;
-+
-+ for (i = 0; i <= depth; i++, path++) {
-+ if (path->p_bh) {
-+ brelse(path->p_bh);
-+ path->p_bh = NULL;
-+ }
-+ }
-+}
-+
-+/*
-+ * binary search for closest index by given block
-+ */
-+static inline void
-+ext3_ext_binsearch_idx(struct ext3_extents_tree *tree,
-+ struct ext3_ext_path *path, int block)
-+{
-+ struct ext3_extent_header *eh = path->p_hdr;
-+ struct ext3_extent_idx *ix;
-+ int l = 0, k, r;
-+
-+ EXT_ASSERT(eh->eh_magic == EXT3_EXT_MAGIC);
-+ EXT_ASSERT(eh->eh_entries <= eh->eh_max);
-+ EXT_ASSERT(eh->eh_entries > 0);
-+
-+ ext_debug(tree, "binsearch for %d(idx): ", block);
-+
-+ path->p_idx = ix = EXT_FIRST_INDEX(eh);
-+
-+ r = k = eh->eh_entries;
-+ while (k > 1) {
-+ k = (r - l) / 2;
-+ if (block < ix[l + k].ei_block)
-+ r -= k;
-+ else
-+ l += k;
-+ ext_debug(tree, "%d:%d:%d ", k, l, r);
-+ }
-+
-+ ix += l;
-+ path->p_idx = ix;
-+ ext_debug(tree," -> %d->%d ",path->p_idx->ei_block,path->p_idx->ei_leaf);
-+
-+ while (l++ < r) {
-+ if (block < ix->ei_block)
-+ break;
-+ path->p_idx = ix++;
-+ }
-+ ext_debug(tree, " -> %d->%d\n", path->p_idx->ei_block,
-+ path->p_idx->ei_leaf);
-+
-+#ifdef CHECK_BINSEARCH
-+ {
-+ struct ext3_extent_idx *chix;
-+
-+ chix = ix = EXT_FIRST_INDEX(eh);
-+ for (k = 0; k < eh->eh_entries; k++, ix++) {
-+ if (k != 0 && ix->ei_block <= ix[-1].ei_block) {
-+ printk("k=%d, ix=0x%p, first=0x%p\n", k,
-+ ix, EXT_FIRST_INDEX(eh));
-+ printk("%u <= %u\n",
-+ ix->ei_block,ix[-1].ei_block);
-+ }
-+ EXT_ASSERT(k == 0 || ix->ei_block > ix[-1].ei_block);
-+ if (block < ix->ei_block)
-+ break;
-+ chix = ix;
-+ }
-+ EXT_ASSERT(chix == path->p_idx);
-+ }
-+#endif
-+}
-+
-+/*
-+ * binary search for closest extent by given block
-+ */
-+static inline void
-+ext3_ext_binsearch(struct ext3_extents_tree *tree,
-+ struct ext3_ext_path *path, int block)
-+{
-+ struct ext3_extent_header *eh = path->p_hdr;
-+ struct ext3_extent *ex;
-+ int l = 0, k, r;
-+
-+ EXT_ASSERT(eh->eh_magic == EXT3_EXT_MAGIC);
-+ EXT_ASSERT(eh->eh_entries <= eh->eh_max);
-+
-+ if (eh->eh_entries == 0) {
-+ /*
-+ * this leaf is still empty:
-+ * we get such a leaf in the split/add case
-+ */
-+ return;
-+ }
-+
-+ ext_debug(tree, "binsearch for %d: ", block);
-+
-+ path->p_ext = ex = EXT_FIRST_EXTENT(eh);
-+
-+ r = k = eh->eh_entries;
-+ while (k > 1) {
-+ k = (r - l) / 2;
-+ if (block < ex[l + k].ee_block)
-+ r -= k;
-+ else
-+ l += k;
-+ ext_debug(tree, "%d:%d:%d ", k, l, r);
-+ }
-+
-+ ex += l;
-+ path->p_ext = ex;
-+ ext_debug(tree, " -> %d:%d:%d ", path->p_ext->ee_block,
-+ path->p_ext->ee_start, path->p_ext->ee_len);
-+
-+ while (l++ < r) {
-+ if (block < ex->ee_block)
-+ break;
-+ path->p_ext = ex++;
-+ }
-+ ext_debug(tree, " -> %d:%d:%d\n", path->p_ext->ee_block,
-+ path->p_ext->ee_start, path->p_ext->ee_len);
-+
-+#ifdef CHECK_BINSEARCH
-+ {
-+ struct ext3_extent *chex;
-+
-+ chex = ex = EXT_FIRST_EXTENT(eh);
-+ for (k = 0; k < eh->eh_entries; k++, ex++) {
-+ EXT_ASSERT(k == 0 || ex->ee_block > ex[-1].ee_block);
-+ if (block < ex->ee_block)
-+ break;
-+ chex = ex;
-+ }
-+ EXT_ASSERT(chex == path->p_ext);
-+ }
-+#endif
-+}
-+
-+int ext3_extent_tree_init(handle_t *handle, struct ext3_extents_tree *tree)
-+{
-+ struct ext3_extent_header *eh;
-+
-+ BUG_ON(tree->buffer_len == 0);
-+ ext3_ext_get_access_for_root(handle, tree);
-+ eh = EXT_ROOT_HDR(tree);
-+ eh->eh_depth = 0;
-+ eh->eh_entries = 0;
-+ eh->eh_magic = EXT3_EXT_MAGIC;
-+ eh->eh_max = ext3_ext_space_root(tree);
-+ ext3_ext_mark_root_dirty(handle, tree);
-+ ext3_ext_invalidate_cache(tree);
-+ return 0;
-+}
-+
-+struct ext3_ext_path *
-+ext3_ext_find_extent(struct ext3_extents_tree *tree, int block,
-+ struct ext3_ext_path *path)
-+{
-+ struct ext3_extent_header *eh;
-+ struct buffer_head *bh;
-+ int depth, i, ppos = 0;
-+
-+ EXT_ASSERT(tree);
-+ EXT_ASSERT(tree->inode);
-+ EXT_ASSERT(tree->root);
-+
-+ eh = EXT_ROOT_HDR(tree);
-+ EXT_ASSERT(eh);
-+ if (ext3_ext_check_header(eh)) {
-+ /* don't free previously allocated path
-+ * -- caller should take care */
-+ path = NULL;
-+ goto err;
-+ }
-+
-+ i = depth = EXT_DEPTH(tree);
-+ EXT_ASSERT(eh->eh_max);
-+ EXT_ASSERT(eh->eh_magic == EXT3_EXT_MAGIC);
-+
-+ /* account possible depth increase */
-+ if (!path) {
-+ path = kmalloc(sizeof(struct ext3_ext_path) * (depth + 2),
-+ GFP_NOFS);
-+ if (!path)
-+ return ERR_PTR(-ENOMEM);
-+ }
-+ memset(path, 0, sizeof(struct ext3_ext_path) * (depth + 1));
-+ path[0].p_hdr = eh;
-+
-+ /* walk through the tree */
-+ while (i) {
-+ ext_debug(tree, "depth %d: num %d, max %d\n",
-+ ppos, eh->eh_entries, eh->eh_max);
-+ ext3_ext_binsearch_idx(tree, path + ppos, block);
-+ path[ppos].p_block = path[ppos].p_idx->ei_leaf;
-+ path[ppos].p_depth = i;
-+ path[ppos].p_ext = NULL;
-+
-+ bh = sb_bread(tree->inode->i_sb, path[ppos].p_block);
-+ if (!bh)
-+ goto err;
-+
-+ eh = EXT_BLOCK_HDR(bh);
-+ ppos++;
-+ EXT_ASSERT(ppos <= depth);
-+ path[ppos].p_bh = bh;
-+ path[ppos].p_hdr = eh;
-+ i--;
-+
-+ if (ext3_ext_check_header(eh))
-+ goto err;
-+ }
-+
-+ path[ppos].p_depth = i;
-+ path[ppos].p_hdr = eh;
-+ path[ppos].p_ext = NULL;
-+ path[ppos].p_idx = NULL;
-+
-+ if (ext3_ext_check_header(eh))
-+ goto err;
-+
-+ /* find extent */
-+ ext3_ext_binsearch(tree, path + ppos, block);
-+
-+ ext3_ext_show_path(tree, path);
-+
-+ return path;
-+
-+err:
-+ printk(KERN_ERR "EXT3-fs: header is corrupted!\n");
-+ if (path) {
-+ ext3_ext_drop_refs(path);
-+ kfree(path);
-+ }
-+ return ERR_PTR(-EIO);
-+}
-+
-+/*
-+ * insert new index [logical;ptr] into the block at curp;
-+ * it checks where to insert: before curp or after curp
-+ */
-+static int ext3_ext_insert_index(handle_t *handle,
-+ struct ext3_extents_tree *tree,
-+ struct ext3_ext_path *curp,
-+ int logical, int ptr)
-+{
-+ struct ext3_extent_idx *ix;
-+ int len, err;
-+
-+ if ((err = ext3_ext_get_access(handle, tree, curp)))
-+ return err;
-+
-+ EXT_ASSERT(logical != curp->p_idx->ei_block);
-+ len = EXT_MAX_INDEX(curp->p_hdr) - curp->p_idx;
-+ if (logical > curp->p_idx->ei_block) {
-+ /* insert after */
-+ if (curp->p_idx != EXT_LAST_INDEX(curp->p_hdr)) {
-+ len = (len - 1) * sizeof(struct ext3_extent_idx);
-+ len = len < 0 ? 0 : len;
-+ ext_debug(tree, "insert new index %d after: %d. "
-+ "move %d from 0x%p to 0x%p\n",
-+ logical, ptr, len,
-+ (curp->p_idx + 1), (curp->p_idx + 2));
-+ memmove(curp->p_idx + 2, curp->p_idx + 1, len);
-+ }
-+ ix = curp->p_idx + 1;
-+ } else {
-+ /* insert before */
-+ len = len * sizeof(struct ext3_extent_idx);
-+ len = len < 0 ? 0 : len;
-+ ext_debug(tree, "insert new index %d before: %d. "
-+ "move %d from 0x%p to 0x%p\n",
-+ logical, ptr, len,
-+ curp->p_idx, (curp->p_idx + 1));
-+ memmove(curp->p_idx + 1, curp->p_idx, len);
-+ ix = curp->p_idx;
-+ }
-+
-+ ix->ei_block = logical;
-+ ix->ei_leaf = ptr;
-+ ix->ei_leaf_hi = ix->ei_unused = 0;
-+ curp->p_hdr->eh_entries++;
-+
-+ EXT_ASSERT(curp->p_hdr->eh_entries <= curp->p_hdr->eh_max);
-+ EXT_ASSERT(ix <= EXT_LAST_INDEX(curp->p_hdr));
-+
-+ err = ext3_ext_dirty(handle, tree, curp);
-+ ext3_std_error(tree->inode->i_sb, err);
-+
-+ return err;
-+}
-+
-+/*
-+ * routine inserts a new subtree into the path, using the free index entry
-+ * at depth 'at':
-+ * - allocates all needed blocks (new leaf and all intermediate index blocks)
-+ * - makes a decision where to split
-+ * - moves the remaining extents and index entries (right of the split point)
-+ * into the newly allocated blocks
-+ * - initializes the subtree
-+ */
-+static int ext3_ext_split(handle_t *handle, struct ext3_extents_tree *tree,
-+ struct ext3_ext_path *path,
-+ struct ext3_extent *newext, int at)
-+{
-+ struct buffer_head *bh = NULL;
-+ int depth = EXT_DEPTH(tree);
-+ struct ext3_extent_header *neh;
-+ struct ext3_extent_idx *fidx;
-+ struct ext3_extent *ex;
-+ int i = at, k, m, a;
-+ unsigned long newblock, oldblock, border;
-+ int *ablocks = NULL; /* array of allocated blocks */
-+ int err = 0;
-+
-+ /* make decision: where to split? */
-+ /* FIXME: for now the decision is the simplest one: at the current extent */
-+
-+ /* if the current leaf is going to be split, then we should use
-+ * the border from the split point */
-+ EXT_ASSERT(path[depth].p_ext <= EXT_MAX_EXTENT(path[depth].p_hdr));
-+ if (path[depth].p_ext != EXT_MAX_EXTENT(path[depth].p_hdr)) {
-+ border = path[depth].p_ext[1].ee_block;
-+ ext_debug(tree, "leaf will be splitted."
-+ " next leaf starts at %d\n",
-+ (int)border);
-+ } else {
-+ border = newext->ee_block;
-+ ext_debug(tree, "leaf will be added."
-+ " next leaf starts at %d\n",
-+ (int)border);
-+ }
-+
-+ /*
-+ * if error occurs, then we break processing
-+ * and turn filesystem read-only. so, index won't
-+ * be inserted and tree will be in consistent
-+ * state. next mount will repair buffers too
-+ */
-+
-+ /*
-+ * get array to track all allocated blocks
-+ * we need this to handle errors and to free the blocks
-+ * upon error
-+ */
-+ ablocks = kmalloc(sizeof(unsigned long) * depth, GFP_NOFS);
-+ if (!ablocks)
-+ return -ENOMEM;
-+ memset(ablocks, 0, sizeof(unsigned long) * depth);
-+
-+ /* allocate all needed blocks */
-+ ext_debug(tree, "allocate %d blocks for indexes/leaf\n", depth - at);
-+ for (a = 0; a < depth - at; a++) {
-+ newblock = ext3_ext_new_block(handle, tree, path, newext, &err);
-+ if (newblock == 0)
-+ goto cleanup;
-+ ablocks[a] = newblock;
-+ }
-+
-+ /* initialize new leaf */
-+ newblock = ablocks[--a];
-+ EXT_ASSERT(newblock);
-+ bh = sb_getblk(tree->inode->i_sb, newblock);
-+ if (!bh) {
-+ err = -EIO;
-+ goto cleanup;
-+ }
-+ lock_buffer(bh);
-+
-+ if ((err = ext3_journal_get_create_access(handle, bh)))
-+ goto cleanup;
-+
-+ neh = EXT_BLOCK_HDR(bh);
-+ neh->eh_entries = 0;
-+ neh->eh_max = ext3_ext_space_block(tree);
-+ neh->eh_magic = EXT3_EXT_MAGIC;
-+ neh->eh_depth = 0;
-+ ex = EXT_FIRST_EXTENT(neh);
-+
-+ /* move the remainder of path[depth] to the new leaf */
-+ EXT_ASSERT(path[depth].p_hdr->eh_entries == path[depth].p_hdr->eh_max);
-+ /* start copy from next extent */
-+ /* TODO: we could do it by single memmove */
-+ m = 0;
-+ path[depth].p_ext++;
-+ while (path[depth].p_ext <=
-+ EXT_MAX_EXTENT(path[depth].p_hdr)) {
-+ ext_debug(tree, "move %d:%d:%d in new leaf %lu\n",
-+ path[depth].p_ext->ee_block,
-+ path[depth].p_ext->ee_start,
-+ path[depth].p_ext->ee_len,
-+ newblock);
-+ memmove(ex++, path[depth].p_ext++, sizeof(struct ext3_extent));
-+ neh->eh_entries++;
-+ m++;
-+ }
-+ set_buffer_uptodate(bh);
-+ unlock_buffer(bh);
-+
-+ if ((err = ext3_journal_dirty_metadata(handle, bh)))
-+ goto cleanup;
-+ brelse(bh);
-+ bh = NULL;
-+
-+ /* correct old leaf */
-+ if (m) {
-+ if ((err = ext3_ext_get_access(handle, tree, path + depth)))
-+ goto cleanup;
-+ path[depth].p_hdr->eh_entries -= m;
-+ if ((err = ext3_ext_dirty(handle, tree, path + depth)))
-+ goto cleanup;
-+
-+ }
-+
-+ /* create intermediate indexes */
-+ k = depth - at - 1;
-+ EXT_ASSERT(k >= 0);
-+ if (k)
-+ ext_debug(tree, "create %d intermediate indices\n", k);
-+ /* insert new index into current index block */
-+ /* current depth stored in i var */
-+ i = depth - 1;
-+ while (k--) {
-+ oldblock = newblock;
-+ newblock = ablocks[--a];
-+ bh = sb_getblk(tree->inode->i_sb, newblock);
-+ if (!bh) {
-+ err = -EIO;
-+ goto cleanup;
-+ }
-+ lock_buffer(bh);
-+
-+ if ((err = ext3_journal_get_create_access(handle, bh)))
-+ goto cleanup;
-+
-+ neh = EXT_BLOCK_HDR(bh);
-+ neh->eh_entries = 1;
-+ neh->eh_magic = EXT3_EXT_MAGIC;
-+ neh->eh_max = ext3_ext_space_block_idx(tree);
-+ neh->eh_depth = depth - i;
-+ fidx = EXT_FIRST_INDEX(neh);
-+ fidx->ei_block = border;
-+ fidx->ei_leaf = oldblock;
-+ fidx->ei_leaf_hi = fidx->ei_unused = 0;
-+
-+ ext_debug(tree, "int.index at %d (block %lu): %lu -> %lu\n",
-+ i, newblock, border, oldblock);
-+ /* copy indexes */
-+ m = 0;
-+ path[i].p_idx++;
-+
-+ ext_debug(tree, "cur 0x%p, last 0x%p\n", path[i].p_idx,
-+ EXT_MAX_INDEX(path[i].p_hdr));
-+ EXT_ASSERT(EXT_MAX_INDEX(path[i].p_hdr) ==
-+ EXT_LAST_INDEX(path[i].p_hdr));
-+ while (path[i].p_idx <= EXT_MAX_INDEX(path[i].p_hdr)) {
-+ ext_debug(tree, "%d: move %d:%d in new index %lu\n",
-+ i, path[i].p_idx->ei_block,
-+ path[i].p_idx->ei_leaf, newblock);
-+ memmove(++fidx, path[i].p_idx++,
-+ sizeof(struct ext3_extent_idx));
-+ neh->eh_entries++;
-+ EXT_ASSERT(neh->eh_entries <= neh->eh_max);
-+ m++;
-+ }
-+ set_buffer_uptodate(bh);
-+ unlock_buffer(bh);
-+
-+ if ((err = ext3_journal_dirty_metadata(handle, bh)))
-+ goto cleanup;
-+ brelse(bh);
-+ bh = NULL;
-+
-+ /* correct old index */
-+ if (m) {
-+ err = ext3_ext_get_access(handle, tree, path + i);
-+ if (err)
-+ goto cleanup;
-+ path[i].p_hdr->eh_entries -= m;
-+ err = ext3_ext_dirty(handle, tree, path + i);
-+ if (err)
-+ goto cleanup;
-+ }
-+
-+ i--;
-+ }
-+
-+ /* insert new index */
-+ if (!err)
-+ err = ext3_ext_insert_index(handle, tree, path + at,
-+ border, newblock);
-+
-+cleanup:
-+ if (bh) {
-+ if (buffer_locked(bh))
-+ unlock_buffer(bh);
-+ brelse(bh);
-+ }
-+
-+ if (err) {
-+ /* free all allocated blocks in error case */
-+ for (i = 0; i < depth; i++) {
-+ if (!ablocks[i])
-+ continue;
-+ ext3_free_blocks(handle, tree->inode, ablocks[i], 1);
-+ }
-+ }
-+ kfree(ablocks);
-+
-+ return err;
-+}
-+
-+/*
-+ * routine implements the tree growing procedure:
-+ * - allocates a new block
-+ * - moves top-level data (index block or leaf) into the new block
-+ * - initializes the new top level, creating an index that points to the
-+ * just created block
-+ */
-+static int ext3_ext_grow_indepth(handle_t *handle,
-+ struct ext3_extents_tree *tree,
-+ struct ext3_ext_path *path,
-+ struct ext3_extent *newext)
-+{
-+ struct ext3_ext_path *curp = path;
-+ struct ext3_extent_header *neh;
-+ struct ext3_extent_idx *fidx;
-+ struct buffer_head *bh;
-+ unsigned long newblock;
-+ int err = 0;
-+
-+ newblock = ext3_ext_new_block(handle, tree, path, newext, &err);
-+ if (newblock == 0)
-+ return err;
-+
-+ bh = sb_getblk(tree->inode->i_sb, newblock);
-+ if (!bh) {
-+ err = -EIO;
-+ ext3_std_error(tree->inode->i_sb, err);
-+ return err;
-+ }
-+ lock_buffer(bh);
-+
-+ if ((err = ext3_journal_get_create_access(handle, bh))) {
-+ unlock_buffer(bh);
-+ goto out;
-+ }
-+
-+ /* move top-level index/leaf into new block */
-+ memmove(bh->b_data, curp->p_hdr, tree->buffer_len);
-+
-+ /* set size of new block */
-+ neh = EXT_BLOCK_HDR(bh);
-+ /* the old root could hold indexes or leaves,
-+ * so calculate eh_max the right way */
-+ if (EXT_DEPTH(tree))
-+ neh->eh_max = ext3_ext_space_block_idx(tree);
-+ else
-+ neh->eh_max = ext3_ext_space_block(tree);
-+ neh->eh_magic = EXT3_EXT_MAGIC;
-+ set_buffer_uptodate(bh);
-+ unlock_buffer(bh);
-+
-+ if ((err = ext3_journal_dirty_metadata(handle, bh)))
-+ goto out;
-+
-+ /* create index in new top-level index: num,max,pointer */
-+ if ((err = ext3_ext_get_access(handle, tree, curp)))
-+ goto out;
-+
-+ curp->p_hdr->eh_magic = EXT3_EXT_MAGIC;
-+ curp->p_hdr->eh_max = ext3_ext_space_root_idx(tree);
-+ curp->p_hdr->eh_entries = 1;
-+ curp->p_idx = EXT_FIRST_INDEX(curp->p_hdr);
-+ /* FIXME: it works, but actually path[0] can be index */
-+ curp->p_idx->ei_block = EXT_FIRST_EXTENT(path[0].p_hdr)->ee_block;
-+ curp->p_idx->ei_leaf = newblock;
-+ curp->p_idx->ei_leaf_hi = curp->p_idx->ei_unused = 0;
-+
-+ neh = EXT_ROOT_HDR(tree);
-+ fidx = EXT_FIRST_INDEX(neh);
-+ ext_debug(tree, "new root: num %d(%d), lblock %d, ptr %d\n",
-+ neh->eh_entries, neh->eh_max, fidx->ei_block, fidx->ei_leaf);
-+
-+ neh->eh_depth = path->p_depth + 1;
-+ err = ext3_ext_dirty(handle, tree, curp);
-+out:
-+ brelse(bh);
-+
-+ return err;
-+}
-+
-+/*
-+ * routine finds an empty index entry and adds a new leaf. if no free index
-+ * is found, then it requests growing of the tree in depth
-+ */
-+static int ext3_ext_create_new_leaf(handle_t *handle,
-+ struct ext3_extents_tree *tree,
-+ struct ext3_ext_path *path,
-+ struct ext3_extent *newext)
-+{
-+ struct ext3_ext_path *curp;
-+ int depth, i, err = 0;
-+
-+repeat:
-+ i = depth = EXT_DEPTH(tree);
-+
-+ /* walk up to the tree and look for free index entry */
-+ curp = path + depth;
-+ while (i > 0 && !EXT_HAS_FREE_INDEX(curp)) {
-+ i--;
-+ curp--;
-+ }
-+
-+ /* we use an already allocated block for the index block,
-+ * so subsequent data blocks should be contiguous */
-+ if (EXT_HAS_FREE_INDEX(curp)) {
-+ /* if we found index with free entry, then use that
-+ * entry: create all needed subtree and add new leaf */
-+ err = ext3_ext_split(handle, tree, path, newext, i);
-+
-+ /* refill path */
-+ ext3_ext_drop_refs(path);
-+ path = ext3_ext_find_extent(tree, newext->ee_block, path);
-+ if (IS_ERR(path))
-+ err = PTR_ERR(path);
-+ } else {
-+ /* tree is full, time to grow in depth */
-+ err = ext3_ext_grow_indepth(handle, tree, path, newext);
-+
-+ /* refill path */
-+ ext3_ext_drop_refs(path);
-+ path = ext3_ext_find_extent(tree, newext->ee_block, path);
-+ if (IS_ERR(path))
-+ err = PTR_ERR(path);
-+
-+ /*
-+ * only the first grow (depth 0 -> 1) produces free space;
-+ * in all other cases we have to split the grown tree
-+ */
-+ depth = EXT_DEPTH(tree);
-+ if (path[depth].p_hdr->eh_entries == path[depth].p_hdr->eh_max) {
-+ /* now we need split */
-+ goto repeat;
-+ }
-+ }
-+
-+ if (err)
-+ return err;
-+
-+ return 0;
-+}
-+
-+/*
-+ * returns the allocated block in the subsequent extent or EXT_MAX_BLOCK
-+ * NOTE: it considers the block number from an index entry as an
-+ * allocated block. thus, index entries have to be consistent
-+ * with the leaves
-+ */
-+static unsigned long
-+ext3_ext_next_allocated_block(struct ext3_ext_path *path)
-+{
-+ int depth;
-+
-+ EXT_ASSERT(path != NULL);
-+ depth = path->p_depth;
-+
-+ if (depth == 0 && path->p_ext == NULL)
-+ return EXT_MAX_BLOCK;
-+
-+ /* FIXME: what if index isn't full ?! */
-+ while (depth >= 0) {
-+ if (depth == path->p_depth) {
-+ /* leaf */
-+ if (path[depth].p_ext !=
-+ EXT_LAST_EXTENT(path[depth].p_hdr))
-+ return path[depth].p_ext[1].ee_block;
-+ } else {
-+ /* index */
-+ if (path[depth].p_idx !=
-+ EXT_LAST_INDEX(path[depth].p_hdr))
-+ return path[depth].p_idx[1].ei_block;
-+ }
-+ depth--;
-+ }
-+
-+ return EXT_MAX_BLOCK;
-+}
-+
-+/*
-+ * returns first allocated block from next leaf or EXT_MAX_BLOCK
-+ */
-+static unsigned ext3_ext_next_leaf_block(struct ext3_extents_tree *tree,
-+ struct ext3_ext_path *path)
-+{
-+ int depth;
-+
-+ EXT_ASSERT(path != NULL);
-+ depth = path->p_depth;
-+
-+ /* a zero-depth tree has no separate leaf blocks at all */
-+ if (depth == 0)
-+ return EXT_MAX_BLOCK;
-+
-+ /* go to index block */
-+ depth--;
-+
-+ while (depth >= 0) {
-+ if (path[depth].p_idx !=
-+ EXT_LAST_INDEX(path[depth].p_hdr))
-+ return path[depth].p_idx[1].ei_block;
-+ depth--;
-+ }
-+
-+ return EXT_MAX_BLOCK;
-+}
-+
-+/*
-+ * if leaf gets modified and modified extent is first in the leaf
-+ * then we have to correct all indexes above
-+ * TODO: do we need to correct tree in all cases?
-+ */
-+int ext3_ext_correct_indexes(handle_t *handle, struct ext3_extents_tree *tree,
-+ struct ext3_ext_path *path)
-+{
-+ struct ext3_extent_header *eh;
-+ int depth = EXT_DEPTH(tree);
-+ struct ext3_extent *ex;
-+ unsigned long border;
-+ int k, err = 0;
-+
-+ eh = path[depth].p_hdr;
-+ ex = path[depth].p_ext;
-+ EXT_ASSERT(ex);
-+ EXT_ASSERT(eh);
-+
-+ if (depth == 0) {
-+ /* there is no tree at all */
-+ return 0;
-+ }
-+
-+ if (ex != EXT_FIRST_EXTENT(eh)) {
-+ /* we correct the tree only if the first extent in the leaf got modified */
-+ return 0;
-+ }
-+
-+ /*
-+ * TODO: we need correction if the border is smaller than the current one
-+ */
-+ k = depth - 1;
-+ border = path[depth].p_ext->ee_block;
-+ if ((err = ext3_ext_get_access(handle, tree, path + k)))
-+ return err;
-+ path[k].p_idx->ei_block = border;
-+ if ((err = ext3_ext_dirty(handle, tree, path + k)))
-+ return err;
-+
-+ while (k--) {
-+ /* change all left-side indexes */
-+ if (path[k+1].p_idx != EXT_FIRST_INDEX(path[k+1].p_hdr))
-+ break;
-+ if ((err = ext3_ext_get_access(handle, tree, path + k)))
-+ break;
-+ path[k].p_idx->ei_block = border;
-+ if ((err = ext3_ext_dirty(handle, tree, path + k)))
-+ break;
-+ }
-+
-+ return err;
-+}
-+
-+static int inline
-+ext3_can_extents_be_merged(struct ext3_extents_tree *tree,
-+ struct ext3_extent *ex1,
-+ struct ext3_extent *ex2)
-+{
-+ if (ex1->ee_block + ex1->ee_len != ex2->ee_block)
-+ return 0;
-+
-+#ifdef AGRESSIVE_TEST
-+ if (ex1->ee_len >= 4)
-+ return 0;
-+#endif
-+
-+ if (!tree->ops->mergable)
-+ return 1;
-+
-+ return tree->ops->mergable(ex1, ex2);
-+}
-+
-+/*
-+ * this routine tries to merge the requested extent into an existing
-+ * extent or inserts the requested extent as a new one into the tree,
-+ * creating a new leaf in the no-space case
-+ */
-+int ext3_ext_insert_extent(handle_t *handle, struct ext3_extents_tree *tree,
-+ struct ext3_ext_path *path,
-+ struct ext3_extent *newext)
-+{
-+ struct ext3_extent_header * eh;
-+ struct ext3_extent *ex, *fex;
-+ struct ext3_extent *nearex; /* nearest extent */
-+ struct ext3_ext_path *npath = NULL;
-+ int depth, len, err, next;
-+
-+ EXT_ASSERT(newext->ee_len > 0);
-+ depth = EXT_DEPTH(tree);
-+ ex = path[depth].p_ext;
-+ EXT_ASSERT(path[depth].p_hdr);
-+
-+ /* try to insert block into found extent and return */
-+ if (ex && ext3_can_extents_be_merged(tree, ex, newext)) {
-+ ext_debug(tree, "append %d block to %d:%d (from %d)\n",
-+ newext->ee_len, ex->ee_block, ex->ee_len,
-+ ex->ee_start);
-+ if ((err = ext3_ext_get_access(handle, tree, path + depth)))
-+ return err;
-+ ex->ee_len += newext->ee_len;
-+ eh = path[depth].p_hdr;
-+ nearex = ex;
-+ goto merge;
-+ }
-+
-+repeat:
-+ depth = EXT_DEPTH(tree);
-+ eh = path[depth].p_hdr;
-+ if (eh->eh_entries < eh->eh_max)
-+ goto has_space;
-+
-+ /* probably next leaf has space for us? */
-+ fex = EXT_LAST_EXTENT(eh);
-+ next = ext3_ext_next_leaf_block(tree, path);
-+ if (newext->ee_block > fex->ee_block && next != EXT_MAX_BLOCK) {
-+ ext_debug(tree, "next leaf block - %d\n", next);
-+ EXT_ASSERT(!npath);
-+ npath = ext3_ext_find_extent(tree, next, NULL);
-+ if (IS_ERR(npath))
-+ return PTR_ERR(npath);
-+ EXT_ASSERT(npath->p_depth == path->p_depth);
-+ eh = npath[depth].p_hdr;
-+ if (eh->eh_entries < eh->eh_max) {
-+ ext_debug(tree, "next leaf isnt full(%d)\n",
-+ eh->eh_entries);
-+ path = npath;
-+ goto repeat;
-+ }
-+ ext_debug(tree, "next leaf hasno free space(%d,%d)\n",
-+ eh->eh_entries, eh->eh_max);
-+ }
-+
-+ /*
-+ * there is no free space in the found leaf,
-+ * so we are going to add a new leaf in the tree
-+ */
-+ err = ext3_ext_create_new_leaf(handle, tree, path, newext);
-+ if (err)
-+ goto cleanup;
-+ depth = EXT_DEPTH(tree);
-+ eh = path[depth].p_hdr;
-+
-+has_space:
-+ nearex = path[depth].p_ext;
-+
-+ if ((err = ext3_ext_get_access(handle, tree, path + depth)))
-+ goto cleanup;
-+
-+ if (!nearex) {
-+ /* there is no extent in this leaf, create first one */
-+ ext_debug(tree, "first extent in the leaf: %d:%d:%d\n",
-+ newext->ee_block, newext->ee_start,
-+ newext->ee_len);
-+ path[depth].p_ext = EXT_FIRST_EXTENT(eh);
-+ } else if (newext->ee_block > nearex->ee_block) {
-+ EXT_ASSERT(newext->ee_block != nearex->ee_block);
-+ if (nearex != EXT_LAST_EXTENT(eh)) {
-+ len = EXT_MAX_EXTENT(eh) - nearex;
-+ len = (len - 1) * sizeof(struct ext3_extent);
-+ len = len < 0 ? 0 : len;
-+ ext_debug(tree, "insert %d:%d:%d after: nearest 0x%p, "
-+ "move %d from 0x%p to 0x%p\n",
-+ newext->ee_block, newext->ee_start,
-+ newext->ee_len,
-+ nearex, len, nearex + 1, nearex + 2);
-+ memmove(nearex + 2, nearex + 1, len);
-+ }
-+ path[depth].p_ext = nearex + 1;
-+ } else {
-+ EXT_ASSERT(newext->ee_block != nearex->ee_block);
-+ len = (EXT_MAX_EXTENT(eh) - nearex) * sizeof(struct ext3_extent);
-+ len = len < 0 ? 0 : len;
-+ ext_debug(tree, "insert %d:%d:%d before: nearest 0x%p, "
-+ "move %d from 0x%p to 0x%p\n",
-+ newext->ee_block, newext->ee_start, newext->ee_len,
-+ nearex, len, nearex + 1, nearex + 2);
-+ memmove(nearex + 1, nearex, len);
-+ path[depth].p_ext = nearex;
-+ }
-+
-+ eh->eh_entries++;
-+ nearex = path[depth].p_ext;
-+ nearex->ee_block = newext->ee_block;
-+ nearex->ee_start = newext->ee_start;
-+ nearex->ee_len = newext->ee_len;
-+ /* FIXME: support for large fs */
-+ nearex->ee_start_hi = 0;
-+
-+merge:
-+ /* try to merge extents to the right */
-+ while (nearex < EXT_LAST_EXTENT(eh)) {
-+ if (!ext3_can_extents_be_merged(tree, nearex, nearex + 1))
-+ break;
-+ /* merge with next extent! */
-+ nearex->ee_len += nearex[1].ee_len;
-+ if (nearex + 1 < EXT_LAST_EXTENT(eh)) {
-+ len = (EXT_LAST_EXTENT(eh) - nearex - 1) *
-+ sizeof(struct ext3_extent);
-+ memmove(nearex + 1, nearex + 2, len);
-+ }
-+ eh->eh_entries--;
-+ EXT_ASSERT(eh->eh_entries > 0);
-+ }
-+
-+ /* try to merge extents to the left */
-+
-+ /* time to correct all indexes above */
-+ err = ext3_ext_correct_indexes(handle, tree, path);
-+ if (err)
-+ goto cleanup;
-+
-+ err = ext3_ext_dirty(handle, tree, path + depth);
-+
-+cleanup:
-+ if (npath) {
-+ ext3_ext_drop_refs(npath);
-+ kfree(npath);
-+ }
-+ ext3_ext_tree_changed(tree);
-+ ext3_ext_invalidate_cache(tree);
-+ return err;
-+}
-+
-+int ext3_ext_walk_space(struct ext3_extents_tree *tree, unsigned long block,
-+ unsigned long num, ext_prepare_callback func)
-+{
-+ struct ext3_ext_path *path = NULL;
-+ struct ext3_ext_cache cbex;
-+ struct ext3_extent *ex;
-+ unsigned long next, start = 0, end = 0;
-+ unsigned long last = block + num;
-+ int depth, exists, err = 0;
-+
-+ EXT_ASSERT(tree);
-+ EXT_ASSERT(func);
-+ EXT_ASSERT(tree->inode);
-+ EXT_ASSERT(tree->root);
-+
-+ while (block < last && block != EXT_MAX_BLOCK) {
-+ num = last - block;
-+ /* find extent for this block */
-+ path = ext3_ext_find_extent(tree, block, path);
-+ if (IS_ERR(path)) {
-+ err = PTR_ERR(path);
-+ path = NULL;
-+ break;
-+ }
-+
-+ depth = EXT_DEPTH(tree);
-+ EXT_ASSERT(path[depth].p_hdr);
-+ ex = path[depth].p_ext;
-+ next = ext3_ext_next_allocated_block(path);
-+
-+ exists = 0;
-+ if (!ex) {
-+ /* there is no extent yet, so try to allocate
-+ * all requested space */
-+ start = block;
-+ end = block + num;
-+ } else if (ex->ee_block > block) {
-+ /* need to allocate space before found extent */
-+ start = block;
-+ end = ex->ee_block;
-+ if (block + num < end)
-+ end = block + num;
-+ } else if (block >= ex->ee_block + ex->ee_len) {
-+ /* need to allocate space after found extent */
-+ start = block;
-+ end = block + num;
-+ if (end >= next)
-+ end = next;
-+ } else if (block >= ex->ee_block) {
-+ /*
-+ * some part of requested space is covered
-+ * by found extent
-+ */
-+ start = block;
-+ end = ex->ee_block + ex->ee_len;
-+ if (block + num < end)
-+ end = block + num;
-+ exists = 1;
-+ } else {
-+ BUG();
-+ }
-+ EXT_ASSERT(end > start);
-+
-+ if (!exists) {
-+ cbex.ec_block = start;
-+ cbex.ec_len = end - start;
-+ cbex.ec_start = 0;
-+ cbex.ec_type = EXT3_EXT_CACHE_GAP;
-+ } else {
-+ cbex.ec_block = ex->ee_block;
-+ cbex.ec_len = ex->ee_len;
-+ cbex.ec_start = ex->ee_start;
-+ cbex.ec_type = EXT3_EXT_CACHE_EXTENT;
-+ }
-+
-+ EXT_ASSERT(cbex.ec_len > 0);
-+ EXT_ASSERT(path[depth].p_hdr);
-+ err = func(tree, path, &cbex);
-+ ext3_ext_drop_refs(path);
-+
-+ if (err < 0)
-+ break;
-+ if (err == EXT_REPEAT)
-+ continue;
-+ else if (err == EXT_BREAK) {
-+ err = 0;
-+ break;
-+ }
-+
-+ if (EXT_DEPTH(tree) != depth) {
-+ /* depth was changed. we have to realloc path */
-+ kfree(path);
-+ path = NULL;
-+ }
-+
-+ block = cbex.ec_block + cbex.ec_len;
-+ }
-+
-+ if (path) {
-+ ext3_ext_drop_refs(path);
-+ kfree(path);
-+ }
-+
-+ return err;
-+}
-+
-+static inline void
-+ext3_ext_put_in_cache(struct ext3_extents_tree *tree, __u32 block,
-+ __u32 len, __u32 start, int type)
-+{
-+ EXT_ASSERT(len > 0);
-+ if (tree->cex) {
-+ tree->cex->ec_type = type;
-+ tree->cex->ec_block = block;
-+ tree->cex->ec_len = len;
-+ tree->cex->ec_start = start;
-+ }
-+}
-+
-+/*
-+ * this routine calculates the boundaries of the gap the requested block
-+ * fits into and caches this gap
-+ */
-+static inline void
-+ext3_ext_put_gap_in_cache(struct ext3_extents_tree *tree,
-+ struct ext3_ext_path *path,
-+ unsigned long block)
-+{
-+ int depth = EXT_DEPTH(tree);
-+ unsigned long lblock, len;
-+ struct ext3_extent *ex;
-+
-+ if (!tree->cex)
-+ return;
-+
-+ ex = path[depth].p_ext;
-+ if (ex == NULL) {
-+ /* there is no extent yet, so gap is [0;-] */
-+ lblock = 0;
-+ len = EXT_MAX_BLOCK;
-+ ext_debug(tree, "cache gap(whole file):");
-+ } else if (block < ex->ee_block) {
-+ lblock = block;
-+ len = ex->ee_block - block;
-+ ext_debug(tree, "cache gap(before): %lu [%lu:%lu]",
-+ (unsigned long) block,
-+ (unsigned long) ex->ee_block,
-+ (unsigned long) ex->ee_len);
-+ } else if (block >= ex->ee_block + ex->ee_len) {
-+ lblock = ex->ee_block + ex->ee_len;
-+ len = ext3_ext_next_allocated_block(path);
-+ ext_debug(tree, "cache gap(after): [%lu:%lu] %lu",
-+ (unsigned long) ex->ee_block,
-+ (unsigned long) ex->ee_len,
-+ (unsigned long) block);
-+ EXT_ASSERT(len > lblock);
-+ len = len - lblock;
-+ } else {
-+ lblock = len = 0;
-+ BUG();
-+ }
-+
-+ ext_debug(tree, " -> %lu:%lu\n", (unsigned long) lblock, len);
-+ ext3_ext_put_in_cache(tree, lblock, len, 0, EXT3_EXT_CACHE_GAP);
-+}
-+
-+static inline int
-+ext3_ext_in_cache(struct ext3_extents_tree *tree, unsigned long block,
-+ struct ext3_extent *ex)
-+{
-+ struct ext3_ext_cache *cex = tree->cex;
-+
-+ /* is there cache storage at all? */
-+ if (!cex)
-+ return EXT3_EXT_CACHE_NO;
-+
-+ /* has cache valid data? */
-+ if (cex->ec_type == EXT3_EXT_CACHE_NO)
-+ return EXT3_EXT_CACHE_NO;
-+
-+ EXT_ASSERT(cex->ec_type == EXT3_EXT_CACHE_GAP ||
-+ cex->ec_type == EXT3_EXT_CACHE_EXTENT);
-+ if (block >= cex->ec_block && block < cex->ec_block + cex->ec_len) {
-+ ex->ee_block = cex->ec_block;
-+ ex->ee_start = cex->ec_start;
-+ ex->ee_start_hi = 0;
-+ ex->ee_len = cex->ec_len;
-+ ext_debug(tree, "%lu cached by %lu:%lu:%lu\n",
-+ (unsigned long) block,
-+ (unsigned long) ex->ee_block,
-+ (unsigned long) ex->ee_len,
-+ (unsigned long) ex->ee_start);
-+ return cex->ec_type;
-+ }
-+
-+ /* not in cache */
-+ return EXT3_EXT_CACHE_NO;
-+}
-+
-+/*
-+ * routine removes an index from the index block.
-+ * it's used in the truncate case only, thus all requests are for
-+ * the last index in the block only
-+ */
-+int ext3_ext_rm_idx(handle_t *handle, struct ext3_extents_tree *tree,
-+ struct ext3_ext_path *path)
-+{
-+ struct buffer_head *bh;
-+ int err;
-+
-+ /* free index block */
-+ path--;
-+ EXT_ASSERT(path->p_hdr->eh_entries);
-+ if ((err = ext3_ext_get_access(handle, tree, path)))
-+ return err;
-+ path->p_hdr->eh_entries--;
-+ if ((err = ext3_ext_dirty(handle, tree, path)))
-+ return err;
-+ ext_debug(tree, "index is empty, remove it, free block %d\n",
-+ path->p_idx->ei_leaf);
-+ bh = sb_find_get_block(tree->inode->i_sb, path->p_idx->ei_leaf);
-+ ext3_forget(handle, 1, tree->inode, bh, path->p_idx->ei_leaf);
-+ ext3_free_blocks(handle, tree->inode, path->p_idx->ei_leaf, 1);
-+ return err;
-+}
-+
-+int ext3_ext_calc_credits_for_insert(struct ext3_extents_tree *tree,
-+ struct ext3_ext_path *path)
-+{
-+ int depth = EXT_DEPTH(tree);
-+ int needed;
-+
-+ if (path) {
-+ /* probably there is space in leaf? */
-+ if (path[depth].p_hdr->eh_entries < path[depth].p_hdr->eh_max)
-+ return 1;
-+ }
-+
-+ /*
-+ * the worst case we're expecting is creation of a
-+ * new root (growing in depth) with index splitting;
-+ * for splitting we have to consider depth + 1 because
-+ * a previous grow could have increased it
-+ */
-+ depth = depth + 1;
-+
-+ /*
-+ * growing in depth:
-+ * block allocation + new root + old root
-+ */
-+ needed = EXT3_ALLOC_NEEDED + 2;
-+
-+ /* index split. we may need to:
-+ * allocate intermediate indexes and a new leaf,
-+ * change two blocks at each level except the root,
-+ * modify the root block (inode)
-+ */
-+ needed += (depth * EXT3_ALLOC_NEEDED) + (2 * depth) + 1;
-+
-+ return needed;
-+}
-+
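-+/*
-+ * removal of an interior range [start,end] from a single extent:
-+ * trim the original extent down to the head part preceding 'start'
-+ * and insert a new extent for the tail following 'end', after
-+ * restarting the handle with enough credits for the insert
-+ */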
-+static int
-+ext3_ext_split_for_rm(handle_t *handle, struct ext3_extents_tree *tree,
-+ struct ext3_ext_path *path, unsigned long start,
-+ unsigned long end)
-+{
-+ struct ext3_extent *ex, tex;
-+ struct ext3_ext_path *npath;
-+ int depth, creds, err;
-+
-+ depth = EXT_DEPTH(tree);
-+ ex = path[depth].p_ext;
-+ EXT_ASSERT(ex);
-+ EXT_ASSERT(end < ex->ee_block + ex->ee_len - 1);
-+ EXT_ASSERT(ex->ee_block < start);
-+
-+ /* calculate tail extent */
-+ tex.ee_block = end + 1;
-+ EXT_ASSERT(tex.ee_block < ex->ee_block + ex->ee_len);
-+ tex.ee_len = ex->ee_block + ex->ee_len - tex.ee_block;
-+
-+ creds = ext3_ext_calc_credits_for_insert(tree, path);
-+ handle = ext3_ext_journal_restart(handle, creds);
-+ if (IS_ERR(handle))
-+ return PTR_ERR(handle);
-+
-+ /* calculate head extent. use primary extent */
-+ err = ext3_ext_get_access(handle, tree, path + depth);
-+ if (err)
-+ return err;
-+ ex->ee_len = start - ex->ee_block;
-+ err = ext3_ext_dirty(handle, tree, path + depth);
-+ if (err)
-+ return err;
-+
-+ /* FIXME: some callback to free underlying resource
-+ * and correct ee_start? */
-+ ext_debug(tree, "split extent: head %u:%u, tail %u:%u\n",
-+ ex->ee_block, ex->ee_len, tex.ee_block, tex.ee_len);
-+
-+ npath = ext3_ext_find_extent(tree, ex->ee_block, NULL);
-+ if (IS_ERR(npath))
-+ return PTR_ERR(npath);
-+ depth = EXT_DEPTH(tree);
-+ EXT_ASSERT(npath[depth].p_ext->ee_block == ex->ee_block);
-+ EXT_ASSERT(npath[depth].p_ext->ee_len == ex->ee_len);
-+
-+ err = ext3_ext_insert_extent(handle, tree, npath, &tex);
-+ ext3_ext_drop_refs(npath);
-+ kfree(npath);
-+
-+ return err;
-+}
-+
-+static int
-+ext3_ext_rm_leaf(handle_t *handle, struct ext3_extents_tree *tree,
-+ struct ext3_ext_path *path, unsigned long start,
-+ unsigned long end)
-+{
-+ struct ext3_extent *ex, *fu = NULL, *lu, *le;
-+ int err = 0, correct_index = 0;
-+ int depth = EXT_DEPTH(tree), credits;
-+ struct ext3_extent_header *eh;
-+ unsigned a, b, block, num;
-+
-+ ext_debug(tree, "remove [%lu:%lu] in leaf\n", start, end);
-+ if (!path[depth].p_hdr)
-+ path[depth].p_hdr = EXT_BLOCK_HDR(path[depth].p_bh);
-+ eh = path[depth].p_hdr;
-+ EXT_ASSERT(eh);
-+ EXT_ASSERT(eh->eh_entries <= eh->eh_max);
-+ EXT_ASSERT(eh->eh_magic == EXT3_EXT_MAGIC);
-+
-+ /* find where to start removing */
-+ le = ex = EXT_LAST_EXTENT(eh);
-+ while (ex != EXT_FIRST_EXTENT(eh)) {
-+ if (ex->ee_block <= end)
-+ break;
-+ ex--;
-+ }
-+
-+ if (start > ex->ee_block && end < ex->ee_block + ex->ee_len - 1) {
-+ /* removal of an internal part of the extent was requested;
-+ * tail and head must be placed in different extents,
-+ * so we have to insert one more extent */
-+ path[depth].p_ext = ex;
-+ return ext3_ext_split_for_rm(handle, tree, path, start, end);
-+ }
-+
-+ lu = ex;
-+ while (ex >= EXT_FIRST_EXTENT(eh) && ex->ee_block + ex->ee_len > start) {
-+ ext_debug(tree, "remove ext %u:%u\n", ex->ee_block, ex->ee_len);
-+ path[depth].p_ext = ex;
-+
-+ a = ex->ee_block > start ? ex->ee_block : start;
-+ b = ex->ee_block + ex->ee_len - 1 < end ?
-+ ex->ee_block + ex->ee_len - 1 : end;
-+
-+ ext_debug(tree, " border %u:%u\n", a, b);
-+
-+ if (a != ex->ee_block && b != ex->ee_block + ex->ee_len - 1) {
-+ block = 0;
-+ num = 0;
-+ BUG();
-+ } else if (a != ex->ee_block) {
-+ /* remove tail of the extent */
-+ block = ex->ee_block;
-+ num = a - block;
-+ } else if (b != ex->ee_block + ex->ee_len - 1) {
-+ /* remove head of the extent */
-+ block = a;
-+ num = b - a;
-+ } else {
-+ /* remove the whole extent: excellent! */
-+ block = ex->ee_block;
-+ num = 0;
-+ EXT_ASSERT(a == ex->ee_block &&
-+ b == ex->ee_block + ex->ee_len - 1);
-+ }
-+
-+ if (ex == EXT_FIRST_EXTENT(eh))
-+ correct_index = 1;
-+
-+ credits = 1;
-+ if (correct_index)
-+ credits += (EXT_DEPTH(tree) * EXT3_ALLOC_NEEDED) + 1;
-+ if (tree->ops->remove_extent_credits)
-+ credits+=tree->ops->remove_extent_credits(tree,ex,a,b);
-+
-+ handle = ext3_ext_journal_restart(handle, credits);
-+ if (IS_ERR(handle)) {
-+ err = PTR_ERR(handle);
-+ goto out;
-+ }
-+
-+ err = ext3_ext_get_access(handle, tree, path + depth);
-+ if (err)
-+ goto out;
-+
-+ if (tree->ops->remove_extent)
-+ err = tree->ops->remove_extent(tree, ex, a, b);
-+ if (err)
-+ goto out;
-+
-+ if (num == 0) {
-+ /* this extent is removed entirely; mark the slot unused */
-+ ex->ee_start = ex->ee_start_hi = 0;
-+ eh->eh_entries--;
-+ fu = ex;
-+ }
-+
-+ ex->ee_block = block;
-+ ex->ee_len = num;
-+
-+ err = ext3_ext_dirty(handle, tree, path + depth);
-+ if (err)
-+ goto out;
-+
-+ ext_debug(tree, "new extent: %u:%u:%u\n",
-+ ex->ee_block, ex->ee_len, ex->ee_start);
-+ ex--;
-+ }
-+
-+ if (fu) {
-+ /* reuse unused slots */
-+ while (lu < le) {
-+ if (lu->ee_start) {
-+ *fu = *lu;
-+ lu->ee_start = lu->ee_start_hi = 0;
-+ fu++;
-+ }
-+ lu++;
-+ }
-+ }
-+
-+ if (correct_index && eh->eh_entries)
-+ err = ext3_ext_correct_indexes(handle, tree, path);
-+
-+ /* if this leaf is free, then we should
-+ * remove it from index block above */
-+ if (err == 0 && eh->eh_entries == 0 && path[depth].p_bh != NULL)
-+ err = ext3_ext_rm_idx(handle, tree, path + depth);
-+
-+out:
-+ return err;
-+}
-+
-+
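-+/*
-+ * return the last index entry in 'hdr' whose logical block does not
-+ * exceed 'block' (or the first entry if none qualifies)
-+ */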
-+static struct ext3_extent_idx *
-+ext3_ext_last_covered(struct ext3_extent_header *hdr, unsigned long block)
-+{
-+ struct ext3_extent_idx *ix;
-+
-+ ix = EXT_LAST_INDEX(hdr);
-+ while (ix != EXT_FIRST_INDEX(hdr)) {
-+ if (ix->ei_block <= block)
-+ break;
-+ ix--;
-+ }
-+ return ix;
-+}
-+
-+/*
-+ * returns 1 if the current index has to be freed (even partially)
-+ */
-+static int inline
-+ext3_ext_more_to_rm(struct ext3_ext_path *path)
-+{
-+ EXT_ASSERT(path->p_idx);
-+
-+ if (path->p_idx < EXT_FIRST_INDEX(path->p_hdr))
-+ return 0;
-+
-+ /*
-+ * if truncation at a deeper level happened, it wasn't partial,
-+ * so we have to consider the current index for truncation
-+ */
-+ if (path->p_hdr->eh_entries == path->p_block)
-+ return 0;
-+ return 1;
-+}
-+
-+int ext3_ext_remove_space(struct ext3_extents_tree *tree,
-+ unsigned long start, unsigned long end)
-+{
-+ struct inode *inode = tree->inode;
-+ struct super_block *sb = inode->i_sb;
-+ int depth = EXT_DEPTH(tree);
-+ struct ext3_ext_path *path;
-+ handle_t *handle;
-+ int i = 0, err = 0;
-+
-+ ext_debug(tree, "space to be removed: %lu:%lu\n", start, end);
-+
-+ /* the first extent we're going to free will probably be the last in its block */
-+ handle = ext3_journal_start(inode, depth + 1);
-+ if (IS_ERR(handle))
-+ return PTR_ERR(handle);
-+
-+ ext3_ext_invalidate_cache(tree);
-+
-+ /*
-+ * we start scanning from the right side, freeing all the blocks
-+ * after i_size and walking down into the tree
-+ */
-+ path = kmalloc(sizeof(struct ext3_ext_path) * (depth + 1), GFP_KERNEL);
-+ if (path == NULL) {
-+ ext3_error(sb, __FUNCTION__, "Can't allocate path array");
-+ ext3_journal_stop(handle);
-+ return -ENOMEM;
-+ }
-+ memset(path, 0, sizeof(struct ext3_ext_path) * (depth + 1));
-+ path[i].p_hdr = EXT_ROOT_HDR(tree);
-+
-+ while (i >= 0 && err == 0) {
-+ if (i == depth) {
-+ /* this is leaf block */
-+ err = ext3_ext_rm_leaf(handle, tree, path, start, end);
-+ /* the root level has p_bh == NULL; brelse() handles NULL safely */
-+ brelse(path[i].p_bh);
-+ i--;
-+ continue;
-+ }
-+
-+ /* this is index block */
-+ if (!path[i].p_hdr) {
-+ ext_debug(tree, "initialize header\n");
-+ path[i].p_hdr = EXT_BLOCK_HDR(path[i].p_bh);
-+ }
-+
-+ EXT_ASSERT(path[i].p_hdr->eh_entries <= path[i].p_hdr->eh_max);
-+ EXT_ASSERT(path[i].p_hdr->eh_magic == EXT3_EXT_MAGIC);
-+
-+ if (!path[i].p_idx) {
-+ /* this level hasn't been touched yet */
-+ path[i].p_idx =
-+ ext3_ext_last_covered(path[i].p_hdr, end);
-+ path[i].p_block = path[i].p_hdr->eh_entries + 1;
-+ ext_debug(tree, "init index ptr: hdr 0x%p, num %d\n",
-+ path[i].p_hdr, path[i].p_hdr->eh_entries);
-+ } else {
-+ /* we were already here, so look at the next index */
-+ path[i].p_idx--;
-+ }
-+
-+ ext_debug(tree, "level %d - index, first 0x%p, cur 0x%p\n",
-+ i, EXT_FIRST_INDEX(path[i].p_hdr),
-+ path[i].p_idx);
-+ if (ext3_ext_more_to_rm(path + i)) {
-+ /* go to the next level */
-+ ext_debug(tree, "move to level %d (block %d)\n",
-+ i + 1, path[i].p_idx->ei_leaf);
-+ memset(path + i + 1, 0, sizeof(*path));
-+ path[i+1].p_bh = sb_bread(sb, path[i].p_idx->ei_leaf);
-+ if (!path[i+1].p_bh) {
-+ /* should we reset i_size? */
-+ err = -EIO;
-+ break;
-+ }
-+ /* record the actual number of indexes so we can tell
-+ * whether this number changes at the next iteration */
-+ path[i].p_block = path[i].p_hdr->eh_entries;
-+ i++;
-+ } else {
-+ /* we finish processing this index, go up */
-+ if (path[i].p_hdr->eh_entries == 0 && i > 0) {
-+ /* the index is empty, remove it;
-+ * the handle must already be prepared by the
-+ * truncatei_leaf() */
-+ err = ext3_ext_rm_idx(handle, tree, path + i);
-+ }
-+ /* the root level has p_bh == NULL; brelse() handles NULL safely */
-+ brelse(path[i].p_bh);
-+ i--;
-+ ext_debug(tree, "return to level %d\n", i);
-+ }
-+ }
-+
-+ /* TODO: flexible tree reduction should be here */
-+ if (path->p_hdr->eh_entries == 0) {
-+ /*
-+ * truncate to zero freed the whole tree,
-+ * so we need to correct eh_depth
-+ */
-+ err = ext3_ext_get_access(handle, tree, path);
-+ if (err == 0) {
-+ EXT_ROOT_HDR(tree)->eh_depth = 0;
-+ EXT_ROOT_HDR(tree)->eh_max = ext3_ext_space_root(tree);
-+ err = ext3_ext_dirty(handle, tree, path);
-+ }
-+ }
-+ ext3_ext_tree_changed(tree);
-+
-+ kfree(path);
-+ ext3_journal_stop(handle);
-+
-+ return err;
-+}
-+
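-+/*
-+ * pessimistic estimate of the leaf and index blocks needed to map
-+ * 'blocks' data blocks, assuming each block ends up in its own extent:
-+ * count the leaf blocks, then every level of index blocks, until the
-+ * remaining pointers fit into the in-inode root
-+ */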
-+int ext3_ext_calc_metadata_amount(struct ext3_extents_tree *tree, int blocks)
-+{
-+ int lcap, icap, rcap, leafs, idxs, num;
-+
-+ rcap = ext3_ext_space_root(tree);
-+ if (blocks <= rcap) {
-+ /* all extents fit into the root */
-+ return 0;
-+ }
-+
-+ rcap = ext3_ext_space_root_idx(tree);
-+ lcap = ext3_ext_space_block(tree);
-+ icap = ext3_ext_space_block_idx(tree);
-+
-+ num = leafs = (blocks + lcap - 1) / lcap;
-+ if (leafs <= rcap) {
-+ /* all pointers to leaf blocks fit into the root */
-+ return leafs;
-+ }
-+
-+ /* ok. we need separate index block(s) to link all leaf blocks */
-+ idxs = (leafs + icap - 1) / icap;
-+ do {
-+ num += idxs;
-+ idxs = (idxs + icap - 1) / icap;
-+ } while (idxs > rcap);
-+
-+ return num;
-+}
-+
-+/*
-+ * called at mount time
-+ */
-+void ext3_ext_init(struct super_block *sb)
-+{
-+ /*
-+ * possible initialization would be here
-+ */
-+
-+ if (test_opt(sb, EXTENTS)) {
-+ printk("EXT3-fs: file extents enabled");
-+#ifdef AGRESSIVE_TEST
-+ printk(", agressive tests");
-+#endif
-+#ifdef CHECK_BINSEARCH
-+ printk(", check binsearch");
-+#endif
-+ printk("\n");
-+ }
-+}
-+
-+/*
-+ * called at umount time
-+ */
-+void ext3_ext_release(struct super_block *sb)
-+{
-+}
-+
-+/************************************************************************
-+ * VFS related routines
-+ ************************************************************************/
-+
-+static int ext3_get_inode_write_access(handle_t *handle, void *buffer)
-+{
-+ /* we use in-core data, not bh */
-+ return 0;
-+}
-+
-+static int ext3_mark_buffer_dirty(handle_t *handle, void *buffer)
-+{
-+ struct inode *inode = buffer;
-+ return ext3_mark_inode_dirty(handle, inode);
-+}
-+
-+static int ext3_ext_mergable(struct ext3_extent *ex1,
-+ struct ext3_extent *ex2)
-+{
-+ /* FIXME: support for large fs */
-+ if (ex1->ee_start + ex1->ee_len == ex2->ee_start)
-+ return 1;
-+ return 0;
-+}
-+
-+static int
-+ext3_remove_blocks_credits(struct ext3_extents_tree *tree,
-+ struct ext3_extent *ex,
-+ unsigned long from, unsigned long to)
-+{
-+ int needed;
-+
-+ /* at present, an extent can't cross a block group boundary */
-+ needed = 4; /* bitmap + group desc + sb + inode */
-+
-+#ifdef CONFIG_QUOTA
-+ needed += 2 * EXT3_SINGLEDATA_TRANS_BLOCKS;
-+#endif
-+ return needed;
-+}
-+
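-+/*
-+ * remove_extent callback for the inode block map: only tail removal
-+ * (a range ending exactly at the last block of the extent) actually
-+ * frees blocks here; other removal patterns are unexpected during
-+ * truncate and are merely reported
-+ */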
-+static int
-+ext3_remove_blocks(struct ext3_extents_tree *tree,
-+ struct ext3_extent *ex,
-+ unsigned long from, unsigned long to)
-+{
-+ int needed = ext3_remove_blocks_credits(tree, ex, from, to);
-+ handle_t *handle = ext3_journal_start(tree->inode, needed);
-+ struct buffer_head *bh;
-+ int i;
-+
-+ if (IS_ERR(handle))
-+ return PTR_ERR(handle);
-+ if (from >= ex->ee_block && to == ex->ee_block + ex->ee_len - 1) {
-+ /* tail removal */
-+ unsigned long num, start;
-+ num = ex->ee_block + ex->ee_len - from;
-+ start = ex->ee_start + ex->ee_len - num;
-+ ext_debug(tree, "free last %lu blocks starting %lu\n",
-+ num, start);
-+ for (i = 0; i < num; i++) {
-+ bh = sb_find_get_block(tree->inode->i_sb, start + i);
-+ ext3_forget(handle, 0, tree->inode, bh, start + i);
-+ }
-+ ext3_free_blocks(handle, tree->inode, start, num);
-+ } else if (from == ex->ee_block && to <= ex->ee_block + ex->ee_len - 1) {
-+ printk("strange request: removal %lu-%lu from %u:%u\n",
-+ from, to, ex->ee_block, ex->ee_len);
-+ } else {
-+ printk("strange request: removal(2) %lu-%lu from %u:%u\n",
-+ from, to, ex->ee_block, ex->ee_len);
-+ }
-+ ext3_journal_stop(handle);
-+ return 0;
-+}
-+
-+static int ext3_ext_find_goal(struct inode *inode,
-+ struct ext3_ext_path *path, unsigned long block)
-+{
-+ struct ext3_inode_info *ei = EXT3_I(inode);
-+ unsigned long bg_start;
-+ unsigned long colour;
-+ int depth;
-+
-+ if (path) {
-+ struct ext3_extent *ex;
-+ depth = path->p_depth;
-+
-+ /* try to predict block placement */
-+ if ((ex = path[depth].p_ext))
-+ return ex->ee_start + (block - ex->ee_block);
-+
-+ /* it looks like the index is empty;
-+ * try to find a block starting from the index block itself */
-+ if (path[depth].p_bh)
-+ return path[depth].p_bh->b_blocknr;
-+ }
-+
-+ /* OK. use inode's group */
-+ bg_start = (ei->i_block_group * EXT3_BLOCKS_PER_GROUP(inode->i_sb)) +
-+ le32_to_cpu(EXT3_SB(inode->i_sb)->s_es->s_first_data_block);
-+ colour = (current->pid % 16) *
-+ (EXT3_BLOCKS_PER_GROUP(inode->i_sb) / 16);
-+ return bg_start + colour + block;
-+}
-+
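-+/*
-+ * new_block callback for the inode block map: steal the first block of
-+ * the extent being inserted for the tree node (so metadata lands next
-+ * to the data) and, if that empties the extent, refill it with a freshly
-+ * allocated data block; on allocation failure the stolen block is put
-+ * back and 0 is returned
-+ */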
-+static int ext3_new_block_cb(handle_t *handle, struct ext3_extents_tree *tree,
-+ struct ext3_ext_path *path,
-+ struct ext3_extent *ex, int *err)
-+{
-+ struct inode *inode = tree->inode;
-+ int newblock, goal;
-+
-+ EXT_ASSERT(path);
-+ EXT_ASSERT(ex);
-+ EXT_ASSERT(ex->ee_start);
-+ EXT_ASSERT(ex->ee_len);
-+
-+ /* reuse block from the extent to order data/metadata */
-+ newblock = ex->ee_start++;
-+ ex->ee_len--;
-+ if (ex->ee_len == 0) {
-+ ex->ee_len = 1;
-+ /* allocate new block for the extent */
-+ goal = ext3_ext_find_goal(inode, path, ex->ee_block);
-+ ex->ee_start = ext3_new_block(handle, inode, goal, err);
-+ ex->ee_start_hi = 0;
-+ if (ex->ee_start == 0) {
-+ /* an error occurred: restore the old extent */
-+ ex->ee_start = newblock;
-+ return 0;
-+ }
-+ }
-+ return newblock;
-+}
-+
-+static struct ext3_extents_helpers ext3_blockmap_helpers = {
-+ .get_write_access = ext3_get_inode_write_access,
-+ .mark_buffer_dirty = ext3_mark_buffer_dirty,
-+ .mergable = ext3_ext_mergable,
-+ .new_block = ext3_new_block_cb,
-+ .remove_extent = ext3_remove_blocks,
-+ .remove_extent_credits = ext3_remove_blocks_credits,
-+};
-+
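-+/*
-+ * set up an extents-tree descriptor for the regular inode block map:
-+ * the root lives in the inode's i_data, 'buffer' is the inode itself
-+ * (so the helpers above dirty the inode rather than a buffer_head),
-+ * and the single-entry extent cache sits in i_cached_extent
-+ */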
-+void ext3_init_tree_desc(struct ext3_extents_tree *tree,
-+ struct inode *inode)
-+{
-+ tree->inode = inode;
-+ tree->root = (void *) EXT3_I(inode)->i_data;
-+ tree->buffer = (void *) inode;
-+ tree->buffer_len = sizeof(EXT3_I(inode)->i_data);
-+ tree->cex = (struct ext3_ext_cache *) &EXT3_I(inode)->i_cached_extent;
-+ tree->ops = &ext3_blockmap_helpers;
-+}
-+
-+int ext3_ext_get_block(handle_t *handle, struct inode *inode,
-+ long iblock, struct buffer_head *bh_result,
-+ int create, int extend_disksize)
-+{
-+ struct ext3_ext_path *path = NULL;
-+ struct ext3_extent newex;
-+ struct ext3_extent *ex;
-+ int goal, newblock, err = 0, depth;
-+ struct ext3_extents_tree tree;
-+
-+ clear_buffer_new(bh_result);
-+ ext3_init_tree_desc(&tree, inode);
-+ ext_debug(&tree, "block %d requested for inode %u\n",
-+ (int) iblock, (unsigned) inode->i_ino);
-+ down(&EXT3_I(inode)->truncate_sem);
-+
-+ /* check in cache */
-+ if ((goal = ext3_ext_in_cache(&tree, iblock, &newex))) {
-+ if (goal == EXT3_EXT_CACHE_GAP) {
-+ if (!create) {
-+ /* the block isn't allocated yet and
-+ * the user doesn't want to allocate it */
-+ goto out2;
-+ }
-+ /* we should allocate requested block */
-+ } else if (goal == EXT3_EXT_CACHE_EXTENT) {
-+ /* block is already allocated */
-+ newblock = iblock - newex.ee_block + newex.ee_start;
-+ goto out;
-+ } else {
-+ EXT_ASSERT(0);
-+ }
-+ }
-+
-+ /* find extent for this block */
-+ path = ext3_ext_find_extent(&tree, iblock, NULL);
-+ if (IS_ERR(path)) {
-+ err = PTR_ERR(path);
-+ path = NULL;
-+ goto out2;
-+ }
-+
-+ depth = EXT_DEPTH(&tree);
-+
-+ /*
-+ * a consistent leaf must not be empty;
-+ * this situation is possible, though, _during_ tree modification,
-+ * which is why the assert can't be put in ext3_ext_find_extent()
-+ */
-+ EXT_ASSERT(path[depth].p_ext != NULL || depth == 0);
-+
-+ if ((ex = path[depth].p_ext)) {
-+ /* if the found extent covers the block, simply return it */
-+ if (iblock >= ex->ee_block && iblock < ex->ee_block + ex->ee_len) {
-+ newblock = iblock - ex->ee_block + ex->ee_start;
-+ ext_debug(&tree, "%d fit into %d:%d -> %d\n",
-+ (int) iblock, ex->ee_block, ex->ee_len,
-+ newblock);
-+ ext3_ext_put_in_cache(&tree, ex->ee_block,
-+ ex->ee_len, ex->ee_start,
-+ EXT3_EXT_CACHE_EXTENT);
-+ goto out;
-+ }
-+ }
-+
-+ /*
-+ * the requested block isn't allocated yet;
-+ * we must not try to create a block if the create flag is zero
-+ */
-+ if (!create) {
-+ /* put the just-found gap into the cache to speed up subsequent requests */
-+ ext3_ext_put_gap_in_cache(&tree, path, iblock);
-+ goto out2;
-+ }
-+
-+ /* allocate new block */
-+ goal = ext3_ext_find_goal(inode, path, iblock);
-+ newblock = ext3_new_block(handle, inode, goal, &err);
-+ if (!newblock)
-+ goto out2;
-+ ext_debug(&tree, "allocate new block: goal %d, found %d\n",
-+ goal, newblock);
-+
-+ /* try to insert new extent into found leaf and return */
-+ newex.ee_block = iblock;
-+ newex.ee_start = newblock;
-+ newex.ee_start_hi = 0;
-+ newex.ee_len = 1;
-+ err = ext3_ext_insert_extent(handle, &tree, path, &newex);
-+ if (err)
-+ goto out2;
-+
-+ if (extend_disksize && inode->i_size > EXT3_I(inode)->i_disksize)
-+ EXT3_I(inode)->i_disksize = inode->i_size;
-+
-+ /* the previous routine could have used the block we allocated */
-+ newblock = newex.ee_start;
-+ set_buffer_new(bh_result);
-+
-+ ext3_ext_put_in_cache(&tree, newex.ee_block, newex.ee_len,
-+ newex.ee_start, EXT3_EXT_CACHE_EXTENT);
-+out:
-+ ext3_ext_show_leaf(&tree, path);
-+ map_bh(bh_result, inode->i_sb, newblock);
-+out2:
-+ if (path) {
-+ ext3_ext_drop_refs(path);
-+ kfree(path);
-+ }
-+ up(&EXT3_I(inode)->truncate_sem);
-+
-+ return err;
-+}
-+
-+void ext3_ext_truncate(struct inode * inode, struct page *page)
-+{
-+ struct address_space *mapping = inode->i_mapping;
-+ struct super_block *sb = inode->i_sb;
-+ struct ext3_extents_tree tree;
-+ unsigned long last_block;
-+ handle_t *handle;
-+ int err = 0;
-+
-+ ext3_init_tree_desc(&tree, inode);
-+
-+ /*
-+ * probably the first extent we are going to free will be the last in the block
-+ */
-+ err = ext3_writepage_trans_blocks(inode) + 3;
-+ handle = ext3_journal_start(inode, err);
-+ if (IS_ERR(handle)) {
-+ if (page) {
-+ clear_highpage(page);
-+ flush_dcache_page(page);
-+ unlock_page(page);
-+ page_cache_release(page);
-+ }
-+ return;
-+ }
-+
-+ if (page)
-+ ext3_block_truncate_page(handle, page, mapping, inode->i_size);
-+
-+ down(&EXT3_I(inode)->truncate_sem);
-+ ext3_ext_invalidate_cache(&tree);
-+
-+ /*
-+ * TODO: optimization is possible here
-+ * we probably do not need any scanning at all,
-+ * because page truncation is enough
-+ */
-+ if (ext3_orphan_add(handle, inode))
-+ goto out_stop;
-+
-+ /* we have to know where to truncate from in crash case */
-+ EXT3_I(inode)->i_disksize = inode->i_size;
-+ ext3_mark_inode_dirty(handle, inode);
-+
-+ last_block = (inode->i_size + sb->s_blocksize - 1) >>
-+ EXT3_BLOCK_SIZE_BITS(sb);
-+ err = ext3_ext_remove_space(&tree, last_block, EXT_MAX_BLOCK);
-+
-+ /* In a multi-transaction truncate, we only make the final
-+ * transaction synchronous */
-+ if (IS_SYNC(inode))
-+ handle->h_sync = 1;
-+
-+out_stop:
-+ /*
-+ * If this was a simple ftruncate(), and the file will remain alive
-+ * then we need to clear up the orphan record which we created above.
-+ * However, if this was a real unlink then we were called by
-+ * ext3_delete_inode(), and we allow that function to clean up the
-+ * orphan info for us.
-+ */
-+ if (inode->i_nlink)
-+ ext3_orphan_del(handle, inode);
-+
-+ up(&EXT3_I(inode)->truncate_sem);
-+ ext3_journal_stop(handle);
-+}
-+
-+/*
-+ * this routine calculates the max number of blocks we could modify
-+ * in order to allocate a new block for an inode
-+ */
-+int ext3_ext_writepage_trans_blocks(struct inode *inode, int num)
-+{
-+ struct ext3_extents_tree tree;
-+ int needed;
-+
-+ ext3_init_tree_desc(&tree, inode);
-+
-+ needed = ext3_ext_calc_credits_for_insert(&tree, NULL);
-+
-+ /* the caller wants to allocate num blocks */
-+ needed *= num;
-+
-+#ifdef CONFIG_QUOTA
-+ /*
-+ * FIXME: the real calculation should be here;
-+ * it depends on the blockmap format of the quota file
-+ */
-+ needed += 2 * EXT3_SINGLEDATA_TRANS_BLOCKS;
-+#endif
-+
-+ return needed;
-+}
-+
-+void ext3_extents_initialize_blockmap(handle_t *handle, struct inode *inode)
-+{
-+ struct ext3_extents_tree tree;
-+
-+ ext3_init_tree_desc(&tree, inode);
-+ ext3_extent_tree_init(handle, &tree);
-+}
-+
-+int ext3_ext_calc_blockmap_metadata(struct inode *inode, int blocks)
-+{
-+ struct ext3_extents_tree tree;
-+
-+ ext3_init_tree_desc(&tree, inode);
-+ return ext3_ext_calc_metadata_amount(&tree, blocks);
-+}
-+
-+static int
-+ext3_ext_store_extent_cb(struct ext3_extents_tree *tree,
-+ struct ext3_ext_path *path,
-+ struct ext3_ext_cache *newex)
-+{
-+ struct ext3_extent_buf *buf = (struct ext3_extent_buf *) tree->private;
-+
-+ if (newex->ec_type != EXT3_EXT_CACHE_EXTENT)
-+ return EXT_CONTINUE;
-+
-+ if (buf->err < 0)
-+ return EXT_BREAK;
-+ if (buf->cur - buf->buffer + sizeof(*newex) > buf->buflen)
-+ return EXT_BREAK;
-+
-+ if (!copy_to_user(buf->cur, newex, sizeof(*newex))) {
-+ buf->err++;
-+ buf->cur += sizeof(*newex);
-+ } else {
-+ buf->err = -EFAULT;
-+ return EXT_BREAK;
-+ }
-+ return EXT_CONTINUE;
-+}
-+
-+static int
-+ext3_ext_collect_stats_cb(struct ext3_extents_tree *tree,
-+ struct ext3_ext_path *path,
-+ struct ext3_ext_cache *ex)
-+{
-+ struct ext3_extent_tree_stats *buf =
-+ (struct ext3_extent_tree_stats *) tree->private;
-+ int depth;
-+
-+ if (ex->ec_type != EXT3_EXT_CACHE_EXTENT)
-+ return EXT_CONTINUE;
-+
-+ depth = EXT_DEPTH(tree);
-+ buf->extents_num++;
-+ if (path[depth].p_ext == EXT_FIRST_EXTENT(path[depth].p_hdr))
-+ buf->leaf_num++;
-+ return EXT_CONTINUE;
-+}
-+
-+int ext3_ext_ioctl(struct inode *inode, struct file *filp, unsigned int cmd,
-+ unsigned long arg)
-+{
-+ int err = 0;
-+
-+ if (!(EXT3_I(inode)->i_flags & EXT3_EXTENTS_FL))
-+ return -EINVAL;
-+
-+ if (cmd == EXT3_IOC_GET_EXTENTS) {
-+ struct ext3_extent_buf buf;
-+ struct ext3_extents_tree tree;
-+
-+ if (copy_from_user(&buf, (void *) arg, sizeof(buf)))
-+ return -EFAULT;
-+
-+ ext3_init_tree_desc(&tree, inode);
-+ buf.cur = buf.buffer;
-+ buf.err = 0;
-+ tree.private = &buf;
-+ down(&EXT3_I(inode)->truncate_sem);
-+ err = ext3_ext_walk_space(&tree, buf.start, EXT_MAX_BLOCK,
-+ ext3_ext_store_extent_cb);
-+ up(&EXT3_I(inode)->truncate_sem);
-+ if (err == 0)
-+ err = buf.err;
-+ } else if (cmd == EXT3_IOC_GET_TREE_STATS) {
-+ struct ext3_extent_tree_stats buf;
-+ struct ext3_extents_tree tree;
-+
-+ ext3_init_tree_desc(&tree, inode);
-+ down(&EXT3_I(inode)->truncate_sem);
-+ buf.depth = EXT_DEPTH(&tree);
-+ buf.extents_num = 0;
-+ buf.leaf_num = 0;
-+ tree.private = &buf;
-+ err = ext3_ext_walk_space(&tree, 0, EXT_MAX_BLOCK,
-+ ext3_ext_collect_stats_cb);
-+ up(&EXT3_I(inode)->truncate_sem);
-+ if (!err)
-+ err = copy_to_user((void *) arg, &buf, sizeof(buf));
-+ } else if (cmd == EXT3_IOC_GET_TREE_DEPTH) {
-+ struct ext3_extents_tree tree;
-+ ext3_init_tree_desc(&tree, inode);
-+ down(&EXT3_I(inode)->truncate_sem);
-+ err = EXT_DEPTH(&tree);
-+ up(&EXT3_I(inode)->truncate_sem);
-+ }
-+
-+ return err;
-+}
-+
-+EXPORT_SYMBOL(ext3_init_tree_desc);
-+EXPORT_SYMBOL(ext3_mark_inode_dirty);
-+EXPORT_SYMBOL(ext3_ext_invalidate_cache);
-+EXPORT_SYMBOL(ext3_ext_insert_extent);
-+EXPORT_SYMBOL(ext3_ext_walk_space);
-+EXPORT_SYMBOL(ext3_ext_find_goal);
-+EXPORT_SYMBOL(ext3_ext_calc_credits_for_insert);
-Index: linux-2.6.16.27-0.9/fs/ext3/ialloc.c
-===================================================================
---- linux-2.6.16.27-0.9.orig/fs/ext3/ialloc.c
-+++ linux-2.6.16.27-0.9/fs/ext3/ialloc.c
-@@ -601,7 +601,7 @@ got:
- ei->i_dir_start_lookup = 0;
- ei->i_disksize = 0;
-
-- ei->i_flags = EXT3_I(dir)->i_flags & ~EXT3_INDEX_FL;
-+ ei->i_flags = EXT3_I(dir)->i_flags & ~(EXT3_INDEX_FL|EXT3_EXTENTS_FL);
- if (S_ISLNK(mode))
- ei->i_flags &= ~(EXT3_IMMUTABLE_FL|EXT3_APPEND_FL);
- /* dirsync only applies to directories */
-@@ -645,6 +645,18 @@ got:
- if (err)
- goto fail_free_drop;
-
-+ if (test_opt(sb, EXTENTS) && S_ISREG(inode->i_mode)) {
-+ EXT3_I(inode)->i_flags |= EXT3_EXTENTS_FL;
-+ ext3_extents_initialize_blockmap(handle, inode);
-+ if (!EXT3_HAS_INCOMPAT_FEATURE(sb, EXT3_FEATURE_INCOMPAT_EXTENTS)) {
-+ err = ext3_journal_get_write_access(handle, EXT3_SB(sb)->s_sbh);
-+ if (err) goto fail;
-+ EXT3_SET_INCOMPAT_FEATURE(sb, EXT3_FEATURE_INCOMPAT_EXTENTS);
-+ BUFFER_TRACE(EXT3_SB(sb)->s_sbh, "call ext3_journal_dirty_metadata");
-+ err = ext3_journal_dirty_metadata(handle, EXT3_SB(sb)->s_sbh);
-+ }
-+ }
-+
- err = ext3_mark_inode_dirty(handle, inode);
- if (err) {
- ext3_std_error(sb, err);
-Index: linux-2.6.16.27-0.9/fs/ext3/inode.c
-===================================================================
---- linux-2.6.16.27-0.9.orig/fs/ext3/inode.c
-+++ linux-2.6.16.27-0.9/fs/ext3/inode.c
-@@ -40,7 +40,7 @@
- #include "iopen.h"
- #include "acl.h"
-
--static int ext3_writepage_trans_blocks(struct inode *inode);
-+int ext3_writepage_trans_blocks(struct inode *inode);
-
- /*
- * Test whether an inode is a fast symlink.
-@@ -788,6 +788,17 @@ out:
- return err;
- }
-
-+static inline int
-+ext3_get_block_wrap(handle_t *handle, struct inode *inode, long block,
-+ struct buffer_head *bh, int create, int extend_disksize)
-+{
-+ if (EXT3_I(inode)->i_flags & EXT3_EXTENTS_FL)
-+ return ext3_ext_get_block(handle, inode, block, bh, create,
-+ extend_disksize);
-+ return ext3_get_block_handle(handle, inode, block, bh, create,
-+ extend_disksize);
-+}
-+
- static int ext3_get_block(struct inode *inode, sector_t iblock,
- struct buffer_head *bh_result, int create)
- {
-@@ -798,8 +809,8 @@ static int ext3_get_block(struct inode *
- handle = ext3_journal_current_handle();
- J_ASSERT(handle != 0);
- }
-- ret = ext3_get_block_handle(handle, inode, iblock,
-- bh_result, create, 1);
-+ ret = ext3_get_block_wrap(handle, inode, iblock,
-+ bh_result, create, 1);
- return ret;
- }
-
-@@ -843,7 +854,7 @@ ext3_direct_io_get_blocks(struct inode *
-
- get_block:
- if (ret == 0)
-- ret = ext3_get_block_handle(handle, inode, iblock,
-+ ret = ext3_get_block_wrap(handle, inode, iblock,
- bh_result, create, 0);
- bh_result->b_size = (1 << inode->i_blkbits);
- return ret;
-@@ -863,7 +874,7 @@ struct buffer_head *ext3_getblk(handle_t
- dummy.b_state = 0;
- dummy.b_blocknr = -1000;
- buffer_trace_init(&dummy.b_history);
-- *errp = ext3_get_block_handle(handle, inode, block, &dummy, create, 1);
-+ *errp = ext3_get_block_wrap(handle, inode, block, &dummy, create, 1);
- if (!*errp && buffer_mapped(&dummy)) {
- struct buffer_head *bh;
- bh = sb_getblk(inode->i_sb, dummy.b_blocknr);
-@@ -1606,7 +1617,7 @@ void ext3_set_aops(struct inode *inode)
- * This required during truncate. We need to physically zero the tail end
- * of that block so it doesn't yield old data if the file is later grown.
- */
--static int ext3_block_truncate_page(handle_t *handle, struct page *page,
-+int ext3_block_truncate_page(handle_t *handle, struct page *page,
- struct address_space *mapping, loff_t from)
- {
- unsigned long index = from >> PAGE_CACHE_SHIFT;
-@@ -2116,6 +2127,9 @@ void ext3_truncate(struct inode * inode)
- return;
- }
-
-+ if (EXT3_I(inode)->i_flags & EXT3_EXTENTS_FL)
-+ return ext3_ext_truncate(inode, page);
-+
- handle = start_transaction(inode);
- if (IS_ERR(handle)) {
- if (page) {
-@@ -2863,12 +2877,15 @@ err_out:
- * block and work out the exact number of indirects which are touched. Pah.
- */
-
--static int ext3_writepage_trans_blocks(struct inode *inode)
-+int ext3_writepage_trans_blocks(struct inode *inode)
- {
- int bpp = ext3_journal_blocks_per_page(inode);
- int indirects = (EXT3_NDIR_BLOCKS % bpp) ? 5 : 3;
- int ret;
-
-+ if (EXT3_I(inode)->i_flags & EXT3_EXTENTS_FL)
-+ return ext3_ext_writepage_trans_blocks(inode, bpp);
-+
- if (ext3_should_journal_data(inode))
- ret = 3 * (bpp + indirects) + 2;
- else
-Index: linux-2.6.16.27-0.9/fs/ext3/Makefile
-===================================================================
---- linux-2.6.16.27-0.9.orig/fs/ext3/Makefile
-+++ linux-2.6.16.27-0.9/fs/ext3/Makefile
-@@ -5,7 +5,8 @@
- obj-$(CONFIG_EXT3_FS) += ext3.o
-
- ext3-y := balloc.o bitmap.o dir.o file.o fsync.o ialloc.o inode.o iopen.o \
-- ioctl.o namei.o super.o symlink.o hash.o resize.o
-+ ioctl.o namei.o super.o symlink.o hash.o resize.o \
-+ extents.o
-
- ext3-$(CONFIG_EXT3_FS_XATTR) += xattr.o xattr_user.o xattr_trusted.o
- ext3-$(CONFIG_EXT3_FS_POSIX_ACL) += acl.o
-Index: linux-2.6.16.27-0.9/fs/ext3/super.c
-===================================================================
---- linux-2.6.16.27-0.9.orig/fs/ext3/super.c
-+++ linux-2.6.16.27-0.9/fs/ext3/super.c
-@@ -392,6 +392,7 @@ static void ext3_put_super (struct super
- struct ext3_super_block *es = sbi->s_es;
- int i;
-
-+ ext3_ext_release(sb);
- ext3_xattr_put_super(sb);
- journal_destroy(sbi->s_journal);
- if (!(sb->s_flags & MS_RDONLY)) {
-@@ -456,6 +457,8 @@ static struct inode *ext3_alloc_inode(st
- #endif
- ei->i_block_alloc_info = NULL;
- ei->vfs_inode.i_version = 1;
-+
-+ memset(&ei->i_cached_extent, 0, sizeof(ei->i_cached_extent));
- return &ei->vfs_inode;
- }
-
-@@ -681,6 +684,7 @@ enum {
- Opt_jqfmt_vfsold, Opt_jqfmt_vfsv0, Opt_quota, Opt_noquota,
- Opt_ignore, Opt_barrier, Opt_err, Opt_resize, Opt_usrquota,
- Opt_iopen, Opt_noiopen, Opt_iopen_nopriv,
-+ Opt_extents, Opt_noextents, Opt_extdebug,
- Opt_grpquota
- };
-
-@@ -732,6 +736,9 @@ static match_table_t tokens = {
- {Opt_iopen, "iopen"},
- {Opt_noiopen, "noiopen"},
- {Opt_iopen_nopriv, "iopen_nopriv"},
-+ {Opt_extents, "extents"},
-+ {Opt_noextents, "noextents"},
-+ {Opt_extdebug, "extdebug"},
- {Opt_barrier, "barrier=%u"},
- {Opt_err, NULL},
- {Opt_resize, "resize"},
-@@ -1073,6 +1080,15 @@ clear_qf_name:
- case Opt_nobh:
- set_opt(sbi->s_mount_opt, NOBH);
- break;
-+ case Opt_extents:
-+ set_opt (sbi->s_mount_opt, EXTENTS);
-+ break;
-+ case Opt_noextents:
-+ clear_opt (sbi->s_mount_opt, EXTENTS);
-+ break;
-+ case Opt_extdebug:
-+ set_opt (sbi->s_mount_opt, EXTDEBUG);
-+ break;
- default:
- printk (KERN_ERR
- "EXT3-fs: Unrecognized mount option \"%s\" "
-@@ -1799,6 +1815,7 @@ static int ext3_fill_super (struct super
- percpu_counter_mod(&sbi->s_dirs_counter,
- ext3_count_dirs(sb));
-
-+ ext3_ext_init(sb);
- lock_kernel();
- return 0;
-
-Index: linux-2.6.16.27-0.9/fs/ext3/ioctl.c
-===================================================================
---- linux-2.6.16.27-0.9.orig/fs/ext3/ioctl.c
-+++ linux-2.6.16.27-0.9/fs/ext3/ioctl.c
-@@ -125,6 +125,10 @@ flags_err:
- err = ext3_change_inode_journal_flag(inode, jflag);
- return err;
- }
-+ case EXT3_IOC_GET_EXTENTS:
-+ case EXT3_IOC_GET_TREE_STATS:
-+ case EXT3_IOC_GET_TREE_DEPTH:
-+ return ext3_ext_ioctl(inode, filp, cmd, arg);
- case EXT3_IOC_GETVERSION:
- case EXT3_IOC_GETVERSION_OLD:
- return put_user(inode->i_generation, (int __user *) arg);
-Index: linux-2.6.16.27-0.9/include/linux/ext3_fs.h
-===================================================================
---- linux-2.6.16.27-0.9.orig/include/linux/ext3_fs.h
-+++ linux-2.6.16.27-0.9/include/linux/ext3_fs.h
-@@ -185,9 +185,10 @@ struct ext3_group_desc
- #define EXT3_NOTAIL_FL 0x00008000 /* file tail should not be merged */
- #define EXT3_DIRSYNC_FL 0x00010000 /* dirsync behaviour (directories only) */
- #define EXT3_TOPDIR_FL 0x00020000 /* Top of directory hierarchies*/
-+#define EXT3_EXTENTS_FL 0x00080000 /* Inode uses extents */
- #define EXT3_RESERVED_FL 0x80000000 /* reserved for ext3 lib */
-
--#define EXT3_FL_USER_VISIBLE 0x0003DFFF /* User visible flags */
-+#define EXT3_FL_USER_VISIBLE 0x000BDFFF /* User visible flags */
- #define EXT3_FL_USER_MODIFIABLE 0x000380FF /* User modifiable flags */
-
- /*
-@@ -237,6 +238,9 @@ struct ext3_new_group_data {
- #endif
- #define EXT3_IOC_GETRSVSZ _IOR('f', 5, long)
- #define EXT3_IOC_SETRSVSZ _IOW('f', 6, long)
-+#define EXT3_IOC_GET_EXTENTS _IOR('f', 7, long)
-+#define EXT3_IOC_GET_TREE_DEPTH _IOR('f', 8, long)
-+#define EXT3_IOC_GET_TREE_STATS _IOR('f', 9, long)
-
- /*
- * Mount options
-@@ -377,6 +381,8 @@ struct ext3_inode {
- #define EXT3_MOUNT_GRPQUOTA 0x200000 /* "old" group quota */
- #define EXT3_MOUNT_IOPEN 0x400000 /* Allow access via iopen */
- #define EXT3_MOUNT_IOPEN_NOPRIV 0x800000/* Make iopen world-readable */
-+#define EXT3_MOUNT_EXTENTS 0x1000000/* Extents support */
-+#define EXT3_MOUNT_EXTDEBUG 0x2000000/* Extents debug */
-
- /* Compatibility, for having both ext2_fs.h and ext3_fs.h included at once */
- #ifndef clear_opt
-@@ -565,11 +571,13 @@ static inline struct ext3_inode_info *EX
- #define EXT3_FEATURE_INCOMPAT_RECOVER 0x0004 /* Needs recovery */
- #define EXT3_FEATURE_INCOMPAT_JOURNAL_DEV 0x0008 /* Journal device */
- #define EXT3_FEATURE_INCOMPAT_META_BG 0x0010
-+#define EXT3_FEATURE_INCOMPAT_EXTENTS 0x0040 /* extents support */
-
- #define EXT3_FEATURE_COMPAT_SUPP EXT2_FEATURE_COMPAT_EXT_ATTR
- #define EXT3_FEATURE_INCOMPAT_SUPP (EXT3_FEATURE_INCOMPAT_FILETYPE| \
- EXT3_FEATURE_INCOMPAT_RECOVER| \
-- EXT3_FEATURE_INCOMPAT_META_BG)
-+ EXT3_FEATURE_INCOMPAT_META_BG| \
-+ EXT3_FEATURE_INCOMPAT_EXTENTS)
- #define EXT3_FEATURE_RO_COMPAT_SUPP (EXT3_FEATURE_RO_COMPAT_SPARSE_SUPER| \
- EXT3_FEATURE_RO_COMPAT_LARGE_FILE| \
- EXT3_FEATURE_RO_COMPAT_BTREE_DIR)
-@@ -776,6 +784,7 @@ extern unsigned long ext3_count_free (st
-
-
- /* inode.c */
-+extern int ext3_block_truncate_page(handle_t *, struct page *, struct address_space *, loff_t);
- int ext3_forget(handle_t *, int, struct inode *, struct buffer_head *, int);
- struct buffer_head * ext3_getblk (handle_t *, struct inode *, long, int, int *);
- struct buffer_head * ext3_bread (handle_t *, struct inode *, int, int, int *);
-@@ -795,6 +804,7 @@ extern int ext3_get_inode_loc(struct ino
- extern void ext3_truncate (struct inode *);
- extern void ext3_set_inode_flags(struct inode *);
- extern void ext3_set_aops(struct inode *inode);
-+extern int ext3_writepage_trans_blocks(struct inode *inode);
-
- /* ioctl.c */
- extern int ext3_ioctl (struct inode *, struct file *, unsigned int,
-@@ -848,6 +858,16 @@ extern struct inode_operations ext3_spec
- extern struct inode_operations ext3_symlink_inode_operations;
- extern struct inode_operations ext3_fast_symlink_inode_operations;
-
-+/* extents.c */
-+extern int ext3_ext_writepage_trans_blocks(struct inode *, int);
-+extern int ext3_ext_get_block(handle_t *, struct inode *, long,
-+ struct buffer_head *, int, int);
-+extern void ext3_ext_truncate(struct inode *, struct page *);
-+extern void ext3_ext_init(struct super_block *);
-+extern void ext3_ext_release(struct super_block *);
-+extern void ext3_extents_initialize_blockmap(handle_t *, struct inode *);
-+extern int ext3_ext_ioctl(struct inode *inode, struct file *filp,
-+ unsigned int cmd, unsigned long arg);
-
- #endif /* __KERNEL__ */
-
-Index: linux-2.6.16.27-0.9/include/linux/ext3_extents.h
-===================================================================
---- /dev/null
-+++ linux-2.6.16.27-0.9/include/linux/ext3_extents.h
-@@ -0,0 +1,262 @@
-+/*
-+ * Copyright (c) 2003, Cluster File Systems, Inc, info@clusterfs.com
-+ * Written by Alex Tomas <alex@clusterfs.com>
-+ *
-+ * This program is free software; you can redistribute it and/or modify
-+ * it under the terms of the GNU General Public License version 2 as
-+ * published by the Free Software Foundation.
-+ *
-+ * This program is distributed in the hope that it will be useful,
-+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
-+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-+ * GNU General Public License for more details.
-+ *
-+ * You should have received a copy of the GNU General Public License
-+ * along with this program; if not, write to the Free Software
-+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-
-+ */
-+
-+#ifndef _LINUX_EXT3_EXTENTS
-+#define _LINUX_EXT3_EXTENTS
-+
-+/*
-+ * with AGRESSIVE_TEST defined, the capacity of index/leaf blocks
-+ * becomes very small, so index splits, in-depth growing and
-+ * other hard changes happen much more often;
-+ * this is for debug purposes only
-+ */
-+#define AGRESSIVE_TEST_
-+
-+/*
-+ * if CHECK_BINSEARCH is defined, then the results of the binary search
-+ * will be checked by a linear search
-+ */
-+#define CHECK_BINSEARCH_
-+
-+/*
-+ * if EXT_DEBUG is defined, you can use the 'extdebug' mount option
-+ * to get lots of info about what's going on
-+ */
-+#define EXT_DEBUG_
-+#ifdef EXT_DEBUG
-+#define ext_debug(tree,fmt,a...) \
-+do { \
-+ if (test_opt((tree)->inode->i_sb, EXTDEBUG)) \
-+ printk(fmt, ##a); \
-+} while (0)
-+#else
-+#define ext_debug(tree,fmt,a...)
-+#endif
-+
-+/*
-+ * if EXT_STATS is defined then stats numbers are collected;
-+ * these numbers will be displayed at umount time
-+ */
-+#define EXT_STATS_
-+
-+
-+#define EXT3_ALLOC_NEEDED 3 /* block bitmap + group desc. + sb */
-+
-+/*
-+ * ext3_inode has i_block array (total 60 bytes)
-+ * the first 4 bytes are used to store:
-+ * - tree depth (0 means there is no tree yet; all extents are in the inode)
-+ * - number of alive extents in the inode
-+ */
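-+/*
-+ * e.g. with the 60-byte i_data root: the 12-byte ext3_extent_header leaves
-+ * (60 - 12) / 12 = 4 slots for extents or index entries in the inode itself
-+ * (cf. ext3_ext_space_root() and ext3_ext_space_root_idx() in extents.c)
-+ */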
-+
-+/*
-+ * this is the extent on-disk structure;
-+ * it's used at the bottom of the tree
-+ */
-+struct ext3_extent {
-+ __u32 ee_block; /* first logical block extent covers */
-+ __u16 ee_len; /* number of blocks covered by extent */
-+ __u16 ee_start_hi; /* high 16 bits of physical block */
-+ __u32 ee_start; /* low 32 bits of physical block */
-+};
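-+/* the full physical block number of an extent is
-+ * ((__u64)ee_start_hi << 32) | ee_start */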
-+
-+/*
-+ * this is the index on-disk structure;
-+ * it's used at all the levels except the bottom
-+ */
-+struct ext3_extent_idx {
-+ __u32 ei_block; /* index covers logical blocks from 'block' */
-+ __u32 ei_leaf; /* pointer to the physical block of the next *
-+ * level. leaf or next index could be here */
-+ __u16 ei_leaf_hi; /* high 16 bits of physical block */
-+ __u16 ei_unused;
-+};
-+
-+/*
-+ * each block (leaf or index), even the inode-stored one, has a header
-+ */
-+struct ext3_extent_header {
-+ __u16 eh_magic; /* probably will support different formats */
-+ __u16 eh_entries; /* number of valid entries */
-+ __u16 eh_max; /* capacity of store in entries */
-+ __u16 eh_depth; /* has the tree real underlying blocks? */
-+ __u32 eh_generation; /* flags(8 bits) | generation of the tree */
-+};
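-+/* the low 24 bits of eh_generation hold the tree generation and the
-+ * high 8 bits hold flags; see EXT_HDR_GEN() and EXT_FLAGS() below */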
-+
-+#define EXT3_EXT_MAGIC 0xf30a
-+
-+/*
-+ * an array of ext3_ext_path contains the path to some extent;
-+ * creation/lookup routines use it for traversal/splitting/etc.,
-+ * truncate uses it to simulate recursive walking
-+ */
-+struct ext3_ext_path {
-+ __u32 p_block;
-+ __u16 p_depth;
-+ struct ext3_extent *p_ext;
-+ struct ext3_extent_idx *p_idx;
-+ struct ext3_extent_header *p_hdr;
-+ struct buffer_head *p_bh;
-+};
-+
-+/*
-+ * structure for external API
-+ */
-+
-+/*
-+ * storage for cached extent
-+ */
-+struct ext3_ext_cache {
-+ __u32 ec_start;
-+ __u32 ec_block;
-+ __u32 ec_len;
-+ __u32 ec_type;
-+};
-+
-+#define EXT3_EXT_CACHE_NO 0
-+#define EXT3_EXT_CACHE_GAP 1
-+#define EXT3_EXT_CACHE_EXTENT 2
-+
-+/*
-+ * ext3_extents_tree is used to pass initial information
-+ * to top-level extents API
-+ */
-+struct ext3_extents_helpers;
-+struct ext3_extents_tree {
-+ struct inode *inode; /* inode which tree belongs to */
-+ void *root; /* ptr to the data the top of the tree resides at */
-+ void *buffer; /* will be passed as arg to ^^ routines */
-+ int buffer_len;
-+ void *private;
-+ struct ext3_ext_cache *cex;/* last found extent */
-+ struct ext3_extents_helpers *ops;
-+};
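-+/* a tree descriptor is normally filled in by ext3_init_tree_desc()
-+ * before any of the routines declared below are called */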
-+
-+struct ext3_extents_helpers {
-+ int (*get_write_access)(handle_t *h, void *buffer);
-+ int (*mark_buffer_dirty)(handle_t *h, void *buffer);
-+ int (*mergable)(struct ext3_extent *ex1, struct ext3_extent *ex2);
-+ int (*remove_extent_credits)(struct ext3_extents_tree *,
-+ struct ext3_extent *, unsigned long,
-+ unsigned long);
-+ int (*remove_extent)(struct ext3_extents_tree *,
-+ struct ext3_extent *, unsigned long,
-+ unsigned long);
-+ int (*new_block)(handle_t *, struct ext3_extents_tree *,
-+ struct ext3_ext_path *, struct ext3_extent *,
-+ int *);
-+};
-+
-+/*
-+ * to be called by ext3_ext_walk_space()
-+ * negative retcode - error
-+ * positive retcode - signal for ext3_ext_walk_space(), see below
-+ * callback must return valid extent (passed or newly created)
-+ */
-+typedef int (*ext_prepare_callback)(struct ext3_extents_tree *,
-+ struct ext3_ext_path *,
-+ struct ext3_ext_cache *);
-+
-+#define EXT_CONTINUE 0
-+#define EXT_BREAK 1
-+#define EXT_REPEAT 2
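-+/* e.g. a callback typically returns EXT_CONTINUE to keep walking and
-+ * EXT_BREAK to stop early; see ext3_ext_store_extent_cb() in extents.c */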
-+
-+
-+#define EXT_MAX_BLOCK 0xffffffff
-+
-+
-+#define EXT_FIRST_EXTENT(__hdr__) \
-+ ((struct ext3_extent *) (((char *) (__hdr__)) + \
-+ sizeof(struct ext3_extent_header)))
-+#define EXT_FIRST_INDEX(__hdr__) \
-+ ((struct ext3_extent_idx *) (((char *) (__hdr__)) + \
-+ sizeof(struct ext3_extent_header)))
-+#define EXT_HAS_FREE_INDEX(__path__) \
-+ ((__path__)->p_hdr->eh_entries < (__path__)->p_hdr->eh_max)
-+#define EXT_LAST_EXTENT(__hdr__) \
-+ (EXT_FIRST_EXTENT((__hdr__)) + (__hdr__)->eh_entries - 1)
-+#define EXT_LAST_INDEX(__hdr__) \
-+ (EXT_FIRST_INDEX((__hdr__)) + (__hdr__)->eh_entries - 1)
-+#define EXT_MAX_EXTENT(__hdr__) \
-+ (EXT_FIRST_EXTENT((__hdr__)) + (__hdr__)->eh_max - 1)
-+#define EXT_MAX_INDEX(__hdr__) \
-+ (EXT_FIRST_INDEX((__hdr__)) + (__hdr__)->eh_max - 1)
-+#define EXT_HDR_GEN(__hdr__) ((__hdr__)->eh_generation & 0x00ffffff)
-+#define EXT_FLAGS(__hdr__) ((__hdr__)->eh_generation >> 24)
-+#define EXT_FLAGS_CLR_UNKNOWN 0x7 /* Flags cleared on modification */
-+
-+#define EXT_BLOCK_HDR(__bh__) ((struct ext3_extent_header *)(__bh__)->b_data)
-+#define EXT_ROOT_HDR(__tree__) ((struct ext3_extent_header *)(__tree__)->root)
-+#define EXT_DEPTH(__tree__) (EXT_ROOT_HDR(__tree__)->eh_depth)
-+#define EXT_GENERATION(__tree__) EXT_HDR_GEN(EXT_ROOT_HDR(__tree__))
-+
-+#define EXT_ASSERT(__x__) if (!(__x__)) BUG();
-+
-+#define EXT_CHECK_PATH(tree,path) \
-+{ \
-+ int depth = EXT_DEPTH(tree); \
-+ BUG_ON((unsigned long) (path) < __PAGE_OFFSET); \
-+ BUG_ON((unsigned long) (path)[depth].p_idx < \
-+ __PAGE_OFFSET && (path)[depth].p_idx != NULL); \
-+ BUG_ON((unsigned long) (path)[depth].p_ext < \
-+ __PAGE_OFFSET && (path)[depth].p_ext != NULL); \
-+ BUG_ON((unsigned long) (path)[depth].p_hdr < __PAGE_OFFSET); \
-+ BUG_ON((unsigned long) (path)[depth].p_bh < __PAGE_OFFSET \
-+ && depth != 0); \
-+ BUG_ON((path)[0].p_depth != depth); \
-+}
-+
-+
-+/*
-+ * this structure is used to gather extents from the tree via ioctl
-+ */
-+struct ext3_extent_buf {
-+ unsigned long start;
-+ int buflen;
-+ void *buffer;
-+ void *cur;
-+ int err;
-+};
-+
-+/*
-+ * this structure is used to collect stats info about the tree
-+ */
-+struct ext3_extent_tree_stats {
-+ int depth;
-+ int extents_num;
-+ int leaf_num;
-+};
-+
-+extern void ext3_init_tree_desc(struct ext3_extents_tree *, struct inode *);
-+extern int ext3_extent_tree_init(handle_t *, struct ext3_extents_tree *);
-+extern int ext3_ext_calc_credits_for_insert(struct ext3_extents_tree *, struct ext3_ext_path *);
-+extern int ext3_ext_insert_extent(handle_t *, struct ext3_extents_tree *, struct ext3_ext_path *, struct ext3_extent *);
-+extern int ext3_ext_walk_space(struct ext3_extents_tree *, unsigned long, unsigned long, ext_prepare_callback);
-+extern int ext3_ext_remove_space(struct ext3_extents_tree *, unsigned long, unsigned long);
-+extern struct ext3_ext_path * ext3_ext_find_extent(struct ext3_extents_tree *, int, struct ext3_ext_path *);
-+extern int ext3_ext_calc_blockmap_metadata(struct inode *, int);
-+
-+static inline void
-+ext3_ext_invalidate_cache(struct ext3_extents_tree *tree)
-+{
-+ if (tree->cex)
-+ tree->cex->ec_type = EXT3_EXT_CACHE_NO;
-+}
-+
-+
-+#endif /* _LINUX_EXT3_EXTENTS */
-Index: linux-2.6.16.27-0.9/include/linux/ext3_fs_i.h
-===================================================================
---- linux-2.6.16.27-0.9.orig/include/linux/ext3_fs_i.h
-+++ linux-2.6.16.27-0.9/include/linux/ext3_fs_i.h
-@@ -133,6 +133,8 @@ struct ext3_inode_info {
- */
- struct semaphore truncate_sem;
- struct inode vfs_inode;
-+
-+ __u32 i_cached_extent[4];
- };
-
- #endif /* _LINUX_EXT3_FS_I */
+++ /dev/null
-Index: linux-stage/fs/ext3/extents.c
-===================================================================
---- /dev/null 1970-01-01 00:00:00.000000000 +0000
-+++ linux-stage/fs/ext3/extents.c 2006-07-16 14:10:21.000000000 +0800
-@@ -0,0 +1,2359 @@
-+/*
-+ * Copyright(c) 2003, 2004, 2005, Cluster File Systems, Inc, info@clusterfs.com
-+ * Written by Alex Tomas <alex@clusterfs.com>
-+ *
-+ * This program is free software; you can redistribute it and/or modify
-+ * it under the terms of the GNU General Public License version 2 as
-+ * published by the Free Software Foundation.
-+ *
-+ * This program is distributed in the hope that it will be useful,
-+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
-+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-+ * GNU General Public License for more details.
-+ *
-+ * You should have received a copy of the GNU General Public License
-+ * along with this program; if not, write to the Free Software
-+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-
-+ */
-+
-+/*
-+ * Extents support for EXT3
-+ *
-+ * TODO:
-+ * - ext3_ext_walk_space() should not use ext3_ext_find_extent()
-+ * - ext3_ext_calc_credits() could take 'mergable' into account
-+ * - ext3*_error() should be used in some situations
-+ * - find_goal() [to be tested and improved]
-+ * - smart tree reduction
-+ * - arch-independence
-+ * common on-disk format for big/little-endian arch
-+ */
-+
-+#include <linux/module.h>
-+#include <linux/fs.h>
-+#include <linux/time.h>
-+#include <linux/ext3_jbd.h>
-+#include <linux/jbd.h>
-+#include <linux/smp_lock.h>
-+#include <linux/highuid.h>
-+#include <linux/pagemap.h>
-+#include <linux/quotaops.h>
-+#include <linux/string.h>
-+#include <linux/slab.h>
-+#include <linux/ext3_extents.h>
-+#include <asm/uaccess.h>
-+
-+
-+static inline int ext3_ext_check_header(struct ext3_extent_header *eh)
-+{
-+ if (eh->eh_magic != EXT3_EXT_MAGIC) {
-+ printk(KERN_ERR "EXT3-fs: invalid magic = 0x%x\n",
-+ (unsigned)eh->eh_magic);
-+ return -EIO;
-+ }
-+ if (eh->eh_max == 0) {
-+ printk(KERN_ERR "EXT3-fs: invalid eh_max = %u\n",
-+ (unsigned)eh->eh_max);
-+ return -EIO;
-+ }
-+ if (eh->eh_entries > eh->eh_max) {
-+ printk(KERN_ERR "EXT3-fs: invalid eh_entries = %u\n",
-+ (unsigned)eh->eh_entries);
-+ return -EIO;
-+ }
-+ return 0;
-+}
-+
-+static handle_t *ext3_ext_journal_restart(handle_t *handle, int needed)
-+{
-+ int err;
-+
-+ if (handle->h_buffer_credits > needed)
-+ return handle;
-+ if (!ext3_journal_extend(handle, needed))
-+ return handle;
-+ err = ext3_journal_restart(handle, needed);
-+
-+ return handle;
-+}
-+
-+static int inline
-+ext3_ext_get_access_for_root(handle_t *h, struct ext3_extents_tree *tree)
-+{
-+ if (tree->ops->get_write_access)
-+ return tree->ops->get_write_access(h,tree->buffer);
-+ else
-+ return 0;
-+}
-+
-+static int inline
-+ext3_ext_mark_root_dirty(handle_t *h, struct ext3_extents_tree *tree)
-+{
-+ if (tree->ops->mark_buffer_dirty)
-+ return tree->ops->mark_buffer_dirty(h,tree->buffer);
-+ else
-+ return 0;
-+}
-+
-+/*
-+ * could return:
-+ * - EROFS
-+ * - ENOMEM
-+ */
-+static int ext3_ext_get_access(handle_t *handle,
-+ struct ext3_extents_tree *tree,
-+ struct ext3_ext_path *path)
-+{
-+ int err;
-+
-+ if (path->p_bh) {
-+ /* path points to block */
-+ err = ext3_journal_get_write_access(handle, path->p_bh);
-+ } else {
-+ /* path points to leaf/index in inode body */
-+ err = ext3_ext_get_access_for_root(handle, tree);
-+ }
-+ return err;
-+}
-+
-+/*
-+ * could return:
-+ * - EROFS
-+ * - ENOMEM
-+ * - EIO
-+ */
-+static int ext3_ext_dirty(handle_t *handle, struct ext3_extents_tree *tree,
-+ struct ext3_ext_path *path)
-+{
-+ int err;
-+ if (path->p_bh) {
-+ /* path points to block */
-+ err = ext3_journal_dirty_metadata(handle, path->p_bh);
-+ } else {
-+ /* path points to leaf/index in inode body */
-+ err = ext3_ext_mark_root_dirty(handle, tree);
-+ }
-+ return err;
-+}
-+
-+static int inline
-+ext3_ext_new_block(handle_t *handle, struct ext3_extents_tree *tree,
-+ struct ext3_ext_path *path, struct ext3_extent *ex,
-+ int *err)
-+{
-+ int goal, depth, newblock;
-+ struct inode *inode;
-+
-+ EXT_ASSERT(tree);
-+ if (tree->ops->new_block)
-+ return tree->ops->new_block(handle, tree, path, ex, err);
-+
-+ inode = tree->inode;
-+ depth = EXT_DEPTH(tree);
-+ if (path && depth > 0) {
-+ goal = path[depth-1].p_block;
-+ } else {
-+ struct ext3_inode_info *ei = EXT3_I(inode);
-+ unsigned long bg_start;
-+ unsigned long colour;
-+
-+ bg_start = (ei->i_block_group *
-+ EXT3_BLOCKS_PER_GROUP(inode->i_sb)) +
-+ le32_to_cpu(EXT3_SB(inode->i_sb)->s_es->s_first_data_block);
-+ colour = (current->pid % 16) *
-+ (EXT3_BLOCKS_PER_GROUP(inode->i_sb) / 16);
-+ goal = bg_start + colour;
-+ }
-+
-+ newblock = ext3_new_block(handle, inode, goal, err);
-+ return newblock;
-+}
-+
-+static inline void ext3_ext_tree_changed(struct ext3_extents_tree *tree)
-+{
-+ struct ext3_extent_header *neh = EXT_ROOT_HDR(tree);
-+ neh->eh_generation = ((EXT_FLAGS(neh) & ~EXT_FLAGS_CLR_UNKNOWN) << 24) |
-+ (EXT_HDR_GEN(neh) + 1);
-+}
-+
-+static inline int ext3_ext_space_block(struct ext3_extents_tree *tree)
-+{
-+ int size;
-+
-+ size = (tree->inode->i_sb->s_blocksize -
-+ sizeof(struct ext3_extent_header)) /
-+ sizeof(struct ext3_extent);
-+#ifdef AGRESSIVE_TEST
-+ size = 6;
-+#endif
-+ return size;
-+}
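-+/* e.g. with a 4096-byte block: (4096 - 12) / 12 = 340 extents fit in one
-+ * leaf block (unless AGRESSIVE_TEST shrinks the capacity for debugging) */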
-+
-+static inline int ext3_ext_space_block_idx(struct ext3_extents_tree *tree)
-+{
-+ int size;
-+
-+ size = (tree->inode->i_sb->s_blocksize -
-+ sizeof(struct ext3_extent_header)) /
-+ sizeof(struct ext3_extent_idx);
-+#ifdef AGRESSIVE_TEST
-+ size = 5;
-+#endif
-+ return size;
-+}
-+
-+static inline int ext3_ext_space_root(struct ext3_extents_tree *tree)
-+{
-+ int size;
-+
-+ size = (tree->buffer_len - sizeof(struct ext3_extent_header)) /
-+ sizeof(struct ext3_extent);
-+#ifdef AGRESSIVE_TEST
-+ size = 3;
-+#endif
-+ return size;
-+}
-+
-+static inline int ext3_ext_space_root_idx(struct ext3_extents_tree *tree)
-+{
-+ int size;
-+
-+ size = (tree->buffer_len - sizeof(struct ext3_extent_header)) /
-+ sizeof(struct ext3_extent_idx);
-+#ifdef AGRESSIVE_TEST
-+ size = 4;
-+#endif
-+ return size;
-+}
-+
-+static void ext3_ext_show_path(struct ext3_extents_tree *tree,
-+ struct ext3_ext_path *path)
-+{
-+#ifdef EXT_DEBUG
-+ int k, l = path->p_depth;
-+
-+ ext_debug(tree, "path:");
-+ for (k = 0; k <= l; k++, path++) {
-+ if (path->p_idx) {
-+ ext_debug(tree, " %d->%d", path->p_idx->ei_block,
-+ path->p_idx->ei_leaf);
-+ } else if (path->p_ext) {
-+ ext_debug(tree, " %d:%d:%d",
-+ path->p_ext->ee_block,
-+ path->p_ext->ee_len,
-+ path->p_ext->ee_start);
-+ } else
-+ ext_debug(tree, " []");
-+ }
-+ ext_debug(tree, "\n");
-+#endif
-+}
-+
-+static void ext3_ext_show_leaf(struct ext3_extents_tree *tree,
-+ struct ext3_ext_path *path)
-+{
-+#ifdef EXT_DEBUG
-+ int depth = EXT_DEPTH(tree);
-+ struct ext3_extent_header *eh;
-+ struct ext3_extent *ex;
-+ int i;
-+
-+ if (!path)
-+ return;
-+
-+ eh = path[depth].p_hdr;
-+ ex = EXT_FIRST_EXTENT(eh);
-+
-+ for (i = 0; i < eh->eh_entries; i++, ex++) {
-+ ext_debug(tree, "%d:%d:%d ",
-+ ex->ee_block, ex->ee_len, ex->ee_start);
-+ }
-+ ext_debug(tree, "\n");
-+#endif
-+}
-+
-+static void ext3_ext_drop_refs(struct ext3_ext_path *path)
-+{
-+ int depth = path->p_depth;
-+ int i;
-+
-+ for (i = 0; i <= depth; i++, path++) {
-+ if (path->p_bh) {
-+ brelse(path->p_bh);
-+ path->p_bh = NULL;
-+ }
-+ }
-+}
-+
-+/*
-+ * binary search for closest index by given block
-+ */
-+static inline void
-+ext3_ext_binsearch_idx(struct ext3_extents_tree *tree,
-+ struct ext3_ext_path *path, int block)
-+{
-+ struct ext3_extent_header *eh = path->p_hdr;
-+ struct ext3_extent_idx *ix;
-+ int l = 0, k, r;
-+
-+ EXT_ASSERT(eh->eh_magic == EXT3_EXT_MAGIC);
-+ EXT_ASSERT(eh->eh_entries <= eh->eh_max);
-+ EXT_ASSERT(eh->eh_entries > 0);
-+
-+ ext_debug(tree, "binsearch for %d(idx): ", block);
-+
-+ path->p_idx = ix = EXT_FIRST_INDEX(eh);
-+
-+ r = k = eh->eh_entries;
-+ while (k > 1) {
-+ k = (r - l) / 2;
-+ if (block < ix[l + k].ei_block)
-+ r -= k;
-+ else
-+ l += k;
-+ ext_debug(tree, "%d:%d:%d ", k, l, r);
-+ }
-+
-+ ix += l;
-+ path->p_idx = ix;
-+ ext_debug(tree," -> %d->%d ",path->p_idx->ei_block,path->p_idx->ei_leaf);
-+
-+ while (l++ < r) {
-+ if (block < ix->ei_block)
-+ break;
-+ path->p_idx = ix++;
-+ }
-+ ext_debug(tree, " -> %d->%d\n", path->p_idx->ei_block,
-+ path->p_idx->ei_leaf);
-+
-+#ifdef CHECK_BINSEARCH
-+ {
-+ struct ext3_extent_idx *chix;
-+
-+ chix = ix = EXT_FIRST_INDEX(eh);
-+ for (k = 0; k < eh->eh_entries; k++, ix++) {
-+ if (k != 0 && ix->ei_block <= ix[-1].ei_block) {
-+ printk("k=%d, ix=0x%p, first=0x%p\n", k,
-+ ix, EXT_FIRST_INDEX(eh));
-+ printk("%u <= %u\n",
-+ ix->ei_block,ix[-1].ei_block);
-+ }
-+ EXT_ASSERT(k == 0 || ix->ei_block > ix[-1].ei_block);
-+ if (block < ix->ei_block)
-+ break;
-+ chix = ix;
-+ }
-+ EXT_ASSERT(chix == path->p_idx);
-+ }
-+#endif
-+}
-+
-+/*
-+ * binary search for closest extent by given block
-+ */
-+static inline void
-+ext3_ext_binsearch(struct ext3_extents_tree *tree,
-+ struct ext3_ext_path *path, int block)
-+{
-+ struct ext3_extent_header *eh = path->p_hdr;
-+ struct ext3_extent *ex;
-+ int l = 0, k, r;
-+
-+ EXT_ASSERT(eh->eh_magic == EXT3_EXT_MAGIC);
-+ EXT_ASSERT(eh->eh_entries <= eh->eh_max);
-+
-+ if (eh->eh_entries == 0) {
-+ /*
-+ * this leaf is still empty:
-+ * we get such a leaf in split/add case
-+ */
-+ return;
-+ }
-+
-+ ext_debug(tree, "binsearch for %d: ", block);
-+
-+ path->p_ext = ex = EXT_FIRST_EXTENT(eh);
-+
-+ r = k = eh->eh_entries;
-+ while (k > 1) {
-+ k = (r - l) / 2;
-+ if (block < ex[l + k].ee_block)
-+ r -= k;
-+ else
-+ l += k;
-+ ext_debug(tree, "%d:%d:%d ", k, l, r);
-+ }
-+
-+ ex += l;
-+ path->p_ext = ex;
-+ ext_debug(tree, " -> %d:%d:%d ", path->p_ext->ee_block,
-+ path->p_ext->ee_start, path->p_ext->ee_len);
-+
-+ while (l++ < r) {
-+ if (block < ex->ee_block)
-+ break;
-+ path->p_ext = ex++;
-+ }
-+ ext_debug(tree, " -> %d:%d:%d\n", path->p_ext->ee_block,
-+ path->p_ext->ee_start, path->p_ext->ee_len);
-+
-+#ifdef CHECK_BINSEARCH
-+ {
-+ struct ext3_extent *chex;
-+
-+ chex = ex = EXT_FIRST_EXTENT(eh);
-+ for (k = 0; k < eh->eh_entries; k++, ex++) {
-+ EXT_ASSERT(k == 0 || ex->ee_block > ex[-1].ee_block);
-+ if (block < ex->ee_block)
-+ break;
-+ chex = ex;
-+ }
-+ EXT_ASSERT(chex == path->p_ext);
-+ }
-+#endif
-+}
-+
-+int ext3_extent_tree_init(handle_t *handle, struct ext3_extents_tree *tree)
-+{
-+ struct ext3_extent_header *eh;
-+
-+ BUG_ON(tree->buffer_len == 0);
-+ ext3_ext_get_access_for_root(handle, tree);
-+ eh = EXT_ROOT_HDR(tree);
-+ eh->eh_depth = 0;
-+ eh->eh_entries = 0;
-+ eh->eh_magic = EXT3_EXT_MAGIC;
-+ eh->eh_max = ext3_ext_space_root(tree);
-+ ext3_ext_mark_root_dirty(handle, tree);
-+ ext3_ext_invalidate_cache(tree);
-+ return 0;
-+}
-+
-+struct ext3_ext_path *
-+ext3_ext_find_extent(struct ext3_extents_tree *tree, int block,
-+ struct ext3_ext_path *path)
-+{
-+ struct ext3_extent_header *eh;
-+ struct buffer_head *bh;
-+ int depth, i, ppos = 0;
-+
-+ EXT_ASSERT(tree);
-+ EXT_ASSERT(tree->inode);
-+ EXT_ASSERT(tree->root);
-+
-+ eh = EXT_ROOT_HDR(tree);
-+ EXT_ASSERT(eh);
-+ if (ext3_ext_check_header(eh)) {
-+ /* don't free previously allocated path
-+ * -- caller should take care */
-+ path = NULL;
-+ goto err;
-+ }
-+
-+ i = depth = EXT_DEPTH(tree);
-+ EXT_ASSERT(eh->eh_max);
-+ EXT_ASSERT(eh->eh_magic == EXT3_EXT_MAGIC);
-+
-+ /* account possible depth increase */
-+ if (!path) {
-+ path = kmalloc(sizeof(struct ext3_ext_path) * (depth + 2),
-+ GFP_NOFS);
-+ if (!path)
-+ return ERR_PTR(-ENOMEM);
-+ }
-+ memset(path, 0, sizeof(struct ext3_ext_path) * (depth + 1));
-+ path[0].p_hdr = eh;
-+
-+ /* walk through the tree */
-+ while (i) {
-+ ext_debug(tree, "depth %d: num %d, max %d\n",
-+ ppos, eh->eh_entries, eh->eh_max);
-+ ext3_ext_binsearch_idx(tree, path + ppos, block);
-+ path[ppos].p_block = path[ppos].p_idx->ei_leaf;
-+ path[ppos].p_depth = i;
-+ path[ppos].p_ext = NULL;
-+
-+ bh = sb_bread(tree->inode->i_sb, path[ppos].p_block);
-+ if (!bh)
-+ goto err;
-+
-+ eh = EXT_BLOCK_HDR(bh);
-+ ppos++;
-+ EXT_ASSERT(ppos <= depth);
-+ path[ppos].p_bh = bh;
-+ path[ppos].p_hdr = eh;
-+ i--;
-+
-+ if (ext3_ext_check_header(eh))
-+ goto err;
-+ }
-+
-+ path[ppos].p_depth = i;
-+ path[ppos].p_hdr = eh;
-+ path[ppos].p_ext = NULL;
-+ path[ppos].p_idx = NULL;
-+
-+ if (ext3_ext_check_header(eh))
-+ goto err;
-+
-+ /* find extent */
-+ ext3_ext_binsearch(tree, path + ppos, block);
-+
-+ ext3_ext_show_path(tree, path);
-+
-+ return path;
-+
-+err:
-+ printk(KERN_ERR "EXT3-fs: header is corrupted!\n");
-+ if (path) {
-+ ext3_ext_drop_refs(path);
-+ kfree(path);
-+ }
-+ return ERR_PTR(-EIO);
-+}
-+
-+/*
-+ * insert new index [logical;ptr] into the block at curp;
-+ * it checks where to insert: before curp or after curp
-+ */
-+static int ext3_ext_insert_index(handle_t *handle,
-+ struct ext3_extents_tree *tree,
-+ struct ext3_ext_path *curp,
-+ int logical, int ptr)
-+{
-+ struct ext3_extent_idx *ix;
-+ int len, err;
-+
-+ if ((err = ext3_ext_get_access(handle, tree, curp)))
-+ return err;
-+
-+ EXT_ASSERT(logical != curp->p_idx->ei_block);
-+ len = EXT_MAX_INDEX(curp->p_hdr) - curp->p_idx;
-+ if (logical > curp->p_idx->ei_block) {
-+ /* insert after */
-+ if (curp->p_idx != EXT_LAST_INDEX(curp->p_hdr)) {
-+ len = (len - 1) * sizeof(struct ext3_extent_idx);
-+ len = len < 0 ? 0 : len;
-+ ext_debug(tree, "insert new index %d after: %d. "
-+ "move %d from 0x%p to 0x%p\n",
-+ logical, ptr, len,
-+ (curp->p_idx + 1), (curp->p_idx + 2));
-+ memmove(curp->p_idx + 2, curp->p_idx + 1, len);
-+ }
-+ ix = curp->p_idx + 1;
-+ } else {
-+ /* insert before */
-+ len = len * sizeof(struct ext3_extent_idx);
-+ len = len < 0 ? 0 : len;
-+ ext_debug(tree, "insert new index %d before: %d. "
-+ "move %d from 0x%p to 0x%p\n",
-+ logical, ptr, len,
-+ curp->p_idx, (curp->p_idx + 1));
-+ memmove(curp->p_idx + 1, curp->p_idx, len);
-+ ix = curp->p_idx;
-+ }
-+
-+ ix->ei_block = logical;
-+ ix->ei_leaf = ptr;
-+ ix->ei_leaf_hi = ix->ei_unused = 0;
-+ curp->p_hdr->eh_entries++;
-+
-+ EXT_ASSERT(curp->p_hdr->eh_entries <= curp->p_hdr->eh_max);
-+ EXT_ASSERT(ix <= EXT_LAST_INDEX(curp->p_hdr));
-+
-+ err = ext3_ext_dirty(handle, tree, curp);
-+ ext3_std_error(tree->inode->i_sb, err);
-+
-+ return err;
-+}
-+
-+/*
-+ * routine inserts a new subtree into the path, using the free index entry
-+ * at depth 'at':
-+ * - allocates all needed blocks (new leaf and all intermediate index blocks)
-+ * - makes a decision where to split
-+ * - moves remaining extents and index entries (right of the split point)
-+ * into the newly allocated blocks
-+ * - initializes the subtree
-+ */
-+static int ext3_ext_split(handle_t *handle, struct ext3_extents_tree *tree,
-+ struct ext3_ext_path *path,
-+ struct ext3_extent *newext, int at)
-+{
-+ struct buffer_head *bh = NULL;
-+ int depth = EXT_DEPTH(tree);
-+ struct ext3_extent_header *neh;
-+ struct ext3_extent_idx *fidx;
-+ struct ext3_extent *ex;
-+ int i = at, k, m, a;
-+ unsigned long newblock, oldblock, border;
-+ int *ablocks = NULL; /* array of allocated blocks */
-+ int err = 0;
-+
-+ /* make decision: where to split? */
-+ /* FIXME: now the decision is simplest: at the current extent */
-+
-+ /* if the current leaf will be split, then we should use
-+ * border from split point */
-+ EXT_ASSERT(path[depth].p_ext <= EXT_MAX_EXTENT(path[depth].p_hdr));
-+ if (path[depth].p_ext != EXT_MAX_EXTENT(path[depth].p_hdr)) {
-+ border = path[depth].p_ext[1].ee_block;
-+ ext_debug(tree, "leaf will be splitted."
-+ " next leaf starts at %d\n",
-+ (int)border);
-+ } else {
-+ border = newext->ee_block;
-+ ext_debug(tree, "leaf will be added."
-+ " next leaf starts at %d\n",
-+ (int)border);
-+ }
-+
-+ /*
-+ * if an error occurs, then we break processing
-+ * and turn the filesystem read-only, so the index won't
-+ * be inserted and the tree will be in a consistent
-+ * state. the next mount will repair buffers too
-+ */
-+
-+ /*
-+ * get an array to track all allocated blocks;
-+ * we need this to handle errors and to free the blocks
-+ * on error
-+ */
-+ ablocks = kmalloc(sizeof(unsigned long) * depth, GFP_NOFS);
-+ if (!ablocks)
-+ return -ENOMEM;
-+ memset(ablocks, 0, sizeof(unsigned long) * depth);
-+
-+ /* allocate all needed blocks */
-+ ext_debug(tree, "allocate %d blocks for indexes/leaf\n", depth - at);
-+ for (a = 0; a < depth - at; a++) {
-+ newblock = ext3_ext_new_block(handle, tree, path, newext, &err);
-+ if (newblock == 0)
-+ goto cleanup;
-+ ablocks[a] = newblock;
-+ }
-+
-+ /* initialize new leaf */
-+ newblock = ablocks[--a];
-+ EXT_ASSERT(newblock);
-+ bh = sb_getblk(tree->inode->i_sb, newblock);
-+ if (!bh) {
-+ err = -EIO;
-+ goto cleanup;
-+ }
-+ lock_buffer(bh);
-+
-+ if ((err = ext3_journal_get_create_access(handle, bh)))
-+ goto cleanup;
-+
-+ neh = EXT_BLOCK_HDR(bh);
-+ neh->eh_entries = 0;
-+ neh->eh_max = ext3_ext_space_block(tree);
-+ neh->eh_magic = EXT3_EXT_MAGIC;
-+ neh->eh_depth = 0;
-+ ex = EXT_FIRST_EXTENT(neh);
-+
-+ /* move the remainder of path[depth] to the new leaf */
-+ EXT_ASSERT(path[depth].p_hdr->eh_entries == path[depth].p_hdr->eh_max);
-+ /* start copy from next extent */
-+ /* TODO: we could do it with a single memmove */
-+ m = 0;
-+ path[depth].p_ext++;
-+ while (path[depth].p_ext <=
-+ EXT_MAX_EXTENT(path[depth].p_hdr)) {
-+ ext_debug(tree, "move %d:%d:%d in new leaf %lu\n",
-+ path[depth].p_ext->ee_block,
-+ path[depth].p_ext->ee_start,
-+ path[depth].p_ext->ee_len,
-+ newblock);
-+ memmove(ex++, path[depth].p_ext++, sizeof(struct ext3_extent));
-+ neh->eh_entries++;
-+ m++;
-+ }
-+ set_buffer_uptodate(bh);
-+ unlock_buffer(bh);
-+
-+ if ((err = ext3_journal_dirty_metadata(handle, bh)))
-+ goto cleanup;
-+ brelse(bh);
-+ bh = NULL;
-+
-+ /* correct old leaf */
-+ if (m) {
-+ if ((err = ext3_ext_get_access(handle, tree, path + depth)))
-+ goto cleanup;
-+ path[depth].p_hdr->eh_entries -= m;
-+ if ((err = ext3_ext_dirty(handle, tree, path + depth)))
-+ goto cleanup;
-+
-+ }
-+
-+ /* create intermediate indexes */
-+ k = depth - at - 1;
-+ EXT_ASSERT(k >= 0);
-+ if (k)
-+ ext_debug(tree, "create %d intermediate indices\n", k);
-+ /* insert new index into current index block */
-+ /* current depth stored in i var */
-+ i = depth - 1;
-+ while (k--) {
-+ oldblock = newblock;
-+ newblock = ablocks[--a];
-+ bh = sb_getblk(tree->inode->i_sb, newblock);
-+ if (!bh) {
-+ err = -EIO;
-+ goto cleanup;
-+ }
-+ lock_buffer(bh);
-+
-+ if ((err = ext3_journal_get_create_access(handle, bh)))
-+ goto cleanup;
-+
-+ neh = EXT_BLOCK_HDR(bh);
-+ neh->eh_entries = 1;
-+ neh->eh_magic = EXT3_EXT_MAGIC;
-+ neh->eh_max = ext3_ext_space_block_idx(tree);
-+ neh->eh_depth = depth - i;
-+ fidx = EXT_FIRST_INDEX(neh);
-+ fidx->ei_block = border;
-+ fidx->ei_leaf = oldblock;
-+ fidx->ei_leaf_hi = fidx->ei_unused = 0;
-+
-+ ext_debug(tree, "int.index at %d (block %lu): %lu -> %lu\n",
-+ i, newblock, border, oldblock);
-+ /* copy indexes */
-+ m = 0;
-+ path[i].p_idx++;
-+
-+ ext_debug(tree, "cur 0x%p, last 0x%p\n", path[i].p_idx,
-+ EXT_MAX_INDEX(path[i].p_hdr));
-+ EXT_ASSERT(EXT_MAX_INDEX(path[i].p_hdr) ==
-+ EXT_LAST_INDEX(path[i].p_hdr));
-+ while (path[i].p_idx <= EXT_MAX_INDEX(path[i].p_hdr)) {
-+ ext_debug(tree, "%d: move %d:%d in new index %lu\n",
-+ i, path[i].p_idx->ei_block,
-+ path[i].p_idx->ei_leaf, newblock);
-+ memmove(++fidx, path[i].p_idx++,
-+ sizeof(struct ext3_extent_idx));
-+ neh->eh_entries++;
-+ EXT_ASSERT(neh->eh_entries <= neh->eh_max);
-+ m++;
-+ }
-+ set_buffer_uptodate(bh);
-+ unlock_buffer(bh);
-+
-+ if ((err = ext3_journal_dirty_metadata(handle, bh)))
-+ goto cleanup;
-+ brelse(bh);
-+ bh = NULL;
-+
-+ /* correct old index */
-+ if (m) {
-+ err = ext3_ext_get_access(handle, tree, path + i);
-+ if (err)
-+ goto cleanup;
-+ path[i].p_hdr->eh_entries -= m;
-+ err = ext3_ext_dirty(handle, tree, path + i);
-+ if (err)
-+ goto cleanup;
-+ }
-+
-+ i--;
-+ }
-+
-+ /* insert new index */
-+ if (!err)
-+ err = ext3_ext_insert_index(handle, tree, path + at,
-+ border, newblock);
-+
-+cleanup:
-+ if (bh) {
-+ if (buffer_locked(bh))
-+ unlock_buffer(bh);
-+ brelse(bh);
-+ }
-+
-+ if (err) {
-+ /* free all allocated blocks in error case */
-+ for (i = 0; i < depth; i++) {
-+ if (!ablocks[i])
-+ continue;
-+ ext3_free_blocks(handle, tree->inode, ablocks[i], 1);
-+ }
-+ }
-+ kfree(ablocks);
-+
-+ return err;
-+}
-+
-+/*
-+ * routine implements tree growing procedure:
-+ * - allocates new block
-+ * - moves top-level data (index block or leaf) into the new block
-+ * - initializes the new top-level, creating an index that points to the
-+ * just created block
-+ */
-+static int ext3_ext_grow_indepth(handle_t *handle,
-+ struct ext3_extents_tree *tree,
-+ struct ext3_ext_path *path,
-+ struct ext3_extent *newext)
-+{
-+ struct ext3_ext_path *curp = path;
-+ struct ext3_extent_header *neh;
-+ struct ext3_extent_idx *fidx;
-+ struct buffer_head *bh;
-+ unsigned long newblock;
-+ int err = 0;
-+
-+ newblock = ext3_ext_new_block(handle, tree, path, newext, &err);
-+ if (newblock == 0)
-+ return err;
-+
-+ bh = sb_getblk(tree->inode->i_sb, newblock);
-+ if (!bh) {
-+ err = -EIO;
-+ ext3_std_error(tree->inode->i_sb, err);
-+ return err;
-+ }
-+ lock_buffer(bh);
-+
-+ if ((err = ext3_journal_get_create_access(handle, bh))) {
-+ unlock_buffer(bh);
-+ goto out;
-+ }
-+
-+ /* move top-level index/leaf into new block */
-+ memmove(bh->b_data, curp->p_hdr, tree->buffer_len);
-+
-+ /* set size of new block */
-+ neh = EXT_BLOCK_HDR(bh);
-+ /* old root could have indexes or leaves
-+ * so calculate eh_max the right way */
-+ if (EXT_DEPTH(tree))
-+ neh->eh_max = ext3_ext_space_block_idx(tree);
-+ else
-+ neh->eh_max = ext3_ext_space_block(tree);
-+ neh->eh_magic = EXT3_EXT_MAGIC;
-+ set_buffer_uptodate(bh);
-+ unlock_buffer(bh);
-+
-+ if ((err = ext3_journal_dirty_metadata(handle, bh)))
-+ goto out;
-+
-+ /* create index in new top-level index: num,max,pointer */
-+ if ((err = ext3_ext_get_access(handle, tree, curp)))
-+ goto out;
-+
-+ curp->p_hdr->eh_magic = EXT3_EXT_MAGIC;
-+ curp->p_hdr->eh_max = ext3_ext_space_root_idx(tree);
-+ curp->p_hdr->eh_entries = 1;
-+ curp->p_idx = EXT_FIRST_INDEX(curp->p_hdr);
-+ /* FIXME: it works, but actually path[0] can be index */
-+ curp->p_idx->ei_block = EXT_FIRST_EXTENT(path[0].p_hdr)->ee_block;
-+ curp->p_idx->ei_leaf = newblock;
-+ curp->p_idx->ei_leaf_hi = curp->p_idx->ei_unused = 0;
-+
-+ neh = EXT_ROOT_HDR(tree);
-+ fidx = EXT_FIRST_INDEX(neh);
-+ ext_debug(tree, "new root: num %d(%d), lblock %d, ptr %d\n",
-+ neh->eh_entries, neh->eh_max, fidx->ei_block, fidx->ei_leaf);
-+
-+ neh->eh_depth = path->p_depth + 1;
-+ err = ext3_ext_dirty(handle, tree, curp);
-+out:
-+ brelse(bh);
-+
-+ return err;
-+}
-+
-+/*
-+ * routine finds empty index and adds new leaf. if no free index found
-+ * then it requests in-depth growing
-+ */
-+static int ext3_ext_create_new_leaf(handle_t *handle,
-+ struct ext3_extents_tree *tree,
-+ struct ext3_ext_path *path,
-+ struct ext3_extent *newext)
-+{
-+ struct ext3_ext_path *curp;
-+ int depth, i, err = 0;
-+
-+repeat:
-+ i = depth = EXT_DEPTH(tree);
-+
-+ /* walk up the tree and look for a free index entry */
-+ curp = path + depth;
-+ while (i > 0 && !EXT_HAS_FREE_INDEX(curp)) {
-+ i--;
-+ curp--;
-+ }
-+
-+ /* we use already allocated block for index block
-+ * so, subsequent data blocks should be contiguous */
-+ if (EXT_HAS_FREE_INDEX(curp)) {
-+ /* if we found an index with a free entry, then use that
-+ * entry: create all the needed subtree and add the new leaf */
-+ err = ext3_ext_split(handle, tree, path, newext, i);
-+
-+ /* refill path */
-+ ext3_ext_drop_refs(path);
-+ path = ext3_ext_find_extent(tree, newext->ee_block, path);
-+ if (IS_ERR(path))
-+ err = PTR_ERR(path);
-+ } else {
-+ /* tree is full, time to grow in depth */
-+ err = ext3_ext_grow_indepth(handle, tree, path, newext);
-+
-+ /* refill path */
-+ ext3_ext_drop_refs(path);
-+ path = ext3_ext_find_extent(tree, newext->ee_block, path);
-+ if (IS_ERR(path))
-+ err = PTR_ERR(path);
-+
-+ /*
-+ * only the first grow (depth 0 -> 1) produces free space;
-+ * in all other cases we have to split the grown tree
-+ */
-+ depth = EXT_DEPTH(tree);
-+ if (path[depth].p_hdr->eh_entries == path[depth].p_hdr->eh_max) {
-+ /* now we need to split */
-+ goto repeat;
-+ }
-+ }
-+
-+ if (err)
-+ return err;
-+
-+ return 0;
-+}
-+
-+/*
-+ * returns allocated block in subsequent extent or EXT_MAX_BLOCK
-+ * NOTE: it considers the block number from an index entry as an
-+ * allocated block. thus, index entries have to be consistent
-+ * with the leaves
-+ */
-+static unsigned long
-+ext3_ext_next_allocated_block(struct ext3_ext_path *path)
-+{
-+ int depth;
-+
-+ EXT_ASSERT(path != NULL);
-+ depth = path->p_depth;
-+
-+ if (depth == 0 && path->p_ext == NULL)
-+ return EXT_MAX_BLOCK;
-+
-+ /* FIXME: what if index isn't full ?! */
-+ while (depth >= 0) {
-+ if (depth == path->p_depth) {
-+ /* leaf */
-+ if (path[depth].p_ext !=
-+ EXT_LAST_EXTENT(path[depth].p_hdr))
-+ return path[depth].p_ext[1].ee_block;
-+ } else {
-+ /* index */
-+ if (path[depth].p_idx !=
-+ EXT_LAST_INDEX(path[depth].p_hdr))
-+ return path[depth].p_idx[1].ei_block;
-+ }
-+ depth--;
-+ }
-+
-+ return EXT_MAX_BLOCK;
-+}
-+
-+/*
-+ * returns first allocated block from next leaf or EXT_MAX_BLOCK
-+ */
-+static unsigned ext3_ext_next_leaf_block(struct ext3_extents_tree *tree,
-+ struct ext3_ext_path *path)
-+{
-+ int depth;
-+
-+ EXT_ASSERT(path != NULL);
-+ depth = path->p_depth;
-+
-+ /* zero-tree has no leaf blocks at all */
-+ if (depth == 0)
-+ return EXT_MAX_BLOCK;
-+
-+ /* go to index block */
-+ depth--;
-+
-+ while (depth >= 0) {
-+ if (path[depth].p_idx !=
-+ EXT_LAST_INDEX(path[depth].p_hdr))
-+ return path[depth].p_idx[1].ei_block;
-+ depth--;
-+ }
-+
-+ return EXT_MAX_BLOCK;
-+}
-+
-+/*
-+ * if the leaf gets modified and the modified extent is first in the leaf,
-+ * then we have to correct all indexes above
-+ * TODO: do we need to correct the tree in all cases?
-+ */
-+int ext3_ext_correct_indexes(handle_t *handle, struct ext3_extents_tree *tree,
-+ struct ext3_ext_path *path)
-+{
-+ struct ext3_extent_header *eh;
-+ int depth = EXT_DEPTH(tree);
-+ struct ext3_extent *ex;
-+ unsigned long border;
-+ int k, err = 0;
-+
-+ eh = path[depth].p_hdr;
-+ ex = path[depth].p_ext;
-+ EXT_ASSERT(ex);
-+ EXT_ASSERT(eh);
-+
-+ if (depth == 0) {
-+ /* there is no tree at all */
-+ return 0;
-+ }
-+
-+ if (ex != EXT_FIRST_EXTENT(eh)) {
-+ /* we correct the tree only if the first extent in the leaf got modified */
-+ return 0;
-+ }
-+
-+ /*
-+ * TODO: we need correction if the border is smaller than the current one
-+ */
-+ k = depth - 1;
-+ border = path[depth].p_ext->ee_block;
-+ if ((err = ext3_ext_get_access(handle, tree, path + k)))
-+ return err;
-+ path[k].p_idx->ei_block = border;
-+ if ((err = ext3_ext_dirty(handle, tree, path + k)))
-+ return err;
-+
-+ while (k--) {
-+ /* change all left-side indexes */
-+ if (path[k+1].p_idx != EXT_FIRST_INDEX(path[k+1].p_hdr))
-+ break;
-+ if ((err = ext3_ext_get_access(handle, tree, path + k)))
-+ break;
-+ path[k].p_idx->ei_block = border;
-+ if ((err = ext3_ext_dirty(handle, tree, path + k)))
-+ break;
-+ }
-+
-+ return err;
-+}
-+
-+static int inline
-+ext3_can_extents_be_merged(struct ext3_extents_tree *tree,
-+ struct ext3_extent *ex1,
-+ struct ext3_extent *ex2)
-+{
-+ if (ex1->ee_block + ex1->ee_len != ex2->ee_block)
-+ return 0;
-+
-+#ifdef AGRESSIVE_TEST
-+ if (ex1->ee_len >= 4)
-+ return 0;
-+#endif
-+
-+ if (!tree->ops->mergable)
-+ return 1;
-+
-+ return tree->ops->mergable(ex1, ex2);
-+}
-+
-+/*
-+ * this routine tries to merge the requested extent into an existing
-+ * extent or inserts the requested extent as a new one into the tree,
-+ * creating a new leaf in the no-space case
-+ */
-+int ext3_ext_insert_extent(handle_t *handle, struct ext3_extents_tree *tree,
-+ struct ext3_ext_path *path,
-+ struct ext3_extent *newext)
-+{
-+ struct ext3_extent_header * eh;
-+ struct ext3_extent *ex, *fex;
-+ struct ext3_extent *nearex; /* nearest extent */
-+ struct ext3_ext_path *npath = NULL;
-+ int depth, len, err, next;
-+
-+ EXT_ASSERT(newext->ee_len > 0);
-+ depth = EXT_DEPTH(tree);
-+ ex = path[depth].p_ext;
-+ EXT_ASSERT(path[depth].p_hdr);
-+
-+ /* try to insert block into found extent and return */
-+ if (ex && ext3_can_extents_be_merged(tree, ex, newext)) {
-+ ext_debug(tree, "append %d block to %d:%d (from %d)\n",
-+ newext->ee_len, ex->ee_block, ex->ee_len,
-+ ex->ee_start);
-+ if ((err = ext3_ext_get_access(handle, tree, path + depth)))
-+ return err;
-+ ex->ee_len += newext->ee_len;
-+ eh = path[depth].p_hdr;
-+ nearex = ex;
-+ goto merge;
-+ }
-+
-+repeat:
-+ depth = EXT_DEPTH(tree);
-+ eh = path[depth].p_hdr;
-+ if (eh->eh_entries < eh->eh_max)
-+ goto has_space;
-+
-+ /* probably next leaf has space for us? */
-+ fex = EXT_LAST_EXTENT(eh);
-+ next = ext3_ext_next_leaf_block(tree, path);
-+ if (newext->ee_block > fex->ee_block && next != EXT_MAX_BLOCK) {
-+ ext_debug(tree, "next leaf block - %d\n", next);
-+ EXT_ASSERT(!npath);
-+ npath = ext3_ext_find_extent(tree, next, NULL);
-+ if (IS_ERR(npath))
-+ return PTR_ERR(npath);
-+ EXT_ASSERT(npath->p_depth == path->p_depth);
-+ eh = npath[depth].p_hdr;
-+ if (eh->eh_entries < eh->eh_max) {
-+ ext_debug(tree, "next leaf isnt full(%d)\n",
-+ eh->eh_entries);
-+ path = npath;
-+ goto repeat;
-+ }
-+ ext_debug(tree, "next leaf hasno free space(%d,%d)\n",
-+ eh->eh_entries, eh->eh_max);
-+ }
-+
-+ /*
-+ * there is no free space in the found leaf;
-+ * we are going to add a new leaf to the tree
-+ */
-+ err = ext3_ext_create_new_leaf(handle, tree, path, newext);
-+ if (err)
-+ goto cleanup;
-+ depth = EXT_DEPTH(tree);
-+ eh = path[depth].p_hdr;
-+
-+has_space:
-+ nearex = path[depth].p_ext;
-+
-+ if ((err = ext3_ext_get_access(handle, tree, path + depth)))
-+ goto cleanup;
-+
-+ if (!nearex) {
-+ /* there is no extent in this leaf, create first one */
-+ ext_debug(tree, "first extent in the leaf: %d:%d:%d\n",
-+ newext->ee_block, newext->ee_start,
-+ newext->ee_len);
-+ path[depth].p_ext = EXT_FIRST_EXTENT(eh);
-+ } else if (newext->ee_block > nearex->ee_block) {
-+ EXT_ASSERT(newext->ee_block != nearex->ee_block);
-+ if (nearex != EXT_LAST_EXTENT(eh)) {
-+ len = EXT_MAX_EXTENT(eh) - nearex;
-+ len = (len - 1) * sizeof(struct ext3_extent);
-+ len = len < 0 ? 0 : len;
-+ ext_debug(tree, "insert %d:%d:%d after: nearest 0x%p, "
-+ "move %d from 0x%p to 0x%p\n",
-+ newext->ee_block, newext->ee_start,
-+ newext->ee_len,
-+ nearex, len, nearex + 1, nearex + 2);
-+ memmove(nearex + 2, nearex + 1, len);
-+ }
-+ path[depth].p_ext = nearex + 1;
-+ } else {
-+ EXT_ASSERT(newext->ee_block != nearex->ee_block);
-+ len = (EXT_MAX_EXTENT(eh) - nearex) * sizeof(struct ext3_extent);
-+ len = len < 0 ? 0 : len;
-+ ext_debug(tree, "insert %d:%d:%d before: nearest 0x%p, "
-+ "move %d from 0x%p to 0x%p\n",
-+ newext->ee_block, newext->ee_start, newext->ee_len,
-+ nearex, len, nearex + 1, nearex + 2);
-+ memmove(nearex + 1, nearex, len);
-+ path[depth].p_ext = nearex;
-+ }
-+
-+ eh->eh_entries++;
-+ nearex = path[depth].p_ext;
-+ nearex->ee_block = newext->ee_block;
-+ nearex->ee_start = newext->ee_start;
-+ nearex->ee_len = newext->ee_len;
-+ /* FIXME: support for large fs */
-+ nearex->ee_start_hi = 0;
-+
-+merge:
-+ /* try to merge extents to the right */
-+ while (nearex < EXT_LAST_EXTENT(eh)) {
-+ if (!ext3_can_extents_be_merged(tree, nearex, nearex + 1))
-+ break;
-+ /* merge with next extent! */
-+ nearex->ee_len += nearex[1].ee_len;
-+ if (nearex + 1 < EXT_LAST_EXTENT(eh)) {
-+ len = (EXT_LAST_EXTENT(eh) - nearex - 1) *
-+ sizeof(struct ext3_extent);
-+ memmove(nearex + 1, nearex + 2, len);
-+ }
-+ eh->eh_entries--;
-+ EXT_ASSERT(eh->eh_entries > 0);
-+ }
-+
-+ /* try to merge extents to the left */
-+
-+ /* time to correct all indexes above */
-+ err = ext3_ext_correct_indexes(handle, tree, path);
-+ if (err)
-+ goto cleanup;
-+
-+ err = ext3_ext_dirty(handle, tree, path + depth);
-+
-+cleanup:
-+ if (npath) {
-+ ext3_ext_drop_refs(npath);
-+ kfree(npath);
-+ }
-+ ext3_ext_tree_changed(tree);
-+ ext3_ext_invalidate_cache(tree);
-+ return err;
-+}
-+
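-+/*
-+ * walk the logical block range [block, block+num) and call 'func'
-+ * once for every extent or gap found; the callback return value
-+ * decides whether the walk continues, repeats the step or stops
-+ */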
-+int ext3_ext_walk_space(struct ext3_extents_tree *tree, unsigned long block,
-+ unsigned long num, ext_prepare_callback func)
-+{
-+ struct ext3_ext_path *path = NULL;
-+ struct ext3_ext_cache cbex;
-+ struct ext3_extent *ex;
-+ unsigned long next, start = 0, end = 0;
-+ unsigned long last = block + num;
-+ int depth, exists, err = 0;
-+
-+ EXT_ASSERT(tree);
-+ EXT_ASSERT(func);
-+ EXT_ASSERT(tree->inode);
-+ EXT_ASSERT(tree->root);
-+
-+ while (block < last && block != EXT_MAX_BLOCK) {
-+ num = last - block;
-+ /* find extent for this block */
-+ path = ext3_ext_find_extent(tree, block, path);
-+ if (IS_ERR(path)) {
-+ err = PTR_ERR(path);
-+ path = NULL;
-+ break;
-+ }
-+
-+ depth = EXT_DEPTH(tree);
-+ EXT_ASSERT(path[depth].p_hdr);
-+ ex = path[depth].p_ext;
-+ next = ext3_ext_next_allocated_block(path);
-+
-+ exists = 0;
-+ if (!ex) {
-+ /* there is no extent yet, so try to allocate
-+ * all requested space */
-+ start = block;
-+ end = block + num;
-+ } else if (ex->ee_block > block) {
-+ /* need to allocate space before found extent */
-+ start = block;
-+ end = ex->ee_block;
-+ if (block + num < end)
-+ end = block + num;
-+ } else if (block >= ex->ee_block + ex->ee_len) {
-+ /* need to allocate space after found extent */
-+ start = block;
-+ end = block + num;
-+ if (end >= next)
-+ end = next;
-+ } else if (block >= ex->ee_block) {
-+ /*
-+ * some part of requested space is covered
-+ * by found extent
-+ */
-+ start = block;
-+ end = ex->ee_block + ex->ee_len;
-+ if (block + num < end)
-+ end = block + num;
-+ exists = 1;
-+ } else {
-+ BUG();
-+ }
-+ EXT_ASSERT(end > start);
-+
-+ if (!exists) {
-+ cbex.ec_block = start;
-+ cbex.ec_len = end - start;
-+ cbex.ec_start = 0;
-+ cbex.ec_type = EXT3_EXT_CACHE_GAP;
-+ } else {
-+ cbex.ec_block = ex->ee_block;
-+ cbex.ec_len = ex->ee_len;
-+ cbex.ec_start = ex->ee_start;
-+ cbex.ec_type = EXT3_EXT_CACHE_EXTENT;
-+ }
-+
-+ EXT_ASSERT(cbex.ec_len > 0);
-+ EXT_ASSERT(path[depth].p_hdr);
-+ err = func(tree, path, &cbex);
-+ ext3_ext_drop_refs(path);
-+
-+ if (err < 0)
-+ break;
-+ if (err == EXT_REPEAT)
-+ continue;
-+ else if (err == EXT_BREAK) {
-+ err = 0;
-+ break;
-+ }
-+
-+ if (EXT_DEPTH(tree) != depth) {
-+ /* depth was changed. we have to realloc path */
-+ kfree(path);
-+ path = NULL;
-+ }
-+
-+ block = cbex.ec_block + cbex.ec_len;
-+ }
-+
-+ if (path) {
-+ ext3_ext_drop_refs(path);
-+ kfree(path);
-+ }
-+
-+ return err;
-+}
-+
-+static inline void
-+ext3_ext_put_in_cache(struct ext3_extents_tree *tree, __u32 block,
-+ __u32 len, __u32 start, int type)
-+{
-+ EXT_ASSERT(len > 0);
-+ if (tree->cex) {
-+ tree->cex->ec_type = type;
-+ tree->cex->ec_block = block;
-+ tree->cex->ec_len = len;
-+ tree->cex->ec_start = start;
-+ }
-+}
-+
-+/*
-+ * this routine calculates the boundaries of the gap the requested block
-+ * fits into and caches that gap
-+ */
-+static inline void
-+ext3_ext_put_gap_in_cache(struct ext3_extents_tree *tree,
-+ struct ext3_ext_path *path,
-+ unsigned long block)
-+{
-+ int depth = EXT_DEPTH(tree);
-+ unsigned long lblock, len;
-+ struct ext3_extent *ex;
-+
-+ if (!tree->cex)
-+ return;
-+
-+ ex = path[depth].p_ext;
-+ if (ex == NULL) {
-+ /* there is no extent yet, so gap is [0;-] */
-+ lblock = 0;
-+ len = EXT_MAX_BLOCK;
-+ ext_debug(tree, "cache gap(whole file):");
-+ } else if (block < ex->ee_block) {
-+ lblock = block;
-+ len = ex->ee_block - block;
-+ ext_debug(tree, "cache gap(before): %lu [%lu:%lu]",
-+ (unsigned long) block,
-+ (unsigned long) ex->ee_block,
-+ (unsigned long) ex->ee_len);
-+ } else if (block >= ex->ee_block + ex->ee_len) {
-+ lblock = ex->ee_block + ex->ee_len;
-+ len = ext3_ext_next_allocated_block(path);
-+ ext_debug(tree, "cache gap(after): [%lu:%lu] %lu",
-+ (unsigned long) ex->ee_block,
-+ (unsigned long) ex->ee_len,
-+ (unsigned long) block);
-+ EXT_ASSERT(len > lblock);
-+ len = len - lblock;
-+ } else {
-+ lblock = len = 0;
-+ BUG();
-+ }
-+
-+ ext_debug(tree, " -> %lu:%lu\n", (unsigned long) lblock, len);
-+ ext3_ext_put_in_cache(tree, lblock, len, 0, EXT3_EXT_CACHE_GAP);
-+}
-+
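-+/*
-+ * check the single-entry extent cache for 'block'; fills 'ex' and
-+ * returns the cache entry type when the block is covered
-+ */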
-+static inline int
-+ext3_ext_in_cache(struct ext3_extents_tree *tree, unsigned long block,
-+ struct ext3_extent *ex)
-+{
-+ struct ext3_ext_cache *cex = tree->cex;
-+
-+ /* is there cache storage at all? */
-+ if (!cex)
-+ return EXT3_EXT_CACHE_NO;
-+
-+ /* does the cache hold valid data? */
-+ if (cex->ec_type == EXT3_EXT_CACHE_NO)
-+ return EXT3_EXT_CACHE_NO;
-+
-+ EXT_ASSERT(cex->ec_type == EXT3_EXT_CACHE_GAP ||
-+ cex->ec_type == EXT3_EXT_CACHE_EXTENT);
-+ if (block >= cex->ec_block && block < cex->ec_block + cex->ec_len) {
-+ ex->ee_block = cex->ec_block;
-+ ex->ee_start = cex->ec_start;
-+ ex->ee_start_hi = 0;
-+ ex->ee_len = cex->ec_len;
-+ ext_debug(tree, "%lu cached by %lu:%lu:%lu\n",
-+ (unsigned long) block,
-+ (unsigned long) ex->ee_block,
-+ (unsigned long) ex->ee_len,
-+ (unsigned long) ex->ee_start);
-+ return cex->ec_type;
-+ }
-+
-+ /* not in cache */
-+ return EXT3_EXT_CACHE_NO;
-+}
-+
-+/*
-+ * this routine removes an index from the index block.
-+ * it's used in the truncate case only, thus all requests are for
-+ * the last index in the block only
-+ */
-+int ext3_ext_rm_idx(handle_t *handle, struct ext3_extents_tree *tree,
-+ struct ext3_ext_path *path)
-+{
-+ struct buffer_head *bh;
-+ int err;
-+
-+ /* free index block */
-+ path--;
-+ EXT_ASSERT(path->p_hdr->eh_entries);
-+ if ((err = ext3_ext_get_access(handle, tree, path)))
-+ return err;
-+ path->p_hdr->eh_entries--;
-+ if ((err = ext3_ext_dirty(handle, tree, path)))
-+ return err;
-+ ext_debug(tree, "index is empty, remove it, free block %d\n",
-+ path->p_idx->ei_leaf);
-+ bh = sb_find_get_block(tree->inode->i_sb, path->p_idx->ei_leaf);
-+ ext3_forget(handle, 1, tree->inode, bh, path->p_idx->ei_leaf);
-+ ext3_free_blocks(handle, tree->inode, path->p_idx->ei_leaf, 1);
-+ return err;
-+}
-+
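-+/*
-+ * estimate the journal credits needed to insert one extent:
-+ * 1 if the leaf still has room, otherwise enough for a possible
-+ * root growth plus an index split at every level
-+ */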
-+int ext3_ext_calc_credits_for_insert(struct ext3_extents_tree *tree,
-+ struct ext3_ext_path *path)
-+{
-+ int depth = EXT_DEPTH(tree);
-+ int needed;
-+
-+ if (path) {
-+ /* is there space left in the leaf? */
-+ if (path[depth].p_hdr->eh_entries < path[depth].p_hdr->eh_max)
-+ return 1;
-+ }
-+
-+ /*
-+ * the worst case we expect is creation of a
-+ * new root (growing in depth) with index splitting;
-+ * for the splitting we have to consider depth + 1 because
-+ * the previous growing could have increased it
-+ */
-+ depth = depth + 1;
-+
-+ /*
-+ * growing in depth:
-+ * block allocation + new root + old root
-+ */
-+ needed = EXT3_ALLOC_NEEDED + 2;
-+
-+ /* index split. we may need to:
-+ * allocate intermediate indexes and a new leaf,
-+ * change two blocks at each level except the root,
-+ * modify the root block (inode)
-+ */
-+ needed += (depth * EXT3_ALLOC_NEEDED) + (2 * depth) + 1;
-+
-+ return needed;
-+}
-+
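-+/*
-+ * split an extent so that the range [start, end] can be removed from
-+ * its middle: the head keeps the original slot, the tail is
-+ * re-inserted as a new extent
-+ */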
-+static int
-+ext3_ext_split_for_rm(handle_t *handle, struct ext3_extents_tree *tree,
-+ struct ext3_ext_path *path, unsigned long start,
-+ unsigned long end)
-+{
-+ struct ext3_extent *ex, tex;
-+ struct ext3_ext_path *npath;
-+ int depth, creds, err;
-+
-+ depth = EXT_DEPTH(tree);
-+ ex = path[depth].p_ext;
-+ EXT_ASSERT(ex);
-+ EXT_ASSERT(end < ex->ee_block + ex->ee_len - 1);
-+ EXT_ASSERT(ex->ee_block < start);
-+
-+ /* calculate tail extent */
-+ tex.ee_block = end + 1;
-+ EXT_ASSERT(tex.ee_block < ex->ee_block + ex->ee_len);
-+ tex.ee_len = ex->ee_block + ex->ee_len - tex.ee_block;
-+
-+ creds = ext3_ext_calc_credits_for_insert(tree, path);
-+ handle = ext3_ext_journal_restart(handle, creds);
-+ if (IS_ERR(handle))
-+ return PTR_ERR(handle);
-+
-+ /* calculate head extent. use primary extent */
-+ err = ext3_ext_get_access(handle, tree, path + depth);
-+ if (err)
-+ return err;
-+ ex->ee_len = start - ex->ee_block;
-+ err = ext3_ext_dirty(handle, tree, path + depth);
-+ if (err)
-+ return err;
-+
-+ /* FIXME: some callback to free underlying resource
-+ * and correct ee_start? */
-+ ext_debug(tree, "split extent: head %u:%u, tail %u:%u\n",
-+ ex->ee_block, ex->ee_len, tex.ee_block, tex.ee_len);
-+
-+ npath = ext3_ext_find_extent(tree, ex->ee_block, NULL);
-+ if (IS_ERR(npath))
-+ return PTR_ERR(npath);
-+ depth = EXT_DEPTH(tree);
-+ EXT_ASSERT(npath[depth].p_ext->ee_block == ex->ee_block);
-+ EXT_ASSERT(npath[depth].p_ext->ee_len == ex->ee_len);
-+
-+ err = ext3_ext_insert_extent(handle, tree, npath, &tex);
-+ ext3_ext_drop_refs(npath);
-+ kfree(npath);
-+
-+ return err;
-+}
-+
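-+/*
-+ * remove the blocks in [start, end] from the leaf referenced by
-+ * 'path', splitting an extent if the range falls strictly inside it
-+ * and compacting the remaining entries afterwards
-+ */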
-+static int
-+ext3_ext_rm_leaf(handle_t *handle, struct ext3_extents_tree *tree,
-+ struct ext3_ext_path *path, unsigned long start,
-+ unsigned long end)
-+{
-+ struct ext3_extent *ex, *fu = NULL, *lu, *le;
-+ int err = 0, correct_index = 0;
-+ int depth = EXT_DEPTH(tree), credits;
-+ struct ext3_extent_header *eh;
-+ unsigned a, b, block, num;
-+
-+ ext_debug(tree, "remove [%lu:%lu] in leaf\n", start, end);
-+ if (!path[depth].p_hdr)
-+ path[depth].p_hdr = EXT_BLOCK_HDR(path[depth].p_bh);
-+ eh = path[depth].p_hdr;
-+ EXT_ASSERT(eh);
-+ EXT_ASSERT(eh->eh_entries <= eh->eh_max);
-+ EXT_ASSERT(eh->eh_magic == EXT3_EXT_MAGIC);
-+
-+ /* find where to start removing */
-+ le = ex = EXT_LAST_EXTENT(eh);
-+ while (ex != EXT_FIRST_EXTENT(eh)) {
-+ if (ex->ee_block <= end)
-+ break;
-+ ex--;
-+ }
-+
-+ if (start > ex->ee_block && end < ex->ee_block + ex->ee_len - 1) {
-+ /* removal of an internal part of the extent was requested;
-+ * the tail and the head must be placed in different extents,
-+ * so we have to insert one more extent */
-+ path[depth].p_ext = ex;
-+ return ext3_ext_split_for_rm(handle, tree, path, start, end);
-+ }
-+
-+ lu = ex;
-+ while (ex >= EXT_FIRST_EXTENT(eh) && ex->ee_block + ex->ee_len > start) {
-+ ext_debug(tree, "remove ext %u:%u\n", ex->ee_block, ex->ee_len);
-+ path[depth].p_ext = ex;
-+
-+ a = ex->ee_block > start ? ex->ee_block : start;
-+ b = ex->ee_block + ex->ee_len - 1 < end ?
-+ ex->ee_block + ex->ee_len - 1 : end;
-+
-+ ext_debug(tree, " border %u:%u\n", a, b);
-+
-+ if (a != ex->ee_block && b != ex->ee_block + ex->ee_len - 1) {
-+ block = 0;
-+ num = 0;
-+ BUG();
-+ } else if (a != ex->ee_block) {
-+ /* remove tail of the extent */
-+ block = ex->ee_block;
-+ num = a - block;
-+ } else if (b != ex->ee_block + ex->ee_len - 1) {
-+ /* remove head of the extent */
-+ block = a;
-+ num = b - a;
-+ } else {
-+ /* remove the whole extent: excellent! */
-+ block = ex->ee_block;
-+ num = 0;
-+ EXT_ASSERT(a == ex->ee_block &&
-+ b == ex->ee_block + ex->ee_len - 1);
-+ }
-+
-+ if (ex == EXT_FIRST_EXTENT(eh))
-+ correct_index = 1;
-+
-+ credits = 1;
-+ if (correct_index)
-+ credits += (EXT_DEPTH(tree) * EXT3_ALLOC_NEEDED) + 1;
-+ if (tree->ops->remove_extent_credits)
-+ credits+=tree->ops->remove_extent_credits(tree,ex,a,b);
-+
-+ handle = ext3_ext_journal_restart(handle, credits);
-+ if (IS_ERR(handle)) {
-+ err = PTR_ERR(handle);
-+ goto out;
-+ }
-+
-+ err = ext3_ext_get_access(handle, tree, path + depth);
-+ if (err)
-+ goto out;
-+
-+ if (tree->ops->remove_extent)
-+ err = tree->ops->remove_extent(tree, ex, a, b);
-+ if (err)
-+ goto out;
-+
-+ if (num == 0) {
-+ /* this extent is removed entirely, so mark the slot unused */
-+ ex->ee_start = ex->ee_start_hi = 0;
-+ eh->eh_entries--;
-+ fu = ex;
-+ }
-+
-+ ex->ee_block = block;
-+ ex->ee_len = num;
-+
-+ err = ext3_ext_dirty(handle, tree, path + depth);
-+ if (err)
-+ goto out;
-+
-+ ext_debug(tree, "new extent: %u:%u:%u\n",
-+ ex->ee_block, ex->ee_len, ex->ee_start);
-+ ex--;
-+ }
-+
-+ if (fu) {
-+ /* reuse unused slots */
-+ while (lu < le) {
-+ if (lu->ee_start) {
-+ *fu = *lu;
-+ lu->ee_start = lu->ee_start_hi = 0;
-+ fu++;
-+ }
-+ lu++;
-+ }
-+ }
-+
-+ if (correct_index && eh->eh_entries)
-+ err = ext3_ext_correct_indexes(handle, tree, path);
-+
-+ /* if this leaf is free, then we should
-+ * remove it from index block above */
-+ if (err == 0 && eh->eh_entries == 0 && path[depth].p_bh != NULL)
-+ err = ext3_ext_rm_idx(handle, tree, path + depth);
-+
-+out:
-+ return err;
-+}
-+
-+
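-+/* find the last index in 'hdr' whose ei_block does not exceed 'block' */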
-+static struct ext3_extent_idx *
-+ext3_ext_last_covered(struct ext3_extent_header *hdr, unsigned long block)
-+{
-+ struct ext3_extent_idx *ix;
-+
-+ ix = EXT_LAST_INDEX(hdr);
-+ while (ix != EXT_FIRST_INDEX(hdr)) {
-+ if (ix->ei_block <= block)
-+ break;
-+ ix--;
-+ }
-+ return ix;
-+}
-+
-+/*
-+ * returns 1 if the current index has to be freed (even partially)
-+ */
-+static inline int
-+ext3_ext_more_to_rm(struct ext3_ext_path *path)
-+{
-+ EXT_ASSERT(path->p_idx);
-+
-+ if (path->p_idx < EXT_FIRST_INDEX(path->p_hdr))
-+ return 0;
-+
-+ /*
-+ * if truncation at a deeper level happened, it wasn't partial,
-+ * so we have to consider the current index for truncation
-+ */
-+ if (path->p_hdr->eh_entries == path->p_block)
-+ return 0;
-+ return 1;
-+}
-+
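-+/*
-+ * remove all blocks in the logical range [start, end] from the tree,
-+ * walking it iteratively from the root and freeing emptied leaves
-+ * and index blocks on the way back up
-+ */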
-+int ext3_ext_remove_space(struct ext3_extents_tree *tree,
-+ unsigned long start, unsigned long end)
-+{
-+ struct inode *inode = tree->inode;
-+ struct super_block *sb = inode->i_sb;
-+ int depth = EXT_DEPTH(tree);
-+ struct ext3_ext_path *path;
-+ handle_t *handle;
-+ int i = 0, err = 0;
-+
-+ ext_debug(tree, "space to be removed: %lu:%lu\n", start, end);
-+
-+ /* the first extent we free will probably be the last in its block */
-+ handle = ext3_journal_start(inode, depth + 1);
-+ if (IS_ERR(handle))
-+ return PTR_ERR(handle);
-+
-+ ext3_ext_invalidate_cache(tree);
-+
-+ /*
-+ * we start scanning from the right side, freeing all the blocks
-+ * after i_size and walking down into the tree
-+ */
-+ path = kmalloc(sizeof(struct ext3_ext_path) * (depth + 1), GFP_KERNEL);
-+ if (path == NULL) {
-+ ext3_error(sb, __FUNCTION__, "Can't allocate path array");
-+ ext3_journal_stop(handle);
-+ return -ENOMEM;
-+ }
-+ memset(path, 0, sizeof(struct ext3_ext_path) * (depth + 1));
-+ path[i].p_hdr = EXT_ROOT_HDR(tree);
-+
-+ while (i >= 0 && err == 0) {
-+ if (i == depth) {
-+ /* this is leaf block */
-+ err = ext3_ext_rm_leaf(handle, tree, path, start, end);
-+ /* the root level has p_bh == NULL, brelse() eats this */
-+ brelse(path[i].p_bh);
-+ i--;
-+ continue;
-+ }
-+
-+ /* this is index block */
-+ if (!path[i].p_hdr) {
-+ ext_debug(tree, "initialize header\n");
-+ path[i].p_hdr = EXT_BLOCK_HDR(path[i].p_bh);
-+ }
-+
-+ EXT_ASSERT(path[i].p_hdr->eh_entries <= path[i].p_hdr->eh_max);
-+ EXT_ASSERT(path[i].p_hdr->eh_magic == EXT3_EXT_MAGIC);
-+
-+ if (!path[i].p_idx) {
-+ /* this level hasn't been touched yet */
-+ path[i].p_idx =
-+ ext3_ext_last_covered(path[i].p_hdr, end);
-+ path[i].p_block = path[i].p_hdr->eh_entries + 1;
-+ ext_debug(tree, "init index ptr: hdr 0x%p, num %d\n",
-+ path[i].p_hdr, path[i].p_hdr->eh_entries);
-+ } else {
-+ /* we've already been here, look at the next index */
-+ path[i].p_idx--;
-+ }
-+
-+ ext_debug(tree, "level %d - index, first 0x%p, cur 0x%p\n",
-+ i, EXT_FIRST_INDEX(path[i].p_hdr),
-+ path[i].p_idx);
-+ if (ext3_ext_more_to_rm(path + i)) {
-+ /* go to the next level */
-+ ext_debug(tree, "move to level %d (block %d)\n",
-+ i + 1, path[i].p_idx->ei_leaf);
-+ memset(path + i + 1, 0, sizeof(*path));
-+ path[i+1].p_bh = sb_bread(sb, path[i].p_idx->ei_leaf);
-+ if (!path[i+1].p_bh) {
-+ /* should we reset i_size? */
-+ err = -EIO;
-+ break;
-+ }
-+ /* record the actual number of indexes so we can tell whether
-+ * this number changes at the next iteration */
-+ path[i].p_block = path[i].p_hdr->eh_entries;
-+ i++;
-+ } else {
-+ /* we finish processing this index, go up */
-+ if (path[i].p_hdr->eh_entries == 0 && i > 0) {
-+ /* index is empty, remove it;
-+ * the handle must already be prepared by
-+ * truncatei_leaf() */
-+ err = ext3_ext_rm_idx(handle, tree, path + i);
-+ }
-+ /* the root level has p_bh == NULL, brelse() eats this */
-+ brelse(path[i].p_bh);
-+ i--;
-+ ext_debug(tree, "return to level %d\n", i);
-+ }
-+ }
-+
-+ /* TODO: flexible tree reduction should be here */
-+ if (path->p_hdr->eh_entries == 0) {
-+ /*
-+ * truncating to zero freed the whole tree,
-+ * so we need to correct eh_depth
-+ */
-+ err = ext3_ext_get_access(handle, tree, path);
-+ if (err == 0) {
-+ EXT_ROOT_HDR(tree)->eh_depth = 0;
-+ EXT_ROOT_HDR(tree)->eh_max = ext3_ext_space_root(tree);
-+ err = ext3_ext_dirty(handle, tree, path);
-+ }
-+ }
-+ ext3_ext_tree_changed(tree);
-+
-+ kfree(path);
-+ ext3_journal_stop(handle);
-+
-+ return err;
-+}
-+
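-+/*
-+ * estimate how many metadata blocks (leaves plus index blocks) are
-+ * needed to map the given number of blocks, assuming the worst case
-+ * of one extent per block
-+ */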
-+int ext3_ext_calc_metadata_amount(struct ext3_extents_tree *tree, int blocks)
-+{
-+ int lcap, icap, rcap, leafs, idxs, num;
-+
-+ rcap = ext3_ext_space_root(tree);
-+ if (blocks <= rcap) {
-+ /* all extents fit to the root */
-+ return 0;
-+ }
-+
-+ rcap = ext3_ext_space_root_idx(tree);
-+ lcap = ext3_ext_space_block(tree);
-+ icap = ext3_ext_space_block_idx(tree);
-+
-+ num = leafs = (blocks + lcap - 1) / lcap;
-+ if (leafs <= rcap) {
-+ /* all pointers to leaves fit in the root */
-+ return leafs;
-+ }
-+
-+ /* ok. we need separate index block(s) to link all leaf blocks */
-+ idxs = (leafs + icap - 1) / icap;
-+ do {
-+ num += idxs;
-+ idxs = (idxs + icap - 1) / icap;
-+ } while (idxs > rcap);
-+
-+ return num;
-+}
-+
-+/*
-+ * called at mount time
-+ */
-+void ext3_ext_init(struct super_block *sb)
-+{
-+ /*
-+ * possible initialization would be here
-+ */
-+
-+ if (test_opt(sb, EXTENTS)) {
-+ printk("EXT3-fs: file extents enabled");
-+#ifdef AGRESSIVE_TEST
-+ printk(", agressive tests");
-+#endif
-+#ifdef CHECK_BINSEARCH
-+ printk(", check binsearch");
-+#endif
-+ printk("\n");
-+ }
-+}
-+
-+/*
-+ * called at umount time
-+ */
-+void ext3_ext_release(struct super_block *sb)
-+{
-+}
-+
-+/************************************************************************
-+ * VFS related routines
-+ ************************************************************************/
-+
-+static int ext3_get_inode_write_access(handle_t *handle, void *buffer)
-+{
-+ /* we use in-core data, not bh */
-+ return 0;
-+}
-+
-+static int ext3_mark_buffer_dirty(handle_t *handle, void *buffer)
-+{
-+ struct inode *inode = buffer;
-+ return ext3_mark_inode_dirty(handle, inode);
-+}
-+
-+static int ext3_ext_mergable(struct ext3_extent *ex1,
-+ struct ext3_extent *ex2)
-+{
-+ /* FIXME: support for large fs */
-+ if (ex1->ee_start + ex1->ee_len == ex2->ee_start)
-+ return 1;
-+ return 0;
-+}
-+
-+static int
-+ext3_remove_blocks_credits(struct ext3_extents_tree *tree,
-+ struct ext3_extent *ex,
-+ unsigned long from, unsigned long to)
-+{
-+ int needed;
-+
-+ /* at present, an extent can't cross a block group boundary */
-+ needed = 4; /* bitmap + group desc + sb + inode */
-+
-+#ifdef CONFIG_QUOTA
-+ needed += 2 * EXT3_SINGLEDATA_TRANS_BLOCKS;
-+#endif
-+ return needed;
-+}
-+
-+static int
-+ext3_remove_blocks(struct ext3_extents_tree *tree,
-+ struct ext3_extent *ex,
-+ unsigned long from, unsigned long to)
-+{
-+ int needed = ext3_remove_blocks_credits(tree, ex, from, to);
-+ handle_t *handle = ext3_journal_start(tree->inode, needed);
-+ struct buffer_head *bh;
-+ int i;
-+
-+ if (IS_ERR(handle))
-+ return PTR_ERR(handle);
-+ if (from >= ex->ee_block && to == ex->ee_block + ex->ee_len - 1) {
-+ /* tail removal */
-+ unsigned long num, start;
-+ num = ex->ee_block + ex->ee_len - from;
-+ start = ex->ee_start + ex->ee_len - num;
-+ ext_debug(tree, "free last %lu blocks starting %lu\n",
-+ num, start);
-+ for (i = 0; i < num; i++) {
-+ bh = sb_find_get_block(tree->inode->i_sb, start + i);
-+ ext3_forget(handle, 0, tree->inode, bh, start + i);
-+ }
-+ ext3_free_blocks(handle, tree->inode, start, num);
-+ } else if (from == ex->ee_block && to <= ex->ee_block + ex->ee_len - 1) {
-+ printk("strange request: removal %lu-%lu from %u:%u\n",
-+ from, to, ex->ee_block, ex->ee_len);
-+ } else {
-+ printk("strange request: removal(2) %lu-%lu from %u:%u\n",
-+ from, to, ex->ee_block, ex->ee_len);
-+ }
-+ ext3_journal_stop(handle);
-+ return 0;
-+}
-+
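-+/* pick a goal physical block for allocation, preferably near existing extents */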
-+static int ext3_ext_find_goal(struct inode *inode,
-+ struct ext3_ext_path *path, unsigned long block)
-+{
-+ struct ext3_inode_info *ei = EXT3_I(inode);
-+ unsigned long bg_start;
-+ unsigned long colour;
-+ int depth;
-+
-+ if (path) {
-+ struct ext3_extent *ex;
-+ depth = path->p_depth;
-+
-+ /* try to predict block placement */
-+ if ((ex = path[depth].p_ext))
-+ return ex->ee_start + (block - ex->ee_block);
-+
-+ /* it looks like the index is empty;
-+ * try to find a block starting from the index block itself */
-+ if (path[depth].p_bh)
-+ return path[depth].p_bh->b_blocknr;
-+ }
-+
-+ /* OK. use inode's group */
-+ bg_start = (ei->i_block_group * EXT3_BLOCKS_PER_GROUP(inode->i_sb)) +
-+ le32_to_cpu(EXT3_SB(inode->i_sb)->s_es->s_first_data_block);
-+ colour = (current->pid % 16) *
-+ (EXT3_BLOCKS_PER_GROUP(inode->i_sb) / 16);
-+ return bg_start + colour + block;
-+}
-+
-+static int ext3_new_block_cb(handle_t *handle, struct ext3_extents_tree *tree,
-+ struct ext3_ext_path *path,
-+ struct ext3_extent *ex, int *err)
-+{
-+ struct inode *inode = tree->inode;
-+ int newblock, goal;
-+
-+ EXT_ASSERT(path);
-+ EXT_ASSERT(ex);
-+ EXT_ASSERT(ex->ee_start);
-+ EXT_ASSERT(ex->ee_len);
-+
-+ /* reuse block from the extent to order data/metadata */
-+ newblock = ex->ee_start++;
-+ ex->ee_len--;
-+ if (ex->ee_len == 0) {
-+ ex->ee_len = 1;
-+ /* allocate new block for the extent */
-+ goal = ext3_ext_find_goal(inode, path, ex->ee_block);
-+ ex->ee_start = ext3_new_block(handle, inode, goal, err);
-+ ex->ee_start_hi = 0;
-+ if (ex->ee_start == 0) {
-+ /* an error occurred: restore the old extent */
-+ ex->ee_start = newblock;
-+ return 0;
-+ }
-+ }
-+ return newblock;
-+}
-+
-+static struct ext3_extents_helpers ext3_blockmap_helpers = {
-+ .get_write_access = ext3_get_inode_write_access,
-+ .mark_buffer_dirty = ext3_mark_buffer_dirty,
-+ .mergable = ext3_ext_mergable,
-+ .new_block = ext3_new_block_cb,
-+ .remove_extent = ext3_remove_blocks,
-+ .remove_extent_credits = ext3_remove_blocks_credits,
-+};
-+
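-+/* set up an extents-tree descriptor backed by the inode's i_data array */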
-+void ext3_init_tree_desc(struct ext3_extents_tree *tree,
-+ struct inode *inode)
-+{
-+ tree->inode = inode;
-+ tree->root = (void *) EXT3_I(inode)->i_data;
-+ tree->buffer = (void *) inode;
-+ tree->buffer_len = sizeof(EXT3_I(inode)->i_data);
-+ tree->cex = (struct ext3_ext_cache *) &EXT3_I(inode)->i_cached_extent;
-+ tree->ops = &ext3_blockmap_helpers;
-+}
-+
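-+/*
-+ * map logical block 'iblock' of the inode to a physical block via the
-+ * extent tree; when 'create' is set and no mapping exists, allocate a
-+ * block and insert a new one-block extent
-+ */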
-+int ext3_ext_get_block(handle_t *handle, struct inode *inode,
-+ long iblock, struct buffer_head *bh_result,
-+ int create, int extend_disksize)
-+{
-+ struct ext3_ext_path *path = NULL;
-+ struct ext3_extent newex;
-+ struct ext3_extent *ex;
-+ int goal, newblock, err = 0, depth;
-+ struct ext3_extents_tree tree;
-+
-+ clear_buffer_new(bh_result);
-+ ext3_init_tree_desc(&tree, inode);
-+ ext_debug(&tree, "block %d requested for inode %u\n",
-+ (int) iblock, (unsigned) inode->i_ino);
-+ mutex_lock(&EXT3_I(inode)->truncate_mutex);
-+
-+ /* check in cache */
-+ if ((goal = ext3_ext_in_cache(&tree, iblock, &newex))) {
-+ if (goal == EXT3_EXT_CACHE_GAP) {
-+ if (!create) {
-+ /* the block isn't allocated yet and
-+ * the user doesn't want to allocate it */
-+ goto out2;
-+ }
-+ /* we should allocate requested block */
-+ } else if (goal == EXT3_EXT_CACHE_EXTENT) {
-+ /* block is already allocated */
-+ newblock = iblock - newex.ee_block + newex.ee_start;
-+ goto out;
-+ } else {
-+ EXT_ASSERT(0);
-+ }
-+ }
-+
-+ /* find extent for this block */
-+ path = ext3_ext_find_extent(&tree, iblock, NULL);
-+ if (IS_ERR(path)) {
-+ err = PTR_ERR(path);
-+ path = NULL;
-+ goto out2;
-+ }
-+
-+ depth = EXT_DEPTH(&tree);
-+
-+ /*
-+ * a consistent leaf must not be empty;
-+ * this situation is possible, though, _during_ tree modification;
-+ * this is why the assert can't be put in ext3_ext_find_extent()
-+ */
-+ EXT_ASSERT(path[depth].p_ext != NULL || depth == 0);
-+
-+ if ((ex = path[depth].p_ext)) {
-+ /* if the found extent covers the block, simply return it */
-+ if (iblock >= ex->ee_block && iblock < ex->ee_block + ex->ee_len) {
-+ newblock = iblock - ex->ee_block + ex->ee_start;
-+ ext_debug(&tree, "%d fit into %d:%d -> %d\n",
-+ (int) iblock, ex->ee_block, ex->ee_len,
-+ newblock);
-+ ext3_ext_put_in_cache(&tree, ex->ee_block,
-+ ex->ee_len, ex->ee_start,
-+ EXT3_EXT_CACHE_EXTENT);
-+ goto out;
-+ }
-+ }
-+
-+ /*
-+ * the requested block isn't allocated yet;
-+ * we can't create the block if the create flag is zero
-+ */
-+ if (!create) {
-+ /* put the just-found gap into the cache to speed up subsequent requests */
-+ ext3_ext_put_gap_in_cache(&tree, path, iblock);
-+ goto out2;
-+ }
-+
-+ /* allocate new block */
-+ goal = ext3_ext_find_goal(inode, path, iblock);
-+ newblock = ext3_new_block(handle, inode, goal, &err);
-+ if (!newblock)
-+ goto out2;
-+ ext_debug(&tree, "allocate new block: goal %d, found %d\n",
-+ goal, newblock);
-+
-+ /* try to insert new extent into found leaf and return */
-+ newex.ee_block = iblock;
-+ newex.ee_start = newblock;
-+ newex.ee_start_hi = 0;
-+ newex.ee_len = 1;
-+ err = ext3_ext_insert_extent(handle, &tree, path, &newex);
-+ if (err)
-+ goto out2;
-+
-+ if (extend_disksize && inode->i_size > EXT3_I(inode)->i_disksize)
-+ EXT3_I(inode)->i_disksize = inode->i_size;
-+
-+ /* the previous routine could have used the block we allocated */
-+ newblock = newex.ee_start;
-+ set_buffer_new(bh_result);
-+
-+ ext3_ext_put_in_cache(&tree, newex.ee_block, newex.ee_len,
-+ newex.ee_start, EXT3_EXT_CACHE_EXTENT);
-+out:
-+ ext3_ext_show_leaf(&tree, path);
-+ map_bh(bh_result, inode->i_sb, newblock);
-+out2:
-+ if (path) {
-+ ext3_ext_drop_refs(path);
-+ kfree(path);
-+ }
-+ mutex_unlock(&EXT3_I(inode)->truncate_mutex);
-+
-+ return err;
-+}
-+
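-+/*
-+ * truncate the inode down to i_size using the extent tree: zero the
-+ * partial tail page and remove all extents past the last block
-+ */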
-+void ext3_ext_truncate(struct inode * inode, struct page *page)
-+{
-+ struct address_space *mapping = inode->i_mapping;
-+ struct super_block *sb = inode->i_sb;
-+ struct ext3_extents_tree tree;
-+ unsigned long last_block;
-+ handle_t *handle;
-+ int err = 0;
-+
-+ ext3_init_tree_desc(&tree, inode);
-+
-+ /*
-+ * the first extent we free will probably be the last in its block
-+ */
-+ err = ext3_writepage_trans_blocks(inode) + 3;
-+ handle = ext3_journal_start(inode, err);
-+ if (IS_ERR(handle)) {
-+ if (page) {
-+ clear_highpage(page);
-+ flush_dcache_page(page);
-+ unlock_page(page);
-+ page_cache_release(page);
-+ }
-+ return;
-+ }
-+
-+ if (page)
-+ ext3_block_truncate_page(handle, page, mapping, inode->i_size);
-+
-+ mutex_lock(&EXT3_I(inode)->truncate_mutex);
-+ ext3_ext_invalidate_cache(&tree);
-+
-+ /*
-+ * TODO: optimization is possible here;
-+ * probably we do not need scanning at all,
-+ * because page truncation is enough
-+ */
-+ if (ext3_orphan_add(handle, inode))
-+ goto out_stop;
-+
-+ /* we have to know where to truncate from in crash case */
-+ EXT3_I(inode)->i_disksize = inode->i_size;
-+ ext3_mark_inode_dirty(handle, inode);
-+
-+ last_block = (inode->i_size + sb->s_blocksize - 1) >>
-+ EXT3_BLOCK_SIZE_BITS(sb);
-+ err = ext3_ext_remove_space(&tree, last_block, EXT_MAX_BLOCK);
-+
-+ /* In a multi-transaction truncate, we only make the final
-+ * transaction synchronous */
-+ if (IS_SYNC(inode))
-+ handle->h_sync = 1;
-+
-+out_stop:
-+ /*
-+ * If this was a simple ftruncate(), and the file will remain alive
-+ * then we need to clear up the orphan record which we created above.
-+ * However, if this was a real unlink then we were called by
-+ * ext3_delete_inode(), and we allow that function to clean up the
-+ * orphan info for us.
-+ */
-+ if (inode->i_nlink)
-+ ext3_orphan_del(handle, inode);
-+
-+ mutex_unlock(&EXT3_I(inode)->truncate_mutex);
-+ ext3_journal_stop(handle);
-+}
-+
-+/*
-+ * this routine calculates the max number of blocks we could modify
-+ * in order to allocate a new block for an inode
-+ */
-+int ext3_ext_writepage_trans_blocks(struct inode *inode, int num)
-+{
-+ struct ext3_extents_tree tree;
-+ int needed;
-+
-+ ext3_init_tree_desc(&tree, inode);
-+
-+ needed = ext3_ext_calc_credits_for_insert(&tree, NULL);
-+
-+ /* the caller wants to allocate num blocks */
-+ needed *= num;
-+
-+#ifdef CONFIG_QUOTA
-+ /*
-+ * FIXME: the real calculation should be here;
-+ * it depends on the blockmap format of the quota file
-+ */
-+ needed += 2 * EXT3_SINGLEDATA_TRANS_BLOCKS;
-+#endif
-+
-+ return needed;
-+}
-+
-+void ext3_extents_initialize_blockmap(handle_t *handle, struct inode *inode)
-+{
-+ struct ext3_extents_tree tree;
-+
-+ ext3_init_tree_desc(&tree, inode);
-+ ext3_extent_tree_init(handle, &tree);
-+}
-+
-+int ext3_ext_calc_blockmap_metadata(struct inode *inode, int blocks)
-+{
-+ struct ext3_extents_tree tree;
-+
-+ ext3_init_tree_desc(&tree, inode);
-+ return ext3_ext_calc_metadata_amount(&tree, blocks);
-+}
-+
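-+/* callback for EXT3_IOC_GET_EXTENTS: copy each found extent to user space */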
-+static int
-+ext3_ext_store_extent_cb(struct ext3_extents_tree *tree,
-+ struct ext3_ext_path *path,
-+ struct ext3_ext_cache *newex)
-+{
-+ struct ext3_extent_buf *buf = (struct ext3_extent_buf *) tree->private;
-+
-+ if (newex->ec_type != EXT3_EXT_CACHE_EXTENT)
-+ return EXT_CONTINUE;
-+
-+ if (buf->err < 0)
-+ return EXT_BREAK;
-+ if (buf->cur - buf->buffer + sizeof(*newex) > buf->buflen)
-+ return EXT_BREAK;
-+
-+ if (!copy_to_user(buf->cur, newex, sizeof(*newex))) {
-+ buf->err++;
-+ buf->cur += sizeof(*newex);
-+ } else {
-+ buf->err = -EFAULT;
-+ return EXT_BREAK;
-+ }
-+ return EXT_CONTINUE;
-+}
-+
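-+/* callback for EXT3_IOC_GET_TREE_STATS: count extents and populated leaves */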
-+static int
-+ext3_ext_collect_stats_cb(struct ext3_extents_tree *tree,
-+ struct ext3_ext_path *path,
-+ struct ext3_ext_cache *ex)
-+{
-+ struct ext3_extent_tree_stats *buf =
-+ (struct ext3_extent_tree_stats *) tree->private;
-+ int depth;
-+
-+ if (ex->ec_type != EXT3_EXT_CACHE_EXTENT)
-+ return EXT_CONTINUE;
-+
-+ depth = EXT_DEPTH(tree);
-+ buf->extents_num++;
-+ if (path[depth].p_ext == EXT_FIRST_EXTENT(path[depth].p_hdr))
-+ buf->leaf_num++;
-+ return EXT_CONTINUE;
-+}
-+
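-+/* handle the extent-tree debug ioctls: dump extents, tree stats, tree depth */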
-+int ext3_ext_ioctl(struct inode *inode, struct file *filp, unsigned int cmd,
-+ unsigned long arg)
-+{
-+ int err = 0;
-+
-+ if (!(EXT3_I(inode)->i_flags & EXT3_EXTENTS_FL))
-+ return -EINVAL;
-+
-+ if (cmd == EXT3_IOC_GET_EXTENTS) {
-+ struct ext3_extent_buf buf;
-+ struct ext3_extents_tree tree;
-+
-+ if (copy_from_user(&buf, (void *) arg, sizeof(buf)))
-+ return -EFAULT;
-+
-+ ext3_init_tree_desc(&tree, inode);
-+ buf.cur = buf.buffer;
-+ buf.err = 0;
-+ tree.private = &buf;
-+ mutex_lock(&EXT3_I(inode)->truncate_mutex);
-+ err = ext3_ext_walk_space(&tree, buf.start, EXT_MAX_BLOCK,
-+ ext3_ext_store_extent_cb);
-+ mutex_unlock(&EXT3_I(inode)->truncate_mutex);
-+ if (err == 0)
-+ err = buf.err;
-+ } else if (cmd == EXT3_IOC_GET_TREE_STATS) {
-+ struct ext3_extent_tree_stats buf;
-+ struct ext3_extents_tree tree;
-+
-+ ext3_init_tree_desc(&tree, inode);
-+ mutex_lock(&EXT3_I(inode)->truncate_mutex);
-+ buf.depth = EXT_DEPTH(&tree);
-+ buf.extents_num = 0;
-+ buf.leaf_num = 0;
-+ tree.private = &buf;
-+ err = ext3_ext_walk_space(&tree, 0, EXT_MAX_BLOCK,
-+ ext3_ext_collect_stats_cb);
-+ mutex_unlock(&EXT3_I(inode)->truncate_mutex);
-+ if (!err)
-+ err = copy_to_user((void *) arg, &buf, sizeof(buf));
-+ } else if (cmd == EXT3_IOC_GET_TREE_DEPTH) {
-+ struct ext3_extents_tree tree;
-+ ext3_init_tree_desc(&tree, inode);
-+ mutex_lock(&EXT3_I(inode)->truncate_mutex);
-+ err = EXT_DEPTH(&tree);
-+ mutex_unlock(&EXT3_I(inode)->truncate_mutex);
-+ }
-+
-+ return err;
-+}
-+
-+EXPORT_SYMBOL(ext3_init_tree_desc);
-+EXPORT_SYMBOL(ext3_mark_inode_dirty);
-+EXPORT_SYMBOL(ext3_ext_invalidate_cache);
-+EXPORT_SYMBOL(ext3_ext_insert_extent);
-+EXPORT_SYMBOL(ext3_ext_walk_space);
-+EXPORT_SYMBOL(ext3_ext_find_goal);
-+EXPORT_SYMBOL(ext3_ext_calc_credits_for_insert);
-Index: linux-stage/fs/ext3/ialloc.c
-===================================================================
---- linux-stage.orig/fs/ext3/ialloc.c 2006-07-16 13:55:31.000000000 +0800
-+++ linux-stage/fs/ext3/ialloc.c 2006-07-16 14:10:20.000000000 +0800
-@@ -600,7 +600,7 @@ got:
- ei->i_dir_start_lookup = 0;
- ei->i_disksize = 0;
-
-- ei->i_flags = EXT3_I(dir)->i_flags & ~EXT3_INDEX_FL;
-+ ei->i_flags = EXT3_I(dir)->i_flags & ~(EXT3_INDEX_FL|EXT3_EXTENTS_FL);
- if (S_ISLNK(mode))
- ei->i_flags &= ~(EXT3_IMMUTABLE_FL|EXT3_APPEND_FL);
- /* dirsync only applies to directories */
-@@ -644,6 +644,18 @@ got:
- if (err)
- goto fail_free_drop;
-
-+ if (test_opt(sb, EXTENTS) && S_ISREG(inode->i_mode)) {
-+ EXT3_I(inode)->i_flags |= EXT3_EXTENTS_FL;
-+ ext3_extents_initialize_blockmap(handle, inode);
-+ if (!EXT3_HAS_INCOMPAT_FEATURE(sb, EXT3_FEATURE_INCOMPAT_EXTENTS)) {
-+ err = ext3_journal_get_write_access(handle, EXT3_SB(sb)->s_sbh);
-+ if (err) goto fail;
-+ EXT3_SET_INCOMPAT_FEATURE(sb, EXT3_FEATURE_INCOMPAT_EXTENTS);
-+ BUFFER_TRACE(EXT3_SB(sb)->s_sbh, "call ext3_journal_dirty_metadata");
-+ err = ext3_journal_dirty_metadata(handle, EXT3_SB(sb)->s_sbh);
-+ }
-+ }
-+
- err = ext3_mark_inode_dirty(handle, inode);
- if (err) {
- ext3_std_error(sb, err);
-Index: linux-stage/fs/ext3/inode.c
-===================================================================
---- linux-stage.orig/fs/ext3/inode.c 2006-07-16 13:55:31.000000000 +0800
-+++ linux-stage/fs/ext3/inode.c 2006-07-16 14:11:28.000000000 +0800
-@@ -40,7 +40,7 @@
- #include "iopen.h"
- #include "acl.h"
-
--static int ext3_writepage_trans_blocks(struct inode *inode);
-+int ext3_writepage_trans_blocks(struct inode *inode);
-
- /*
- * Test whether an inode is a fast symlink.
-@@ -944,6 +944,17 @@ out:
-
- #define DIO_CREDITS (EXT3_RESERVE_TRANS_BLOCKS + 32)
-
-+static inline int
-+ext3_get_block_wrap(handle_t *handle, struct inode *inode, long block,
-+ struct buffer_head *bh, int create, int extend_disksize)
-+{
-+ if (EXT3_I(inode)->i_flags & EXT3_EXTENTS_FL)
-+ return ext3_ext_get_block(handle, inode, block, bh, create,
-+ extend_disksize);
-+ return ext3_get_blocks_handle(handle, inode, block, 1, bh, create,
-+ extend_disksize);
-+}
-+
- static int ext3_get_block(struct inode *inode, sector_t iblock,
- struct buffer_head *bh_result, int create)
- {
-@@ -984,8 +995,8 @@ static int ext3_get_block(struct inode *
-
- get_block:
- if (ret == 0) {
-- ret = ext3_get_blocks_handle(handle, inode, iblock,
-- max_blocks, bh_result, create, 0);
-+ ret = ext3_get_block_wrap(handle, inode, iblock,
-+ bh_result, create, 0);
- if (ret > 0) {
- bh_result->b_size = (ret << inode->i_blkbits);
- ret = 0;
-@@ -1008,7 +1019,7 @@ struct buffer_head *ext3_getblk(handle_t
- dummy.b_state = 0;
- dummy.b_blocknr = -1000;
- buffer_trace_init(&dummy.b_history);
-- err = ext3_get_blocks_handle(handle, inode, block, 1,
-+ err = ext3_get_block_wrap(handle, inode, block,
- &dummy, create, 1);
- if (err == 1) {
- err = 0;
-@@ -1756,7 +1767,7 @@ void ext3_set_aops(struct inode *inode)
- * This required during truncate. We need to physically zero the tail end
- * of that block so it doesn't yield old data if the file is later grown.
- */
--static int ext3_block_truncate_page(handle_t *handle, struct page *page,
-+int ext3_block_truncate_page(handle_t *handle, struct page *page,
- struct address_space *mapping, loff_t from)
- {
- ext3_fsblk_t index = from >> PAGE_CACHE_SHIFT;
-@@ -2260,6 +2271,9 @@ void ext3_truncate(struct inode *inode)
- return;
- }
-
-+ if (EXT3_I(inode)->i_flags & EXT3_EXTENTS_FL)
-+ return ext3_ext_truncate(inode, page);
-+
- handle = start_transaction(inode);
- if (IS_ERR(handle)) {
- if (page) {
-@@ -3004,12 +3018,15 @@ err_out:
- * block and work out the exact number of indirects which are touched. Pah.
- */
-
--static int ext3_writepage_trans_blocks(struct inode *inode)
-+int ext3_writepage_trans_blocks(struct inode *inode)
- {
- int bpp = ext3_journal_blocks_per_page(inode);
- int indirects = (EXT3_NDIR_BLOCKS % bpp) ? 5 : 3;
- int ret;
-
-+ if (EXT3_I(inode)->i_flags & EXT3_EXTENTS_FL)
-+ return ext3_ext_writepage_trans_blocks(inode, bpp);
-+
- if (ext3_should_journal_data(inode))
- ret = 3 * (bpp + indirects) + 2;
- else
-@@ -3277,7 +3294,7 @@ int ext3_prep_san_write(struct inode *in
-
- /* alloc blocks one by one */
- for (i = 0; i < nblocks; i++) {
-- ret = ext3_get_block_handle(handle, inode, blocks[i],
-+ ret = ext3_get_blocks_handle(handle, inode, blocks[i], 1,
- &bh_tmp, 1, 1);
- if (ret)
- break;
-@@ -3337,7 +3354,7 @@ int ext3_map_inode_page(struct inode *in
- if (blocks[i] != 0)
- continue;
-
-- rc = ext3_get_block_handle(handle, inode, iblock, &dummy, 1, 1);
-+ rc = ext3_get_blocks_handle(handle, inode, iblock, 1, &dummy, 1, 1);
- if (rc) {
- printk(KERN_INFO "ext3_map_inode_page: error reading "
- "block %ld\n", iblock);
-Index: linux-stage/fs/ext3/Makefile
-===================================================================
---- linux-stage.orig/fs/ext3/Makefile 2006-07-16 13:55:31.000000000 +0800
-+++ linux-stage/fs/ext3/Makefile 2006-07-16 14:10:21.000000000 +0800
-@@ -5,7 +5,8 @@
- obj-$(CONFIG_EXT3_FS) += ext3.o
-
- ext3-y := balloc.o bitmap.o dir.o file.o fsync.o ialloc.o inode.o iopen.o \
-- ioctl.o namei.o super.o symlink.o hash.o resize.o
-+ ioctl.o namei.o super.o symlink.o hash.o resize.o \
-+ extents.o
-
- ext3-$(CONFIG_EXT3_FS_XATTR) += xattr.o xattr_user.o xattr_trusted.o
- ext3-$(CONFIG_EXT3_FS_POSIX_ACL) += acl.o
-Index: linux-stage/fs/ext3/super.c
-===================================================================
---- linux-stage.orig/fs/ext3/super.c 2006-07-16 13:55:31.000000000 +0800
-+++ linux-stage/fs/ext3/super.c 2006-07-16 14:10:21.000000000 +0800
-@@ -391,6 +391,7 @@ static void ext3_put_super (struct super
- struct ext3_super_block *es = sbi->s_es;
- int i;
-
-+ ext3_ext_release(sb);
- ext3_xattr_put_super(sb);
- journal_destroy(sbi->s_journal);
- if (!(sb->s_flags & MS_RDONLY)) {
-@@ -455,6 +456,8 @@ static struct inode *ext3_alloc_inode(st
- #endif
- ei->i_block_alloc_info = NULL;
- ei->vfs_inode.i_version = 1;
-+
-+ memset(&ei->i_cached_extent, 0, sizeof(ei->i_cached_extent));
- return &ei->vfs_inode;
- }
-
-@@ -638,6 +641,7 @@ enum {
- Opt_jqfmt_vfsold, Opt_jqfmt_vfsv0, Opt_quota, Opt_noquota,
- Opt_ignore, Opt_barrier, Opt_err, Opt_resize, Opt_usrquota,
- Opt_iopen, Opt_noiopen, Opt_iopen_nopriv,
-+ Opt_extents, Opt_noextents, Opt_extdebug,
- Opt_grpquota
- };
-
-@@ -690,6 +694,9 @@ static match_table_t tokens = {
- {Opt_iopen, "iopen"},
- {Opt_noiopen, "noiopen"},
- {Opt_iopen_nopriv, "iopen_nopriv"},
-+ {Opt_extents, "extents"},
-+ {Opt_noextents, "noextents"},
-+ {Opt_extdebug, "extdebug"},
- {Opt_barrier, "barrier=%u"},
- {Opt_err, NULL},
- {Opt_resize, "resize"},
-@@ -1035,6 +1041,15 @@ clear_qf_name:
- case Opt_bh:
- clear_opt(sbi->s_mount_opt, NOBH);
- break;
-+ case Opt_extents:
-+ set_opt (sbi->s_mount_opt, EXTENTS);
-+ break;
-+ case Opt_noextents:
-+ clear_opt (sbi->s_mount_opt, EXTENTS);
-+ break;
-+ case Opt_extdebug:
-+ set_opt (sbi->s_mount_opt, EXTDEBUG);
-+ break;
- default:
- printk (KERN_ERR
- "EXT3-fs: Unrecognized mount option \"%s\" "
-@@ -1760,6 +1772,7 @@ static int ext3_fill_super (struct super
- test_opt(sb,DATA_FLAGS) == EXT3_MOUNT_ORDERED_DATA ? "ordered":
- "writeback");
-
-+ ext3_ext_init(sb);
- lock_kernel();
- return 0;
-
-Index: linux-stage/fs/ext3/ioctl.c
-===================================================================
---- linux-stage.orig/fs/ext3/ioctl.c 2006-07-16 13:55:31.000000000 +0800
-+++ linux-stage/fs/ext3/ioctl.c 2006-07-16 13:55:31.000000000 +0800
-@@ -135,6 +135,10 @@ flags_err:
- mutex_unlock(&inode->i_mutex);
- return err;
- }
-+ case EXT3_IOC_GET_EXTENTS:
-+ case EXT3_IOC_GET_TREE_STATS:
-+ case EXT3_IOC_GET_TREE_DEPTH:
-+ return ext3_ext_ioctl(inode, filp, cmd, arg);
- case EXT3_IOC_GETVERSION:
- case EXT3_IOC_GETVERSION_OLD:
- return put_user(inode->i_generation, (int __user *) arg);
-Index: linux-stage/include/linux/ext3_fs.h
-===================================================================
---- linux-stage.orig/include/linux/ext3_fs.h 2006-07-16 13:55:31.000000000 +0800
-+++ linux-stage/include/linux/ext3_fs.h 2006-07-16 14:10:21.000000000 +0800
-@@ -181,9 +181,10 @@ struct ext3_group_desc
- #define EXT3_NOTAIL_FL 0x00008000 /* file tail should not be merged */
- #define EXT3_DIRSYNC_FL 0x00010000 /* dirsync behaviour (directories only) */
- #define EXT3_TOPDIR_FL 0x00020000 /* Top of directory hierarchies*/
-+#define EXT3_EXTENTS_FL 0x00080000 /* Inode uses extents */
- #define EXT3_RESERVED_FL 0x80000000 /* reserved for ext3 lib */
-
--#define EXT3_FL_USER_VISIBLE 0x0003DFFF /* User visible flags */
-+#define EXT3_FL_USER_VISIBLE 0x000BDFFF /* User visible flags */
- #define EXT3_FL_USER_MODIFIABLE 0x000380FF /* User modifiable flags */
-
- /*
-@@ -233,6 +234,9 @@ struct ext3_new_group_data {
- #endif
- #define EXT3_IOC_GETRSVSZ _IOR('f', 5, long)
- #define EXT3_IOC_SETRSVSZ _IOW('f', 6, long)
-+#define EXT3_IOC_GET_EXTENTS _IOR('f', 7, long)
-+#define EXT3_IOC_GET_TREE_DEPTH _IOR('f', 8, long)
-+#define EXT3_IOC_GET_TREE_STATS _IOR('f', 9, long)
-
- /*
- * Mount options
-@@ -373,6 +377,8 @@ struct ext3_inode {
- #define EXT3_MOUNT_GRPQUOTA 0x200000 /* "old" group quota */
- #define EXT3_MOUNT_IOPEN 0x400000 /* Allow access via iopen */
- #define EXT3_MOUNT_IOPEN_NOPRIV 0x800000/* Make iopen world-readable */
-+#define EXT3_MOUNT_EXTENTS 0x1000000/* Extents support */
-+#define EXT3_MOUNT_EXTDEBUG 0x2000000/* Extents debug */
-
- /* Compatibility, for having both ext2_fs.h and ext3_fs.h included at once */
- #ifndef clear_opt
-@@ -563,11 +569,13 @@ static inline struct ext3_inode_info *EX
- #define EXT3_FEATURE_INCOMPAT_RECOVER 0x0004 /* Needs recovery */
- #define EXT3_FEATURE_INCOMPAT_JOURNAL_DEV 0x0008 /* Journal device */
- #define EXT3_FEATURE_INCOMPAT_META_BG 0x0010
-+#define EXT3_FEATURE_INCOMPAT_EXTENTS 0x0040 /* extents support */
-
- #define EXT3_FEATURE_COMPAT_SUPP EXT2_FEATURE_COMPAT_EXT_ATTR
- #define EXT3_FEATURE_INCOMPAT_SUPP (EXT3_FEATURE_INCOMPAT_FILETYPE| \
- EXT3_FEATURE_INCOMPAT_RECOVER| \
-- EXT3_FEATURE_INCOMPAT_META_BG)
-+ EXT3_FEATURE_INCOMPAT_META_BG| \
-+ EXT3_FEATURE_INCOMPAT_EXTENTS)
- #define EXT3_FEATURE_RO_COMPAT_SUPP (EXT3_FEATURE_RO_COMPAT_SPARSE_SUPER| \
- EXT3_FEATURE_RO_COMPAT_LARGE_FILE| \
- EXT3_FEATURE_RO_COMPAT_BTREE_DIR)
-@@ -787,6 +795,9 @@ extern unsigned long ext3_count_free (st
-
-
- /* inode.c */
-+extern int ext3_block_truncate_page(handle_t *, struct page *,
-+ struct address_space *, loff_t);
-+extern int ext3_writepage_trans_blocks(struct inode *inode);
- int ext3_forget(handle_t *handle, int is_metadata, struct inode *inode,
- struct buffer_head *bh, ext3_fsblk_t blocknr);
- struct buffer_head * ext3_getblk (handle_t *, struct inode *, long, int, int *);
-@@ -860,6 +870,16 @@ extern struct inode_operations ext3_spec
- extern struct inode_operations ext3_symlink_inode_operations;
- extern struct inode_operations ext3_fast_symlink_inode_operations;
-
-+/* extents.c */
-+extern int ext3_ext_writepage_trans_blocks(struct inode *, int);
-+extern int ext3_ext_get_block(handle_t *, struct inode *, long,
-+ struct buffer_head *, int, int);
-+extern void ext3_ext_truncate(struct inode *, struct page *);
-+extern void ext3_ext_init(struct super_block *);
-+extern void ext3_ext_release(struct super_block *);
-+extern void ext3_extents_initialize_blockmap(handle_t *, struct inode *);
-+extern int ext3_ext_ioctl(struct inode *inode, struct file *filp,
-+ unsigned int cmd, unsigned long arg);
-
- #endif /* __KERNEL__ */
-
-Index: linux-stage/include/linux/ext3_extents.h
-===================================================================
---- /dev/null 1970-01-01 00:00:00.000000000 +0000
-+++ linux-stage/include/linux/ext3_extents.h 2006-07-16 13:55:31.000000000 +0800
-@@ -0,0 +1,262 @@
-+/*
-+ * Copyright (c) 2003, Cluster File Systems, Inc, info@clusterfs.com
-+ * Written by Alex Tomas <alex@clusterfs.com>
-+ *
-+ * This program is free software; you can redistribute it and/or modify
-+ * it under the terms of the GNU General Public License version 2 as
-+ * published by the Free Software Foundation.
-+ *
-+ * This program is distributed in the hope that it will be useful,
-+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
-+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-+ * GNU General Public License for more details.
-+ *
-+ * You should have received a copy of the GNU General Public License
-+ * along with this program; if not, write to the Free Software
-+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-
-+ */
-+
-+#ifndef _LINUX_EXT3_EXTENTS
-+#define _LINUX_EXT3_EXTENTS
-+
-+/*
-+ * with AGRESSIVE_TEST defined, the capacity of index/leaf blocks
-+ * becomes very small, so index splits, in-depth growing and
-+ * other hard changes happen much more often;
-+ * this is for debug purposes only
-+ */
-+#define AGRESSIVE_TEST_
-+
-+/*
-+ * if CHECK_BINSEARCH defined, then results of binary search
-+ * will be checked by linear search
-+ */
-+#define CHECK_BINSEARCH_
-+
-+/*
-+ * if EXT_DEBUG is defined you can use 'extdebug' mount option
-+ * to get lots of info about what's going on
-+ */
-+#define EXT_DEBUG_
-+#ifdef EXT_DEBUG
-+#define ext_debug(tree,fmt,a...) \
-+do { \
-+ if (test_opt((tree)->inode->i_sb, EXTDEBUG)) \
-+ printk(fmt, ##a); \
-+} while (0);
-+#else
-+#define ext_debug(tree,fmt,a...)
-+#endif
-+
-+/*
-+ * if EXT_STATS is defined then stats numbers are collected
-+ * these numbers will be displayed at umount time
-+ */
-+#define EXT_STATS_
-+
-+
-+#define EXT3_ALLOC_NEEDED 3 /* block bitmap + group desc. + sb */
-+
-+/*
-+ * ext3_inode has i_block array (total 60 bytes)
-+ * first 4 bytes are used to store:
-+ * - tree depth (0 means there is no tree yet; all extents are in the inode)
-+ * - number of alive extents in the inode
-+ */
-+
-+/*
-+ * this is extent on-disk structure
-+ * it's used at the bottom of the tree
-+ */
-+struct ext3_extent {
-+ __u32 ee_block; /* first logical block extent covers */
-+ __u16 ee_len; /* number of blocks covered by extent */
-+ __u16 ee_start_hi; /* high 16 bits of physical block */
-+ __u32 ee_start; /* low 32 bits of physical block */
-+};
-+
-+/*
-+ * this is the index on-disk structure;
-+ * it's used at all the levels except the bottom
-+ */
-+struct ext3_extent_idx {
-+ __u32 ei_block; /* index covers logical blocks from 'block' */
-+ __u32 ei_leaf; /* pointer to the physical block of the next *
-+ * level. a leaf or the next index could be here */
-+ __u16 ei_leaf_hi; /* high 16 bits of physical block */
-+ __u16 ei_unused;
-+};
-+
-+/*
-+ * each block (leaves and indexes), even the inode-stored one, has a header
-+ */
-+struct ext3_extent_header {
-+ __u16 eh_magic; /* probably will support different formats */
-+ __u16 eh_entries; /* number of valid entries */
-+ __u16 eh_max; /* capacity of store in entries */
-+ __u16 eh_depth; /* has tree real underlying blocks? */
-+ __u32 eh_generation; /* flags(8 bits) | generation of the tree */
-+};
-+
-+#define EXT3_EXT_MAGIC 0xf30a
-+
-+/*
-+ * an array of ext3_ext_path contains the path to some extent;
-+ * creation/lookup routines use it for traversal/splitting/etc.,
-+ * truncate uses it to simulate recursive walking
-+ */
-+struct ext3_ext_path {
-+ __u32 p_block;
-+ __u16 p_depth;
-+ struct ext3_extent *p_ext;
-+ struct ext3_extent_idx *p_idx;
-+ struct ext3_extent_header *p_hdr;
-+ struct buffer_head *p_bh;
-+};
-+
-+/*
-+ * structure for external API
-+ */
-+
-+/*
-+ * storage for cached extent
-+ */
-+struct ext3_ext_cache {
-+ __u32 ec_start;
-+ __u32 ec_block;
-+ __u32 ec_len;
-+ __u32 ec_type;
-+};
-+
-+#define EXT3_EXT_CACHE_NO 0
-+#define EXT3_EXT_CACHE_GAP 1
-+#define EXT3_EXT_CACHE_EXTENT 2
-+
-+/*
-+ * ext3_extents_tree is used to pass initial information
-+ * to top-level extents API
-+ */
-+struct ext3_extents_helpers;
-+struct ext3_extents_tree {
-+ struct inode *inode; /* inode which tree belongs to */
-+ void *root; /* ptr to data top of tree resides at */
-+ void *buffer; /* will be passed as arg to ^^ routines */
-+ int buffer_len;
-+ void *private;
-+ struct ext3_ext_cache *cex;/* last found extent */
-+ struct ext3_extents_helpers *ops;
-+};
-+
-+struct ext3_extents_helpers {
-+ int (*get_write_access)(handle_t *h, void *buffer);
-+ int (*mark_buffer_dirty)(handle_t *h, void *buffer);
-+ int (*mergable)(struct ext3_extent *ex1, struct ext3_extent *ex2);
-+ int (*remove_extent_credits)(struct ext3_extents_tree *,
-+ struct ext3_extent *, unsigned long,
-+ unsigned long);
-+ int (*remove_extent)(struct ext3_extents_tree *,
-+ struct ext3_extent *, unsigned long,
-+ unsigned long);
-+ int (*new_block)(handle_t *, struct ext3_extents_tree *,
-+ struct ext3_ext_path *, struct ext3_extent *,
-+ int *);
-+};
-+
-+/*
-+ * to be called by ext3_ext_walk_space()
-+ * negative retcode - error
-+ * positive retcode - signal for ext3_ext_walk_space(), see below
-+ * callback must return valid extent (passed or newly created)
-+ */
-+typedef int (*ext_prepare_callback)(struct ext3_extents_tree *,
-+ struct ext3_ext_path *,
-+ struct ext3_ext_cache *);
-+
-+#define EXT_CONTINUE 0
-+#define EXT_BREAK 1
-+#define EXT_REPEAT 2
-+
-+
-+#define EXT_MAX_BLOCK 0xffffffff
-+
-+
-+#define EXT_FIRST_EXTENT(__hdr__) \
-+ ((struct ext3_extent *) (((char *) (__hdr__)) + \
-+ sizeof(struct ext3_extent_header)))
-+#define EXT_FIRST_INDEX(__hdr__) \
-+ ((struct ext3_extent_idx *) (((char *) (__hdr__)) + \
-+ sizeof(struct ext3_extent_header)))
-+#define EXT_HAS_FREE_INDEX(__path__) \
-+ ((__path__)->p_hdr->eh_entries < (__path__)->p_hdr->eh_max)
-+#define EXT_LAST_EXTENT(__hdr__) \
-+ (EXT_FIRST_EXTENT((__hdr__)) + (__hdr__)->eh_entries - 1)
-+#define EXT_LAST_INDEX(__hdr__) \
-+ (EXT_FIRST_INDEX((__hdr__)) + (__hdr__)->eh_entries - 1)
-+#define EXT_MAX_EXTENT(__hdr__) \
-+ (EXT_FIRST_EXTENT((__hdr__)) + (__hdr__)->eh_max - 1)
-+#define EXT_MAX_INDEX(__hdr__) \
-+ (EXT_FIRST_INDEX((__hdr__)) + (__hdr__)->eh_max - 1)
-+#define EXT_HDR_GEN(__hdr__) ((__hdr__)->eh_generation & 0x00ffffff)
-+#define EXT_FLAGS(__hdr__) ((__hdr__)->eh_generation >> 24)
-+#define EXT_FLAGS_CLR_UNKNOWN 0x7 /* Flags cleared on modification */
-+
-+#define EXT_BLOCK_HDR(__bh__) ((struct ext3_extent_header *)(__bh__)->b_data)
-+#define EXT_ROOT_HDR(__tree__) ((struct ext3_extent_header *)(__tree__)->root)
-+#define EXT_DEPTH(__tree__) (EXT_ROOT_HDR(__tree__)->eh_depth)
-+#define EXT_GENERATION(__tree__) EXT_HDR_GEN(EXT_ROOT_HDR(__tree__))
-+
-+#define EXT_ASSERT(__x__) if (!(__x__)) BUG();
-+
-+#define EXT_CHECK_PATH(tree,path) \
-+{ \
-+ int depth = EXT_DEPTH(tree); \
-+ BUG_ON((unsigned long) (path) < __PAGE_OFFSET); \
-+ BUG_ON((unsigned long) (path)[depth].p_idx < \
-+ __PAGE_OFFSET && (path)[depth].p_idx != NULL); \
-+ BUG_ON((unsigned long) (path)[depth].p_ext < \
-+ __PAGE_OFFSET && (path)[depth].p_ext != NULL); \
-+ BUG_ON((unsigned long) (path)[depth].p_hdr < __PAGE_OFFSET); \
-+ BUG_ON((unsigned long) (path)[depth].p_bh < __PAGE_OFFSET \
-+ && depth != 0); \
-+ BUG_ON((path)[0].p_depth != depth); \
-+}
-+
-+
-+/*
-+ * this structure is used to gather extents from the tree via ioctl
-+ */
-+struct ext3_extent_buf {
-+ unsigned long start;
-+ int buflen;
-+ void *buffer;
-+ void *cur;
-+ int err;
-+};
-+
-+/*
-+ * this structure is used to collect stats info about the tree
-+ */
-+struct ext3_extent_tree_stats {
-+ int depth;
-+ int extents_num;
-+ int leaf_num;
-+};
-+
-+extern void ext3_init_tree_desc(struct ext3_extents_tree *, struct inode *);
-+extern int ext3_extent_tree_init(handle_t *, struct ext3_extents_tree *);
-+extern int ext3_ext_calc_credits_for_insert(struct ext3_extents_tree *, struct ext3_ext_path *);
-+extern int ext3_ext_insert_extent(handle_t *, struct ext3_extents_tree *, struct ext3_ext_path *, struct ext3_extent *);
-+extern int ext3_ext_walk_space(struct ext3_extents_tree *, unsigned long, unsigned long, ext_prepare_callback);
-+extern int ext3_ext_remove_space(struct ext3_extents_tree *, unsigned long, unsigned long);
-+extern struct ext3_ext_path * ext3_ext_find_extent(struct ext3_extents_tree *, int, struct ext3_ext_path *);
-+extern int ext3_ext_calc_blockmap_metadata(struct inode *, int);
-+
-+static inline void
-+ext3_ext_invalidate_cache(struct ext3_extents_tree *tree)
-+{
-+ if (tree->cex)
-+ tree->cex->ec_type = EXT3_EXT_CACHE_NO;
-+}
-+
-+
-+#endif /* _LINUX_EXT3_EXTENTS */
-Index: linux-stage/include/linux/ext3_fs_i.h
-===================================================================
---- linux-stage.orig/include/linux/ext3_fs_i.h 2006-07-16 13:55:30.000000000 +0800
-+++ linux-stage/include/linux/ext3_fs_i.h 2006-07-16 14:10:20.000000000 +0800
-@@ -142,6 +142,8 @@ struct ext3_inode_info {
- */
- struct mutex truncate_mutex;
- struct inode vfs_inode;
-+
-+ __u32 i_cached_extent[4];
- };
-
- #endif /* _LINUX_EXT3_FS_I */
+++ /dev/null
-%patch
-Index: linux-2.6.5-sles9/fs/ext3/extents.c
-===================================================================
---- linux-2.6.5-sles9.orig/fs/ext3/extents.c 2005-02-17 22:07:57.023609040 +0300
-+++ linux-2.6.5-sles9/fs/ext3/extents.c 2005-02-23 01:02:37.396435640 +0300
-@@ -0,0 +1,2361 @@
-+/*
-+ * Copyright(c) 2003, 2004, 2005, Cluster File Systems, Inc, info@clusterfs.com
-+ * Written by Alex Tomas <alex@clusterfs.com>
-+ *
-+ * This program is free software; you can redistribute it and/or modify
-+ * it under the terms of the GNU General Public License version 2 as
-+ * published by the Free Software Foundation.
-+ *
-+ * This program is distributed in the hope that it will be useful,
-+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
-+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-+ * GNU General Public License for more details.
-+ *
-+ * You should have received a copy of the GNU General Public License
-+ * along with this program; if not, write to the Free Software
-+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-
-+ */
-+
-+/*
-+ * Extents support for EXT3
-+ *
-+ * TODO:
-+ * - ext3_ext_walk_space() should not use ext3_ext_find_extent()
-+ * - ext3_ext_calc_credits() could take 'mergable' into account
-+ * - ext3*_error() should be used in some situations
-+ * - find_goal() [to be tested and improved]
-+ * - smart tree reduction
-+ * - arch-independence
-+ * common on-disk format for big/little-endian arch
-+ */
-+
-+#include <linux/module.h>
-+#include <linux/fs.h>
-+#include <linux/time.h>
-+#include <linux/ext3_jbd.h>
-+#include <linux/jbd.h>
-+#include <linux/smp_lock.h>
-+#include <linux/highuid.h>
-+#include <linux/pagemap.h>
-+#include <linux/quotaops.h>
-+#include <linux/string.h>
-+#include <linux/slab.h>
-+#include <linux/ext3_extents.h>
-+#include <asm/uaccess.h>
-+
-+
-+static inline int ext3_ext_check_header(struct ext3_extent_header *eh)
-+{
-+ if (eh->eh_magic != EXT3_EXT_MAGIC) {
-+ printk(KERN_ERR "EXT3-fs: invalid magic = 0x%x\n",
-+ (unsigned)eh->eh_magic);
-+ return -EIO;
-+ }
-+ if (eh->eh_max == 0) {
-+ printk(KERN_ERR "EXT3-fs: invalid eh_max = %u\n",
-+ (unsigned)eh->eh_max);
-+ return -EIO;
-+ }
-+ if (eh->eh_entries > eh->eh_max) {
-+ printk(KERN_ERR "EXT3-fs: invalid eh_entries = %u\n",
-+ (unsigned)eh->eh_entries);
-+ return -EIO;
-+ }
-+ return 0;
-+}
-+
-+static handle_t *ext3_ext_journal_restart(handle_t *handle, int needed)
-+{
-+ int err;
-+
-+ if (handle->h_buffer_credits > needed)
-+ return handle;
-+ if (!ext3_journal_extend(handle, needed))
-+ return handle;
-+ err = ext3_journal_restart(handle, needed);
-+
-+ return handle;
-+}
-+
-+static inline int
-+ext3_ext_get_access_for_root(handle_t *h, struct ext3_extents_tree *tree)
-+{
-+ if (tree->ops->get_write_access)
-+ return tree->ops->get_write_access(h,tree->buffer);
-+ else
-+ return 0;
-+}
-+
-+static inline int
-+ext3_ext_mark_root_dirty(handle_t *h, struct ext3_extents_tree *tree)
-+{
-+ if (tree->ops->mark_buffer_dirty)
-+ return tree->ops->mark_buffer_dirty(h,tree->buffer);
-+ else
-+ return 0;
-+}
-+
-+/*
-+ * could return:
-+ * - EROFS
-+ * - ENOMEM
-+ */
-+static int ext3_ext_get_access(handle_t *handle,
-+ struct ext3_extents_tree *tree,
-+ struct ext3_ext_path *path)
-+{
-+ int err;
-+
-+ if (path->p_bh) {
-+ /* path points to block */
-+ err = ext3_journal_get_write_access(handle, path->p_bh);
-+ } else {
-+ /* path points to leaf/index in inode body */
-+ err = ext3_ext_get_access_for_root(handle, tree);
-+ }
-+ return err;
-+}
-+
-+/*
-+ * could return:
-+ * - EROFS
-+ * - ENOMEM
-+ * - EIO
-+ */
-+static int ext3_ext_dirty(handle_t *handle, struct ext3_extents_tree *tree,
-+ struct ext3_ext_path *path)
-+{
-+ int err;
-+ if (path->p_bh) {
-+ /* path points to block */
-+ err = ext3_journal_dirty_metadata(handle, path->p_bh);
-+ } else {
-+ /* path points to leaf/index in inode body */
-+ err = ext3_ext_mark_root_dirty(handle, tree);
-+ }
-+ return err;
-+}
-+
-+static inline int
-+ext3_ext_new_block(handle_t *handle, struct ext3_extents_tree *tree,
-+ struct ext3_ext_path *path, struct ext3_extent *ex,
-+ int *err)
-+{
-+ int goal, depth, newblock;
-+ struct inode *inode;
-+
-+ EXT_ASSERT(tree);
-+ if (tree->ops->new_block)
-+ return tree->ops->new_block(handle, tree, path, ex, err);
-+
-+ inode = tree->inode;
-+ depth = EXT_DEPTH(tree);
-+ if (path && depth > 0) {
-+ goal = path[depth-1].p_block;
-+ } else {
-+ struct ext3_inode_info *ei = EXT3_I(inode);
-+ unsigned long bg_start;
-+ unsigned long colour;
-+
-+ bg_start = (ei->i_block_group *
-+ EXT3_BLOCKS_PER_GROUP(inode->i_sb)) +
-+ le32_to_cpu(EXT3_SB(inode->i_sb)->s_es->s_first_data_block);
-+ colour = (current->pid % 16) *
-+ (EXT3_BLOCKS_PER_GROUP(inode->i_sb) / 16);
-+ goal = bg_start + colour;
-+ }
-+
-+ newblock = ext3_new_block(handle, inode, goal, err);
-+ return newblock;
-+}
-+
-+static inline void ext3_ext_tree_changed(struct ext3_extents_tree *tree)
-+{
-+ struct ext3_extent_header *neh = EXT_ROOT_HDR(tree);
-+ neh->eh_generation = ((EXT_FLAGS(neh) & ~EXT_FLAGS_CLR_UNKNOWN) << 24) |
-+ (EXT_HDR_GEN(neh) + 1);
-+}
-+
-+static inline int ext3_ext_space_block(struct ext3_extents_tree *tree)
-+{
-+ int size;
-+
-+ size = (tree->inode->i_sb->s_blocksize -
-+ sizeof(struct ext3_extent_header)) /
-+ sizeof(struct ext3_extent);
-+#ifdef AGRESSIVE_TEST
-+ size = 6;
-+#endif
-+ return size;
-+}
-+
-+static inline int ext3_ext_space_block_idx(struct ext3_extents_tree *tree)
-+{
-+ int size;
-+
-+ size = (tree->inode->i_sb->s_blocksize -
-+ sizeof(struct ext3_extent_header)) /
-+ sizeof(struct ext3_extent_idx);
-+#ifdef AGRESSIVE_TEST
-+ size = 5;
-+#endif
-+ return size;
-+}
-+
-+static inline int ext3_ext_space_root(struct ext3_extents_tree *tree)
-+{
-+ int size;
-+
-+ size = (tree->buffer_len - sizeof(struct ext3_extent_header)) /
-+ sizeof(struct ext3_extent);
-+#ifdef AGRESSIVE_TEST
-+ size = 3;
-+#endif
-+ return size;
-+}
-+
-+static inline int ext3_ext_space_root_idx(struct ext3_extents_tree *tree)
-+{
-+ int size;
-+
-+ size = (tree->buffer_len - sizeof(struct ext3_extent_header)) /
-+ sizeof(struct ext3_extent_idx);
-+#ifdef AGRESSIVE_TEST
-+ size = 4;
-+#endif
-+ return size;
-+}
-+
-+static void ext3_ext_show_path(struct ext3_extents_tree *tree,
-+ struct ext3_ext_path *path)
-+{
-+#ifdef EXT_DEBUG
-+ int k, l = path->p_depth;
-+
-+ ext_debug(tree, "path:");
-+ for (k = 0; k <= l; k++, path++) {
-+ if (path->p_idx) {
-+ ext_debug(tree, " %d->%d", path->p_idx->ei_block,
-+ path->p_idx->ei_leaf);
-+ } else if (path->p_ext) {
-+ ext_debug(tree, " %d:%d:%d",
-+ path->p_ext->ee_block,
-+ path->p_ext->ee_len,
-+ path->p_ext->ee_start);
-+ } else
-+ ext_debug(tree, " []");
-+ }
-+ ext_debug(tree, "\n");
-+#endif
-+}
-+
-+static void ext3_ext_show_leaf(struct ext3_extents_tree *tree,
-+ struct ext3_ext_path *path)
-+{
-+#ifdef EXT_DEBUG
-+ int depth = EXT_DEPTH(tree);
-+ struct ext3_extent_header *eh;
-+ struct ext3_extent *ex;
-+ int i;
-+
-+ if (!path)
-+ return;
-+
-+ eh = path[depth].p_hdr;
-+ ex = EXT_FIRST_EXTENT(eh);
-+
-+ for (i = 0; i < eh->eh_entries; i++, ex++) {
-+ ext_debug(tree, "%d:%d:%d ",
-+ ex->ee_block, ex->ee_len, ex->ee_start);
-+ }
-+ ext_debug(tree, "\n");
-+#endif
-+}
-+
-+static void ext3_ext_drop_refs(struct ext3_ext_path *path)
-+{
-+ int depth = path->p_depth;
-+ int i;
-+
-+ for (i = 0; i <= depth; i++, path++) {
-+ if (path->p_bh) {
-+ brelse(path->p_bh);
-+ path->p_bh = NULL;
-+ }
-+ }
-+}
-+
-+/*
-+ * binary search for closest index by given block
-+ */
-+static inline void
-+ext3_ext_binsearch_idx(struct ext3_extents_tree *tree,
-+ struct ext3_ext_path *path, int block)
-+{
-+ struct ext3_extent_header *eh = path->p_hdr;
-+ struct ext3_extent_idx *ix;
-+ int l = 0, k, r;
-+
-+ EXT_ASSERT(eh->eh_magic == EXT3_EXT_MAGIC);
-+ EXT_ASSERT(eh->eh_entries <= eh->eh_max);
-+ EXT_ASSERT(eh->eh_entries > 0);
-+
-+ ext_debug(tree, "binsearch for %d(idx): ", block);
-+
-+ path->p_idx = ix = EXT_FIRST_INDEX(eh);
-+
-+ r = k = eh->eh_entries;
-+ while (k > 1) {
-+ k = (r - l) / 2;
-+ if (block < ix[l + k].ei_block)
-+ r -= k;
-+ else
-+ l += k;
-+ ext_debug(tree, "%d:%d:%d ", k, l, r);
-+ }
-+
-+ ix += l;
-+ path->p_idx = ix;
-+ ext_debug(tree," -> %d->%d ",path->p_idx->ei_block,path->p_idx->ei_leaf);
-+
-+ while (l++ < r) {
-+ if (block < ix->ei_block)
-+ break;
-+ path->p_idx = ix++;
-+ }
-+ ext_debug(tree, " -> %d->%d\n", path->p_idx->ei_block,
-+ path->p_idx->ei_leaf);
-+
-+#ifdef CHECK_BINSEARCH
-+ {
-+ struct ext3_extent_idx *chix;
-+
-+ chix = ix = EXT_FIRST_INDEX(eh);
-+ for (k = 0; k < eh->eh_entries; k++, ix++) {
-+ if (k != 0 && ix->ei_block <= ix[-1].ei_block) {
-+ printk("k=%d, ix=0x%p, first=0x%p\n", k,
-+ ix, EXT_FIRST_INDEX(eh));
-+ printk("%u <= %u\n",
-+ ix->ei_block,ix[-1].ei_block);
-+ }
-+ EXT_ASSERT(k == 0 || ix->ei_block > ix[-1].ei_block);
-+ if (block < ix->ei_block)
-+ break;
-+ chix = ix;
-+ }
-+ EXT_ASSERT(chix == path->p_idx);
-+ }
-+#endif
-+}
-+
-+/*
-+ * binary search for closest extent by given block
-+ */
-+static inline void
-+ext3_ext_binsearch(struct ext3_extents_tree *tree,
-+ struct ext3_ext_path *path, int block)
-+{
-+ struct ext3_extent_header *eh = path->p_hdr;
-+ struct ext3_extent *ex;
-+ int l = 0, k, r;
-+
-+ EXT_ASSERT(eh->eh_magic == EXT3_EXT_MAGIC);
-+ EXT_ASSERT(eh->eh_entries <= eh->eh_max);
-+
-+ if (eh->eh_entries == 0) {
-+ /*
-+ * this leaf is still empty:
-+ * we get such a leaf in split/add case
-+ */
-+ return;
-+ }
-+
-+ ext_debug(tree, "binsearch for %d: ", block);
-+
-+ path->p_ext = ex = EXT_FIRST_EXTENT(eh);
-+
-+ r = k = eh->eh_entries;
-+ while (k > 1) {
-+ k = (r - l) / 2;
-+ if (block < ex[l + k].ee_block)
-+ r -= k;
-+ else
-+ l += k;
-+ ext_debug(tree, "%d:%d:%d ", k, l, r);
-+ }
-+
-+ ex += l;
-+ path->p_ext = ex;
-+ ext_debug(tree, " -> %d:%d:%d ", path->p_ext->ee_block,
-+ path->p_ext->ee_start, path->p_ext->ee_len);
-+
-+ while (l++ < r) {
-+ if (block < ex->ee_block)
-+ break;
-+ path->p_ext = ex++;
-+ }
-+ ext_debug(tree, " -> %d:%d:%d\n", path->p_ext->ee_block,
-+ path->p_ext->ee_start, path->p_ext->ee_len);
-+
-+#ifdef CHECK_BINSEARCH
-+ {
-+ struct ext3_extent *chex;
-+
-+ chex = ex = EXT_FIRST_EXTENT(eh);
-+ for (k = 0; k < eh->eh_entries; k++, ex++) {
-+ EXT_ASSERT(k == 0 || ex->ee_block > ex[-1].ee_block);
-+ if (block < ex->ee_block)
-+ break;
-+ chex = ex;
-+ }
-+ EXT_ASSERT(chex == path->p_ext);
-+ }
-+#endif
-+}
-+
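
As an aside, the halving loop used by both binsearch helpers converges on the last entry whose start block does not exceed the target block, falling back to the first entry when every start block is larger. A standalone C sketch of the same loop over a plain array of start blocks, shown purely as an illustration (find_closest and its test values are hypothetical, not part of the patch hunk):

    #include <stdio.h>

    /* Mirrors the halving loop of ext3_ext_binsearch(), followed by the
     * short linear pass the kernel code also performs.  Assumes blocks[]
     * is sorted ascending; returns the index of the last entry starting
     * at or before target, or 0 if no entry does. */
    static int find_closest(const unsigned int *blocks, int n, unsigned int target)
    {
            int l = 0, r = n, k = n;

            while (k > 1) {
                    k = (r - l) / 2;
                    if (target < blocks[l + k])
                            r -= k;
                    else
                            l += k;
            }
            while (l + 1 < r && blocks[l + 1] <= target)
                    l++;
            return l;
    }

    int main(void)
    {
            unsigned int starts[] = { 0, 8, 24, 100, 512 };

            printf("%d\n", find_closest(starts, 5, 30)); /* prints 2 (the entry starting at 24) */
            return 0;
    }
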
-+int ext3_extent_tree_init(handle_t *handle, struct ext3_extents_tree *tree)
-+{
-+ struct ext3_extent_header *eh;
-+
-+ BUG_ON(tree->buffer_len == 0);
-+ ext3_ext_get_access_for_root(handle, tree);
-+ eh = EXT_ROOT_HDR(tree);
-+ eh->eh_depth = 0;
-+ eh->eh_entries = 0;
-+ eh->eh_magic = EXT3_EXT_MAGIC;
-+ eh->eh_max = ext3_ext_space_root(tree);
-+ ext3_ext_mark_root_dirty(handle, tree);
-+ ext3_ext_invalidate_cache(tree);
-+ return 0;
-+}
-+
-+struct ext3_ext_path *
-+ext3_ext_find_extent(struct ext3_extents_tree *tree, int block,
-+ struct ext3_ext_path *path)
-+{
-+ struct ext3_extent_header *eh;
-+ struct buffer_head *bh;
-+ int depth, i, ppos = 0;
-+
-+ EXT_ASSERT(tree);
-+ EXT_ASSERT(tree->inode);
-+ EXT_ASSERT(tree->root);
-+
-+ eh = EXT_ROOT_HDR(tree);
-+ EXT_ASSERT(eh);
-+ if (ext3_ext_check_header(eh)) {
-+ /* don't free previously allocated path
-+ * -- caller should take care */
-+ path = NULL;
-+ goto err;
-+ }
-+
-+ i = depth = EXT_DEPTH(tree);
-+ EXT_ASSERT(eh->eh_max);
-+ EXT_ASSERT(eh->eh_magic == EXT3_EXT_MAGIC);
-+
-+ /* account possible depth increase */
-+ if (!path) {
-+ path = kmalloc(sizeof(struct ext3_ext_path) * (depth + 2),
-+ GFP_NOFS);
-+ if (!path)
-+ return ERR_PTR(-ENOMEM);
-+ }
-+ memset(path, 0, sizeof(struct ext3_ext_path) * (depth + 1));
-+ path[0].p_hdr = eh;
-+
-+ /* walk through the tree */
-+ while (i) {
-+ ext_debug(tree, "depth %d: num %d, max %d\n",
-+ ppos, eh->eh_entries, eh->eh_max);
-+ ext3_ext_binsearch_idx(tree, path + ppos, block);
-+ path[ppos].p_block = path[ppos].p_idx->ei_leaf;
-+ path[ppos].p_depth = i;
-+ path[ppos].p_ext = NULL;
-+
-+ bh = sb_bread(tree->inode->i_sb, path[ppos].p_block);
-+ if (!bh)
-+ goto err;
-+
-+ eh = EXT_BLOCK_HDR(bh);
-+ ppos++;
-+ EXT_ASSERT(ppos <= depth);
-+ path[ppos].p_bh = bh;
-+ path[ppos].p_hdr = eh;
-+ i--;
-+
-+ if (ext3_ext_check_header(eh))
-+ goto err;
-+ }
-+
-+ path[ppos].p_depth = i;
-+ path[ppos].p_hdr = eh;
-+ path[ppos].p_ext = NULL;
-+ path[ppos].p_idx = NULL;
-+
-+ if (ext3_ext_check_header(eh))
-+ goto err;
-+
-+ /* find extent */
-+ ext3_ext_binsearch(tree, path + ppos, block);
-+
-+ ext3_ext_show_path(tree, path);
-+
-+ return path;
-+
-+err:
-+ printk(KERN_ERR "EXT3-fs: header is corrupted!\n");
-+ if (path) {
-+ ext3_ext_drop_refs(path);
-+ kfree(path);
-+ }
-+ return ERR_PTR(-EIO);
-+}
-+
-+/*
-+ * insert new index [logical;ptr] into the block at curp
-+ * it checks whether to insert before or after curp
-+ */
-+static int ext3_ext_insert_index(handle_t *handle,
-+ struct ext3_extents_tree *tree,
-+ struct ext3_ext_path *curp,
-+ int logical, int ptr)
-+{
-+ struct ext3_extent_idx *ix;
-+ int len, err;
-+
-+ if ((err = ext3_ext_get_access(handle, tree, curp)))
-+ return err;
-+
-+ EXT_ASSERT(logical != curp->p_idx->ei_block);
-+ len = EXT_MAX_INDEX(curp->p_hdr) - curp->p_idx;
-+ if (logical > curp->p_idx->ei_block) {
-+ /* insert after */
-+ if (curp->p_idx != EXT_LAST_INDEX(curp->p_hdr)) {
-+ len = (len - 1) * sizeof(struct ext3_extent_idx);
-+ len = len < 0 ? 0 : len;
-+ ext_debug(tree, "insert new index %d after: %d. "
-+ "move %d from 0x%p to 0x%p\n",
-+ logical, ptr, len,
-+ (curp->p_idx + 1), (curp->p_idx + 2));
-+ memmove(curp->p_idx + 2, curp->p_idx + 1, len);
-+ }
-+ ix = curp->p_idx + 1;
-+ } else {
-+ /* insert before */
-+ len = len * sizeof(struct ext3_extent_idx);
-+ len = len < 0 ? 0 : len;
-+ ext_debug(tree, "insert new index %d before: %d. "
-+ "move %d from 0x%p to 0x%p\n",
-+ logical, ptr, len,
-+ curp->p_idx, (curp->p_idx + 1));
-+ memmove(curp->p_idx + 1, curp->p_idx, len);
-+ ix = curp->p_idx;
-+ }
-+
-+ ix->ei_block = logical;
-+ ix->ei_leaf = ptr;
-+ ix->ei_leaf_hi = ix->ei_unused = 0;
-+ curp->p_hdr->eh_entries++;
-+
-+ EXT_ASSERT(curp->p_hdr->eh_entries <= curp->p_hdr->eh_max);
-+ EXT_ASSERT(ix <= EXT_LAST_INDEX(curp->p_hdr));
-+
-+ err = ext3_ext_dirty(handle, tree, curp);
-+ ext3_std_error(tree->inode->i_sb, err);
-+
-+ return err;
-+}
-+
-+/*
-+ * routine inserts new subtree into the path, using free index entry
-+ * at depth 'at':
-+ * - allocates all needed blocks (new leaf and all intermediate index blocks)
-+ * - makes decision where to split
-+ * - moves remaining extents and index entries (right of the split point)
-+ * into the newly allocated blocks
-+ * - initializes subtree
-+ */
-+static int ext3_ext_split(handle_t *handle, struct ext3_extents_tree *tree,
-+ struct ext3_ext_path *path,
-+ struct ext3_extent *newext, int at)
-+{
-+ struct buffer_head *bh = NULL;
-+ int depth = EXT_DEPTH(tree);
-+ struct ext3_extent_header *neh;
-+ struct ext3_extent_idx *fidx;
-+ struct ext3_extent *ex;
-+ int i = at, k, m, a;
-+ unsigned long newblock, oldblock, border;
-+ int *ablocks = NULL; /* array of allocated blocks */
-+ int err = 0;
-+
-+ /* make decision: where to split? */
-+ /* FIXME: now decision is simplest: at current extent */
-+
-+ /* if current leaf will be split, then we should use
-+ * border from split point */
-+ EXT_ASSERT(path[depth].p_ext <= EXT_MAX_EXTENT(path[depth].p_hdr));
-+ if (path[depth].p_ext != EXT_MAX_EXTENT(path[depth].p_hdr)) {
-+ border = path[depth].p_ext[1].ee_block;
-+ ext_debug(tree, "leaf will be splitted."
-+ " next leaf starts at %d\n",
-+ (int)border);
-+ } else {
-+ border = newext->ee_block;
-+ ext_debug(tree, "leaf will be added."
-+ " next leaf starts at %d\n",
-+ (int)border);
-+ }
-+
-+ /*
-+ * if error occurs, then we break processing
-+ * and turn filesystem read-only. so, index won't
-+ * be inserted and tree will be in consistent
-+ * state. next mount will repair buffers too
-+ */
-+
-+ /*
-+ * get array to track all allocated blocks
-+ * we need this to handle errors and free blocks
-+ * upon error
-+ */
-+ ablocks = kmalloc(sizeof(unsigned long) * depth, GFP_NOFS);
-+ if (!ablocks)
-+ return -ENOMEM;
-+ memset(ablocks, 0, sizeof(unsigned long) * depth);
-+
-+ /* allocate all needed blocks */
-+ ext_debug(tree, "allocate %d blocks for indexes/leaf\n", depth - at);
-+ for (a = 0; a < depth - at; a++) {
-+ newblock = ext3_ext_new_block(handle, tree, path, newext, &err);
-+ if (newblock == 0)
-+ goto cleanup;
-+ ablocks[a] = newblock;
-+ }
-+
-+ /* initialize new leaf */
-+ newblock = ablocks[--a];
-+ EXT_ASSERT(newblock);
-+ bh = sb_getblk(tree->inode->i_sb, newblock);
-+ if (!bh) {
-+ err = -EIO;
-+ goto cleanup;
-+ }
-+ lock_buffer(bh);
-+
-+ if ((err = ext3_journal_get_create_access(handle, bh)))
-+ goto cleanup;
-+
-+ neh = EXT_BLOCK_HDR(bh);
-+ neh->eh_entries = 0;
-+ neh->eh_max = ext3_ext_space_block(tree);
-+ neh->eh_magic = EXT3_EXT_MAGIC;
-+ neh->eh_depth = 0;
-+ ex = EXT_FIRST_EXTENT(neh);
-+
-+ /* move the remainder of path[depth] to the new leaf */
-+ EXT_ASSERT(path[depth].p_hdr->eh_entries == path[depth].p_hdr->eh_max);
-+ /* start copy from next extent */
-+ /* TODO: we could do it by single memmove */
-+ m = 0;
-+ path[depth].p_ext++;
-+ while (path[depth].p_ext <=
-+ EXT_MAX_EXTENT(path[depth].p_hdr)) {
-+ ext_debug(tree, "move %d:%d:%d in new leaf %lu\n",
-+ path[depth].p_ext->ee_block,
-+ path[depth].p_ext->ee_start,
-+ path[depth].p_ext->ee_len,
-+ newblock);
-+ memmove(ex++, path[depth].p_ext++, sizeof(struct ext3_extent));
-+ neh->eh_entries++;
-+ m++;
-+ }
-+ set_buffer_uptodate(bh);
-+ unlock_buffer(bh);
-+
-+ if ((err = ext3_journal_dirty_metadata(handle, bh)))
-+ goto cleanup;
-+ brelse(bh);
-+ bh = NULL;
-+
-+ /* correct old leaf */
-+ if (m) {
-+ if ((err = ext3_ext_get_access(handle, tree, path + depth)))
-+ goto cleanup;
-+ path[depth].p_hdr->eh_entries -= m;
-+ if ((err = ext3_ext_dirty(handle, tree, path + depth)))
-+ goto cleanup;
-+
-+ }
-+
-+ /* create intermediate indexes */
-+ k = depth - at - 1;
-+ EXT_ASSERT(k >= 0);
-+ if (k)
-+ ext_debug(tree, "create %d intermediate indices\n", k);
-+ /* insert new index into current index block */
-+ /* current depth stored in i var */
-+ i = depth - 1;
-+ while (k--) {
-+ oldblock = newblock;
-+ newblock = ablocks[--a];
-+ bh = sb_getblk(tree->inode->i_sb, newblock);
-+ if (!bh) {
-+ err = -EIO;
-+ goto cleanup;
-+ }
-+ lock_buffer(bh);
-+
-+ if ((err = ext3_journal_get_create_access(handle, bh)))
-+ goto cleanup;
-+
-+ neh = EXT_BLOCK_HDR(bh);
-+ neh->eh_entries = 1;
-+ neh->eh_magic = EXT3_EXT_MAGIC;
-+ neh->eh_max = ext3_ext_space_block_idx(tree);
-+ neh->eh_depth = depth - i;
-+ fidx = EXT_FIRST_INDEX(neh);
-+ fidx->ei_block = border;
-+ fidx->ei_leaf = oldblock;
-+ fidx->ei_leaf_hi = fidx->ei_unused = 0;
-+
-+ ext_debug(tree, "int.index at %d (block %lu): %lu -> %lu\n",
-+ i, newblock, border, oldblock);
-+ /* copy indexes */
-+ m = 0;
-+ path[i].p_idx++;
-+
-+ ext_debug(tree, "cur 0x%p, last 0x%p\n", path[i].p_idx,
-+ EXT_MAX_INDEX(path[i].p_hdr));
-+ EXT_ASSERT(EXT_MAX_INDEX(path[i].p_hdr) ==
-+ EXT_LAST_INDEX(path[i].p_hdr));
-+ while (path[i].p_idx <= EXT_MAX_INDEX(path[i].p_hdr)) {
-+ ext_debug(tree, "%d: move %d:%d in new index %lu\n",
-+ i, path[i].p_idx->ei_block,
-+ path[i].p_idx->ei_leaf, newblock);
-+ memmove(++fidx, path[i].p_idx++,
-+ sizeof(struct ext3_extent_idx));
-+ neh->eh_entries++;
-+ EXT_ASSERT(neh->eh_entries <= neh->eh_max);
-+ m++;
-+ }
-+ set_buffer_uptodate(bh);
-+ unlock_buffer(bh);
-+
-+ if ((err = ext3_journal_dirty_metadata(handle, bh)))
-+ goto cleanup;
-+ brelse(bh);
-+ bh = NULL;
-+
-+ /* correct old index */
-+ if (m) {
-+ err = ext3_ext_get_access(handle, tree, path + i);
-+ if (err)
-+ goto cleanup;
-+ path[i].p_hdr->eh_entries -= m;
-+ err = ext3_ext_dirty(handle, tree, path + i);
-+ if (err)
-+ goto cleanup;
-+ }
-+
-+ i--;
-+ }
-+
-+ /* insert new index */
-+ if (!err)
-+ err = ext3_ext_insert_index(handle, tree, path + at,
-+ border, newblock);
-+
-+cleanup:
-+ if (bh) {
-+ if (buffer_locked(bh))
-+ unlock_buffer(bh);
-+ brelse(bh);
-+ }
-+
-+ if (err) {
-+ /* free all allocated blocks in error case */
-+ for (i = 0; i < depth; i++) {
-+ if (!ablocks[i])
-+ continue;
-+ ext3_free_blocks(handle, tree->inode, ablocks[i], 1);
-+ }
-+ }
-+ kfree(ablocks);
-+
-+ return err;
-+}
-+
-+/*
-+ * routine implements tree growing procedure:
-+ * - allocates new block
-+ * - moves top-level data (index block or leaf) into the new block
-+ * - initialize new top-level, creating index that points to the
-+ * just created block
-+ */
-+static int ext3_ext_grow_indepth(handle_t *handle,
-+ struct ext3_extents_tree *tree,
-+ struct ext3_ext_path *path,
-+ struct ext3_extent *newext)
-+{
-+ struct ext3_ext_path *curp = path;
-+ struct ext3_extent_header *neh;
-+ struct ext3_extent_idx *fidx;
-+ struct buffer_head *bh;
-+ unsigned long newblock;
-+ int err = 0;
-+
-+ newblock = ext3_ext_new_block(handle, tree, path, newext, &err);
-+ if (newblock == 0)
-+ return err;
-+
-+ bh = sb_getblk(tree->inode->i_sb, newblock);
-+ if (!bh) {
-+ err = -EIO;
-+ ext3_std_error(tree->inode->i_sb, err);
-+ return err;
-+ }
-+ lock_buffer(bh);
-+
-+ if ((err = ext3_journal_get_create_access(handle, bh))) {
-+ unlock_buffer(bh);
-+ goto out;
-+ }
-+
-+ /* move top-level index/leaf into new block */
-+ memmove(bh->b_data, curp->p_hdr, tree->buffer_len);
-+
-+ /* set size of new block */
-+ neh = EXT_BLOCK_HDR(bh);
-+ /* old root could have indexes or leaves
-+ * so calculate eh_max the right way */
-+ if (EXT_DEPTH(tree))
-+ neh->eh_max = ext3_ext_space_block_idx(tree);
-+ else
-+ neh->eh_max = ext3_ext_space_block(tree);
-+ neh->eh_magic = EXT3_EXT_MAGIC;
-+ set_buffer_uptodate(bh);
-+ unlock_buffer(bh);
-+
-+ if ((err = ext3_journal_dirty_metadata(handle, bh)))
-+ goto out;
-+
-+ /* create index in new top-level index: num,max,pointer */
-+ if ((err = ext3_ext_get_access(handle, tree, curp)))
-+ goto out;
-+
-+ curp->p_hdr->eh_magic = EXT3_EXT_MAGIC;
-+ curp->p_hdr->eh_max = ext3_ext_space_root_idx(tree);
-+ curp->p_hdr->eh_entries = 1;
-+ curp->p_idx = EXT_FIRST_INDEX(curp->p_hdr);
-+ /* FIXME: it works, but actually path[0] can be index */
-+ curp->p_idx->ei_block = EXT_FIRST_EXTENT(path[0].p_hdr)->ee_block;
-+ curp->p_idx->ei_leaf = newblock;
-+ curp->p_idx->ei_leaf_hi = curp->p_idx->ei_unused = 0;
-+
-+ neh = EXT_ROOT_HDR(tree);
-+ fidx = EXT_FIRST_INDEX(neh);
-+ ext_debug(tree, "new root: num %d(%d), lblock %d, ptr %d\n",
-+ neh->eh_entries, neh->eh_max, fidx->ei_block, fidx->ei_leaf);
-+
-+ neh->eh_depth = path->p_depth + 1;
-+ err = ext3_ext_dirty(handle, tree, curp);
-+out:
-+ brelse(bh);
-+
-+ return err;
-+}
-+
-+/*
-+ * routine finds empty index and adds new leaf. if no free index found
-+ * then it requests growing in depth
-+ */
-+static int ext3_ext_create_new_leaf(handle_t *handle,
-+ struct ext3_extents_tree *tree,
-+ struct ext3_ext_path *path,
-+ struct ext3_extent *newext)
-+{
-+ struct ext3_ext_path *curp;
-+ int depth, i, err = 0;
-+
-+repeat:
-+ i = depth = EXT_DEPTH(tree);
-+
-+ /* walk up to the tree and look for free index entry */
-+ curp = path + depth;
-+ while (i > 0 && !EXT_HAS_FREE_INDEX(curp)) {
-+ i--;
-+ curp--;
-+ }
-+
-+ /* we use an already allocated block for the index block,
-+ * so subsequent data blocks should be contiguous */
-+ if (EXT_HAS_FREE_INDEX(curp)) {
-+ /* if we found index with free entry, then use that
-+ * entry: create all needed subtree and add new leaf */
-+ err = ext3_ext_split(handle, tree, path, newext, i);
-+
-+ /* refill path */
-+ ext3_ext_drop_refs(path);
-+ path = ext3_ext_find_extent(tree, newext->ee_block, path);
-+ if (IS_ERR(path))
-+ err = PTR_ERR(path);
-+ } else {
-+ /* tree is full, time to grow in depth */
-+ err = ext3_ext_grow_indepth(handle, tree, path, newext);
-+
-+ /* refill path */
-+ ext3_ext_drop_refs(path);
-+ path = ext3_ext_find_extent(tree, newext->ee_block, path);
-+ if (IS_ERR(path))
-+ err = PTR_ERR(path);
-+
-+ /*
-+ * only the first grow (depth 0 -> 1) produces free space;
-+ * in all other cases we have to split the grown tree
-+ */
-+ depth = EXT_DEPTH(tree);
-+ if (path[depth].p_hdr->eh_entries == path[depth].p_hdr->eh_max) {
-+ /* now we need split */
-+ goto repeat;
-+ }
-+ }
-+
-+ if (err)
-+ return err;
-+
-+ return 0;
-+}
-+
-+/*
-+ * returns allocated block in subsequent extent or EXT_MAX_BLOCK
-+ * NOTE: it considers the block number from an index entry as
-+ * an allocated block. thus, index entries have to be consistent
-+ * with the leaves
-+ */
-+static unsigned long
-+ext3_ext_next_allocated_block(struct ext3_ext_path *path)
-+{
-+ int depth;
-+
-+ EXT_ASSERT(path != NULL);
-+ depth = path->p_depth;
-+
-+ if (depth == 0 && path->p_ext == NULL)
-+ return EXT_MAX_BLOCK;
-+
-+ /* FIXME: what if index isn't full ?! */
-+ while (depth >= 0) {
-+ if (depth == path->p_depth) {
-+ /* leaf */
-+ if (path[depth].p_ext !=
-+ EXT_LAST_EXTENT(path[depth].p_hdr))
-+ return path[depth].p_ext[1].ee_block;
-+ } else {
-+ /* index */
-+ if (path[depth].p_idx !=
-+ EXT_LAST_INDEX(path[depth].p_hdr))
-+ return path[depth].p_idx[1].ei_block;
-+ }
-+ depth--;
-+ }
-+
-+ return EXT_MAX_BLOCK;
-+}
-+
-+/*
-+ * returns first allocated block from next leaf or EXT_MAX_BLOCK
-+ */
-+static unsigned ext3_ext_next_leaf_block(struct ext3_extents_tree *tree,
-+ struct ext3_ext_path *path)
-+{
-+ int depth;
-+
-+ EXT_ASSERT(path != NULL);
-+ depth = path->p_depth;
-+
-+ /* zero-tree has no leaf blocks at all */
-+ if (depth == 0)
-+ return EXT_MAX_BLOCK;
-+
-+ /* go to index block */
-+ depth--;
-+
-+ while (depth >= 0) {
-+ if (path[depth].p_idx !=
-+ EXT_LAST_INDEX(path[depth].p_hdr))
-+ return path[depth].p_idx[1].ei_block;
-+ depth--;
-+ }
-+
-+ return EXT_MAX_BLOCK;
-+}
-+
-+/*
-+ * if leaf gets modified and modified extent is first in the leaf
-+ * then we have to correct all indexes above
-+ * TODO: do we need to correct tree in all cases?
-+ */
-+int ext3_ext_correct_indexes(handle_t *handle, struct ext3_extents_tree *tree,
-+ struct ext3_ext_path *path)
-+{
-+ struct ext3_extent_header *eh;
-+ int depth = EXT_DEPTH(tree);
-+ struct ext3_extent *ex;
-+ unsigned long border;
-+ int k, err = 0;
-+
-+ eh = path[depth].p_hdr;
-+ ex = path[depth].p_ext;
-+ EXT_ASSERT(ex);
-+ EXT_ASSERT(eh);
-+
-+ if (depth == 0) {
-+ /* there is no tree at all */
-+ return 0;
-+ }
-+
-+ if (ex != EXT_FIRST_EXTENT(eh)) {
-+ /* we correct the tree only if the first extent in the leaf was modified */
-+ return 0;
-+ }
-+
-+ /*
-+ * TODO: we need correction if border is smaller than the current one
-+ */
-+ k = depth - 1;
-+ border = path[depth].p_ext->ee_block;
-+ if ((err = ext3_ext_get_access(handle, tree, path + k)))
-+ return err;
-+ path[k].p_idx->ei_block = border;
-+ if ((err = ext3_ext_dirty(handle, tree, path + k)))
-+ return err;
-+
-+ while (k--) {
-+ /* change all left-side indexes */
-+ if (path[k+1].p_idx != EXT_FIRST_INDEX(path[k+1].p_hdr))
-+ break;
-+ if ((err = ext3_ext_get_access(handle, tree, path + k)))
-+ break;
-+ path[k].p_idx->ei_block = border;
-+ if ((err = ext3_ext_dirty(handle, tree, path + k)))
-+ break;
-+ }
-+
-+ return err;
-+}
-+
-+static inline int
-+ext3_can_extents_be_merged(struct ext3_extents_tree *tree,
-+ struct ext3_extent *ex1,
-+ struct ext3_extent *ex2)
-+{
-+ if (ex1->ee_block + ex1->ee_len != ex2->ee_block)
-+ return 0;
-+
-+#ifdef AGRESSIVE_TEST
-+ if (ex1->ee_len >= 4)
-+ return 0;
-+#endif
-+
-+ if (!tree->ops->mergable)
-+ return 1;
-+
-+ return tree->ops->mergable(ex1, ex2);
-+}
-+
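
Note that when ext3_ext_insert_extent() merges, it only extends ee_len and keeps the existing ee_start, so logical adjacency alone is not enough for the blockmap tree; that is why the ext3_ext_mergable() helper registered later in this file also demands physical adjacency. A tiny sketch with hypothetical values (toy_extent and toy_mergeable are illustrative names, not part of the patch):

    /* Logical vs. physical adjacency, with made-up numbers. */
    struct toy_extent { unsigned int ee_block, ee_len, ee_start; };

    int toy_mergeable(const struct toy_extent *ex1, const struct toy_extent *ex2)
    {
            if (ex1->ee_block + ex1->ee_len != ex2->ee_block)
                    return 0;                       /* not logically adjacent */
            return ex1->ee_start + ex1->ee_len == ex2->ee_start; /* physically adjacent too */
    }

    /* { 100, 8, 500 } followed by { 108, 4, 508 } merges into { 100, 12, 500 };
     * { 100, 8, 500 } followed by { 108, 4, 900 } does not, despite adjacent
     * logical blocks, because the merged extent keeps a single ee_start. */
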
-+/*
-+ * this routine tries to merge the requested extent into the existing
-+ * extent or inserts requested extent as new one into the tree,
-+ * creating new leaf in no-space case
-+ */
-+int ext3_ext_insert_extent(handle_t *handle, struct ext3_extents_tree *tree,
-+ struct ext3_ext_path *path,
-+ struct ext3_extent *newext)
-+{
-+ struct ext3_extent_header * eh;
-+ struct ext3_extent *ex, *fex;
-+ struct ext3_extent *nearex; /* nearest extent */
-+ struct ext3_ext_path *npath = NULL;
-+ int depth, len, err, next;
-+
-+ EXT_ASSERT(newext->ee_len > 0);
-+ depth = EXT_DEPTH(tree);
-+ ex = path[depth].p_ext;
-+ EXT_ASSERT(path[depth].p_hdr);
-+
-+ /* try to insert block into found extent and return */
-+ if (ex && ext3_can_extents_be_merged(tree, ex, newext)) {
-+ ext_debug(tree, "append %d block to %d:%d (from %d)\n",
-+ newext->ee_len, ex->ee_block, ex->ee_len,
-+ ex->ee_start);
-+ if ((err = ext3_ext_get_access(handle, tree, path + depth)))
-+ return err;
-+ ex->ee_len += newext->ee_len;
-+ eh = path[depth].p_hdr;
-+ nearex = ex;
-+ goto merge;
-+ }
-+
-+repeat:
-+ depth = EXT_DEPTH(tree);
-+ eh = path[depth].p_hdr;
-+ if (eh->eh_entries < eh->eh_max)
-+ goto has_space;
-+
-+ /* probably next leaf has space for us? */
-+ fex = EXT_LAST_EXTENT(eh);
-+ next = ext3_ext_next_leaf_block(tree, path);
-+ if (newext->ee_block > fex->ee_block && next != EXT_MAX_BLOCK) {
-+ ext_debug(tree, "next leaf block - %d\n", next);
-+ EXT_ASSERT(!npath);
-+ npath = ext3_ext_find_extent(tree, next, NULL);
-+ if (IS_ERR(npath))
-+ return PTR_ERR(npath);
-+ EXT_ASSERT(npath->p_depth == path->p_depth);
-+ eh = npath[depth].p_hdr;
-+ if (eh->eh_entries < eh->eh_max) {
-+ ext_debug(tree, "next leaf isnt full(%d)\n",
-+ eh->eh_entries);
-+ path = npath;
-+ goto repeat;
-+ }
-+ ext_debug(tree, "next leaf hasno free space(%d,%d)\n",
-+ eh->eh_entries, eh->eh_max);
-+ }
-+
-+ /*
-+ * there is no free space in the found leaf;
-+ * we're gonna add a new leaf to the tree
-+ */
-+ err = ext3_ext_create_new_leaf(handle, tree, path, newext);
-+ if (err)
-+ goto cleanup;
-+ depth = EXT_DEPTH(tree);
-+ eh = path[depth].p_hdr;
-+
-+has_space:
-+ nearex = path[depth].p_ext;
-+
-+ if ((err = ext3_ext_get_access(handle, tree, path + depth)))
-+ goto cleanup;
-+
-+ if (!nearex) {
-+ /* there is no extent in this leaf, create first one */
-+ ext_debug(tree, "first extent in the leaf: %d:%d:%d\n",
-+ newext->ee_block, newext->ee_start,
-+ newext->ee_len);
-+ path[depth].p_ext = EXT_FIRST_EXTENT(eh);
-+ } else if (newext->ee_block > nearex->ee_block) {
-+ EXT_ASSERT(newext->ee_block != nearex->ee_block);
-+ if (nearex != EXT_LAST_EXTENT(eh)) {
-+ len = EXT_MAX_EXTENT(eh) - nearex;
-+ len = (len - 1) * sizeof(struct ext3_extent);
-+ len = len < 0 ? 0 : len;
-+ ext_debug(tree, "insert %d:%d:%d after: nearest 0x%p, "
-+ "move %d from 0x%p to 0x%p\n",
-+ newext->ee_block, newext->ee_start,
-+ newext->ee_len,
-+ nearex, len, nearex + 1, nearex + 2);
-+ memmove(nearex + 2, nearex + 1, len);
-+ }
-+ path[depth].p_ext = nearex + 1;
-+ } else {
-+ EXT_ASSERT(newext->ee_block != nearex->ee_block);
-+ len = (EXT_MAX_EXTENT(eh) - nearex) * sizeof(struct ext3_extent);
-+ len = len < 0 ? 0 : len;
-+ ext_debug(tree, "insert %d:%d:%d before: nearest 0x%p, "
-+ "move %d from 0x%p to 0x%p\n",
-+ newext->ee_block, newext->ee_start, newext->ee_len,
-+ nearex, len, nearex + 1, nearex + 2);
-+ memmove(nearex + 1, nearex, len);
-+ path[depth].p_ext = nearex;
-+ }
-+
-+ eh->eh_entries++;
-+ nearex = path[depth].p_ext;
-+ nearex->ee_block = newext->ee_block;
-+ nearex->ee_start = newext->ee_start;
-+ nearex->ee_len = newext->ee_len;
-+ /* FIXME: support for large fs */
-+ nearex->ee_start_hi = 0;
-+
-+merge:
-+ /* try to merge extents to the right */
-+ while (nearex < EXT_LAST_EXTENT(eh)) {
-+ if (!ext3_can_extents_be_merged(tree, nearex, nearex + 1))
-+ break;
-+ /* merge with next extent! */
-+ nearex->ee_len += nearex[1].ee_len;
-+ if (nearex + 1 < EXT_LAST_EXTENT(eh)) {
-+ len = (EXT_LAST_EXTENT(eh) - nearex - 1) *
-+ sizeof(struct ext3_extent);
-+ memmove(nearex + 1, nearex + 2, len);
-+ }
-+ eh->eh_entries--;
-+ EXT_ASSERT(eh->eh_entries > 0);
-+ }
-+
-+ /* try to merge extents to the left */
-+
-+ /* time to correct all indexes above */
-+ err = ext3_ext_correct_indexes(handle, tree, path);
-+ if (err)
-+ goto cleanup;
-+
-+ err = ext3_ext_dirty(handle, tree, path + depth);
-+
-+cleanup:
-+ if (npath) {
-+ ext3_ext_drop_refs(npath);
-+ kfree(npath);
-+ }
-+ ext3_ext_tree_changed(tree);
-+ ext3_ext_invalidate_cache(tree);
-+ return err;
-+}
-+
-+int ext3_ext_walk_space(struct ext3_extents_tree *tree, unsigned long block,
-+ unsigned long num, ext_prepare_callback func)
-+{
-+ struct ext3_ext_path *path = NULL;
-+ struct ext3_ext_cache cbex;
-+ struct ext3_extent *ex;
-+ unsigned long next, start = 0, end = 0;
-+ unsigned long last = block + num;
-+ int depth, exists, err = 0;
-+
-+ EXT_ASSERT(tree);
-+ EXT_ASSERT(func);
-+ EXT_ASSERT(tree->inode);
-+ EXT_ASSERT(tree->root);
-+
-+ while (block < last && block != EXT_MAX_BLOCK) {
-+ num = last - block;
-+ /* find extent for this block */
-+ path = ext3_ext_find_extent(tree, block, path);
-+ if (IS_ERR(path)) {
-+ err = PTR_ERR(path);
-+ path = NULL;
-+ break;
-+ }
-+
-+ depth = EXT_DEPTH(tree);
-+ EXT_ASSERT(path[depth].p_hdr);
-+ ex = path[depth].p_ext;
-+ next = ext3_ext_next_allocated_block(path);
-+
-+ exists = 0;
-+ if (!ex) {
-+ /* there is no extent yet, so try to allocate
-+ * all requested space */
-+ start = block;
-+ end = block + num;
-+ } else if (ex->ee_block > block) {
-+ /* need to allocate space before found extent */
-+ start = block;
-+ end = ex->ee_block;
-+ if (block + num < end)
-+ end = block + num;
-+ } else if (block >= ex->ee_block + ex->ee_len) {
-+ /* need to allocate space after found extent */
-+ start = block;
-+ end = block + num;
-+ if (end >= next)
-+ end = next;
-+ } else if (block >= ex->ee_block) {
-+ /*
-+ * some part of requested space is covered
-+ * by found extent
-+ */
-+ start = block;
-+ end = ex->ee_block + ex->ee_len;
-+ if (block + num < end)
-+ end = block + num;
-+ exists = 1;
-+ } else {
-+ BUG();
-+ }
-+ EXT_ASSERT(end > start);
-+
-+ if (!exists) {
-+ cbex.ec_block = start;
-+ cbex.ec_len = end - start;
-+ cbex.ec_start = 0;
-+ cbex.ec_type = EXT3_EXT_CACHE_GAP;
-+ } else {
-+ cbex.ec_block = ex->ee_block;
-+ cbex.ec_len = ex->ee_len;
-+ cbex.ec_start = ex->ee_start;
-+ cbex.ec_type = EXT3_EXT_CACHE_EXTENT;
-+ }
-+
-+ EXT_ASSERT(cbex.ec_len > 0);
-+ EXT_ASSERT(path[depth].p_hdr);
-+ err = func(tree, path, &cbex);
-+ ext3_ext_drop_refs(path);
-+
-+ if (err < 0)
-+ break;
-+ if (err == EXT_REPEAT)
-+ continue;
-+ else if (err == EXT_BREAK) {
-+ err = 0;
-+ break;
-+ }
-+
-+ if (EXT_DEPTH(tree) != depth) {
-+ /* depth was changed. we have to realloc path */
-+ kfree(path);
-+ path = NULL;
-+ }
-+
-+ block = cbex.ec_block + cbex.ec_len;
-+ }
-+
-+ if (path) {
-+ ext3_ext_drop_refs(path);
-+ kfree(path);
-+ }
-+
-+ return err;
-+}
-+
-+static inline void
-+ext3_ext_put_in_cache(struct ext3_extents_tree *tree, __u32 block,
-+ __u32 len, __u32 start, int type)
-+{
-+ EXT_ASSERT(len > 0);
-+ if (tree->cex) {
-+ tree->cex->ec_type = type;
-+ tree->cex->ec_block = block;
-+ tree->cex->ec_len = len;
-+ tree->cex->ec_start = start;
-+ }
-+}
-+
-+/*
-+ * this routine calculates the boundaries of the gap the requested block fits into
-+ * and caches this gap
-+ */
-+static inline void
-+ext3_ext_put_gap_in_cache(struct ext3_extents_tree *tree,
-+ struct ext3_ext_path *path,
-+ unsigned long block)
-+{
-+ int depth = EXT_DEPTH(tree);
-+ unsigned long lblock, len;
-+ struct ext3_extent *ex;
-+
-+ if (!tree->cex)
-+ return;
-+
-+ ex = path[depth].p_ext;
-+ if (ex == NULL) {
-+ /* there is no extent yet, so gap is [0;-] */
-+ lblock = 0;
-+ len = EXT_MAX_BLOCK;
-+ ext_debug(tree, "cache gap(whole file):");
-+ } else if (block < ex->ee_block) {
-+ lblock = block;
-+ len = ex->ee_block - block;
-+ ext_debug(tree, "cache gap(before): %lu [%lu:%lu]",
-+ (unsigned long) block,
-+ (unsigned long) ex->ee_block,
-+ (unsigned long) ex->ee_len);
-+ } else if (block >= ex->ee_block + ex->ee_len) {
-+ lblock = ex->ee_block + ex->ee_len;
-+ len = ext3_ext_next_allocated_block(path);
-+ ext_debug(tree, "cache gap(after): [%lu:%lu] %lu",
-+ (unsigned long) ex->ee_block,
-+ (unsigned long) ex->ee_len,
-+ (unsigned long) block);
-+ EXT_ASSERT(len > lblock);
-+ len = len - lblock;
-+ } else {
-+ lblock = len = 0;
-+ BUG();
-+ }
-+
-+ ext_debug(tree, " -> %lu:%lu\n", (unsigned long) lblock, len);
-+ ext3_ext_put_in_cache(tree, lblock, len, 0, EXT3_EXT_CACHE_GAP);
-+}
-+
-+static inline int
-+ext3_ext_in_cache(struct ext3_extents_tree *tree, unsigned long block,
-+ struct ext3_extent *ex)
-+{
-+ struct ext3_ext_cache *cex = tree->cex;
-+
-+ /* is there cache storage at all? */
-+ if (!cex)
-+ return EXT3_EXT_CACHE_NO;
-+
-+ /* has cache valid data? */
-+ if (cex->ec_type == EXT3_EXT_CACHE_NO)
-+ return EXT3_EXT_CACHE_NO;
-+
-+ EXT_ASSERT(cex->ec_type == EXT3_EXT_CACHE_GAP ||
-+ cex->ec_type == EXT3_EXT_CACHE_EXTENT);
-+ if (block >= cex->ec_block && block < cex->ec_block + cex->ec_len) {
-+ ex->ee_block = cex->ec_block;
-+ ex->ee_start = cex->ec_start;
-+ ex->ee_start_hi = 0;
-+ ex->ee_len = cex->ec_len;
-+ ext_debug(tree, "%lu cached by %lu:%lu:%lu\n",
-+ (unsigned long) block,
-+ (unsigned long) ex->ee_block,
-+ (unsigned long) ex->ee_len,
-+ (unsigned long) ex->ee_start);
-+ return cex->ec_type;
-+ }
-+
-+ /* not in cache */
-+ return EXT3_EXT_CACHE_NO;
-+}
-+
-+/*
-+ * routine removes index from the index block
-+ * it's used in truncate case only. thus all requests are for
-+ * last index in the block only
-+ */
-+int ext3_ext_rm_idx(handle_t *handle, struct ext3_extents_tree *tree,
-+ struct ext3_ext_path *path)
-+{
-+ struct buffer_head *bh;
-+ int err;
-+
-+ /* free index block */
-+ path--;
-+ EXT_ASSERT(path->p_hdr->eh_entries);
-+ if ((err = ext3_ext_get_access(handle, tree, path)))
-+ return err;
-+ path->p_hdr->eh_entries--;
-+ if ((err = ext3_ext_dirty(handle, tree, path)))
-+ return err;
-+ ext_debug(tree, "index is empty, remove it, free block %d\n",
-+ path->p_idx->ei_leaf);
-+ bh = sb_find_get_block(tree->inode->i_sb, path->p_idx->ei_leaf);
-+ ext3_forget(handle, 1, tree->inode, bh, path->p_idx->ei_leaf);
-+ ext3_free_blocks(handle, tree->inode, path->p_idx->ei_leaf, 1);
-+ return err;
-+}
-+
-+int ext3_ext_calc_credits_for_insert(struct ext3_extents_tree *tree,
-+ struct ext3_ext_path *path)
-+{
-+ int depth = EXT_DEPTH(tree);
-+ int needed;
-+
-+ if (path) {
-+ /* probably there is space in leaf? */
-+ if (path[depth].p_hdr->eh_entries < path[depth].p_hdr->eh_max)
-+ return 1;
-+ }
-+
-+ /*
-+ * the worst case we're expecting is creation of a
-+ * new root (growing in depth) with index splitting;
-+ * for splitting we have to consider depth + 1 because
-+ * a previous grow could have increased it
-+ */
-+ depth = depth + 1;
-+
-+ /*
-+ * growing in depth:
-+ * block allocation + new root + old root
-+ */
-+ needed = EXT3_ALLOC_NEEDED + 2;
-+
-+ /* index split. we may need:
-+ * allocate intermediate indexes and new leaf
-+ * change two blocks at each level except the root
-+ * modify root block (inode)
-+ */
-+ needed += (depth * EXT3_ALLOC_NEEDED) + (2 * depth) + 1;
-+
-+ return needed;
-+}
-+
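
Written out, the no-path worst case above is (EXT3_ALLOC_NEEDED + 2) credits for growing in depth plus (depth + 1) * EXT3_ALLOC_NEEDED + 2 * (depth + 1) + 1 for then splitting every level. A small sketch of that arithmetic, not part of the patch; the real value of EXT3_ALLOC_NEEDED lives in the companion headers, so it is kept as a parameter A here:

    /* Worst-case credit count for an insert when no path is supplied.
     * D is the current tree depth, A stands in for EXT3_ALLOC_NEEDED. */
    int worst_case_insert_credits(int D, int A)
    {
            int depth = D + 1;                   /* a previous grow may add a level */
            int needed = A + 2;                  /* grow in depth: alloc + new root + old root */

            needed += depth * A + 2 * depth + 1; /* split at every level + root block (inode) */
            return needed;                       /* e.g. D = 2 with A = 3 gives 21 */
    }
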
-+static int
-+ext3_ext_split_for_rm(handle_t *handle, struct ext3_extents_tree *tree,
-+ struct ext3_ext_path *path, unsigned long start,
-+ unsigned long end)
-+{
-+ struct ext3_extent *ex, tex;
-+ struct ext3_ext_path *npath;
-+ int depth, creds, err;
-+
-+ depth = EXT_DEPTH(tree);
-+ ex = path[depth].p_ext;
-+ EXT_ASSERT(ex);
-+ EXT_ASSERT(end < ex->ee_block + ex->ee_len - 1);
-+ EXT_ASSERT(ex->ee_block < start);
-+
-+ /* calculate tail extent */
-+ tex.ee_block = end + 1;
-+ EXT_ASSERT(tex.ee_block < ex->ee_block + ex->ee_len);
-+ tex.ee_len = ex->ee_block + ex->ee_len - tex.ee_block;
-+
-+ creds = ext3_ext_calc_credits_for_insert(tree, path);
-+ handle = ext3_ext_journal_restart(handle, creds);
-+ if (IS_ERR(handle))
-+ return PTR_ERR(handle);
-+
-+ /* calculate head extent. use primary extent */
-+ err = ext3_ext_get_access(handle, tree, path + depth);
-+ if (err)
-+ return err;
-+ ex->ee_len = start - ex->ee_block;
-+ err = ext3_ext_dirty(handle, tree, path + depth);
-+ if (err)
-+ return err;
-+
-+ /* FIXME: some callback to free underlying resource
-+ * and correct ee_start? */
-+ ext_debug(tree, "split extent: head %u:%u, tail %u:%u\n",
-+ ex->ee_block, ex->ee_len, tex.ee_block, tex.ee_len);
-+
-+ npath = ext3_ext_find_extent(tree, ex->ee_block, NULL);
-+ if (IS_ERR(npath))
-+ return PTR_ERR(npath);
-+ depth = EXT_DEPTH(tree);
-+ EXT_ASSERT(npath[depth].p_ext->ee_block == ex->ee_block);
-+ EXT_ASSERT(npath[depth].p_ext->ee_len == ex->ee_len);
-+
-+ err = ext3_ext_insert_extent(handle, tree, npath, &tex);
-+ ext3_ext_drop_refs(npath);
-+ kfree(npath);
-+
-+ return err;
-+}
-+
-+static int
-+ext3_ext_rm_leaf(handle_t *handle, struct ext3_extents_tree *tree,
-+ struct ext3_ext_path *path, unsigned long start,
-+ unsigned long end)
-+{
-+ struct ext3_extent *ex, *fu = NULL, *lu, *le;
-+ int err = 0, correct_index = 0;
-+ int depth = EXT_DEPTH(tree), credits;
-+ struct ext3_extent_header *eh;
-+ unsigned a, b, block, num;
-+
-+ ext_debug(tree, "remove [%lu:%lu] in leaf\n", start, end);
-+ if (!path[depth].p_hdr)
-+ path[depth].p_hdr = EXT_BLOCK_HDR(path[depth].p_bh);
-+ eh = path[depth].p_hdr;
-+ EXT_ASSERT(eh);
-+ EXT_ASSERT(eh->eh_entries <= eh->eh_max);
-+ EXT_ASSERT(eh->eh_magic == EXT3_EXT_MAGIC);
-+
-+ /* find where to start removing */
-+ le = ex = EXT_LAST_EXTENT(eh);
-+ while (ex != EXT_FIRST_EXTENT(eh)) {
-+ if (ex->ee_block <= end)
-+ break;
-+ ex--;
-+ }
-+
-+ if (start > ex->ee_block && end < ex->ee_block + ex->ee_len - 1) {
-+ /* removal of an internal part of the extent was requested;
-+ * tail and head must be placed in different extents,
-+ * so we have to insert one more extent */
-+ path[depth].p_ext = ex;
-+ return ext3_ext_split_for_rm(handle, tree, path, start, end);
-+ }
-+
-+ lu = ex;
-+ while (ex >= EXT_FIRST_EXTENT(eh) && ex->ee_block + ex->ee_len > start) {
-+ ext_debug(tree, "remove ext %u:%u\n", ex->ee_block, ex->ee_len);
-+ path[depth].p_ext = ex;
-+
-+ a = ex->ee_block > start ? ex->ee_block : start;
-+ b = ex->ee_block + ex->ee_len - 1 < end ?
-+ ex->ee_block + ex->ee_len - 1 : end;
-+
-+ ext_debug(tree, " border %u:%u\n", a, b);
-+
-+ if (a != ex->ee_block && b != ex->ee_block + ex->ee_len - 1) {
-+ block = 0;
-+ num = 0;
-+ BUG();
-+ } else if (a != ex->ee_block) {
-+ /* remove tail of the extent */
-+ block = ex->ee_block;
-+ num = a - block;
-+ } else if (b != ex->ee_block + ex->ee_len - 1) {
-+ /* remove head of the extent */
-+ block = a;
-+ num = b - a;
-+ } else {
-+ /* remove whole extent: excellent! */
-+ block = ex->ee_block;
-+ num = 0;
-+ EXT_ASSERT(a == ex->ee_block &&
-+ b == ex->ee_block + ex->ee_len - 1);
-+ }
-+
-+ if (ex == EXT_FIRST_EXTENT(eh))
-+ correct_index = 1;
-+
-+ credits = 1;
-+ if (correct_index)
-+ credits += (EXT_DEPTH(tree) * EXT3_ALLOC_NEEDED) + 1;
-+ if (tree->ops->remove_extent_credits)
-+ credits += tree->ops->remove_extent_credits(tree, ex, a, b);
-+
-+ handle = ext3_ext_journal_restart(handle, credits);
-+ if (IS_ERR(handle)) {
-+ err = PTR_ERR(handle);
-+ goto out;
-+ }
-+
-+ err = ext3_ext_get_access(handle, tree, path + depth);
-+ if (err)
-+ goto out;
-+
-+ if (tree->ops->remove_extent)
-+ err = tree->ops->remove_extent(tree, ex, a, b);
-+ if (err)
-+ goto out;
-+
-+ if (num == 0) {
-+ /* this extent is removed entirely; mark the slot unused */
-+ ex->ee_start = ex->ee_start_hi = 0;
-+ eh->eh_entries--;
-+ fu = ex;
-+ }
-+
-+ ex->ee_block = block;
-+ ex->ee_len = num;
-+
-+ err = ext3_ext_dirty(handle, tree, path + depth);
-+ if (err)
-+ goto out;
-+
-+ ext_debug(tree, "new extent: %u:%u:%u\n",
-+ ex->ee_block, ex->ee_len, ex->ee_start);
-+ ex--;
-+ }
-+
-+ if (fu) {
-+ /* reuse unused slots */
-+ while (lu < le) {
-+ if (lu->ee_start) {
-+ *fu = *lu;
-+ lu->ee_start = lu->ee_start_hi = 0;
-+ fu++;
-+ }
-+ lu++;
-+ }
-+ }
-+
-+ if (correct_index && eh->eh_entries)
-+ err = ext3_ext_correct_indexes(handle, tree, path);
-+
-+ /* if this leaf is free, then we should
-+ * remove it from index block above */
-+ if (err == 0 && eh->eh_entries == 0 && path[depth].p_bh != NULL)
-+ err = ext3_ext_rm_idx(handle, tree, path + depth);
-+
-+out:
-+ return err;
-+}
-+
-+
-+static struct ext3_extent_idx *
-+ext3_ext_last_covered(struct ext3_extent_header *hdr, unsigned long block)
-+{
-+ struct ext3_extent_idx *ix;
-+
-+ ix = EXT_LAST_INDEX(hdr);
-+ while (ix != EXT_FIRST_INDEX(hdr)) {
-+ if (ix->ei_block <= block)
-+ break;
-+ ix--;
-+ }
-+ return ix;
-+}
-+
-+/*
-+ * returns 1 if the current index has to be freed (even partially)
-+ */
-+static inline int
-+ext3_ext_more_to_rm(struct ext3_ext_path *path)
-+{
-+ EXT_ASSERT(path->p_idx);
-+
-+ if (path->p_idx < EXT_FIRST_INDEX(path->p_hdr))
-+ return 0;
-+
-+ /*
-+ * if truncate on a deeper level happened, it wasn't partial,
-+ * so we have to consider the current index for truncation
-+ */
-+ if (path->p_hdr->eh_entries == path->p_block)
-+ return 0;
-+ return 1;
-+}
-+
-+int ext3_ext_remove_space(struct ext3_extents_tree *tree,
-+ unsigned long start, unsigned long end)
-+{
-+ struct inode *inode = tree->inode;
-+ struct super_block *sb = inode->i_sb;
-+ int depth = EXT_DEPTH(tree);
-+ struct ext3_ext_path *path;
-+ handle_t *handle;
-+ int i = 0, err = 0;
-+
-+ ext_debug(tree, "space to be removed: %lu:%lu\n", start, end);
-+
-+ /* probably first extent we're gonna free will be last in block */
-+ handle = ext3_journal_start(inode, depth + 1);
-+ if (IS_ERR(handle))
-+ return PTR_ERR(handle);
-+
-+ ext3_ext_invalidate_cache(tree);
-+
-+ /*
-+ * we start scanning from the right side, freeing all the blocks
-+ * after i_size and walking down into the tree
-+ */
-+ path = kmalloc(sizeof(struct ext3_ext_path) * (depth + 1), GFP_KERNEL);
-+ if (path == NULL) {
-+ ext3_error(sb, __FUNCTION__, "Can't allocate path array");
-+ ext3_journal_stop(handle);
-+ return -ENOMEM;
-+ }
-+ memset(path, 0, sizeof(struct ext3_ext_path) * (depth + 1));
-+ path[i].p_hdr = EXT_ROOT_HDR(tree);
-+
-+ while (i >= 0 && err == 0) {
-+ if (i == depth) {
-+ /* this is leaf block */
-+ err = ext3_ext_rm_leaf(handle, tree, path, start, end);
-+ /* root level has p_bh == NULL; brelse() eats this */
-+ brelse(path[i].p_bh);
-+ i--;
-+ continue;
-+ }
-+
-+ /* this is index block */
-+ if (!path[i].p_hdr) {
-+ ext_debug(tree, "initialize header\n");
-+ path[i].p_hdr = EXT_BLOCK_HDR(path[i].p_bh);
-+ }
-+
-+ EXT_ASSERT(path[i].p_hdr->eh_entries <= path[i].p_hdr->eh_max);
-+ EXT_ASSERT(path[i].p_hdr->eh_magic == EXT3_EXT_MAGIC);
-+
-+ if (!path[i].p_idx) {
-+ /* this level hasn't been touched yet */
-+ path[i].p_idx =
-+ ext3_ext_last_covered(path[i].p_hdr, end);
-+ path[i].p_block = path[i].p_hdr->eh_entries + 1;
-+ ext_debug(tree, "init index ptr: hdr 0x%p, num %d\n",
-+ path[i].p_hdr, path[i].p_hdr->eh_entries);
-+ } else {
-+ /* we've already been here; look at the next index */
-+ path[i].p_idx--;
-+ }
-+
-+ ext_debug(tree, "level %d - index, first 0x%p, cur 0x%p\n",
-+ i, EXT_FIRST_INDEX(path[i].p_hdr),
-+ path[i].p_idx);
-+ if (ext3_ext_more_to_rm(path + i)) {
-+ /* go to the next level */
-+ ext_debug(tree, "move to level %d (block %d)\n",
-+ i + 1, path[i].p_idx->ei_leaf);
-+ memset(path + i + 1, 0, sizeof(*path));
-+ path[i+1].p_bh = sb_bread(sb, path[i].p_idx->ei_leaf);
-+ if (!path[i+1].p_bh) {
-+ /* should we reset i_size? */
-+ err = -EIO;
-+ break;
-+ }
-+ /* store the actual number of indexes so we can tell
-+ * whether this number changed at the next iteration */
-+ path[i].p_block = path[i].p_hdr->eh_entries;
-+ i++;
-+ } else {
-+ /* we finish processing this index, go up */
-+ if (path[i].p_hdr->eh_entries == 0 && i > 0) {
-+ /* index is empty, remove it
-+ * handle must be already prepared by the
-+ * truncatei_leaf() */
-+ err = ext3_ext_rm_idx(handle, tree, path + i);
-+ }
-+ /* root level has p_bh == NULL; brelse() eats this */
-+ brelse(path[i].p_bh);
-+ i--;
-+ ext_debug(tree, "return to level %d\n", i);
-+ }
-+ }
-+
-+ /* TODO: flexible tree reduction should be here */
-+ if (path->p_hdr->eh_entries == 0) {
-+ /*
-+ * truncate to zero freed the whole tree,
-+ * so we need to correct eh_depth
-+ */
-+ err = ext3_ext_get_access(handle, tree, path);
-+ if (err == 0) {
-+ EXT_ROOT_HDR(tree)->eh_depth = 0;
-+ EXT_ROOT_HDR(tree)->eh_max = ext3_ext_space_root(tree);
-+ err = ext3_ext_dirty(handle, tree, path);
-+ }
-+ }
-+ ext3_ext_tree_changed(tree);
-+
-+ kfree(path);
-+ ext3_journal_stop(handle);
-+
-+ return err;
-+}
-+
-+int ext3_ext_calc_metadata_amount(struct ext3_extents_tree *tree, int blocks)
-+{
-+ int lcap, icap, rcap, leafs, idxs, num;
-+
-+ rcap = ext3_ext_space_root(tree);
-+ if (blocks <= rcap) {
-+ /* all extents fit in the root */
-+ return 0;
-+ }
-+
-+ rcap = ext3_ext_space_root_idx(tree);
-+ lcap = ext3_ext_space_block(tree);
-+ icap = ext3_ext_space_block_idx(tree);
-+
-+ num = leafs = (blocks + lcap - 1) / lcap;
-+ if (leafs <= rcap) {
-+ /* all pointers to leaves fit in the root */
-+ return leafs;
-+ }
-+
-+ /* ok. we need separate index block(s) to link all leaf blocks */
-+ idxs = (leafs + icap - 1) / icap;
-+ do {
-+ num += idxs;
-+ idxs = (idxs + icap - 1) / icap;
-+ } while (idxs > rcap);
-+
-+ return num;
-+}
-+
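
To make the formula above concrete, assume the 12-byte extent header and 12-byte extent/index records from the companion header patch, a 4 KB block size and the 60-byte i_data array as the root: then a block holds 340 records, the root holds 4, and the function reduces to the sketch below (toy_metadata_amount is an illustrative stand-in, not part of the patch):

    #include <stdio.h>

    /* Same arithmetic as ext3_ext_calc_metadata_amount(), with the capacities
     * passed in explicitly: rcap = root entries, lcap = extents per leaf block,
     * icap = indexes per index block. */
    static int toy_metadata_amount(int blocks, int rcap, int lcap, int icap)
    {
            int leafs, idxs, num;

            if (blocks <= rcap)                  /* all extents fit in the root */
                    return 0;

            num = leafs = (blocks + lcap - 1) / lcap;
            if (leafs <= rcap)                   /* root can point at the leaves */
                    return leafs;

            idxs = (leafs + icap - 1) / icap;    /* index blocks linking the leaves */
            do {
                    num += idxs;
                    idxs = (idxs + icap - 1) / icap;
            } while (idxs > rcap);
            return num;
    }

    int main(void)
    {
            printf("%d\n", toy_metadata_amount(1000,   4, 340, 340)); /* 3 leaf blocks       */
            printf("%d\n", toy_metadata_amount(100000, 4, 340, 340)); /* 296: 295 leaves + 1 index */
            return 0;
    }
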
-+/*
-+ * called at mount time
-+ */
-+void ext3_ext_init(struct super_block *sb)
-+{
-+ /*
-+ * possible initialization would be here
-+ */
-+
-+ if (test_opt(sb, EXTENTS)) {
-+ printk("EXT3-fs: file extents enabled");
-+#ifdef AGRESSIVE_TEST
-+ printk(", agressive tests");
-+#endif
-+#ifdef CHECK_BINSEARCH
-+ printk(", check binsearch");
-+#endif
-+ printk("\n");
-+ }
-+}
-+
-+/*
-+ * called at umount time
-+ */
-+void ext3_ext_release(struct super_block *sb)
-+{
-+}
-+
-+/************************************************************************
-+ * VFS related routines
-+ ************************************************************************/
-+
-+static int ext3_get_inode_write_access(handle_t *handle, void *buffer)
-+{
-+ /* we use in-core data, not bh */
-+ return 0;
-+}
-+
-+static int ext3_mark_buffer_dirty(handle_t *handle, void *buffer)
-+{
-+ struct inode *inode = buffer;
-+ return ext3_mark_inode_dirty(handle, inode);
-+}
-+
-+static int ext3_ext_mergable(struct ext3_extent *ex1,
-+ struct ext3_extent *ex2)
-+{
-+ /* FIXME: support for large fs */
-+ if (ex1->ee_start + ex1->ee_len == ex2->ee_start)
-+ return 1;
-+ return 0;
-+}
-+
-+static int
-+ext3_remove_blocks_credits(struct ext3_extents_tree *tree,
-+ struct ext3_extent *ex,
-+ unsigned long from, unsigned long to)
-+{
-+ int needed;
-+
-+ /* at present, extent can't cross block group */
-+ needed = 4; /* bitmap + group desc + sb + inode */
-+
-+#ifdef CONFIG_QUOTA
-+ needed += 2 * EXT3_SINGLEDATA_TRANS_BLOCKS;
-+#endif
-+ return needed;
-+}
-+
-+static int
-+ext3_remove_blocks(struct ext3_extents_tree *tree,
-+ struct ext3_extent *ex,
-+ unsigned long from, unsigned long to)
-+{
-+ int needed = ext3_remove_blocks_credits(tree, ex, from, to);
-+ handle_t *handle = ext3_journal_start(tree->inode, needed);
-+ struct buffer_head *bh;
-+ int i;
-+
-+ if (IS_ERR(handle))
-+ return PTR_ERR(handle);
-+ if (from >= ex->ee_block && to == ex->ee_block + ex->ee_len - 1) {
-+ /* tail removal */
-+ unsigned long num, start;
-+ num = ex->ee_block + ex->ee_len - from;
-+ start = ex->ee_start + ex->ee_len - num;
-+ ext_debug(tree, "free last %lu blocks starting %lu\n",
-+ num, start);
-+ for (i = 0; i < num; i++) {
-+ bh = sb_find_get_block(tree->inode->i_sb, start + i);
-+ ext3_forget(handle, 0, tree->inode, bh, start + i);
-+ }
-+ ext3_free_blocks(handle, tree->inode, start, num);
-+ } else if (from == ex->ee_block && to <= ex->ee_block + ex->ee_len - 1) {
-+ printk("strange request: removal %lu-%lu from %u:%u\n",
-+ from, to, ex->ee_block, ex->ee_len);
-+ } else {
-+ printk("strange request: removal(2) %lu-%lu from %u:%u\n",
-+ from, to, ex->ee_block, ex->ee_len);
-+ }
-+ ext3_journal_stop(handle);
-+ return 0;
-+}
-+
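
The tail-removal arithmetic above is easiest to check with numbers: for a hypothetical extent ee_block = 100, ee_len = 10, ee_start = 5000 and a request with from = 104, num = 100 + 10 - 104 = 6 and start = 5000 + 10 - 6 = 5004, so physical blocks 5004..5009 are forgotten and freed while logical blocks 100..103 stay mapped. A one-line sanity check of that arithmetic (illustrative values only, not part of the patch):

    #include <assert.h>

    /* Tail removal with assumed values: see the worked numbers above. */
    static void toy_tail_removal_check(void)
    {
            unsigned long ee_block = 100, ee_len = 10, ee_start = 5000;
            unsigned long from = 104;
            unsigned long num = ee_block + ee_len - from;   /* blocks to free       */
            unsigned long start = ee_start + ee_len - num;  /* first physical block */

            assert(num == 6 && start == 5004);
    }

    int main(void)
    {
            toy_tail_removal_check();
            return 0;
    }
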
-+static int ext3_ext_find_goal(struct inode *inode,
-+ struct ext3_ext_path *path, unsigned long block)
-+{
-+ struct ext3_inode_info *ei = EXT3_I(inode);
-+ unsigned long bg_start;
-+ unsigned long colour;
-+ int depth;
-+
-+ if (path) {
-+ struct ext3_extent *ex;
-+ depth = path->p_depth;
-+
-+ /* try to predict block placement */
-+ if ((ex = path[depth].p_ext))
-+ return ex->ee_start + (block - ex->ee_block);
-+
-+ /* it looks like the index is empty;
-+ * try to find a goal starting from the index block itself */
-+ if (path[depth].p_bh)
-+ return path[depth].p_bh->b_blocknr;
-+ }
-+
-+ /* OK. use inode's group */
-+ bg_start = (ei->i_block_group * EXT3_BLOCKS_PER_GROUP(inode->i_sb)) +
-+ le32_to_cpu(EXT3_SB(inode->i_sb)->s_es->s_first_data_block);
-+ colour = (current->pid % 16) *
-+ (EXT3_BLOCKS_PER_GROUP(inode->i_sb) / 16);
-+ return bg_start + colour + block;
-+}
-+
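
The fallback goal above is ext3's usual heuristic: start from the inode's block group and spread concurrent writers out by PID so they do not all contend for the same blocks. With an assumed 32768 blocks per group the colour term steps in units of 2048 blocks, as in this sketch (toy_find_goal and its numbers are illustrative, not part of the patch):

    /* Goal heuristic with assumed geometry: 32768 blocks per group,
     * first data block 0. */
    unsigned long toy_find_goal(unsigned long group, unsigned long pid,
                                unsigned long block)
    {
            unsigned long blocks_per_group = 32768;
            unsigned long bg_start = group * blocks_per_group;
            unsigned long colour = (pid % 16) * (blocks_per_group / 16);

            return bg_start + colour + block; /* group 3, pid 1234, block 7 -> 102407 */
    }
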
-+static int ext3_new_block_cb(handle_t *handle, struct ext3_extents_tree *tree,
-+ struct ext3_ext_path *path,
-+ struct ext3_extent *ex, int *err)
-+{
-+ struct inode *inode = tree->inode;
-+ int newblock, goal;
-+
-+ EXT_ASSERT(path);
-+ EXT_ASSERT(ex);
-+ EXT_ASSERT(ex->ee_start);
-+ EXT_ASSERT(ex->ee_len);
-+
-+ /* reuse block from the extent to order data/metadata */
-+ newblock = ex->ee_start++;
-+ ex->ee_len--;
-+ if (ex->ee_len == 0) {
-+ ex->ee_len = 1;
-+ /* allocate new block for the extent */
-+ goal = ext3_ext_find_goal(inode, path, ex->ee_block);
-+ ex->ee_start = ext3_new_block(handle, inode, goal, err);
-+ ex->ee_start_hi = 0;
-+ if (ex->ee_start == 0) {
-+ /* error occurred: restore old extent */
-+ ex->ee_start = newblock;
-+ return 0;
-+ }
-+ }
-+ return newblock;
-+}
-+
-+static struct ext3_extents_helpers ext3_blockmap_helpers = {
-+ .get_write_access = ext3_get_inode_write_access,
-+ .mark_buffer_dirty = ext3_mark_buffer_dirty,
-+ .mergable = ext3_ext_mergable,
-+ .new_block = ext3_new_block_cb,
-+ .remove_extent = ext3_remove_blocks,
-+ .remove_extent_credits = ext3_remove_blocks_credits,
-+};
-+
-+void ext3_init_tree_desc(struct ext3_extents_tree *tree,
-+ struct inode *inode)
-+{
-+ tree->inode = inode;
-+ tree->root = (void *) EXT3_I(inode)->i_data;
-+ tree->buffer = (void *) inode;
-+ tree->buffer_len = sizeof(EXT3_I(inode)->i_data);
-+ tree->cex = &EXT3_I(inode)->i_cached_extent;
-+ tree->ops = &ext3_blockmap_helpers;
-+}
-+
-+int ext3_ext_get_block(handle_t *handle, struct inode *inode,
-+ long iblock, struct buffer_head *bh_result,
-+ int create, int extend_disksize)
-+{
-+ struct ext3_ext_path *path = NULL;
-+ struct ext3_extent newex;
-+ struct ext3_extent *ex;
-+ int goal, newblock, err = 0, depth;
-+ struct ext3_extents_tree tree;
-+
-+ __clear_bit(BH_New, &bh_result->b_state);
-+ ext3_init_tree_desc(&tree, inode);
-+ ext_debug(&tree, "block %d requested for inode %u\n",
-+ (int) iblock, (unsigned) inode->i_ino);
-+ down(&EXT3_I(inode)->truncate_sem);
-+
-+ /* check in cache */
-+ if ((goal = ext3_ext_in_cache(&tree, iblock, &newex))) {
-+ if (goal == EXT3_EXT_CACHE_GAP) {
-+ if (!create) {
-+ /* block isn't allocated yet and
-+ * user doesn't want to allocate it */
-+ goto out2;
-+ }
-+ /* we should allocate requested block */
-+ } else if (goal == EXT3_EXT_CACHE_EXTENT) {
-+ /* block is already allocated */
-+ newblock = iblock - newex.ee_block + newex.ee_start;
-+ goto out;
-+ } else {
-+ EXT_ASSERT(0);
-+ }
-+ }
-+
-+ /* find extent for this block */
-+ path = ext3_ext_find_extent(&tree, iblock, NULL);
-+ if (IS_ERR(path)) {
-+ err = PTR_ERR(path);
-+ path = NULL;
-+ goto out2;
-+ }
-+
-+ depth = EXT_DEPTH(&tree);
-+
-+ /*
-+ * a consistent leaf must not be empty;
-+ * this situation is possible, though, _during_ tree modification;
-+ * this is why the assert can't be put in ext3_ext_find_extent()
-+ */
-+ EXT_ASSERT(path[depth].p_ext != NULL || depth == 0);
-+
-+ if ((ex = path[depth].p_ext)) {
-+ /* if the found extent covers the block, simply return it */
-+ if (iblock >= ex->ee_block && iblock < ex->ee_block + ex->ee_len) {
-+ newblock = iblock - ex->ee_block + ex->ee_start;
-+ ext_debug(&tree, "%d fit into %d:%d -> %d\n",
-+ (int) iblock, ex->ee_block, ex->ee_len,
-+ newblock);
-+ ext3_ext_put_in_cache(&tree, ex->ee_block,
-+ ex->ee_len, ex->ee_start,
-+ EXT3_EXT_CACHE_EXTENT);
-+ goto out;
-+ }
-+ }
-+
-+ /*
-+ * requested block isn't allocated yet;
-+ * we must not allocate it if the create flag is zero
-+ */
-+ if (!create) {
-+ /* put the just-found gap into the cache to speed up subsequent requests */
-+ ext3_ext_put_gap_in_cache(&tree, path, iblock);
-+ goto out2;
-+ }
-+
-+ /* allocate new block */
-+ goal = ext3_ext_find_goal(inode, path, iblock);
-+ newblock = ext3_new_block(handle, inode, goal, &err);
-+ if (!newblock)
-+ goto out2;
-+ ext_debug(&tree, "allocate new block: goal %d, found %d\n",
-+ goal, newblock);
-+
-+ /* try to insert new extent into found leaf and return */
-+ newex.ee_block = iblock;
-+ newex.ee_start = newblock;
-+ newex.ee_start_hi = 0;
-+ newex.ee_len = 1;
-+ err = ext3_ext_insert_extent(handle, &tree, path, &newex);
-+ if (err)
-+ goto out2;
-+
-+ if (extend_disksize && inode->i_size > EXT3_I(inode)->i_disksize)
-+ EXT3_I(inode)->i_disksize = inode->i_size;
-+
-+ /* the previous routine could have used the block we allocated */
-+ newblock = newex.ee_start;
-+ __set_bit(BH_New, &bh_result->b_state);
-+
-+ ext3_ext_put_in_cache(&tree, newex.ee_block, newex.ee_len,
-+ newex.ee_start, EXT3_EXT_CACHE_EXTENT);
-+out:
-+ ext3_ext_show_leaf(&tree, path);
-+ __set_bit(BH_Mapped, &bh_result->b_state);
-+ bh_result->b_bdev = inode->i_sb->s_bdev;
-+ bh_result->b_blocknr = newblock;
-+out2:
-+ if (path) {
-+ ext3_ext_drop_refs(path);
-+ kfree(path);
-+ }
-+ up(&EXT3_I(inode)->truncate_sem);
-+
-+ return err;
-+}
-+
-+void ext3_ext_truncate(struct inode * inode, struct page *page)
-+{
-+ struct address_space *mapping = inode->i_mapping;
-+ struct super_block *sb = inode->i_sb;
-+ struct ext3_extents_tree tree;
-+ unsigned long last_block;
-+ handle_t *handle;
-+ int err = 0;
-+
-+ ext3_init_tree_desc(&tree, inode);
-+
-+ /*
-+ * probably first extent we're gonna free will be last in block
-+ */
-+ err = ext3_writepage_trans_blocks(inode) + 3;
-+ handle = ext3_journal_start(inode, err);
-+ if (IS_ERR(handle)) {
-+ if (page) {
-+ clear_highpage(page);
-+ flush_dcache_page(page);
-+ unlock_page(page);
-+ page_cache_release(page);
-+ }
-+ return;
-+ }
-+
-+ if (page)
-+ ext3_block_truncate_page(handle, page, mapping, inode->i_size);
-+
-+ down(&EXT3_I(inode)->truncate_sem);
-+ ext3_ext_invalidate_cache(&tree);
-+
-+ /*
-+ * TODO: optimization is possible here
-+ * probably we don't need scanning at all,
-+ * because page truncation is enough
-+ */
-+ if (ext3_orphan_add(handle, inode))
-+ goto out_stop;
-+
-+ /* we have to know where to truncate from in crash case */
-+ EXT3_I(inode)->i_disksize = inode->i_size;
-+ ext3_mark_inode_dirty(handle, inode);
-+
-+ last_block = (inode->i_size + sb->s_blocksize - 1) >>
-+ EXT3_BLOCK_SIZE_BITS(sb);
-+ err = ext3_ext_remove_space(&tree, last_block, EXT_MAX_BLOCK);
-+
-+ /* In a multi-transaction truncate, we only make the final
-+ * transaction synchronous */
-+ if (IS_SYNC(inode))
-+ handle->h_sync = 1;
-+
-+out_stop:
-+ /*
-+ * If this was a simple ftruncate(), and the file will remain alive
-+ * then we need to clear up the orphan record which we created above.
-+ * However, if this was a real unlink then we were called by
-+ * ext3_delete_inode(), and we allow that function to clean up the
-+ * orphan info for us.
-+ */
-+ if (inode->i_nlink)
-+ ext3_orphan_del(handle, inode);
-+
-+ up(&EXT3_I(inode)->truncate_sem);
-+ ext3_journal_stop(handle);
-+}
-+
-+/*
-+ * this routine calculates the max number of blocks we could modify
-+ * in order to allocate a new block for an inode
-+ */
-+int ext3_ext_writepage_trans_blocks(struct inode *inode, int num)
-+{
-+ struct ext3_extents_tree tree;
-+ int needed;
-+
-+ ext3_init_tree_desc(&tree, inode);
-+
-+ needed = ext3_ext_calc_credits_for_insert(&tree, NULL);
-+
-+ /* caller wants to allocate num blocks */
-+ needed *= num;
-+
-+#ifdef CONFIG_QUOTA
-+ /*
-+ * FIXME: real calculation should be here
-+ * it depends on the blockmap format of the quota file
-+ */
-+ needed += 2 * EXT3_SINGLEDATA_TRANS_BLOCKS;
-+#endif
-+
-+ return needed;
-+}
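A back-of-envelope user-space model of the credit arithmetic used above may help when reading it: a per-insert worst case is multiplied by the number of blocks the caller wants, plus a quota fudge factor. The per-insert figure below (whole path plus the EXT3_ALLOC_NEEDED = 3 overhead from ext3_extents.h) and the SINGLEDATA_TRANS_BLOCKS value are assumptions for illustration, not the kernel's ext3_ext_calc_credits_for_insert().

#include <stdio.h>

#define ALLOC_NEEDED            3   /* block bitmap + group desc + sb */
#define SINGLEDATA_TRANS_BLOCKS 8   /* assumed value, quota fudge only */

static int writepage_trans_blocks(int depth, int num, int quota)
{
        /* assumed per-insert worst case: whole path + allocation overhead */
        int needed = (depth + 1) + ALLOC_NEEDED;

        needed *= num;                          /* caller wants num blocks */
        if (quota)
                needed += 2 * SINGLEDATA_TRANS_BLOCKS;
        return needed;
}

int main(void)
{
        printf("depth 2, 4 blocks, quota on: %d credits\n",
               writepage_trans_blocks(2, 4, 1));
        return 0;
}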
-+
-+void ext3_extents_initialize_blockmap(handle_t *handle, struct inode *inode)
-+{
-+ struct ext3_extents_tree tree;
-+
-+ ext3_init_tree_desc(&tree, inode);
-+ ext3_extent_tree_init(handle, &tree);
-+}
-+
-+int ext3_ext_calc_blockmap_metadata(struct inode *inode, int blocks)
-+{
-+ struct ext3_extents_tree tree;
-+
-+ ext3_init_tree_desc(&tree, inode);
-+ return ext3_ext_calc_metadata_amount(&tree, blocks);
-+}
-+
-+static int
-+ext3_ext_store_extent_cb(struct ext3_extents_tree *tree,
-+ struct ext3_ext_path *path,
-+ struct ext3_ext_cache *newex)
-+{
-+ struct ext3_extent_buf *buf = (struct ext3_extent_buf *) tree->private;
-+
-+ if (newex->ec_type != EXT3_EXT_CACHE_EXTENT)
-+ return EXT_CONTINUE;
-+
-+ if (buf->err < 0)
-+ return EXT_BREAK;
-+ if (buf->cur - buf->buffer + sizeof(*newex) > buf->buflen)
-+ return EXT_BREAK;
-+
-+ if (!copy_to_user(buf->cur, newex, sizeof(*newex))) {
-+ buf->err++;
-+ buf->cur += sizeof(*newex);
-+ } else {
-+ buf->err = -EFAULT;
-+ return EXT_BREAK;
-+ }
-+ return EXT_CONTINUE;
-+}
-+
-+static int
-+ext3_ext_collect_stats_cb(struct ext3_extents_tree *tree,
-+ struct ext3_ext_path *path,
-+ struct ext3_ext_cache *ex)
-+{
-+ struct ext3_extent_tree_stats *buf =
-+ (struct ext3_extent_tree_stats *) tree->private;
-+ int depth;
-+
-+ if (ex->ec_type != EXT3_EXT_CACHE_EXTENT)
-+ return EXT_CONTINUE;
-+
-+ depth = EXT_DEPTH(tree);
-+ buf->extents_num++;
-+ if (path[depth].p_ext == EXT_FIRST_EXTENT(path[depth].p_hdr))
-+ buf->leaf_num++;
-+ return EXT_CONTINUE;
-+}
-+
-+int ext3_ext_ioctl(struct inode *inode, struct file *filp, unsigned int cmd,
-+ unsigned long arg)
-+{
-+ int err = 0;
-+
-+ if (!(EXT3_I(inode)->i_flags & EXT3_EXTENTS_FL))
-+ return -EINVAL;
-+
-+ if (cmd == EXT3_IOC_GET_EXTENTS) {
-+ struct ext3_extent_buf buf;
-+ struct ext3_extents_tree tree;
-+
-+ if (copy_from_user(&buf, (void *) arg, sizeof(buf)))
-+ return -EFAULT;
-+
-+ ext3_init_tree_desc(&tree, inode);
-+ buf.cur = buf.buffer;
-+ buf.err = 0;
-+ tree.private = &buf;
-+ down(&EXT3_I(inode)->truncate_sem);
-+ err = ext3_ext_walk_space(&tree, buf.start, EXT_MAX_BLOCK,
-+ ext3_ext_store_extent_cb);
-+ up(&EXT3_I(inode)->truncate_sem);
-+ if (err == 0)
-+ err = buf.err;
-+ } else if (cmd == EXT3_IOC_GET_TREE_STATS) {
-+ struct ext3_extent_tree_stats buf;
-+ struct ext3_extents_tree tree;
-+
-+ ext3_init_tree_desc(&tree, inode);
-+ down(&EXT3_I(inode)->truncate_sem);
-+ buf.depth = EXT_DEPTH(&tree);
-+ buf.extents_num = 0;
-+ buf.leaf_num = 0;
-+ tree.private = &buf;
-+ err = ext3_ext_walk_space(&tree, 0, EXT_MAX_BLOCK,
-+ ext3_ext_collect_stats_cb);
-+ up(&EXT3_I(inode)->truncate_sem);
-+ if (!err)
-+ err = copy_to_user((void *) arg, &buf, sizeof(buf));
-+ } else if (cmd == EXT3_IOC_GET_TREE_DEPTH) {
-+ struct ext3_extents_tree tree;
-+ ext3_init_tree_desc(&tree, inode);
-+ down(&EXT3_I(inode)->truncate_sem);
-+ err = EXT_DEPTH(&tree);
-+ up(&EXT3_I(inode)->truncate_sem);
-+ }
-+
-+ return err;
-+}
-+
-+EXPORT_SYMBOL(ext3_init_tree_desc);
-+EXPORT_SYMBOL(ext3_mark_inode_dirty);
-+EXPORT_SYMBOL(ext3_ext_invalidate_cache);
-+EXPORT_SYMBOL(ext3_ext_insert_extent);
-+EXPORT_SYMBOL(ext3_ext_walk_space);
-+EXPORT_SYMBOL(ext3_ext_find_goal);
-+EXPORT_SYMBOL(ext3_ext_calc_credits_for_insert);
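The three new ioctls above can be exercised from user space with nothing more than open()/ioctl(). The sketch below queries tree depth and stats only (EXT3_IOC_GET_EXTENTS needs an ext3_extent_buf set up to receive ext3_ext_cache records and is omitted); the ioctl numbers and the stats layout are copied from the ext3_fs.h / ext3_extents.h hunks in this patch, while the little "extstat" tool itself is made up for illustration.

#include <stdio.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>

#define EXT3_IOC_GET_TREE_DEPTH _IOR('f', 8, long)
#define EXT3_IOC_GET_TREE_STATS _IOR('f', 9, long)

struct ext3_extent_tree_stats {          /* layout from ext3_extents.h */
        int depth;
        int extents_num;
        int leaf_num;
};

int main(int argc, char **argv)
{
        struct ext3_extent_tree_stats st;
        int fd, depth;

        if (argc != 2) {
                fprintf(stderr, "usage: extstat <file>\n");
                return 1;
        }
        fd = open(argv[1], O_RDONLY);
        if (fd < 0)
                return 1;

        depth = ioctl(fd, EXT3_IOC_GET_TREE_DEPTH);     /* returned as value */
        if (depth >= 0 && ioctl(fd, EXT3_IOC_GET_TREE_STATS, &st) == 0)
                printf("depth %d, %d extents in %d leaves\n",
                       depth, st.extents_num, st.leaf_num);
        close(fd);
        return 0;
}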
-Index: linux-2.6.5-sles9/fs/ext3/ialloc.c
-===================================================================
---- linux-2.6.5-sles9.orig/fs/ext3/ialloc.c 2005-02-23 01:01:52.366281264 +0300
-+++ linux-2.6.5-sles9/fs/ext3/ialloc.c 2005-02-23 01:02:37.398435336 +0300
-@@ -566,7 +566,7 @@ repeat:
- ei->i_dir_start_lookup = 0;
- ei->i_disksize = 0;
-
-- ei->i_flags = EXT3_I(dir)->i_flags & ~EXT3_INDEX_FL;
-+ ei->i_flags = EXT3_I(dir)->i_flags & ~(EXT3_INDEX_FL|EXT3_EXTENTS_FL);
- if (S_ISLNK(mode))
- ei->i_flags &= ~(EXT3_IMMUTABLE_FL|EXT3_APPEND_FL);
- /* dirsync only applies to directories */
-@@ -647,6 +647,18 @@
- DQUOT_FREE_INODE(inode);
- goto fail2;
- }
-+ if (test_opt(sb, EXTENTS) && S_ISREG(inode->i_mode)) {
-+ EXT3_I(inode)->i_flags |= EXT3_EXTENTS_FL;
-+ ext3_extents_initialize_blockmap(handle, inode);
-+ if (!EXT3_HAS_INCOMPAT_FEATURE(sb, EXT3_FEATURE_INCOMPAT_EXTENTS)) {
-+ err = ext3_journal_get_write_access(handle, EXT3_SB(sb)->s_sbh);
-+ if (err) goto fail;
-+ EXT3_SET_INCOMPAT_FEATURE(sb, EXT3_FEATURE_INCOMPAT_EXTENTS);
-+ BUFFER_TRACE(EXT3_SB(sb)->s_sbh, "call ext3_journal_dirty_metadata");
-+ err = ext3_journal_dirty_metadata(handle, EXT3_SB(sb)->s_sbh);
-+ }
-+ }
-+
- err = ext3_mark_inode_dirty(handle, inode);
- if (err) {
- ext3_std_error(sb, err);
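The ialloc.c hunk sets the per-inode EXT3_EXTENTS_FL on new regular files and, the first time that happens, advertises EXT3_FEATURE_INCOMPAT_EXTENTS in the superblock. A minimal user-space model of that check-then-set-once pattern (the journaling calls are deliberately left out; the flag value matches the ext3_fs.h hunk below):

#include <stdio.h>

#define INCOMPAT_EXTENTS 0x0040         /* EXT3_FEATURE_INCOMPAT_EXTENTS */

struct sb { unsigned int feature_incompat; };

static void mark_extents_used(struct sb *sb)
{
        if (sb->feature_incompat & INCOMPAT_EXTENTS)
                return;                 /* feature already advertised */
        /* the kernel brackets this with journal write access + dirty */
        sb->feature_incompat |= INCOMPAT_EXTENTS;
}

int main(void)
{
        struct sb s = { 0 };

        mark_extents_used(&s);
        mark_extents_used(&s);          /* second call is a no-op */
        printf("s_feature_incompat = 0x%04x\n", s.feature_incompat);
        return 0;
}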
-Index: linux-2.6.5-sles9/fs/ext3/inode.c
-===================================================================
---- linux-2.6.5-sles9.orig/fs/ext3/inode.c 2005-02-23 01:01:52.373280200 +0300
-+++ linux-2.6.5-sles9/fs/ext3/inode.c 2005-02-23 01:02:37.404434424 +0300
-@@ -796,6 +796,17 @@
- goto reread;
- }
-
-+static inline int
-+ext3_get_block_wrap(handle_t *handle, struct inode *inode, long block,
-+ struct buffer_head *bh, int create, int extend_disksize)
-+{
-+ if (EXT3_I(inode)->i_flags & EXT3_EXTENTS_FL)
-+ return ext3_ext_get_block(handle, inode, block, bh, create,
-+ extend_disksize);
-+ return ext3_get_block_handle(handle, inode, block, bh, create,
-+ extend_disksize);
-+}
-+
- static int ext3_get_block(struct inode *inode, sector_t iblock,
- struct buffer_head *bh_result, int create)
- {
-@@ -806,8 +817,8 @@
- handle = ext3_journal_current_handle();
- J_ASSERT(handle != 0);
- }
-- ret = ext3_get_block_handle(handle, inode, iblock,
-- bh_result, create, 1);
-+ ret = ext3_get_block_wrap(handle, inode, iblock,
-+ bh_result, create, 1);
- return ret;
- }
-
-@@ -833,8 +844,8 @@
- }
- }
- if (ret == 0)
-- ret = ext3_get_block_handle(handle, inode, iblock,
-- bh_result, create, 0);
-+ ret = ext3_get_block_wrap(handle, inode, iblock,
-+ bh_result, create, 0);
- if (ret == 0)
- bh_result->b_size = (1 << inode->i_blkbits);
- return ret;
-@@ -855,7 +866,7 @@
- dummy.b_state = 0;
- dummy.b_blocknr = -1000;
- buffer_trace_init(&dummy.b_history);
-- *errp = ext3_get_block_handle(handle, inode, block, &dummy, create, 1);
-+ *errp = ext3_get_block_wrap(handle, inode, block, &dummy, create, 1);
- if (!*errp && buffer_mapped(&dummy)) {
- struct buffer_head *bh;
- bh = sb_getblk(inode->i_sb, dummy.b_blocknr);
-@@ -1587,7 +1598,7 @@
- * This required during truncate. We need to physically zero the tail end
- * of that block so it doesn't yield old data if the file is later grown.
- */
--static int ext3_block_truncate_page(handle_t *handle, struct page *page,
-+int ext3_block_truncate_page(handle_t *handle, struct page *page,
- struct address_space *mapping, loff_t from)
- {
- unsigned long index = from >> PAGE_CACHE_SHIFT;
-@@ -2083,6 +2094,9 @@
- return;
- }
-
-+ if (EXT3_I(inode)->i_flags & EXT3_EXTENTS_FL)
-+ return ext3_ext_truncate(inode, page);
-+
- handle = start_transaction(inode);
- if (IS_ERR(handle)) {
- if (page) {
-@@ -2789,6 +2803,9 @@
- int indirects = (EXT3_NDIR_BLOCKS % bpp) ? 5 : 3;
- int ret;
-
-+ if (EXT3_I(inode)->i_flags & EXT3_EXTENTS_FL)
-+ return ext3_ext_writepage_trans_blocks(inode, bpp);
-+
- if (ext3_should_journal_data(inode))
- ret = 3 * (bpp + indirects) + 2;
- else
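The inode.c hunks route every block-mapping call through one wrapper that dispatches on EXT3_EXTENTS_FL, so extent-mapped and classic indirect-mapped inodes can coexist on the same filesystem. A stand-alone sketch of that dispatch shape, with stubs in place of ext3_ext_get_block()/ext3_get_block_handle():

#include <stdio.h>

#define EXTENTS_FL 0x00080000           /* EXT3_EXTENTS_FL */

static int map_via_extents(long block)  { return (int)block + 1000; }  /* stub */
static int map_via_indirect(long block) { return (int)block + 2000; }  /* stub */

static int get_block_wrap(unsigned int i_flags, long block)
{
        if (i_flags & EXTENTS_FL)
                return map_via_extents(block);
        return map_via_indirect(block);
}

int main(void)
{
        printf("extent file,   block 5 -> %d\n", get_block_wrap(EXTENTS_FL, 5));
        printf("indirect file, block 5 -> %d\n", get_block_wrap(0, 5));
        return 0;
}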
-Index: linux-2.6.5-sles9/fs/ext3/Makefile
-===================================================================
---- linux-2.6.5-sles9.orig/fs/ext3/Makefile 2005-02-23 01:01:46.501172896 +0300
-+++ linux-2.6.5-sles9/fs/ext3/Makefile 2005-02-23 01:02:37.405434272 +0300
-@@ -5,7 +5,8 @@
- obj-$(CONFIG_EXT3_FS) += ext3.o
-
- ext3-y := balloc.o bitmap.o dir.o file.o fsync.o ialloc.o inode.o iopen.o \
-- ioctl.o namei.o super.o symlink.o hash.o
-+ ioctl.o namei.o super.o symlink.o hash.o \
-+ extents.o
-
- ext3-$(CONFIG_EXT3_FS_XATTR) += xattr.o xattr_user.o xattr_trusted.o
- ext3-$(CONFIG_EXT3_FS_POSIX_ACL) += acl.o
-Index: linux-2.6.5-sles9/fs/ext3/super.c
-===================================================================
---- linux-2.6.5-sles9.orig/fs/ext3/super.c 2005-02-23 01:02:34.072940888 +0300
-+++ linux-2.6.5-sles9/fs/ext3/super.c 2005-02-23 01:47:15.291333736 +0300
-@@ -389,6 +389,7 @@
- struct ext3_super_block *es = sbi->s_es;
- int i;
-
-+ ext3_ext_release(sb);
- ext3_xattr_put_super(sb);
- journal_destroy(sbi->s_journal);
- if (!(sb->s_flags & MS_RDONLY)) {
-@@ -447,6 +448,8 @@
- #endif
- ei->i_rsv_window.rsv_end = EXT3_RESERVE_WINDOW_NOT_ALLOCATED;
- ei->vfs_inode.i_version = 1;
-+
-+ memset(&ei->i_cached_extent, 0, sizeof(ei->i_cached_extent));
- return &ei->vfs_inode;
- }
-
-@@ -537,6 +540,7 @@
- Opt_ignore, Opt_barrier,
- Opt_err,
- Opt_iopen, Opt_noiopen, Opt_iopen_nopriv,
-+ Opt_extents, Opt_noextents, Opt_extdebug,
- };
-
- static match_table_t tokens = {
-@@ -582,6 +585,9 @@
- {Opt_iopen, "iopen"},
- {Opt_noiopen, "noiopen"},
- {Opt_iopen_nopriv, "iopen_nopriv"},
-+ {Opt_extents, "extents"},
-+ {Opt_noextents, "noextents"},
-+ {Opt_extdebug, "extdebug"},
- {Opt_barrier, "barrier=%u"},
- {Opt_err, NULL}
- };
-@@ -797,6 +802,15 @@
- break;
- case Opt_ignore:
- break;
-+ case Opt_extents:
-+ set_opt (sbi->s_mount_opt, EXTENTS);
-+ break;
-+ case Opt_noextents:
-+ clear_opt (sbi->s_mount_opt, EXTENTS);
-+ break;
-+ case Opt_extdebug:
-+ set_opt (sbi->s_mount_opt, EXTDEBUG);
-+ break;
- default:
- printk (KERN_ERR
- "EXT3-fs: Unrecognized mount option \"%s\" "
-@@ -1449,6 +1460,8 @@
- percpu_counter_mod(&sbi->s_dirs_counter,
- ext3_count_dirs(sb));
-
-+ ext3_ext_init(sb);
-+
- return 0;
-
- failed_mount3:
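The new "extents", "noextents" and "extdebug" strings are ordinary bit flags in s_mount_opt, parsed in order like any other ext3 mount option. A small user-space sketch of the set_opt/clear_opt behaviour; the mask values are the ones added to ext3_fs.h further down, and the option list is made up:

#include <stdio.h>
#include <string.h>

#define MOUNT_EXTENTS  0x200000         /* EXT3_MOUNT_EXTENTS */
#define MOUNT_EXTDEBUG 0x400000         /* EXT3_MOUNT_EXTDEBUG */

int main(void)
{
        unsigned long mount_opt = 0;
        const char *opts[] = { "extents", "extdebug", "noextents" };

        for (int i = 0; i < 3; i++) {
                if (!strcmp(opts[i], "extents"))
                        mount_opt |= MOUNT_EXTENTS;     /* set_opt */
                else if (!strcmp(opts[i], "noextents"))
                        mount_opt &= ~MOUNT_EXTENTS;    /* clear_opt */
                else if (!strcmp(opts[i], "extdebug"))
                        mount_opt |= MOUNT_EXTDEBUG;
        }
        printf("extents %s, extdebug %s\n",
               (mount_opt & MOUNT_EXTENTS)  ? "on" : "off",
               (mount_opt & MOUNT_EXTDEBUG) ? "on" : "off");
        return 0;
}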
-Index: linux-2.6.5-sles9/fs/ext3/ioctl.c
-===================================================================
---- linux-2.6.5-sles9.orig/fs/ext3/ioctl.c 2005-02-23 01:01:42.887722224 +0300
-+++ linux-2.6.5-sles9/fs/ext3/ioctl.c 2005-02-23 01:02:37.412433208 +0300
-@@ -124,6 +124,10 @@
- err = ext3_change_inode_journal_flag(inode, jflag);
- return err;
- }
-+ case EXT3_IOC_GET_EXTENTS:
-+ case EXT3_IOC_GET_TREE_STATS:
-+ case EXT3_IOC_GET_TREE_DEPTH:
-+ return ext3_ext_ioctl(inode, filp, cmd, arg);
- case EXT3_IOC_GETVERSION:
- case EXT3_IOC_GETVERSION_OLD:
- return put_user(inode->i_generation, (int *) arg);
-Index: linux-2.6.5-sles9/include/linux/ext3_fs.h
-===================================================================
---- linux-2.6.5-sles9.orig/include/linux/ext3_fs.h 2005-02-23 01:02:35.823674736 +0300
-+++ linux-2.6.5-sles9/include/linux/ext3_fs.h 2005-02-23 01:02:37.414432904 +0300
-@@ -186,8 +186,9 @@
- #define EXT3_NOTAIL_FL 0x00008000 /* don't merge file tail */
- #define EXT3_DIRSYNC_FL 0x00010000 /* dirsync behaviour (directories only) */
- #define EXT3_TOPDIR_FL 0x00020000 /* Top of directory hierarchies*/
-+#define EXT3_EXTENTS_FL 0x00080000 /* Inode uses extents */
- #define EXT3_RESERVED_FL 0x80000000 /* reserved for ext3 lib */
-
--#define EXT3_FL_USER_VISIBLE 0x0003DFFF /* User visible flags */
-+#define EXT3_FL_USER_VISIBLE 0x000BDFFF /* User visible flags */
- #define EXT3_FL_USER_MODIFIABLE 0x000380FF /* User modifiable flags */
-
-@@ -211,6 +212,9 @@
- #endif
- #define EXT3_IOC_GETRSVSZ _IOR('f', 5, long)
- #define EXT3_IOC_SETRSVSZ _IOW('f', 6, long)
-+#define EXT3_IOC_GET_EXTENTS _IOR('f', 7, long)
-+#define EXT3_IOC_GET_TREE_DEPTH _IOR('f', 8, long)
-+#define EXT3_IOC_GET_TREE_STATS _IOR('f', 9, long)
-
- /*
- * Structure of an inode on the disk
-@@ -333,6 +337,8 @@
- #define EXT3_MOUNT_BARRIER 0x20000 /* Use block barriers */
- #define EXT3_MOUNT_IOPEN 0x80000 /* Allow access via iopen */
- #define EXT3_MOUNT_IOPEN_NOPRIV 0x100000/* Make iopen world-readable */
-+#define EXT3_MOUNT_EXTENTS 0x200000/* Extents support */
-+#define EXT3_MOUNT_EXTDEBUG 0x400000/* Extents debug */
-
- /* Compatibility, for having both ext2_fs.h and ext3_fs.h included at once */
- #ifndef clear_opt
-@@ -503,11 +509,13 @@
- #define EXT3_FEATURE_INCOMPAT_RECOVER 0x0004 /* Needs recovery */
- #define EXT3_FEATURE_INCOMPAT_JOURNAL_DEV 0x0008 /* Journal device */
- #define EXT3_FEATURE_INCOMPAT_META_BG 0x0010
-+#define EXT3_FEATURE_INCOMPAT_EXTENTS 0x0040 /* extents support */
-
- #define EXT3_FEATURE_COMPAT_SUPP EXT2_FEATURE_COMPAT_EXT_ATTR
- #define EXT3_FEATURE_INCOMPAT_SUPP (EXT3_FEATURE_INCOMPAT_FILETYPE| \
- EXT3_FEATURE_INCOMPAT_RECOVER| \
-- EXT3_FEATURE_INCOMPAT_META_BG)
-+ EXT3_FEATURE_INCOMPAT_META_BG| \
-+ EXT3_FEATURE_INCOMPAT_EXTENTS)
- #define EXT3_FEATURE_RO_COMPAT_SUPP (EXT3_FEATURE_RO_COMPAT_SPARSE_SUPER| \
- EXT3_FEATURE_RO_COMPAT_LARGE_FILE| \
- EXT3_FEATURE_RO_COMPAT_BTREE_DIR)
-@@ -729,6 +735,9 @@
-
-
- /* inode.c */
-+extern int ext3_block_truncate_page(handle_t *, struct page *,
-+ struct address_space *, loff_t);
-+extern int ext3_writepage_trans_blocks(struct inode *inode);
- extern int ext3_forget(handle_t *, int, struct inode *, struct buffer_head *, int);
- extern struct buffer_head * ext3_getblk (handle_t *, struct inode *, long, int, int *);
- extern struct buffer_head * ext3_bread (handle_t *, struct inode *, int, int, int *);
-@@ -802,6 +809,16 @@
- extern struct inode_operations ext3_symlink_inode_operations;
- extern struct inode_operations ext3_fast_symlink_inode_operations;
-
-+/* extents.c */
-+extern int ext3_ext_writepage_trans_blocks(struct inode *, int);
-+extern int ext3_ext_get_block(handle_t *, struct inode *, long,
-+ struct buffer_head *, int, int);
-+extern void ext3_ext_truncate(struct inode *, struct page *);
-+extern void ext3_ext_init(struct super_block *);
-+extern void ext3_ext_release(struct super_block *);
-+extern void ext3_extents_initialize_blockmap(handle_t *, struct inode *);
-+extern int ext3_ext_ioctl(struct inode *inode, struct file *filp,
-+ unsigned int cmd, unsigned long arg);
-
- #endif /* __KERNEL__ */
-
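A quick check of the mask arithmetic in the flags hunk above: the new EXT3_FL_USER_VISIBLE is exactly the old mask with EXT3_EXTENTS_FL OR-ed in, so the flag shows up in lsattr-style output while EXT3_FL_USER_MODIFIABLE stays unchanged.

#include <assert.h>
#include <stdio.h>

#define EXT3_EXTENTS_FL      0x00080000
#define OLD_FL_USER_VISIBLE  0x0003DFFF
#define NEW_FL_USER_VISIBLE  0x000BDFFF
#define FL_USER_MODIFIABLE   0x000380FF

int main(void)
{
        /* the new visible mask is the old one plus the extents bit */
        assert((OLD_FL_USER_VISIBLE | EXT3_EXTENTS_FL) == NEW_FL_USER_VISIBLE);
        /* visible to lsattr-style tools, but not user modifiable */
        assert((FL_USER_MODIFIABLE & EXT3_EXTENTS_FL) == 0);
        printf("flag masks are consistent\n");
        return 0;
}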
-Index: linux-2.6.5-sles9/include/linux/ext3_extents.h
-===================================================================
---- linux-2.6.5-sles9.orig/include/linux/ext3_extents.h 2005-02-17 22:07:57.023609040 +0300
-+++ linux-2.6.5-sles9/include/linux/ext3_extents.h 2005-02-23 01:02:37.416432600 +0300
-@@ -0,0 +1,262 @@
-+/*
-+ * Copyright (c) 2003, Cluster File Systems, Inc, info@clusterfs.com
-+ * Written by Alex Tomas <alex@clusterfs.com>
-+ *
-+ * This program is free software; you can redistribute it and/or modify
-+ * it under the terms of the GNU General Public License version 2 as
-+ * published by the Free Software Foundation.
-+ *
-+ * This program is distributed in the hope that it will be useful,
-+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
-+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-+ * GNU General Public License for more details.
-+ *
-+ * You should have received a copy of the GNU General Public Licens
-+ * along with this program; if not, write to the Free Software
-+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-
-+ */
-+
-+#ifndef _LINUX_EXT3_EXTENTS
-+#define _LINUX_EXT3_EXTENTS
-+
-+/*
-+ * with AGRESSIVE_TEST defined, the capacity of index/leaf blocks
-+ * becomes very small, so index splits, in-depth growing and
-+ * other hard changes happen much more often;
-+ * this is for debug purposes only
-+ */
-+#define AGRESSIVE_TEST_
-+
-+/*
-+ * if CHECK_BINSEARCH defined, then results of binary search
-+ * will be checked by linear search
-+ */
-+#define CHECK_BINSEARCH_
-+
-+/*
-+ * if EXT_DEBUG is defined you can use 'extdebug' mount option
-+ * to get lots of info about what's going on
-+ */
-+#define EXT_DEBUG_
-+#ifdef EXT_DEBUG
-+#define ext_debug(tree,fmt,a...) \
-+do { \
-+ if (test_opt((tree)->inode->i_sb, EXTDEBUG)) \
-+ printk(fmt, ##a); \
-+} while (0);
-+#else
-+#define ext_debug(tree,fmt,a...)
-+#endif
-+
-+/*
-+ * if EXT_STATS is defined then stats numbers are collected
-+ * these numbers will be displayed at umount time
-+ */
-+#define EXT_STATS_
-+
-+
-+#define EXT3_ALLOC_NEEDED 3 /* block bitmap + group desc. + sb */
-+
-+/*
-+ * ext3_inode has i_block array (total 60 bytes)
-+ * first 4 bytes are used to store:
-+ * - tree depth (0 means there is no tree yet; all extents are in the inode)
-+ * - number of alive extents in the inode
-+ */
-+
-+/*
-+ * this is extent on-disk structure
-+ * it's used at the bottom of the tree
-+ */
-+struct ext3_extent {
-+ __u32 ee_block; /* first logical block extent covers */
-+ __u16 ee_len; /* number of blocks covered by extent */
-+ __u16 ee_start_hi; /* high 16 bits of physical block */
-+ __u32 ee_start; /* low 32 bits of physical block */
-+};
-+
-+/*
-+ * this is index on-disk structure
-+ * it's used at all the levels, but the bottom
-+ */
-+struct ext3_extent_idx {
-+ __u32 ei_block; /* index covers logical blocks from 'block' */
-+ __u32 ei_leaf; /* pointer to the physical block of the next *
-+ * level. leaf or next index could be here */
-+ __u16 ei_leaf_hi; /* high 16 bits of physical block */
-+ __u16 ei_unused;
-+};
-+
-+/*
-+ * each block (leaves and indexes), even the inode-stored one, has a header
-+ */
-+struct ext3_extent_header {
-+ __u16 eh_magic; /* probably will support different formats */
-+ __u16 eh_entries; /* number of valid entries */
-+ __u16 eh_max; /* capacity of store in entries */
-+ __u16 eh_depth; /* has the tree real underlying blocks? */
-+ __u32 eh_generation; /* flags(8 bits) | generation of the tree */
-+};
-+
-+#define EXT3_EXT_MAGIC 0xf30a
-+
-+/*
-+ * array of ext3_ext_path contains path to some extent
-+ * creation/lookup routines use it for traversal/splitting/etc
-+ * truncate uses it to simulate recursive walking
-+ */
-+struct ext3_ext_path {
-+ __u32 p_block;
-+ __u16 p_depth;
-+ struct ext3_extent *p_ext;
-+ struct ext3_extent_idx *p_idx;
-+ struct ext3_extent_header *p_hdr;
-+ struct buffer_head *p_bh;
-+};
-+
-+/*
-+ * structure for external API
-+ */
-+
-+/*
-+ * storage for cached extent
-+ */
-+struct ext3_ext_cache {
-+ __u32 ec_start;
-+ __u32 ec_block;
-+ __u32 ec_len;
-+ __u32 ec_type;
-+};
-+
-+#define EXT3_EXT_CACHE_NO 0
-+#define EXT3_EXT_CACHE_GAP 1
-+#define EXT3_EXT_CACHE_EXTENT 2
-+
-+/*
-+ * ext3_extents_tree is used to pass initial information
-+ * to top-level extents API
-+ */
-+struct ext3_extents_helpers;
-+struct ext3_extents_tree {
-+ struct inode *inode; /* inode which tree belongs to */
-+ void *root; /* ptr to the data the top of the tree resides at */
-+ void *buffer; /* will be passed as arg to ^^ routines */
-+ int buffer_len;
-+ void *private;
-+ struct ext3_ext_cache *cex;/* last found extent */
-+ struct ext3_extents_helpers *ops;
-+};
-+
-+struct ext3_extents_helpers {
-+ int (*get_write_access)(handle_t *h, void *buffer);
-+ int (*mark_buffer_dirty)(handle_t *h, void *buffer);
-+ int (*mergable)(struct ext3_extent *ex1, struct ext3_extent *ex2);
-+ int (*remove_extent_credits)(struct ext3_extents_tree *,
-+ struct ext3_extent *, unsigned long,
-+ unsigned long);
-+ int (*remove_extent)(struct ext3_extents_tree *,
-+ struct ext3_extent *, unsigned long,
-+ unsigned long);
-+ int (*new_block)(handle_t *, struct ext3_extents_tree *,
-+ struct ext3_ext_path *, struct ext3_extent *,
-+ int *);
-+};
-+
-+/*
-+ * to be called by ext3_ext_walk_space()
-+ * negative retcode - error
-+ * positive retcode - signal for ext3_ext_walk_space(), see below
-+ * callback must return valid extent (passed or newly created)
-+ */
-+typedef int (*ext_prepare_callback)(struct ext3_extents_tree *,
-+ struct ext3_ext_path *,
-+ struct ext3_ext_cache *);
-+
-+#define EXT_CONTINUE 0
-+#define EXT_BREAK 1
-+#define EXT_REPEAT 2
-+
-+
-+#define EXT_MAX_BLOCK 0xffffffff
-+
-+
-+#define EXT_FIRST_EXTENT(__hdr__) \
-+ ((struct ext3_extent *) (((char *) (__hdr__)) + \
-+ sizeof(struct ext3_extent_header)))
-+#define EXT_FIRST_INDEX(__hdr__) \
-+ ((struct ext3_extent_idx *) (((char *) (__hdr__)) + \
-+ sizeof(struct ext3_extent_header)))
-+#define EXT_HAS_FREE_INDEX(__path__) \
-+ ((__path__)->p_hdr->eh_entries < (__path__)->p_hdr->eh_max)
-+#define EXT_LAST_EXTENT(__hdr__) \
-+ (EXT_FIRST_EXTENT((__hdr__)) + (__hdr__)->eh_entries - 1)
-+#define EXT_LAST_INDEX(__hdr__) \
-+ (EXT_FIRST_INDEX((__hdr__)) + (__hdr__)->eh_entries - 1)
-+#define EXT_MAX_EXTENT(__hdr__) \
-+ (EXT_FIRST_EXTENT((__hdr__)) + (__hdr__)->eh_max - 1)
-+#define EXT_MAX_INDEX(__hdr__) \
-+ (EXT_FIRST_INDEX((__hdr__)) + (__hdr__)->eh_max - 1)
-+#define EXT_HDR_GEN(__hdr__) ((__hdr__)->eh_generation & 0x00ffffff)
-+#define EXT_FLAGS(__hdr__) ((__hdr__)->eh_generation >> 24)
-+#define EXT_FLAGS_CLR_UNKNOWN 0x7 /* Flags cleared on modification */
-+
-+#define EXT_BLOCK_HDR(__bh__) ((struct ext3_extent_header *)(__bh__)->b_data)
-+#define EXT_ROOT_HDR(__tree__) ((struct ext3_extent_header *)(__tree__)->root)
-+#define EXT_DEPTH(__tree__) (EXT_ROOT_HDR(__tree__)->eh_depth)
-+#define EXT_GENERATION(__tree__) EXT_HDR_GEN(EXT_ROOT_HDR(__tree__))
-+
-+#define EXT_ASSERT(__x__) if (!(__x__)) BUG();
-+
-+#define EXT_CHECK_PATH(tree,path) \
-+{ \
-+ int depth = EXT_DEPTH(tree); \
-+ BUG_ON((unsigned long) (path) < __PAGE_OFFSET); \
-+ BUG_ON((unsigned long) (path)[depth].p_idx < \
-+ __PAGE_OFFSET && (path)[depth].p_idx != NULL); \
-+ BUG_ON((unsigned long) (path)[depth].p_ext < \
-+ __PAGE_OFFSET && (path)[depth].p_ext != NULL); \
-+ BUG_ON((unsigned long) (path)[depth].p_hdr < __PAGE_OFFSET); \
-+ BUG_ON((unsigned long) (path)[depth].p_bh < __PAGE_OFFSET \
-+ && depth != 0); \
-+ BUG_ON((path)[0].p_depth != depth); \
-+}
-+
-+
-+/*
-+ * this structure is used to gather extents from the tree via ioctl
-+ */
-+struct ext3_extent_buf {
-+ unsigned long start;
-+ int buflen;
-+ void *buffer;
-+ void *cur;
-+ int err;
-+};
-+
-+/*
-+ * this structure is used to collect stats info about the tree
-+ */
-+struct ext3_extent_tree_stats {
-+ int depth;
-+ int extents_num;
-+ int leaf_num;
-+};
-+
-+extern void ext3_init_tree_desc(struct ext3_extents_tree *, struct inode *);
-+extern int ext3_extent_tree_init(handle_t *, struct ext3_extents_tree *);
-+extern int ext3_ext_calc_credits_for_insert(struct ext3_extents_tree *, struct ext3_ext_path *);
-+extern int ext3_ext_insert_extent(handle_t *, struct ext3_extents_tree *, struct ext3_ext_path *, struct ext3_extent *);
-+extern int ext3_ext_walk_space(struct ext3_extents_tree *, unsigned long, unsigned long, ext_prepare_callback);
-+extern int ext3_ext_remove_space(struct ext3_extents_tree *, unsigned long, unsigned long);
-+extern struct ext3_ext_path * ext3_ext_find_extent(struct ext3_extents_tree *, int, struct ext3_ext_path *);
-+extern int ext3_ext_calc_blockmap_metadata(struct inode *, int);
-+
-+static inline void
-+ext3_ext_invalidate_cache(struct ext3_extents_tree *tree)
-+{
-+ if (tree->cex)
-+ tree->cex->ec_type = EXT3_EXT_CACHE_NO;
-+}
-+
-+
-+#endif /* _LINUX_EXT3_EXTENTS */
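A sanity check of the on-disk layout declared in this header: the header, extent and index records are all 12 bytes, so a 4096-byte block holds (4096 - 12) / 12 = 340 extents and the 60-byte i_block root holds 4, matching ext3_ext_space_block()/ext3_ext_space_root() when AGRESSIVE_TEST is off. Fixed-width types stand in for __u16/__u32 so the sketch builds in user space:

#include <stdint.h>
#include <stdio.h>

struct ext3_extent {
        uint32_t ee_block;
        uint16_t ee_len;
        uint16_t ee_start_hi;
        uint32_t ee_start;
};

struct ext3_extent_idx {
        uint32_t ei_block;
        uint32_t ei_leaf;
        uint16_t ei_leaf_hi;
        uint16_t ei_unused;
};

struct ext3_extent_header {
        uint16_t eh_magic;
        uint16_t eh_entries;
        uint16_t eh_max;
        uint16_t eh_depth;
        uint32_t eh_generation;
};

int main(void)
{
        size_t hdr = sizeof(struct ext3_extent_header);

        printf("extent %zu, index %zu, header %zu bytes\n",
               sizeof(struct ext3_extent),
               sizeof(struct ext3_extent_idx), hdr);
        printf("4096-byte leaf holds %zu extents, inode root holds %zu\n",
               (4096 - hdr) / sizeof(struct ext3_extent),
               (60 - hdr) / sizeof(struct ext3_extent));
        return 0;
}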
-Index: linux-2.6.5-sles9/include/linux/ext3_fs_i.h
-===================================================================
---- linux-2.6.5-sles9.orig/include/linux/ext3_fs_i.h 2005-02-23 01:01:52.425272296 +0300
-+++ linux-2.6.5-sles9/include/linux/ext3_fs_i.h 2005-02-23 01:45:55.611446920 +0300
-@@ -19,6 +19,7 @@
- #include <linux/rwsem.h>
- #include <linux/rbtree.h>
- #include <linux/seqlock.h>
-+#include <linux/ext3_extents.h>
-
- struct reserve_window {
- __u32 _rsv_start; /* First byte reserved */
-@@ -128,6 +129,8 @@
- */
- struct semaphore truncate_sem;
- struct inode vfs_inode;
-+
-+ struct ext3_ext_cache i_cached_extent;
- };
-
- #endif /* _LINUX_EXT3_FS_I */
-
-%diffstat
- fs/ext3/Makefile | 2
- fs/ext3/extents.c | 2356 +++++++++++++++++++++++++++++++++++++++++++
- fs/ext3/ialloc.c | 4
- fs/ext3/inode.c | 29
- fs/ext3/ioctl.c | 4
- fs/ext3/super.c | 15
- include/linux/ext3_extents.h | 265 ++++
- include/linux/ext3_fs.h | 17
- include/linux/ext3_fs_i.h | 3
- 9 files changed, 2687 insertions(+), 8 deletions(-)
-
+++ /dev/null
-Index: linux-stage/fs/ext3/extents.c
-===================================================================
---- linux-stage.orig/fs/ext3/extents.c 2005-02-25 15:33:48.890198160 +0200
-+++ linux-stage/fs/ext3/extents.c 2005-02-25 15:33:48.917194056 +0200
-@@ -0,0 +1,2360 @@
-+/*
-+ * Copyright(c) 2003, 2004, 2005, Cluster File Systems, Inc, info@clusterfs.com
-+ * Written by Alex Tomas <alex@clusterfs.com>
-+ *
-+ * This program is free software; you can redistribute it and/or modify
-+ * it under the terms of the GNU General Public License version 2 as
-+ * published by the Free Software Foundation.
-+ *
-+ * This program is distributed in the hope that it will be useful,
-+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
-+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-+ * GNU General Public License for more details.
-+ *
-+ * You should have received a copy of the GNU General Public Licens
-+ * along with this program; if not, write to the Free Software
-+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-
-+ */
-+
-+/*
-+ * Extents support for EXT3
-+ *
-+ * TODO:
-+ * - ext3_ext_walk_space() should not use ext3_ext_find_extent()
-+ * - ext3_ext_calc_credits() could take 'mergable' into account
-+ * - ext3*_error() should be used in some situations
-+ * - find_goal() [to be tested and improved]
-+ * - smart tree reduction
-+ * - arch-independence
-+ * common on-disk format for big/little-endian arch
-+ */
-+
-+#include <linux/module.h>
-+#include <linux/fs.h>
-+#include <linux/time.h>
-+#include <linux/ext3_jbd.h>
-+#include <linux/jbd.h>
-+#include <linux/smp_lock.h>
-+#include <linux/highuid.h>
-+#include <linux/pagemap.h>
-+#include <linux/quotaops.h>
-+#include <linux/string.h>
-+#include <linux/slab.h>
-+#include <linux/ext3_extents.h>
-+#include <asm/uaccess.h>
-+
-+
-+static inline int ext3_ext_check_header(struct ext3_extent_header *eh)
-+{
-+ if (eh->eh_magic != EXT3_EXT_MAGIC) {
-+ printk(KERN_ERR "EXT3-fs: invalid magic = 0x%x\n",
-+ (unsigned)eh->eh_magic);
-+ return -EIO;
-+ }
-+ if (eh->eh_max == 0) {
-+ printk(KERN_ERR "EXT3-fs: invalid eh_max = %u\n",
-+ (unsigned)eh->eh_max);
-+ return -EIO;
-+ }
-+ if (eh->eh_entries > eh->eh_max) {
-+ printk(KERN_ERR "EXT3-fs: invalid eh_entries = %u\n",
-+ (unsigned)eh->eh_entries);
-+ return -EIO;
-+ }
-+ return 0;
-+}
-+
-+static handle_t *ext3_ext_journal_restart(handle_t *handle, int needed)
-+{
-+ int err;
-+
-+ if (handle->h_buffer_credits > needed)
-+ return handle;
-+ if (!ext3_journal_extend(handle, needed))
-+ return handle;
-+ err = ext3_journal_restart(handle, needed);
-+
-+ return handle;
-+}
-+
-+static int inline
-+ext3_ext_get_access_for_root(handle_t *h, struct ext3_extents_tree *tree)
-+{
-+ if (tree->ops->get_write_access)
-+ return tree->ops->get_write_access(h,tree->buffer);
-+ else
-+ return 0;
-+}
-+
-+static int inline
-+ext3_ext_mark_root_dirty(handle_t *h, struct ext3_extents_tree *tree)
-+{
-+ if (tree->ops->mark_buffer_dirty)
-+ return tree->ops->mark_buffer_dirty(h,tree->buffer);
-+ else
-+ return 0;
-+}
-+
-+/*
-+ * could return:
-+ * - EROFS
-+ * - ENOMEM
-+ */
-+static int ext3_ext_get_access(handle_t *handle,
-+ struct ext3_extents_tree *tree,
-+ struct ext3_ext_path *path)
-+{
-+ int err;
-+
-+ if (path->p_bh) {
-+ /* path points to block */
-+ err = ext3_journal_get_write_access(handle, path->p_bh);
-+ } else {
-+ /* path points to leaf/index in inode body */
-+ err = ext3_ext_get_access_for_root(handle, tree);
-+ }
-+ return err;
-+}
-+
-+/*
-+ * could return:
-+ * - EROFS
-+ * - ENOMEM
-+ * - EIO
-+ */
-+static int ext3_ext_dirty(handle_t *handle, struct ext3_extents_tree *tree,
-+ struct ext3_ext_path *path)
-+{
-+ int err;
-+ if (path->p_bh) {
-+ /* path points to block */
-+ err =ext3_journal_dirty_metadata(handle, path->p_bh);
-+ } else {
-+ /* path points to leaf/index in inode body */
-+ err = ext3_ext_mark_root_dirty(handle, tree);
-+ }
-+ return err;
-+}
-+
-+static int inline
-+ext3_ext_new_block(handle_t *handle, struct ext3_extents_tree *tree,
-+ struct ext3_ext_path *path, struct ext3_extent *ex,
-+ int *err)
-+{
-+ int goal, depth, newblock;
-+ struct inode *inode;
-+
-+ EXT_ASSERT(tree);
-+ if (tree->ops->new_block)
-+ return tree->ops->new_block(handle, tree, path, ex, err);
-+
-+ inode = tree->inode;
-+ depth = EXT_DEPTH(tree);
-+ if (path && depth > 0) {
-+ goal = path[depth-1].p_block;
-+ } else {
-+ struct ext3_inode_info *ei = EXT3_I(inode);
-+ unsigned long bg_start;
-+ unsigned long colour;
-+
-+ bg_start = (ei->i_block_group *
-+ EXT3_BLOCKS_PER_GROUP(inode->i_sb)) +
-+ le32_to_cpu(EXT3_SB(inode->i_sb)->s_es->s_first_data_block);
-+ colour = (current->pid % 16) *
-+ (EXT3_BLOCKS_PER_GROUP(inode->i_sb) / 16);
-+ goal = bg_start + colour;
-+ }
-+
-+ newblock = ext3_new_block(handle, inode, goal, err);
-+ return newblock;
-+}
-+
-+static inline void ext3_ext_tree_changed(struct ext3_extents_tree *tree)
-+{
-+ struct ext3_extent_header *neh = EXT_ROOT_HDR(tree);
-+ neh->eh_generation = ((EXT_FLAGS(neh) & ~EXT_FLAGS_CLR_UNKNOWN) <<
-+ EXT_HDR_GEN_BITS) |
-+ ((EXT_HDR_GEN(neh) + 1) & EXT_HDR_GEN_MASK);
-+}
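ext3_ext_tree_changed() above bumps a generation counter that lives in the low 24 bits of eh_generation, with 8 flag bits above it (see EXT_HDR_GEN/EXT_FLAGS in ext3_extents.h). A hedged sketch of that pack-and-bump, ignoring the EXT_FLAGS_CLR_UNKNOWN clearing the kernel also does; GEN_BITS/GEN_MASK here are assumed to match the EXT_HDR_GEN_BITS/EXT_HDR_GEN_MASK macros the code refers to:

#include <stdio.h>
#include <stdint.h>

#define GEN_BITS  24
#define GEN_MASK  0x00ffffffu

static uint32_t bump_generation(uint32_t eh_generation)
{
        uint32_t flags = eh_generation >> GEN_BITS;
        uint32_t gen   = (eh_generation + 1) & GEN_MASK;  /* wraps at 2^24 */

        return (flags << GEN_BITS) | gen;
}

int main(void)
{
        uint32_t g = 0x05fffffe;        /* flags = 0x05, gen = 0xfffffe */

        g = bump_generation(g);
        g = bump_generation(g);         /* generation wraps to 0 */
        printf("eh_generation = 0x%08x\n", (unsigned int)g);
        return 0;
}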
-+
-+static inline int ext3_ext_space_block(struct ext3_extents_tree *tree)
-+{
-+ int size;
-+
-+ size = (tree->inode->i_sb->s_blocksize -
-+ sizeof(struct ext3_extent_header)) /
-+ sizeof(struct ext3_extent);
-+#ifdef AGRESSIVE_TEST
-+ size = 6;
-+#endif
-+ return size;
-+}
-+
-+static inline int ext3_ext_space_block_idx(struct ext3_extents_tree *tree)
-+{
-+ int size;
-+
-+ size = (tree->inode->i_sb->s_blocksize -
-+ sizeof(struct ext3_extent_header)) /
-+ sizeof(struct ext3_extent_idx);
-+#ifdef AGRESSIVE_TEST
-+ size = 5;
-+#endif
-+ return size;
-+}
-+
-+static inline int ext3_ext_space_root(struct ext3_extents_tree *tree)
-+{
-+ int size;
-+
-+ size = (tree->buffer_len - sizeof(struct ext3_extent_header)) /
-+ sizeof(struct ext3_extent);
-+#ifdef AGRESSIVE_TEST
-+ size = 3;
-+#endif
-+ return size;
-+}
-+
-+static inline int ext3_ext_space_root_idx(struct ext3_extents_tree *tree)
-+{
-+ int size;
-+
-+ size = (tree->buffer_len - sizeof(struct ext3_extent_header)) /
-+ sizeof(struct ext3_extent_idx);
-+#ifdef AGRESSIVE_TEST
-+ size = 4;
-+#endif
-+ return size;
-+}
-+
-+static void ext3_ext_show_path(struct ext3_extents_tree *tree,
-+ struct ext3_ext_path *path)
-+{
-+#ifdef EXT_DEBUG
-+ int k, l = path->p_depth;
-+
-+ ext_debug(tree, "path:");
-+ for (k = 0; k <= l; k++, path++) {
-+ if (path->p_idx) {
-+ ext_debug(tree, " %d->%d", path->p_idx->ei_block,
-+ path->p_idx->ei_leaf);
-+ } else if (path->p_ext) {
-+ ext_debug(tree, " %d:%d:%d",
-+ path->p_ext->ee_block,
-+ path->p_ext->ee_len,
-+ path->p_ext->ee_start);
-+ } else
-+ ext_debug(tree, " []");
-+ }
-+ ext_debug(tree, "\n");
-+#endif
-+}
-+
-+static void ext3_ext_show_leaf(struct ext3_extents_tree *tree,
-+ struct ext3_ext_path *path)
-+{
-+#ifdef EXT_DEBUG
-+ int depth = EXT_DEPTH(tree);
-+ struct ext3_extent_header *eh;
-+ struct ext3_extent *ex;
-+ int i;
-+
-+ if (!path)
-+ return;
-+
-+ eh = path[depth].p_hdr;
-+ ex = EXT_FIRST_EXTENT(eh);
-+
-+ for (i = 0; i < eh->eh_entries; i++, ex++) {
-+ ext_debug(tree, "%d:%d:%d ",
-+ ex->ee_block, ex->ee_len, ex->ee_start);
-+ }
-+ ext_debug(tree, "\n");
-+#endif
-+}
-+
-+static void ext3_ext_drop_refs(struct ext3_ext_path *path)
-+{
-+ int depth = path->p_depth;
-+ int i;
-+
-+ for (i = 0; i <= depth; i++, path++) {
-+ if (path->p_bh) {
-+ brelse(path->p_bh);
-+ path->p_bh = NULL;
-+ }
-+ }
-+}
-+
-+/*
-+ * binary search for closest index by given block
-+ */
-+static inline void
-+ext3_ext_binsearch_idx(struct ext3_extents_tree *tree,
-+ struct ext3_ext_path *path, int block)
-+{
-+ struct ext3_extent_header *eh = path->p_hdr;
-+ struct ext3_extent_idx *ix;
-+ int l = 0, k, r;
-+
-+ EXT_ASSERT(eh->eh_magic == EXT3_EXT_MAGIC);
-+ EXT_ASSERT(eh->eh_entries <= eh->eh_max);
-+ EXT_ASSERT(eh->eh_entries > 0);
-+
-+ ext_debug(tree, "binsearch for %d(idx): ", block);
-+
-+ path->p_idx = ix = EXT_FIRST_INDEX(eh);
-+
-+ r = k = eh->eh_entries;
-+ while (k > 1) {
-+ k = (r - l) / 2;
-+ if (block < ix[l + k].ei_block)
-+ r -= k;
-+ else
-+ l += k;
-+ ext_debug(tree, "%d:%d:%d ", k, l, r);
-+ }
-+
-+ ix += l;
-+ path->p_idx = ix;
-+ ext_debug(tree," -> %d->%d ",path->p_idx->ei_block,path->p_idx->ei_leaf);
-+
-+ while (l++ < r) {
-+ if (block < ix->ei_block)
-+ break;
-+ path->p_idx = ix++;
-+ }
-+ ext_debug(tree, " -> %d->%d\n", path->p_idx->ei_block,
-+ path->p_idx->ei_leaf);
-+
-+#ifdef CHECK_BINSEARCH
-+ {
-+ struct ext3_extent_idx *chix;
-+
-+ chix = ix = EXT_FIRST_INDEX(eh);
-+ for (k = 0; k < eh->eh_entries; k++, ix++) {
-+ if (k != 0 && ix->ei_block <= ix[-1].ei_block) {
-+ printk("k=%d, ix=0x%p, first=0x%p\n", k,
-+ ix, EXT_FIRST_INDEX(eh));
-+ printk("%u <= %u\n",
-+ ix->ei_block,ix[-1].ei_block);
-+ }
-+ EXT_ASSERT(k == 0 || ix->ei_block > ix[-1].ei_block);
-+ if (block < ix->ei_block)
-+ break;
-+ chix = ix;
-+ }
-+ EXT_ASSERT(chix == path->p_idx);
-+ }
-+#endif
-+}
-+
-+/*
-+ * binary search for closest extent by given block
-+ */
-+static inline void
-+ext3_ext_binsearch(struct ext3_extents_tree *tree,
-+ struct ext3_ext_path *path, int block)
-+{
-+ struct ext3_extent_header *eh = path->p_hdr;
-+ struct ext3_extent *ex;
-+ int l = 0, k, r;
-+
-+ EXT_ASSERT(eh->eh_magic == EXT3_EXT_MAGIC);
-+ EXT_ASSERT(eh->eh_entries <= eh->eh_max);
-+
-+ if (eh->eh_entries == 0) {
-+ /*
-+ * this leaf is empty yet:
-+ * we get such a leaf in split/add case
-+ */
-+ return;
-+ }
-+
-+ ext_debug(tree, "binsearch for %d: ", block);
-+
-+ path->p_ext = ex = EXT_FIRST_EXTENT(eh);
-+
-+ r = k = eh->eh_entries;
-+ while (k > 1) {
-+ k = (r - l) / 2;
-+ if (block < ex[l + k].ee_block)
-+ r -= k;
-+ else
-+ l += k;
-+ ext_debug(tree, "%d:%d:%d ", k, l, r);
-+ }
-+
-+ ex += l;
-+ path->p_ext = ex;
-+ ext_debug(tree, " -> %d:%d:%d ", path->p_ext->ee_block,
-+ path->p_ext->ee_start, path->p_ext->ee_len);
-+
-+ while (l++ < r) {
-+ if (block < ex->ee_block)
-+ break;
-+ path->p_ext = ex++;
-+ }
-+ ext_debug(tree, " -> %d:%d:%d\n", path->p_ext->ee_block,
-+ path->p_ext->ee_start, path->p_ext->ee_len);
-+
-+#ifdef CHECK_BINSEARCH
-+ {
-+ struct ext3_extent *chex;
-+
-+ chex = ex = EXT_FIRST_EXTENT(eh);
-+ for (k = 0; k < eh->eh_entries; k++, ex++) {
-+ EXT_ASSERT(k == 0 || ex->ee_block > ex[-1].ee_block);
-+ if (block < ex->ee_block)
-+ break;
-+ chex = ex;
-+ }
-+ EXT_ASSERT(chex == path->p_ext);
-+ }
-+#endif
-+}
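Both binsearch helpers above settle on (roughly) the last entry whose start block is <= the target, i.e. the index or extent the block falls into. A stand-alone version of that search over a plain sorted array; the loop shape differs from the kernel's l/k/r halving but the result is the same, except that a block before the first entry yields -1 here rather than the first entry:

#include <stdio.h>

/* index of the last start[] value <= block, or -1 if block precedes all */
static int find_entry(const unsigned int *start, int nr, unsigned int block)
{
        int lo = 0, hi = nr - 1, found = -1;

        while (lo <= hi) {
                int mid = lo + (hi - lo) / 2;

                if (start[mid] <= block) {
                        found = mid;
                        lo = mid + 1;
                } else {
                        hi = mid - 1;
                }
        }
        return found;
}

int main(void)
{
        unsigned int ee_block[] = { 0, 12, 40, 100, 500 };

        printf("block 41 falls into extent #%d\n",
               find_entry(ee_block, 5, 41));    /* -> #2 (starts at 40) */
        printf("block 7 falls into extent #%d\n",
               find_entry(ee_block, 5, 7));     /* -> #0 (starts at 0) */
        return 0;
}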
-+
-+int ext3_extent_tree_init(handle_t *handle, struct ext3_extents_tree *tree)
-+{
-+ struct ext3_extent_header *eh;
-+
-+ BUG_ON(tree->buffer_len == 0);
-+ ext3_ext_get_access_for_root(handle, tree);
-+ eh = EXT_ROOT_HDR(tree);
-+ eh->eh_depth = 0;
-+ eh->eh_entries = 0;
-+ eh->eh_magic = EXT3_EXT_MAGIC;
-+ eh->eh_max = ext3_ext_space_root(tree);
-+ ext3_ext_mark_root_dirty(handle, tree);
-+ ext3_ext_invalidate_cache(tree);
-+ return 0;
-+}
-+
-+struct ext3_ext_path *
-+ext3_ext_find_extent(struct ext3_extents_tree *tree, int block,
-+ struct ext3_ext_path *path)
-+{
-+ struct ext3_extent_header *eh;
-+ struct buffer_head *bh;
-+ int depth, i, ppos = 0;
-+
-+ EXT_ASSERT(tree);
-+ EXT_ASSERT(tree->inode);
-+ EXT_ASSERT(tree->root);
-+
-+ eh = EXT_ROOT_HDR(tree);
-+ EXT_ASSERT(eh);
-+ if (ext3_ext_check_header(eh)) {
-+ /* don't free previously allocated path
-+ * -- caller should take care */
-+ path = NULL;
-+ goto err;
-+ }
-+
-+ i = depth = EXT_DEPTH(tree);
-+ EXT_ASSERT(eh->eh_max);
-+ EXT_ASSERT(eh->eh_magic == EXT3_EXT_MAGIC);
-+
-+ /* account possible depth increase */
-+ if (!path) {
-+ path = kmalloc(sizeof(struct ext3_ext_path) * (depth + 2),
-+ GFP_NOFS);
-+ if (!path)
-+ return ERR_PTR(-ENOMEM);
-+ }
-+ memset(path, 0, sizeof(struct ext3_ext_path) * (depth + 1));
-+ path[0].p_hdr = eh;
-+
-+ /* walk through the tree */
-+ while (i) {
-+ ext_debug(tree, "depth %d: num %d, max %d\n",
-+ ppos, eh->eh_entries, eh->eh_max);
-+ ext3_ext_binsearch_idx(tree, path + ppos, block);
-+ path[ppos].p_block = path[ppos].p_idx->ei_leaf;
-+ path[ppos].p_depth = i;
-+ path[ppos].p_ext = NULL;
-+
-+ bh = sb_bread(tree->inode->i_sb, path[ppos].p_block);
-+ if (!bh)
-+ goto err;
-+
-+ eh = EXT_BLOCK_HDR(bh);
-+ ppos++;
-+ EXT_ASSERT(ppos <= depth);
-+ path[ppos].p_bh = bh;
-+ path[ppos].p_hdr = eh;
-+ i--;
-+
-+ if (ext3_ext_check_header(eh))
-+ goto err;
-+ }
-+
-+ path[ppos].p_depth = i;
-+ path[ppos].p_hdr = eh;
-+ path[ppos].p_ext = NULL;
-+ path[ppos].p_idx = NULL;
-+
-+ if (ext3_ext_check_header(eh))
-+ goto err;
-+
-+ /* find extent */
-+ ext3_ext_binsearch(tree, path + ppos, block);
-+
-+ ext3_ext_show_path(tree, path);
-+
-+ return path;
-+
-+err:
-+ printk(KERN_ERR "EXT3-fs: header is corrupted!\n");
-+ if (path) {
-+ ext3_ext_drop_refs(path);
-+ kfree(path);
-+ }
-+ return ERR_PTR(-EIO);
-+}
-+
-+/*
-+ * insert new index [logical;ptr] into the block at curp;
-+ * it checks where to insert: before curp or after curp
-+ */
-+static int ext3_ext_insert_index(handle_t *handle,
-+ struct ext3_extents_tree *tree,
-+ struct ext3_ext_path *curp,
-+ int logical, int ptr)
-+{
-+ struct ext3_extent_idx *ix;
-+ int len, err;
-+
-+ if ((err = ext3_ext_get_access(handle, tree, curp)))
-+ return err;
-+
-+ EXT_ASSERT(logical != curp->p_idx->ei_block);
-+ len = EXT_MAX_INDEX(curp->p_hdr) - curp->p_idx;
-+ if (logical > curp->p_idx->ei_block) {
-+ /* insert after */
-+ if (curp->p_idx != EXT_LAST_INDEX(curp->p_hdr)) {
-+ len = (len - 1) * sizeof(struct ext3_extent_idx);
-+ len = len < 0 ? 0 : len;
-+ ext_debug(tree, "insert new index %d after: %d. "
-+ "move %d from 0x%p to 0x%p\n",
-+ logical, ptr, len,
-+ (curp->p_idx + 1), (curp->p_idx + 2));
-+ memmove(curp->p_idx + 2, curp->p_idx + 1, len);
-+ }
-+ ix = curp->p_idx + 1;
-+ } else {
-+ /* insert before */
-+ len = len * sizeof(struct ext3_extent_idx);
-+ len = len < 0 ? 0 : len;
-+ ext_debug(tree, "insert new index %d before: %d. "
-+ "move %d from 0x%p to 0x%p\n",
-+ logical, ptr, len,
-+ curp->p_idx, (curp->p_idx + 1));
-+ memmove(curp->p_idx + 1, curp->p_idx, len);
-+ ix = curp->p_idx;
-+ }
-+
-+ ix->ei_block = logical;
-+ ix->ei_leaf = ptr;
-+ ix->ei_leaf_hi = ix->ei_unused = 0;
-+ curp->p_hdr->eh_entries++;
-+
-+ EXT_ASSERT(curp->p_hdr->eh_entries <= curp->p_hdr->eh_max);
-+ EXT_ASSERT(ix <= EXT_LAST_INDEX(curp->p_hdr));
-+
-+ err = ext3_ext_dirty(handle, tree, curp);
-+ ext3_std_error(tree->inode->i_sb, err);
-+
-+ return err;
-+}
-+
-+/*
-+ * routine inserts new subtree into the path, using free index entry
-+ * at depth 'at':
-+ * - allocates all needed blocks (new leaf and all intermediate index blocks)
-+ * - makes decision where to split
-+ * - moves remaining extents and index entries (right to the split point)
-+ * into the newly allocated blocks
-+ * - initialize subtree
-+ */
-+static int ext3_ext_split(handle_t *handle, struct ext3_extents_tree *tree,
-+ struct ext3_ext_path *path,
-+ struct ext3_extent *newext, int at)
-+{
-+ struct buffer_head *bh = NULL;
-+ int depth = EXT_DEPTH(tree);
-+ struct ext3_extent_header *neh;
-+ struct ext3_extent_idx *fidx;
-+ struct ext3_extent *ex;
-+ int i = at, k, m, a;
-+ unsigned long newblock, oldblock, border;
-+ int *ablocks = NULL; /* array of allocated blocks */
-+ int err = 0;
-+
-+ /* make decision: where to split? */
-+ /* FIXME: now decision is simplest: at current extent */
-+
-+ /* if current leaf will be split, then we should use
-+ * border from split point */
-+ EXT_ASSERT(path[depth].p_ext <= EXT_MAX_EXTENT(path[depth].p_hdr));
-+ if (path[depth].p_ext != EXT_MAX_EXTENT(path[depth].p_hdr)) {
-+ border = path[depth].p_ext[1].ee_block;
-+ ext_debug(tree, "leaf will be splitted."
-+ " next leaf starts at %d\n",
-+ (int)border);
-+ } else {
-+ border = newext->ee_block;
-+ ext_debug(tree, "leaf will be added."
-+ " next leaf starts at %d\n",
-+ (int)border);
-+ }
-+
-+ /*
-+ * if error occurs, then we break processing
-+ * and turn filesystem read-only. so, index won't
-+ * be inserted and tree will be in consistent
-+ * state. next mount will repair buffers too
-+ */
-+
-+ /*
-+ * get array to track all allocated blocks
-+ * we need this to handle errors and free blocks
-+ * upon them
-+ */
-+ ablocks = kmalloc(sizeof(unsigned long) * depth, GFP_NOFS);
-+ if (!ablocks)
-+ return -ENOMEM;
-+ memset(ablocks, 0, sizeof(unsigned long) * depth);
-+
-+ /* allocate all needed blocks */
-+ ext_debug(tree, "allocate %d blocks for indexes/leaf\n", depth - at);
-+ for (a = 0; a < depth - at; a++) {
-+ newblock = ext3_ext_new_block(handle, tree, path, newext, &err);
-+ if (newblock == 0)
-+ goto cleanup;
-+ ablocks[a] = newblock;
-+ }
-+
-+ /* initialize new leaf */
-+ newblock = ablocks[--a];
-+ EXT_ASSERT(newblock);
-+ bh = sb_getblk(tree->inode->i_sb, newblock);
-+ if (!bh) {
-+ err = -EIO;
-+ goto cleanup;
-+ }
-+ lock_buffer(bh);
-+
-+ if ((err = ext3_journal_get_create_access(handle, bh)))
-+ goto cleanup;
-+
-+ neh = EXT_BLOCK_HDR(bh);
-+ neh->eh_entries = 0;
-+ neh->eh_max = ext3_ext_space_block(tree);
-+ neh->eh_magic = EXT3_EXT_MAGIC;
-+ neh->eh_depth = 0;
-+ ex = EXT_FIRST_EXTENT(neh);
-+
-+ /* move remain of path[depth] to the new leaf */
-+ EXT_ASSERT(path[depth].p_hdr->eh_entries == path[depth].p_hdr->eh_max);
-+ /* start copy from next extent */
-+ /* TODO: we could do it by single memmove */
-+ m = 0;
-+ path[depth].p_ext++;
-+ while (path[depth].p_ext <=
-+ EXT_MAX_EXTENT(path[depth].p_hdr)) {
-+ ext_debug(tree, "move %d:%d:%d in new leaf %lu\n",
-+ path[depth].p_ext->ee_block,
-+ path[depth].p_ext->ee_start,
-+ path[depth].p_ext->ee_len,
-+ newblock);
-+ memmove(ex++, path[depth].p_ext++, sizeof(struct ext3_extent));
-+ neh->eh_entries++;
-+ m++;
-+ }
-+ set_buffer_uptodate(bh);
-+ unlock_buffer(bh);
-+
-+ if ((err = ext3_journal_dirty_metadata(handle, bh)))
-+ goto cleanup;
-+ brelse(bh);
-+ bh = NULL;
-+
-+ /* correct old leaf */
-+ if (m) {
-+ if ((err = ext3_ext_get_access(handle, tree, path + depth)))
-+ goto cleanup;
-+ path[depth].p_hdr->eh_entries -= m;
-+ if ((err = ext3_ext_dirty(handle, tree, path + depth)))
-+ goto cleanup;
-+
-+ }
-+
-+ /* create intermediate indexes */
-+ k = depth - at - 1;
-+ EXT_ASSERT(k >= 0);
-+ if (k)
-+ ext_debug(tree, "create %d intermediate indices\n", k);
-+ /* insert new index into current index block */
-+ /* current depth stored in i var */
-+ i = depth - 1;
-+ while (k--) {
-+ oldblock = newblock;
-+ newblock = ablocks[--a];
-+ bh = sb_getblk(tree->inode->i_sb, newblock);
-+ if (!bh) {
-+ err = -EIO;
-+ goto cleanup;
-+ }
-+ lock_buffer(bh);
-+
-+ if ((err = ext3_journal_get_create_access(handle, bh)))
-+ goto cleanup;
-+
-+ neh = EXT_BLOCK_HDR(bh);
-+ neh->eh_entries = 1;
-+ neh->eh_magic = EXT3_EXT_MAGIC;
-+ neh->eh_max = ext3_ext_space_block_idx(tree);
-+ neh->eh_depth = depth - i;
-+ fidx = EXT_FIRST_INDEX(neh);
-+ fidx->ei_block = border;
-+ fidx->ei_leaf = oldblock;
-+ fidx->ei_leaf_hi = fidx->ei_unused = 0;
-+
-+ ext_debug(tree, "int.index at %d (block %lu): %lu -> %lu\n",
-+ i, newblock, border, oldblock);
-+ /* copy indexes */
-+ m = 0;
-+ path[i].p_idx++;
-+
-+ ext_debug(tree, "cur 0x%p, last 0x%p\n", path[i].p_idx,
-+ EXT_MAX_INDEX(path[i].p_hdr));
-+ EXT_ASSERT(EXT_MAX_INDEX(path[i].p_hdr) ==
-+ EXT_LAST_INDEX(path[i].p_hdr));
-+ while (path[i].p_idx <= EXT_MAX_INDEX(path[i].p_hdr)) {
-+ ext_debug(tree, "%d: move %d:%d in new index %lu\n",
-+ i, path[i].p_idx->ei_block,
-+ path[i].p_idx->ei_leaf, newblock);
-+ memmove(++fidx, path[i].p_idx++,
-+ sizeof(struct ext3_extent_idx));
-+ neh->eh_entries++;
-+ EXT_ASSERT(neh->eh_entries <= neh->eh_max);
-+ m++;
-+ }
-+ set_buffer_uptodate(bh);
-+ unlock_buffer(bh);
-+
-+ if ((err = ext3_journal_dirty_metadata(handle, bh)))
-+ goto cleanup;
-+ brelse(bh);
-+ bh = NULL;
-+
-+ /* correct old index */
-+ if (m) {
-+ err = ext3_ext_get_access(handle, tree, path + i);
-+ if (err)
-+ goto cleanup;
-+ path[i].p_hdr->eh_entries -= m;
-+ err = ext3_ext_dirty(handle, tree, path + i);
-+ if (err)
-+ goto cleanup;
-+ }
-+
-+ i--;
-+ }
-+
-+ /* insert new index */
-+ if (!err)
-+ err = ext3_ext_insert_index(handle, tree, path + at,
-+ border, newblock);
-+
-+cleanup:
-+ if (bh) {
-+ if (buffer_locked(bh))
-+ unlock_buffer(bh);
-+ brelse(bh);
-+ }
-+
-+ if (err) {
-+ /* free all allocated blocks in error case */
-+ for (i = 0; i < depth; i++) {
-+ if (!ablocks[i])
-+ continue;
-+ ext3_free_blocks(handle, tree->inode, ablocks[i], 1);
-+ }
-+ }
-+ kfree(ablocks);
-+
-+ return err;
-+}
-+
-+/*
-+ * routine implements tree growing procedure:
-+ * - allocates new block
-+ * - moves top-level data (index block or leaf) into the new block
-+ * - initialize new top-level, creating index that points to the
-+ * just created block
-+ */
-+static int ext3_ext_grow_indepth(handle_t *handle,
-+ struct ext3_extents_tree *tree,
-+ struct ext3_ext_path *path,
-+ struct ext3_extent *newext)
-+{
-+ struct ext3_ext_path *curp = path;
-+ struct ext3_extent_header *neh;
-+ struct ext3_extent_idx *fidx;
-+ struct buffer_head *bh;
-+ unsigned long newblock;
-+ int err = 0;
-+
-+ newblock = ext3_ext_new_block(handle, tree, path, newext, &err);
-+ if (newblock == 0)
-+ return err;
-+
-+ bh = sb_getblk(tree->inode->i_sb, newblock);
-+ if (!bh) {
-+ err = -EIO;
-+ ext3_std_error(tree->inode->i_sb, err);
-+ return err;
-+ }
-+ lock_buffer(bh);
-+
-+ if ((err = ext3_journal_get_create_access(handle, bh))) {
-+ unlock_buffer(bh);
-+ goto out;
-+ }
-+
-+ /* move top-level index/leaf into new block */
-+ memmove(bh->b_data, curp->p_hdr, tree->buffer_len);
-+
-+ /* set size of new block */
-+ neh = EXT_BLOCK_HDR(bh);
-+ /* old root could have indexes or leaves
-+ * so calculate eh_max right way */
-+ if (EXT_DEPTH(tree))
-+ neh->eh_max = ext3_ext_space_block_idx(tree);
-+ else
-+ neh->eh_max = ext3_ext_space_block(tree);
-+ neh->eh_magic = EXT3_EXT_MAGIC;
-+ set_buffer_uptodate(bh);
-+ unlock_buffer(bh);
-+
-+ if ((err = ext3_journal_dirty_metadata(handle, bh)))
-+ goto out;
-+
-+ /* create index in new top-level index: num,max,pointer */
-+ if ((err = ext3_ext_get_access(handle, tree, curp)))
-+ goto out;
-+
-+ curp->p_hdr->eh_magic = EXT3_EXT_MAGIC;
-+ curp->p_hdr->eh_max = ext3_ext_space_root_idx(tree);
-+ curp->p_hdr->eh_entries = 1;
-+ curp->p_idx = EXT_FIRST_INDEX(curp->p_hdr);
-+ /* FIXME: it works, but actually path[0] can be index */
-+ curp->p_idx->ei_block = EXT_FIRST_EXTENT(path[0].p_hdr)->ee_block;
-+ curp->p_idx->ei_leaf = newblock;
-+ curp->p_idx->ei_leaf_hi = curp->p_idx->ei_unused = 0;
-+
-+ neh = EXT_ROOT_HDR(tree);
-+ fidx = EXT_FIRST_INDEX(neh);
-+ ext_debug(tree, "new root: num %d(%d), lblock %d, ptr %d\n",
-+ neh->eh_entries, neh->eh_max, fidx->ei_block, fidx->ei_leaf);
-+
-+ neh->eh_depth = path->p_depth + 1;
-+ err = ext3_ext_dirty(handle, tree, curp);
-+out:
-+ brelse(bh);
-+
-+ return err;
-+}
-+
-+/*
-+ * routine finds empty index and adds new leaf. if no free index found
-+ * then it requests in-depth growing
-+ */
-+static int ext3_ext_create_new_leaf(handle_t *handle,
-+ struct ext3_extents_tree *tree,
-+ struct ext3_ext_path *path,
-+ struct ext3_extent *newext)
-+{
-+ struct ext3_ext_path *curp;
-+ int depth, i, err = 0;
-+
-+repeat:
-+ i = depth = EXT_DEPTH(tree);
-+
-+ /* walk up to the tree and look for free index entry */
-+ curp = path + depth;
-+ while (i > 0 && !EXT_HAS_FREE_INDEX(curp)) {
-+ i--;
-+ curp--;
-+ }
-+
-+ /* we use already allocated block for index block
-+ * so, subsequent data blocks should be contiguous */
-+ if (EXT_HAS_FREE_INDEX(curp)) {
-+ /* if we found index with free entry, then use that
-+ * entry: create all needed subtree and add new leaf */
-+ err = ext3_ext_split(handle, tree, path, newext, i);
-+
-+ /* refill path */
-+ ext3_ext_drop_refs(path);
-+ path = ext3_ext_find_extent(tree, newext->ee_block, path);
-+ if (IS_ERR(path))
-+ err = PTR_ERR(path);
-+ } else {
-+ /* tree is full, time to grow in depth */
-+ err = ext3_ext_grow_indepth(handle, tree, path, newext);
-+
-+ /* refill path */
-+ ext3_ext_drop_refs(path);
-+ path = ext3_ext_find_extent(tree, newext->ee_block, path);
-+ if (IS_ERR(path))
-+ err = PTR_ERR(path);
-+
-+ /*
-+ * only first (depth 0 -> 1) produces free space
-+ * in all other cases we have to split the grown tree
-+ */
-+ depth = EXT_DEPTH(tree);
-+ if (path[depth].p_hdr->eh_entries == path[depth].p_hdr->eh_max) {
-+ /* now we need split */
-+ goto repeat;
-+ }
-+ }
-+
-+ if (err)
-+ return err;
-+
-+ return 0;
-+}
-+
-+/*
-+ * returns allocated block in subsequent extent or EXT_MAX_BLOCK
-+ * NOTE: it considers the block number from an index entry as an
-+ * allocated block. thus, index entries have to be consistent
-+ * with leaves
-+ */
-+static unsigned long
-+ext3_ext_next_allocated_block(struct ext3_ext_path *path)
-+{
-+ int depth;
-+
-+ EXT_ASSERT(path != NULL);
-+ depth = path->p_depth;
-+
-+ if (depth == 0 && path->p_ext == NULL)
-+ return EXT_MAX_BLOCK;
-+
-+ /* FIXME: what if index isn't full ?! */
-+ while (depth >= 0) {
-+ if (depth == path->p_depth) {
-+ /* leaf */
-+ if (path[depth].p_ext !=
-+ EXT_LAST_EXTENT(path[depth].p_hdr))
-+ return path[depth].p_ext[1].ee_block;
-+ } else {
-+ /* index */
-+ if (path[depth].p_idx !=
-+ EXT_LAST_INDEX(path[depth].p_hdr))
-+ return path[depth].p_idx[1].ei_block;
-+ }
-+ depth--;
-+ }
-+
-+ return EXT_MAX_BLOCK;
-+}
-+
-+/*
-+ * returns first allocated block from next leaf or EXT_MAX_BLOCK
-+ */
-+static unsigned ext3_ext_next_leaf_block(struct ext3_extents_tree *tree,
-+ struct ext3_ext_path *path)
-+{
-+ int depth;
-+
-+ EXT_ASSERT(path != NULL);
-+ depth = path->p_depth;
-+
-+ /* zero-tree has no leaf blocks at all */
-+ if (depth == 0)
-+ return EXT_MAX_BLOCK;
-+
-+ /* go to index block */
-+ depth--;
-+
-+ while (depth >= 0) {
-+ if (path[depth].p_idx !=
-+ EXT_LAST_INDEX(path[depth].p_hdr))
-+ return path[depth].p_idx[1].ei_block;
-+ depth--;
-+ }
-+
-+ return EXT_MAX_BLOCK;
-+}
-+
-+/*
-+ * if leaf gets modified and modified extent is first in the leaf
-+ * then we have to correct all indexes above
-+ * TODO: do we need to correct tree in all cases?
-+ */
-+int ext3_ext_correct_indexes(handle_t *handle, struct ext3_extents_tree *tree,
-+ struct ext3_ext_path *path)
-+{
-+ struct ext3_extent_header *eh;
-+ int depth = EXT_DEPTH(tree);
-+ struct ext3_extent *ex;
-+ unsigned long border;
-+ int k, err = 0;
-+
-+ eh = path[depth].p_hdr;
-+ ex = path[depth].p_ext;
-+ EXT_ASSERT(ex);
-+ EXT_ASSERT(eh);
-+
-+ if (depth == 0) {
-+ /* there is no tree at all */
-+ return 0;
-+ }
-+
-+ if (ex != EXT_FIRST_EXTENT(eh)) {
-+ /* we correct tree if first leaf got modified only */
-+ return 0;
-+ }
-+
-+ /*
-+ * TODO: we need correction if border is smaller than the current one
-+ */
-+ k = depth - 1;
-+ border = path[depth].p_ext->ee_block;
-+ if ((err = ext3_ext_get_access(handle, tree, path + k)))
-+ return err;
-+ path[k].p_idx->ei_block = border;
-+ if ((err = ext3_ext_dirty(handle, tree, path + k)))
-+ return err;
-+
-+ while (k--) {
-+ /* change all left-side indexes */
-+ if (path[k+1].p_idx != EXT_FIRST_INDEX(path[k+1].p_hdr))
-+ break;
-+ if ((err = ext3_ext_get_access(handle, tree, path + k)))
-+ break;
-+ path[k].p_idx->ei_block = border;
-+ if ((err = ext3_ext_dirty(handle, tree, path + k)))
-+ break;
-+ }
-+
-+ return err;
-+}
-+
-+static int inline
-+ext3_can_extents_be_merged(struct ext3_extents_tree *tree,
-+ struct ext3_extent *ex1,
-+ struct ext3_extent *ex2)
-+{
-+ if (ex1->ee_block + ex1->ee_len != ex2->ee_block)
-+ return 0;
-+
-+#ifdef AGRESSIVE_TEST
-+ if (ex1->ee_len >= 4)
-+ return 0;
-+#endif
-+
-+ if (!tree->ops->mergable)
-+ return 1;
-+
-+ return tree->ops->mergable(ex1, ex2);
-+}
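The merge test above is pure arithmetic: two extents can be glued when the first ends exactly where the second starts. In the patch the physical-adjacency half of that test is delegated to tree->ops->mergable; the sketch below folds both halves into one predicate purely for illustration:

#include <stdio.h>

struct ext { unsigned int block, len, start; }; /* logical, length, physical */

static int can_merge(const struct ext *a, const struct ext *b)
{
        return a->block + a->len == b->block &&  /* logically adjacent */
               a->start + a->len == b->start;    /* physically adjacent */
}

int main(void)
{
        struct ext a = { 10, 4, 200 };  /* logical 10..13 -> physical 200..203 */
        struct ext b = { 14, 2, 204 };  /* logical 14..15 -> physical 204..205 */
        struct ext c = { 14, 2, 300 };  /* logically adjacent only */

        printf("a+b: %s\n", can_merge(&a, &b) ? "mergeable" : "not mergeable");
        printf("a+c: %s\n", can_merge(&a, &c) ? "mergeable" : "not mergeable");
        return 0;
}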
-+
-+/*
-+ * this routine tries to merge the requested extent into the existing
-+ * extent or inserts requested extent as new one into the tree,
-+ * creating new leaf in no-space case
-+ */
-+int ext3_ext_insert_extent(handle_t *handle, struct ext3_extents_tree *tree,
-+ struct ext3_ext_path *path,
-+ struct ext3_extent *newext)
-+{
-+ struct ext3_extent_header * eh;
-+ struct ext3_extent *ex, *fex;
-+ struct ext3_extent *nearex; /* nearest extent */
-+ struct ext3_ext_path *npath = NULL;
-+ int depth, len, err, next;
-+
-+ EXT_ASSERT(newext->ee_len > 0);
-+ depth = EXT_DEPTH(tree);
-+ ex = path[depth].p_ext;
-+ EXT_ASSERT(path[depth].p_hdr);
-+
-+ /* try to insert block into found extent and return */
-+ if (ex && ext3_can_extents_be_merged(tree, ex, newext)) {
-+ ext_debug(tree, "append %d block to %d:%d (from %d)\n",
-+ newext->ee_len, ex->ee_block, ex->ee_len,
-+ ex->ee_start);
-+ if ((err = ext3_ext_get_access(handle, tree, path + depth)))
-+ return err;
-+ ex->ee_len += newext->ee_len;
-+ eh = path[depth].p_hdr;
-+ nearex = ex;
-+ goto merge;
-+ }
-+
-+repeat:
-+ depth = EXT_DEPTH(tree);
-+ eh = path[depth].p_hdr;
-+ if (eh->eh_entries < eh->eh_max)
-+ goto has_space;
-+
-+ /* probably next leaf has space for us? */
-+ fex = EXT_LAST_EXTENT(eh);
-+ next = ext3_ext_next_leaf_block(tree, path);
-+ if (newext->ee_block > fex->ee_block && next != EXT_MAX_BLOCK) {
-+ ext_debug(tree, "next leaf block - %d\n", next);
-+ EXT_ASSERT(!npath);
-+ npath = ext3_ext_find_extent(tree, next, NULL);
-+ if (IS_ERR(npath))
-+ return PTR_ERR(npath);
-+ EXT_ASSERT(npath->p_depth == path->p_depth);
-+ eh = npath[depth].p_hdr;
-+ if (eh->eh_entries < eh->eh_max) {
-+ ext_debug(tree, "next leaf isnt full(%d)\n",
-+ eh->eh_entries);
-+ path = npath;
-+ goto repeat;
-+ }
-+ ext_debug(tree, "next leaf hasno free space(%d,%d)\n",
-+ eh->eh_entries, eh->eh_max);
-+ }
-+
-+ /*
-+ * there is no free space in found leaf
-+ * we're gonna add new leaf in the tree
-+ */
-+ err = ext3_ext_create_new_leaf(handle, tree, path, newext);
-+ if (err)
-+ goto cleanup;
-+ depth = EXT_DEPTH(tree);
-+ eh = path[depth].p_hdr;
-+
-+has_space:
-+ nearex = path[depth].p_ext;
-+
-+ if ((err = ext3_ext_get_access(handle, tree, path + depth)))
-+ goto cleanup;
-+
-+ if (!nearex) {
-+ /* there is no extent in this leaf, create first one */
-+ ext_debug(tree, "first extent in the leaf: %d:%d:%d\n",
-+ newext->ee_block, newext->ee_start,
-+ newext->ee_len);
-+ path[depth].p_ext = EXT_FIRST_EXTENT(eh);
-+ } else if (newext->ee_block > nearex->ee_block) {
-+ EXT_ASSERT(newext->ee_block != nearex->ee_block);
-+ if (nearex != EXT_LAST_EXTENT(eh)) {
-+ len = EXT_MAX_EXTENT(eh) - nearex;
-+ len = (len - 1) * sizeof(struct ext3_extent);
-+ len = len < 0 ? 0 : len;
-+ ext_debug(tree, "insert %d:%d:%d after: nearest 0x%p, "
-+ "move %d from 0x%p to 0x%p\n",
-+ newext->ee_block, newext->ee_start,
-+ newext->ee_len,
-+ nearex, len, nearex + 1, nearex + 2);
-+ memmove(nearex + 2, nearex + 1, len);
-+ }
-+ path[depth].p_ext = nearex + 1;
-+ } else {
-+ EXT_ASSERT(newext->ee_block != nearex->ee_block);
-+ len = (EXT_MAX_EXTENT(eh) - nearex) * sizeof(struct ext3_extent);
-+ len = len < 0 ? 0 : len;
-+ ext_debug(tree, "insert %d:%d:%d before: nearest 0x%p, "
-+ "move %d from 0x%p to 0x%p\n",
-+ newext->ee_block, newext->ee_start, newext->ee_len,
-+ nearex, len, nearex + 1, nearex + 2);
-+ memmove(nearex + 1, nearex, len);
-+ path[depth].p_ext = nearex;
-+ }
-+
-+ eh->eh_entries++;
-+ nearex = path[depth].p_ext;
-+ nearex->ee_block = newext->ee_block;
-+ nearex->ee_start = newext->ee_start;
-+ nearex->ee_len = newext->ee_len;
-+ /* FIXME: support for large fs */
-+ nearex->ee_start_hi = 0;
-+
-+merge:
-+ /* try to merge extents to the right */
-+ while (nearex < EXT_LAST_EXTENT(eh)) {
-+ if (!ext3_can_extents_be_merged(tree, nearex, nearex + 1))
-+ break;
-+ /* merge with next extent! */
-+ nearex->ee_len += nearex[1].ee_len;
-+ if (nearex + 1 < EXT_LAST_EXTENT(eh)) {
-+ len = (EXT_LAST_EXTENT(eh) - nearex - 1) *
-+ sizeof(struct ext3_extent);
-+ memmove(nearex + 1, nearex + 2, len);
-+ }
-+ eh->eh_entries--;
-+ EXT_ASSERT(eh->eh_entries > 0);
-+ }
-+
-+ /* try to merge extents to the left */
-+
-+ /* time to correct all indexes above */
-+ err = ext3_ext_correct_indexes(handle, tree, path);
-+ if (err)
-+ goto cleanup;
-+
-+ err = ext3_ext_dirty(handle, tree, path + depth);
-+
-+cleanup:
-+ if (npath) {
-+ ext3_ext_drop_refs(npath);
-+ kfree(npath);
-+ }
-+ ext3_ext_tree_changed(tree);
-+ ext3_ext_invalidate_cache(tree);
-+ return err;
-+}
-+
-+int ext3_ext_walk_space(struct ext3_extents_tree *tree, unsigned long block,
-+ unsigned long num, ext_prepare_callback func)
-+{
-+ struct ext3_ext_path *path = NULL;
-+ struct ext3_ext_cache cbex;
-+ struct ext3_extent *ex;
-+ unsigned long next, start = 0, end = 0;
-+ unsigned long last = block + num;
-+ int depth, exists, err = 0;
-+
-+ EXT_ASSERT(tree);
-+ EXT_ASSERT(func);
-+ EXT_ASSERT(tree->inode);
-+ EXT_ASSERT(tree->root);
-+
-+ while (block < last && block != EXT_MAX_BLOCK) {
-+ num = last - block;
-+ /* find extent for this block */
-+ path = ext3_ext_find_extent(tree, block, path);
-+ if (IS_ERR(path)) {
-+ err = PTR_ERR(path);
-+ path = NULL;
-+ break;
-+ }
-+
-+ depth = EXT_DEPTH(tree);
-+ EXT_ASSERT(path[depth].p_hdr);
-+ ex = path[depth].p_ext;
-+ next = ext3_ext_next_allocated_block(path);
-+
-+ exists = 0;
-+ if (!ex) {
-+ /* there is no extent yet, so try to allocate
-+ * all requested space */
-+ start = block;
-+ end = block + num;
-+ } else if (ex->ee_block > block) {
-+ /* need to allocate space before found extent */
-+ start = block;
-+ end = ex->ee_block;
-+ if (block + num < end)
-+ end = block + num;
-+ } else if (block >= ex->ee_block + ex->ee_len) {
-+ /* need to allocate space after found extent */
-+ start = block;
-+ end = block + num;
-+ if (end >= next)
-+ end = next;
-+ } else if (block >= ex->ee_block) {
-+ /*
-+ * some part of requested space is covered
-+ * by found extent
-+ */
-+ start = block;
-+ end = ex->ee_block + ex->ee_len;
-+ if (block + num < end)
-+ end = block + num;
-+ exists = 1;
-+ } else {
-+ BUG();
-+ }
-+ EXT_ASSERT(end > start);
-+
-+ if (!exists) {
-+ cbex.ec_block = start;
-+ cbex.ec_len = end - start;
-+ cbex.ec_start = 0;
-+ cbex.ec_type = EXT3_EXT_CACHE_GAP;
-+ } else {
-+ cbex.ec_block = ex->ee_block;
-+ cbex.ec_len = ex->ee_len;
-+ cbex.ec_start = ex->ee_start;
-+ cbex.ec_type = EXT3_EXT_CACHE_EXTENT;
-+ }
-+
-+ EXT_ASSERT(cbex.ec_len > 0);
-+ EXT_ASSERT(path[depth].p_hdr);
-+ err = func(tree, path, &cbex);
-+ ext3_ext_drop_refs(path);
-+
-+ if (err < 0)
-+ break;
-+ if (err == EXT_REPEAT)
-+ continue;
-+ else if (err == EXT_BREAK) {
-+ err = 0;
-+ break;
-+ }
-+
-+ if (EXT_DEPTH(tree) != depth) {
-+ /* depth was changed. we have to realloc path */
-+ kfree(path);
-+ path = NULL;
-+ }
-+
-+ block = cbex.ec_block + cbex.ec_len;
-+ }
-+
-+ if (path) {
-+ ext3_ext_drop_refs(path);
-+ kfree(path);
-+ }
-+
-+ return err;
-+}
-+
-+static inline void
-+ext3_ext_put_in_cache(struct ext3_extents_tree *tree, __u32 block,
-+ __u32 len, __u32 start, int type)
-+{
-+ EXT_ASSERT(len > 0);
-+ if (tree->cex) {
-+ tree->cex->ec_type = type;
-+ tree->cex->ec_block = block;
-+ tree->cex->ec_len = len;
-+ tree->cex->ec_start = start;
-+ }
-+}
-+
-+/*
-+ * this routine calculates the boundaries of the gap the requested block
-+ * fits into, and caches this gap
-+ */
-+static inline void
-+ext3_ext_put_gap_in_cache(struct ext3_extents_tree *tree,
-+ struct ext3_ext_path *path,
-+ unsigned long block)
-+{
-+ int depth = EXT_DEPTH(tree);
-+ unsigned long lblock, len;
-+ struct ext3_extent *ex;
-+
-+ if (!tree->cex)
-+ return;
-+
-+ ex = path[depth].p_ext;
-+ if (ex == NULL) {
-+ /* there is no extent yet, so gap is [0;-] */
-+ lblock = 0;
-+ len = EXT_MAX_BLOCK;
-+ ext_debug(tree, "cache gap(whole file):");
-+ } else if (block < ex->ee_block) {
-+ lblock = block;
-+ len = ex->ee_block - block;
-+ ext_debug(tree, "cache gap(before): %lu [%lu:%lu]",
-+ (unsigned long) block,
-+ (unsigned long) ex->ee_block,
-+ (unsigned long) ex->ee_len);
-+ } else if (block >= ex->ee_block + ex->ee_len) {
-+ lblock = ex->ee_block + ex->ee_len;
-+ len = ext3_ext_next_allocated_block(path);
-+ ext_debug(tree, "cache gap(after): [%lu:%lu] %lu",
-+ (unsigned long) ex->ee_block,
-+ (unsigned long) ex->ee_len,
-+ (unsigned long) block);
-+ EXT_ASSERT(len > lblock);
-+ len = len - lblock;
-+ } else {
-+ lblock = len = 0;
-+ BUG();
-+ }
-+
-+ ext_debug(tree, " -> %lu:%lu\n", (unsigned long) lblock, len);
-+ ext3_ext_put_in_cache(tree, lblock, len, 0, EXT3_EXT_CACHE_GAP);
-+}
-+
-+static inline int
-+ext3_ext_in_cache(struct ext3_extents_tree *tree, unsigned long block,
-+ struct ext3_extent *ex)
-+{
-+ struct ext3_ext_cache *cex = tree->cex;
-+
-+ /* is there cache storage at all? */
-+ if (!cex)
-+ return EXT3_EXT_CACHE_NO;
-+
-+ /* does the cache hold valid data? */
-+ if (cex->ec_type == EXT3_EXT_CACHE_NO)
-+ return EXT3_EXT_CACHE_NO;
-+
-+ EXT_ASSERT(cex->ec_type == EXT3_EXT_CACHE_GAP ||
-+ cex->ec_type == EXT3_EXT_CACHE_EXTENT);
-+ if (block >= cex->ec_block && block < cex->ec_block + cex->ec_len) {
-+ ex->ee_block = cex->ec_block;
-+ ex->ee_start = cex->ec_start;
-+ ex->ee_start_hi = 0;
-+ ex->ee_len = cex->ec_len;
-+ ext_debug(tree, "%lu cached by %lu:%lu:%lu\n",
-+ (unsigned long) block,
-+ (unsigned long) ex->ee_block,
-+ (unsigned long) ex->ee_len,
-+ (unsigned long) ex->ee_start);
-+ return cex->ec_type;
-+ }
-+
-+ /* not in cache */
-+ return EXT3_EXT_CACHE_NO;
-+}
-+
-+/*
-+ * this routine removes an index from the index block.
-+ * it's used in the truncate case only; thus all requests are for
-+ * the last index in the block
-+ */
-+int ext3_ext_rm_idx(handle_t *handle, struct ext3_extents_tree *tree,
-+ struct ext3_ext_path *path)
-+{
-+ struct buffer_head *bh;
-+ int err;
-+
-+ /* free index block */
-+ path--;
-+ EXT_ASSERT(path->p_hdr->eh_entries);
-+ if ((err = ext3_ext_get_access(handle, tree, path)))
-+ return err;
-+ path->p_hdr->eh_entries--;
-+ if ((err = ext3_ext_dirty(handle, tree, path)))
-+ return err;
-+ ext_debug(tree, "index is empty, remove it, free block %d\n",
-+ path->p_idx->ei_leaf);
-+ bh = sb_find_get_block(tree->inode->i_sb, path->p_idx->ei_leaf);
-+ ext3_forget(handle, 1, tree->inode, bh, path->p_idx->ei_leaf);
-+ ext3_free_blocks(handle, tree->inode, path->p_idx->ei_leaf, 1);
-+ return err;
-+}
-+
-+int ext3_ext_calc_credits_for_insert(struct ext3_extents_tree *tree,
-+ struct ext3_ext_path *path)
-+{
-+ int depth = EXT_DEPTH(tree);
-+ int needed;
-+
-+ if (path) {
-+ /* is there free space in the leaf already? */
-+ if (path[depth].p_hdr->eh_entries < path[depth].p_hdr->eh_max)
-+ return 1;
-+ }
-+
-+ /*
-+ * the worst case we're expecting is creation of a
-+ * new root (growing in depth) with index splitting.
-+ * for splitting we have to consider depth + 1, because
-+ * the previous growth could have increased it
-+ */
-+ depth = depth + 1;
-+
-+ /*
-+ * growing in depth:
-+ * block allocation + new root + old root
-+ */
-+ needed = EXT3_ALLOC_NEEDED + 2;
-+
-+ /* index split. we may need to:
-+ * allocate intermediate indexes and a new leaf,
-+ * change two blocks at each level but the root,
-+ * modify the root block (inode)
-+ */
-+ needed += (depth * EXT3_ALLOC_NEEDED) + (2 * depth) + 1;
-+
-+ return needed;
-+}
-+
-+static int
-+ext3_ext_split_for_rm(handle_t *handle, struct ext3_extents_tree *tree,
-+ struct ext3_ext_path *path, unsigned long start,
-+ unsigned long end)
-+{
-+ struct ext3_extent *ex, tex;
-+ struct ext3_ext_path *npath;
-+ int depth, creds, err;
-+
-+ depth = EXT_DEPTH(tree);
-+ ex = path[depth].p_ext;
-+ EXT_ASSERT(ex);
-+ EXT_ASSERT(end < ex->ee_block + ex->ee_len - 1);
-+ EXT_ASSERT(ex->ee_block < start);
-+
-+ /* calculate tail extent */
-+ tex.ee_block = end + 1;
-+ EXT_ASSERT(tex.ee_block < ex->ee_block + ex->ee_len);
-+ tex.ee_len = ex->ee_block + ex->ee_len - tex.ee_block;
-+
-+ creds = ext3_ext_calc_credits_for_insert(tree, path);
-+ handle = ext3_ext_journal_restart(handle, creds);
-+ if (IS_ERR(handle))
-+ return PTR_ERR(handle);
-+
-+ /* calculate head extent. use primary extent */
-+ err = ext3_ext_get_access(handle, tree, path + depth);
-+ if (err)
-+ return err;
-+ ex->ee_len = start - ex->ee_block;
-+ err = ext3_ext_dirty(handle, tree, path + depth);
-+ if (err)
-+ return err;
-+
-+ /* FIXME: some callback to free underlying resource
-+ * and correct ee_start? */
-+ ext_debug(tree, "split extent: head %u:%u, tail %u:%u\n",
-+ ex->ee_block, ex->ee_len, tex.ee_block, tex.ee_len);
-+
-+ npath = ext3_ext_find_extent(tree, ex->ee_block, NULL);
-+ if (IS_ERR(npath))
-+ return PTR_ERR(npath);
-+ depth = EXT_DEPTH(tree);
-+ EXT_ASSERT(npath[depth].p_ext->ee_block == ex->ee_block);
-+ EXT_ASSERT(npath[depth].p_ext->ee_len == ex->ee_len);
-+
-+ err = ext3_ext_insert_extent(handle, tree, npath, &tex);
-+ ext3_ext_drop_refs(npath);
-+ kfree(npath);
-+
-+ return err;
-+}
-+
-+static int
-+ext3_ext_rm_leaf(handle_t *handle, struct ext3_extents_tree *tree,
-+ struct ext3_ext_path *path, unsigned long start,
-+ unsigned long end)
-+{
-+ struct ext3_extent *ex, *fu = NULL, *lu, *le;
-+ int err = 0, correct_index = 0;
-+ int depth = EXT_DEPTH(tree), credits;
-+ struct ext3_extent_header *eh;
-+ unsigned a, b, block, num;
-+
-+ ext_debug(tree, "remove [%lu:%lu] in leaf\n", start, end);
-+ if (!path[depth].p_hdr)
-+ path[depth].p_hdr = EXT_BLOCK_HDR(path[depth].p_bh);
-+ eh = path[depth].p_hdr;
-+ EXT_ASSERT(eh);
-+ EXT_ASSERT(eh->eh_entries <= eh->eh_max);
-+ EXT_ASSERT(eh->eh_magic == EXT3_EXT_MAGIC);
-+
-+ /* find where to start removing */
-+ le = ex = EXT_LAST_EXTENT(eh);
-+ while (ex != EXT_FIRST_EXTENT(eh)) {
-+ if (ex->ee_block <= end)
-+ break;
-+ ex--;
-+ }
-+
-+ if (start > ex->ee_block && end < ex->ee_block + ex->ee_len - 1) {
-+ /* removal of an internal part of the extent was requested;
-+ * tail and head must be placed in different extents,
-+ * so we have to insert one more extent */
-+ path[depth].p_ext = ex;
-+ return ext3_ext_split_for_rm(handle, tree, path, start, end);
-+ }
-+
-+ lu = ex;
-+ while (ex >= EXT_FIRST_EXTENT(eh) && ex->ee_block + ex->ee_len > start) {
-+ ext_debug(tree, "remove ext %u:%u\n", ex->ee_block, ex->ee_len);
-+ path[depth].p_ext = ex;
-+
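-+ /* a..b is the part of this extent that overlaps [start, end] */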
-+ a = ex->ee_block > start ? ex->ee_block : start;
-+ b = ex->ee_block + ex->ee_len - 1 < end ?
-+ ex->ee_block + ex->ee_len - 1 : end;
-+
-+ ext_debug(tree, " border %u:%u\n", a, b);
-+
-+ if (a != ex->ee_block && b != ex->ee_block + ex->ee_len - 1) {
-+ block = 0;
-+ num = 0;
-+ BUG();
-+ } else if (a != ex->ee_block) {
-+ /* remove tail of the extent */
-+ block = ex->ee_block;
-+ num = a - block;
-+ } else if (b != ex->ee_block + ex->ee_len - 1) {
-+ /* remove head of the extent */
-+ block = a;
-+ num = b - a;
-+ } else {
-+ /* remove the whole extent: excellent! */
-+ block = ex->ee_block;
-+ num = 0;
-+ EXT_ASSERT(a == ex->ee_block &&
-+ b == ex->ee_block + ex->ee_len - 1);
-+ }
-+
-+ if (ex == EXT_FIRST_EXTENT(eh))
-+ correct_index = 1;
-+
-+ credits = 1;
-+ if (correct_index)
-+ credits += (EXT_DEPTH(tree) * EXT3_ALLOC_NEEDED) + 1;
-+ if (tree->ops->remove_extent_credits)
-+ credits += tree->ops->remove_extent_credits(tree, ex, a, b);
-+
-+ handle = ext3_ext_journal_restart(handle, credits);
-+ if (IS_ERR(handle)) {
-+ err = PTR_ERR(handle);
-+ goto out;
-+ }
-+
-+ err = ext3_ext_get_access(handle, tree, path + depth);
-+ if (err)
-+ goto out;
-+
-+ if (tree->ops->remove_extent)
-+ err = tree->ops->remove_extent(tree, ex, a, b);
-+ if (err)
-+ goto out;
-+
-+ if (num == 0) {
-+ /* this extent is removed entirely; mark the slot unused */
-+ ex->ee_start = ex->ee_start_hi = 0;
-+ eh->eh_entries--;
-+ fu = ex;
-+ }
-+
-+ ex->ee_block = block;
-+ ex->ee_len = num;
-+
-+ err = ext3_ext_dirty(handle, tree, path + depth);
-+ if (err)
-+ goto out;
-+
-+ ext_debug(tree, "new extent: %u:%u:%u\n",
-+ ex->ee_block, ex->ee_len, ex->ee_start);
-+ ex--;
-+ }
-+
-+ if (fu) {
-+ /* reuse unused slots */
-+ while (lu < le) {
-+ if (lu->ee_start) {
-+ *fu = *lu;
-+ lu->ee_start = lu->ee_start_hi = 0;
-+ fu++;
-+ }
-+ lu++;
-+ }
-+ }
-+
-+ if (correct_index && eh->eh_entries)
-+ err = ext3_ext_correct_indexes(handle, tree, path);
-+
-+ /* if this leaf is free, then we should
-+ * remove it from index block above */
-+ if (err == 0 && eh->eh_entries == 0 && path[depth].p_bh != NULL)
-+ err = ext3_ext_rm_idx(handle, tree, path + depth);
-+
-+out:
-+ return err;
-+}
-+
-+
-+static struct ext3_extent_idx *
-+ext3_ext_last_covered(struct ext3_extent_header *hdr, unsigned long block)
-+{
-+ struct ext3_extent_idx *ix;
-+
-+ ix = EXT_LAST_INDEX(hdr);
-+ while (ix != EXT_FIRST_INDEX(hdr)) {
-+ if (ix->ei_block <= block)
-+ break;
-+ ix--;
-+ }
-+ return ix;
-+}
-+
-+/*
-+ * returns 1 if the current index has to be freed (even partially)
-+ */
-+static int inline
-+ext3_ext_more_to_rm(struct ext3_ext_path *path)
-+{
-+ EXT_ASSERT(path->p_idx);
-+
-+ if (path->p_idx < EXT_FIRST_INDEX(path->p_hdr))
-+ return 0;
-+
-+ /*
-+ * if a truncate on a deeper level happened, it wasn't partial,
-+ * so we have to consider the current index for truncation
-+ */
-+ if (path->p_hdr->eh_entries == path->p_block)
-+ return 0;
-+ return 1;
-+}
-+
-+int ext3_ext_remove_space(struct ext3_extents_tree *tree,
-+ unsigned long start, unsigned long end)
-+{
-+ struct inode *inode = tree->inode;
-+ struct super_block *sb = inode->i_sb;
-+ int depth = EXT_DEPTH(tree);
-+ struct ext3_ext_path *path;
-+ handle_t *handle;
-+ int i = 0, err = 0;
-+
-+ ext_debug(tree, "space to be removed: %lu:%lu\n", start, end);
-+
-+ /* probably the first extent we're going to free will be the last in its block */
-+ handle = ext3_journal_start(inode, depth + 1);
-+ if (IS_ERR(handle))
-+ return PTR_ERR(handle);
-+
-+ ext3_ext_invalidate_cache(tree);
-+
-+ /*
-+ * we start scanning from the right side, freeing all the blocks
-+ * after i_size and walking into the depth
-+ */
-+ path = kmalloc(sizeof(struct ext3_ext_path) * (depth + 1), GFP_KERNEL);
-+ if (path == NULL) { /* kmalloc() returns NULL on failure, not an ERR_PTR */
-+ ext3_error(sb, __FUNCTION__, "Can't allocate path array");
-+ ext3_journal_stop(handle);
-+ return -ENOMEM;
-+ }
-+ memset(path, 0, sizeof(struct ext3_ext_path) * (depth + 1));
-+ path[i].p_hdr = EXT_ROOT_HDR(tree);
-+
-+ while (i >= 0 && err == 0) {
-+ if (i == depth) {
-+ /* this is leaf block */
-+ err = ext3_ext_rm_leaf(handle, tree, path, start, end);
-+ /* the root level has p_bh == NULL, brelse() eats this */
-+ brelse(path[i].p_bh);
-+ i--;
-+ continue;
-+ }
-+
-+ /* this is index block */
-+ if (!path[i].p_hdr) {
-+ ext_debug(tree, "initialize header\n");
-+ path[i].p_hdr = EXT_BLOCK_HDR(path[i].p_bh);
-+ }
-+
-+ EXT_ASSERT(path[i].p_hdr->eh_entries <= path[i].p_hdr->eh_max);
-+ EXT_ASSERT(path[i].p_hdr->eh_magic == EXT3_EXT_MAGIC);
-+
-+ if (!path[i].p_idx) {
-+ /* this level hasn't been touched yet */
-+ path[i].p_idx =
-+ ext3_ext_last_covered(path[i].p_hdr, end);
-+ path[i].p_block = path[i].p_hdr->eh_entries + 1;
-+ ext_debug(tree, "init index ptr: hdr 0x%p, num %d\n",
-+ path[i].p_hdr, path[i].p_hdr->eh_entries);
-+ } else {
-+ /* we've already been here, look at the next index */
-+ path[i].p_idx--;
-+ }
-+
-+ ext_debug(tree, "level %d - index, first 0x%p, cur 0x%p\n",
-+ i, EXT_FIRST_INDEX(path[i].p_hdr),
-+ path[i].p_idx);
-+ if (ext3_ext_more_to_rm(path + i)) {
-+ /* go to the next level */
-+ ext_debug(tree, "move to level %d (block %d)\n",
-+ i + 1, path[i].p_idx->ei_leaf);
-+ memset(path + i + 1, 0, sizeof(*path));
-+ path[i+1].p_bh = sb_bread(sb, path[i].p_idx->ei_leaf);
-+ if (!path[i+1].p_bh) {
-+ /* should we reset i_size? */
-+ err = -EIO;
-+ break;
-+ }
-+ /* store the actual number of indexes so we can tell whether
-+ * this number changes at the next iteration */
-+ path[i].p_block = path[i].p_hdr->eh_entries;
-+ i++;
-+ } else {
-+ /* we finish processing this index, go up */
-+ if (path[i].p_hdr->eh_entries == 0 && i > 0) {
-+ /* index is empty, remove it;
-+ * the handle must already be prepared by
-+ * truncatei_leaf() */
-+ err = ext3_ext_rm_idx(handle, tree, path + i);
-+ }
-+ /* the root level has p_bh == NULL, brelse() eats this */
-+ brelse(path[i].p_bh);
-+ i--;
-+ ext_debug(tree, "return to level %d\n", i);
-+ }
-+ }
-+
-+ /* TODO: flexible tree reduction should be here */
-+ if (path->p_hdr->eh_entries == 0) {
-+ /*
-+ * truncate to zero freed the whole tree,
-+ * so we need to correct eh_depth
-+ */
-+ err = ext3_ext_get_access(handle, tree, path);
-+ if (err == 0) {
-+ EXT_ROOT_HDR(tree)->eh_depth = 0;
-+ EXT_ROOT_HDR(tree)->eh_max = ext3_ext_space_root(tree);
-+ err = ext3_ext_dirty(handle, tree, path);
-+ }
-+ }
-+ ext3_ext_tree_changed(tree);
-+
-+ kfree(path);
-+ ext3_journal_stop(handle);
-+
-+ return err;
-+}
-+
-+int ext3_ext_calc_metadata_amount(struct ext3_extents_tree *tree, int blocks)
-+{
-+ int lcap, icap, rcap, leafs, idxs, num;
-+
-+ rcap = ext3_ext_space_root(tree);
-+ if (blocks <= rcap) {
-+ /* all extents fit into the root */
-+ return 0;
-+ }
-+
-+ rcap = ext3_ext_space_root_idx(tree);
-+ lcap = ext3_ext_space_block(tree);
-+ icap = ext3_ext_space_block_idx(tree);
-+
-+ num = leafs = (blocks + lcap - 1) / lcap;
-+ if (leafs <= rcap) {
-+ /* all pointers to leaves fit into the root */
-+ return leafs;
-+ }
-+
-+ /* ok. we need separate index block(s) to link all leaf blocks */
-+ idxs = (leafs + icap - 1) / icap;
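-+ /* each iteration accounts for one more level of index blocks,
-+ * until the top level fits into the root */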
-+ do {
-+ num += idxs;
-+ idxs = (idxs + icap - 1) / icap;
-+ } while (idxs > rcap);
-+
-+ return num;
-+}
-+
-+/*
-+ * called at mount time
-+ */
-+void ext3_ext_init(struct super_block *sb)
-+{
-+ /*
-+ * possible initialization would be here
-+ */
-+
-+ if (test_opt(sb, EXTENTS)) {
-+ printk("EXT3-fs: file extents enabled");
-+#ifdef AGRESSIVE_TEST
-+ printk(", aggressive tests");
-+#endif
-+#ifdef CHECK_BINSEARCH
-+ printk(", check binsearch");
-+#endif
-+ printk("\n");
-+ }
-+}
-+
-+/*
-+ * called at umount time
-+ */
-+void ext3_ext_release(struct super_block *sb)
-+{
-+}
-+
-+/************************************************************************
-+ * VFS related routines
-+ ************************************************************************/
-+
-+static int ext3_get_inode_write_access(handle_t *handle, void *buffer)
-+{
-+ /* we use in-core data, not bh */
-+ return 0;
-+}
-+
-+static int ext3_mark_buffer_dirty(handle_t *handle, void *buffer)
-+{
-+ struct inode *inode = buffer;
-+ return ext3_mark_inode_dirty(handle, inode);
-+}
-+
-+static int ext3_ext_mergable(struct ext3_extent *ex1,
-+ struct ext3_extent *ex2)
-+{
-+ /* FIXME: support for large fs */
-+ if (ex1->ee_start + ex1->ee_len == ex2->ee_start)
-+ return 1;
-+ return 0;
-+}
-+
-+static int
-+ext3_remove_blocks_credits(struct ext3_extents_tree *tree,
-+ struct ext3_extent *ex,
-+ unsigned long from, unsigned long to)
-+{
-+ int needed;
-+
-+ /* at present, an extent can't cross a block group */
-+ needed = 4; /* bitmap + group desc + sb + inode */
-+
-+#ifdef CONFIG_QUOTA
-+ needed += 2 * EXT3_SINGLEDATA_TRANS_BLOCKS;
-+#endif
-+ return needed;
-+}
-+
-+static int
-+ext3_remove_blocks(struct ext3_extents_tree *tree,
-+ struct ext3_extent *ex,
-+ unsigned long from, unsigned long to)
-+{
-+ int needed = ext3_remove_blocks_credits(tree, ex, from, to);
-+ handle_t *handle = ext3_journal_start(tree->inode, needed);
-+ struct buffer_head *bh;
-+ int i;
-+
-+ if (IS_ERR(handle))
-+ return PTR_ERR(handle);
-+ if (from >= ex->ee_block && to == ex->ee_block + ex->ee_len - 1) {
-+ /* tail removal */
-+ unsigned long num, start;
-+ num = ex->ee_block + ex->ee_len - from;
-+ start = ex->ee_start + ex->ee_len - num;
-+ ext_debug(tree, "free last %lu blocks starting %lu\n",
-+ num, start);
-+ for (i = 0; i < num; i++) {
-+ bh = sb_find_get_block(tree->inode->i_sb, start + i);
-+ ext3_forget(handle, 0, tree->inode, bh, start + i);
-+ }
-+ ext3_free_blocks(handle, tree->inode, start, num);
-+ } else if (from == ex->ee_block && to <= ex->ee_block + ex->ee_len - 1) {
-+ printk("strange request: removal %lu-%lu from %u:%u\n",
-+ from, to, ex->ee_block, ex->ee_len);
-+ } else {
-+ printk("strange request: removal(2) %lu-%lu from %u:%u\n",
-+ from, to, ex->ee_block, ex->ee_len);
-+ }
-+ ext3_journal_stop(handle);
-+ return 0;
-+}
-+
-+static int ext3_ext_find_goal(struct inode *inode,
-+ struct ext3_ext_path *path, unsigned long block)
-+{
-+ struct ext3_inode_info *ei = EXT3_I(inode);
-+ unsigned long bg_start;
-+ unsigned long colour;
-+ int depth;
-+
-+ if (path) {
-+ struct ext3_extent *ex;
-+ depth = path->p_depth;
-+
-+ /* try to predict block placement */
-+ if ((ex = path[depth].p_ext))
-+ return ex->ee_start + (block - ex->ee_block);
-+
-+ /* it looks like the index is empty;
-+ * try to find a goal starting from the index block itself */
-+ if (path[depth].p_bh)
-+ return path[depth].p_bh->b_blocknr;
-+ }
-+
-+ /* OK. use inode's group */
-+ bg_start = (ei->i_block_group * EXT3_BLOCKS_PER_GROUP(inode->i_sb)) +
-+ le32_to_cpu(EXT3_SB(inode->i_sb)->s_es->s_first_data_block);
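-+ /* a pid-based "colour" spreads allocations of concurrent
-+ * processes across the block group */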
-+ colour = (current->pid % 16) *
-+ (EXT3_BLOCKS_PER_GROUP(inode->i_sb) / 16);
-+ return bg_start + colour + block;
-+}
-+
-+static int ext3_new_block_cb(handle_t *handle, struct ext3_extents_tree *tree,
-+ struct ext3_ext_path *path,
-+ struct ext3_extent *ex, int *err)
-+{
-+ struct inode *inode = tree->inode;
-+ int newblock, goal;
-+
-+ EXT_ASSERT(path);
-+ EXT_ASSERT(ex);
-+ EXT_ASSERT(ex->ee_start);
-+ EXT_ASSERT(ex->ee_len);
-+
-+ /* reuse block from the extent to order data/metadata */
-+ newblock = ex->ee_start++;
-+ ex->ee_len--;
-+ if (ex->ee_len == 0) {
-+ ex->ee_len = 1;
-+ /* allocate new block for the extent */
-+ goal = ext3_ext_find_goal(inode, path, ex->ee_block);
-+ ex->ee_start = ext3_new_block(handle, inode, goal, err);
-+ ex->ee_start_hi = 0;
-+ if (ex->ee_start == 0) {
-+ /* an error occurred: restore the old extent */
-+ ex->ee_start = newblock;
-+ return 0;
-+ }
-+ }
-+ return newblock;
-+}
-+
-+static struct ext3_extents_helpers ext3_blockmap_helpers = {
-+ .get_write_access = ext3_get_inode_write_access,
-+ .mark_buffer_dirty = ext3_mark_buffer_dirty,
-+ .mergable = ext3_ext_mergable,
-+ .new_block = ext3_new_block_cb,
-+ .remove_extent = ext3_remove_blocks,
-+ .remove_extent_credits = ext3_remove_blocks_credits,
-+};
-+
-+void ext3_init_tree_desc(struct ext3_extents_tree *tree,
-+ struct inode *inode)
-+{
-+ tree->inode = inode;
-+ tree->root = (void *) EXT3_I(inode)->i_data;
-+ tree->buffer = (void *) inode;
-+ tree->buffer_len = sizeof(EXT3_I(inode)->i_data);
-+ tree->cex = (struct ext3_ext_cache *) &EXT3_I(inode)->i_cached_extent;
-+ tree->ops = &ext3_blockmap_helpers;
-+}
-+
-+int ext3_ext_get_block(handle_t *handle, struct inode *inode,
-+ long iblock, struct buffer_head *bh_result,
-+ int create, int extend_disksize)
-+{
-+ struct ext3_ext_path *path = NULL;
-+ struct ext3_extent newex;
-+ struct ext3_extent *ex;
-+ int goal, newblock, err = 0, depth;
-+ struct ext3_extents_tree tree;
-+
-+ clear_buffer_new(bh_result);
-+ ext3_init_tree_desc(&tree, inode);
-+ ext_debug(&tree, "block %d requested for inode %u\n",
-+ (int) iblock, (unsigned) inode->i_ino);
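-+ /* truncate_sem serializes block allocation against truncate
-+ * (see ext3_ext_truncate) */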
-+ down(&EXT3_I(inode)->truncate_sem);
-+
-+ /* check in cache */
-+ if ((goal = ext3_ext_in_cache(&tree, iblock, &newex))) {
-+ if (goal == EXT3_EXT_CACHE_GAP) {
-+ if (!create) {
-+ /* the block isn't allocated yet and the
-+ * user doesn't want to allocate it */
-+ goto out2;
-+ }
-+ /* we should allocate requested block */
-+ } else if (goal == EXT3_EXT_CACHE_EXTENT) {
-+ /* block is already allocated */
-+ newblock = iblock - newex.ee_block + newex.ee_start;
-+ goto out;
-+ } else {
-+ EXT_ASSERT(0);
-+ }
-+ }
-+
-+ /* find extent for this block */
-+ path = ext3_ext_find_extent(&tree, iblock, NULL);
-+ if (IS_ERR(path)) {
-+ err = PTR_ERR(path);
-+ path = NULL;
-+ goto out2;
-+ }
-+
-+ depth = EXT_DEPTH(&tree);
-+
-+ /*
-+ * a consistent leaf must not be empty;
-+ * this situation is possible, though, _during_ tree modification;
-+ * this is why the assert can't be put in ext3_ext_find_extent()
-+ */
-+ EXT_ASSERT(path[depth].p_ext != NULL || depth == 0);
-+
-+ if ((ex = path[depth].p_ext)) {
-+ /* if the found extent covers the block, simply return it */
-+ if (iblock >= ex->ee_block && iblock < ex->ee_block + ex->ee_len) {
-+ newblock = iblock - ex->ee_block + ex->ee_start;
-+ ext_debug(&tree, "%d fit into %d:%d -> %d\n",
-+ (int) iblock, ex->ee_block, ex->ee_len,
-+ newblock);
-+ ext3_ext_put_in_cache(&tree, ex->ee_block,
-+ ex->ee_len, ex->ee_start,
-+ EXT3_EXT_CACHE_EXTENT);
-+ goto out;
-+ }
-+ }
-+
-+ /*
-+ * the requested block isn't allocated yet;
-+ * we can't create the block if the create flag is zero
-+ */
-+ if (!create) {
-+ /* put the just-found gap into the cache to speed up subsequent requests */
-+ ext3_ext_put_gap_in_cache(&tree, path, iblock);
-+ goto out2;
-+ }
-+
-+ /* allocate new block */
-+ goal = ext3_ext_find_goal(inode, path, iblock);
-+ newblock = ext3_new_block(handle, inode, goal, &err);
-+ if (!newblock)
-+ goto out2;
-+ ext_debug(&tree, "allocate new block: goal %d, found %d\n",
-+ goal, newblock);
-+
-+ /* try to insert new extent into found leaf and return */
-+ newex.ee_block = iblock;
-+ newex.ee_start = newblock;
-+ newex.ee_start_hi = 0;
-+ newex.ee_len = 1;
-+ err = ext3_ext_insert_extent(handle, &tree, path, &newex);
-+ if (err)
-+ goto out2;
-+
-+ if (extend_disksize && inode->i_size > EXT3_I(inode)->i_disksize)
-+ EXT3_I(inode)->i_disksize = inode->i_size;
-+
-+ /* the previous routine could have used the block we allocated */
-+ newblock = newex.ee_start;
-+ set_buffer_new(bh_result);
-+
-+ ext3_ext_put_in_cache(&tree, newex.ee_block, newex.ee_len,
-+ newex.ee_start, EXT3_EXT_CACHE_EXTENT);
-+out:
-+ ext3_ext_show_leaf(&tree, path);
-+ map_bh(bh_result, inode->i_sb, newblock);
-+out2:
-+ if (path) {
-+ ext3_ext_drop_refs(path);
-+ kfree(path);
-+ }
-+ up(&EXT3_I(inode)->truncate_sem);
-+
-+ return err;
-+}
-+
-+void ext3_ext_truncate(struct inode * inode, struct page *page)
-+{
-+ struct address_space *mapping = inode->i_mapping;
-+ struct super_block *sb = inode->i_sb;
-+ struct ext3_extents_tree tree;
-+ unsigned long last_block;
-+ handle_t *handle;
-+ int err = 0;
-+
-+ ext3_init_tree_desc(&tree, inode);
-+
-+ /*
-+ * probably the first extent we're going to free will be the last in its block
-+ */
-+ err = ext3_writepage_trans_blocks(inode) + 3;
-+ handle = ext3_journal_start(inode, err);
-+ if (IS_ERR(handle)) {
-+ if (page) {
-+ clear_highpage(page);
-+ flush_dcache_page(page);
-+ unlock_page(page);
-+ page_cache_release(page);
-+ }
-+ return;
-+ }
-+
-+ if (page)
-+ ext3_block_truncate_page(handle, page, mapping, inode->i_size);
-+
-+ down(&EXT3_I(inode)->truncate_sem);
-+ ext3_ext_invalidate_cache(&tree);
-+
-+ /*
-+ * TODO: optimization is possible here
-+ * probably we don't need scanning at all,
-+ * because page truncation is enough
-+ */
-+ if (ext3_orphan_add(handle, inode))
-+ goto out_stop;
-+
-+ /* we have to know where to truncate from in the crash case */
-+ EXT3_I(inode)->i_disksize = inode->i_size;
-+ ext3_mark_inode_dirty(handle, inode);
-+
-+ last_block = (inode->i_size + sb->s_blocksize - 1) >>
-+ EXT3_BLOCK_SIZE_BITS(sb);
-+ err = ext3_ext_remove_space(&tree, last_block, EXT_MAX_BLOCK);
-+
-+ /* In a multi-transaction truncate, we only make the final
-+ * transaction synchronous */
-+ if (IS_SYNC(inode))
-+ handle->h_sync = 1;
-+
-+out_stop:
-+ /*
-+ * If this was a simple ftruncate(), and the file will remain alive
-+ * then we need to clear up the orphan record which we created above.
-+ * However, if this was a real unlink then we were called by
-+ * ext3_delete_inode(), and we allow that function to clean up the
-+ * orphan info for us.
-+ */
-+ if (inode->i_nlink)
-+ ext3_orphan_del(handle, inode);
-+
-+ up(&EXT3_I(inode)->truncate_sem);
-+ ext3_journal_stop(handle);
-+}
-+
-+/*
-+ * this routine calculates the max number of blocks we could modify
-+ * in order to allocate a new block for an inode
-+ */
-+int ext3_ext_writepage_trans_blocks(struct inode *inode, int num)
-+{
-+ struct ext3_extents_tree tree;
-+ int needed;
-+
-+ ext3_init_tree_desc(&tree, inode);
-+
-+ needed = ext3_ext_calc_credits_for_insert(&tree, NULL);
-+
-+ /* the caller wants to allocate num blocks */
-+ needed *= num;
-+
-+#ifdef CONFIG_QUOTA
-+ /*
-+ * FIXME: real calculation should be here
-+ * it depends on the blockmap format of the quota file
-+ */
-+ needed += 2 * EXT3_SINGLEDATA_TRANS_BLOCKS;
-+#endif
-+
-+ return needed;
-+}
-+
-+void ext3_extents_initialize_blockmap(handle_t *handle, struct inode *inode)
-+{
-+ struct ext3_extents_tree tree;
-+
-+ ext3_init_tree_desc(&tree, inode);
-+ ext3_extent_tree_init(handle, &tree);
-+}
-+
-+int ext3_ext_calc_blockmap_metadata(struct inode *inode, int blocks)
-+{
-+ struct ext3_extents_tree tree;
-+
-+ ext3_init_tree_desc(&tree, inode);
-+ return ext3_ext_calc_metadata_amount(&tree, blocks);
-+}
-+
-+static int
-+ext3_ext_store_extent_cb(struct ext3_extents_tree *tree,
-+ struct ext3_ext_path *path,
-+ struct ext3_ext_cache *newex)
-+{
-+ struct ext3_extent_buf *buf = (struct ext3_extent_buf *) tree->private;
-+
-+ if (newex->ec_type != EXT3_EXT_CACHE_EXTENT)
-+ return EXT_CONTINUE;
-+
-+ if (buf->err < 0)
-+ return EXT_BREAK;
-+ if (buf->cur - buf->buffer + sizeof(*newex) > buf->buflen)
-+ return EXT_BREAK;
-+
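-+ /* on success buf->err counts the extents copied out so far */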
-+ if (!copy_to_user(buf->cur, newex, sizeof(*newex))) {
-+ buf->err++;
-+ buf->cur += sizeof(*newex);
-+ } else {
-+ buf->err = -EFAULT;
-+ return EXT_BREAK;
-+ }
-+ return EXT_CONTINUE;
-+}
-+
-+static int
-+ext3_ext_collect_stats_cb(struct ext3_extents_tree *tree,
-+ struct ext3_ext_path *path,
-+ struct ext3_ext_cache *ex)
-+{
-+ struct ext3_extent_tree_stats *buf =
-+ (struct ext3_extent_tree_stats *) tree->private;
-+ int depth;
-+
-+ if (ex->ec_type != EXT3_EXT_CACHE_EXTENT)
-+ return EXT_CONTINUE;
-+
-+ depth = EXT_DEPTH(tree);
-+ buf->extents_num++;
-+ if (path[depth].p_ext == EXT_FIRST_EXTENT(path[depth].p_hdr))
-+ buf->leaf_num++;
-+ return EXT_CONTINUE;
-+}
-+
-+int ext3_ext_ioctl(struct inode *inode, struct file *filp, unsigned int cmd,
-+ unsigned long arg)
-+{
-+ int err = 0;
-+
-+ if (!(EXT3_I(inode)->i_flags & EXT3_EXTENTS_FL))
-+ return -EINVAL;
-+
-+ if (cmd == EXT3_IOC_GET_EXTENTS) {
-+ struct ext3_extent_buf buf;
-+ struct ext3_extents_tree tree;
-+
-+ if (copy_from_user(&buf, (void *) arg, sizeof(buf)))
-+ return -EFAULT;
-+
-+ ext3_init_tree_desc(&tree, inode);
-+ buf.cur = buf.buffer;
-+ buf.err = 0;
-+ tree.private = &buf;
-+ down(&EXT3_I(inode)->truncate_sem);
-+ err = ext3_ext_walk_space(&tree, buf.start, EXT_MAX_BLOCK,
-+ ext3_ext_store_extent_cb);
-+ up(&EXT3_I(inode)->truncate_sem);
-+ if (err == 0)
-+ err = buf.err;
-+ } else if (cmd == EXT3_IOC_GET_TREE_STATS) {
-+ struct ext3_extent_tree_stats buf;
-+ struct ext3_extents_tree tree;
-+
-+ ext3_init_tree_desc(&tree, inode);
-+ down(&EXT3_I(inode)->truncate_sem);
-+ buf.depth = EXT_DEPTH(&tree);
-+ buf.extents_num = 0;
-+ buf.leaf_num = 0;
-+ tree.private = &buf;
-+ err = ext3_ext_walk_space(&tree, 0, EXT_MAX_BLOCK,
-+ ext3_ext_collect_stats_cb);
-+ up(&EXT3_I(inode)->truncate_sem);
-+ if (!err)
-+ err = copy_to_user((void *) arg, &buf, sizeof(buf));
-+ } else if (cmd == EXT3_IOC_GET_TREE_DEPTH) {
-+ struct ext3_extents_tree tree;
-+ ext3_init_tree_desc(&tree, inode);
-+ down(&EXT3_I(inode)->truncate_sem);
-+ err = EXT_DEPTH(&tree);
-+ up(&EXT3_I(inode)->truncate_sem);
-+ }
-+
-+ return err;
-+}
-+
-+EXPORT_SYMBOL(ext3_init_tree_desc);
-+EXPORT_SYMBOL(ext3_mark_inode_dirty);
-+EXPORT_SYMBOL(ext3_ext_invalidate_cache);
-+EXPORT_SYMBOL(ext3_ext_insert_extent);
-+EXPORT_SYMBOL(ext3_ext_walk_space);
-+EXPORT_SYMBOL(ext3_ext_find_goal);
-+EXPORT_SYMBOL(ext3_ext_calc_credits_for_insert);
-Index: linux-stage/fs/ext3/ialloc.c
-===================================================================
---- linux-stage.orig/fs/ext3/ialloc.c 2005-02-25 14:50:50.304202816 +0200
-+++ linux-stage/fs/ext3/ialloc.c 2005-02-25 15:33:48.920193600 +0200
-@@ -566,7 +566,7 @@ repeat:
- ei->i_dir_start_lookup = 0;
- ei->i_disksize = 0;
-
-- ei->i_flags = EXT3_I(dir)->i_flags & ~EXT3_INDEX_FL;
-+ ei->i_flags = EXT3_I(dir)->i_flags & ~(EXT3_INDEX_FL|EXT3_EXTENTS_FL);
- if (S_ISLNK(mode))
- ei->i_flags &= ~(EXT3_IMMUTABLE_FL|EXT3_APPEND_FL);
- /* dirsync only applies to directories */
-@@ -646,6 +646,18 @@
- DQUOT_FREE_INODE(inode);
- goto fail2;
- }
-+ if (test_opt(sb, EXTENTS) && S_ISREG(inode->i_mode)) {
-+ EXT3_I(inode)->i_flags |= EXT3_EXTENTS_FL;
-+ ext3_extents_initialize_blockmap(handle, inode);
-+ if (!EXT3_HAS_INCOMPAT_FEATURE(sb, EXT3_FEATURE_INCOMPAT_EXTENTS)) {
-+ err = ext3_journal_get_write_access(handle, EXT3_SB(sb)->s_sbh);
-+ if (err) goto fail;
-+ EXT3_SET_INCOMPAT_FEATURE(sb, EXT3_FEATURE_INCOMPAT_EXTENTS);
-+ BUFFER_TRACE(EXT3_SB(sb)->s_sbh, "call ext3_journal_dirty_metadata");
-+ err = ext3_journal_dirty_metadata(handle, EXT3_SB(sb)->s_sbh);
-+ }
-+ }
-+
- err = ext3_mark_inode_dirty(handle, inode);
- if (err) {
- ext3_std_error(sb, err);
-Index: linux-stage/fs/ext3/inode.c
-===================================================================
---- linux-stage.orig/fs/ext3/inode.c 2005-02-25 14:50:50.309202056 +0200
-+++ linux-stage/fs/ext3/inode.c 2005-02-25 15:36:51.846384592 +0200
-@@ -796,6 +796,17 @@
- goto reread;
- }
-
-+static inline int
-+ext3_get_block_wrap(handle_t *handle, struct inode *inode, long block,
-+ struct buffer_head *bh, int create, int extend_disksize)
-+{
-+ if (EXT3_I(inode)->i_flags & EXT3_EXTENTS_FL)
-+ return ext3_ext_get_block(handle, inode, block, bh, create,
-+ extend_disksize);
-+ return ext3_get_block_handle(handle, inode, block, bh, create,
-+ extend_disksize);
-+}
-+
- static int ext3_get_block(struct inode *inode, sector_t iblock,
- struct buffer_head *bh_result, int create)
- {
-@@ -806,8 +817,8 @@
- handle = ext3_journal_current_handle();
- J_ASSERT(handle != 0);
- }
-- ret = ext3_get_block_handle(handle, inode, iblock,
-- bh_result, create, 1);
-+ ret = ext3_get_block_wrap(handle, inode, iblock,
-+ bh_result, create, 1);
- return ret;
- }
-
-@@ -851,7 +862,7 @@
-
- get_block:
- if (ret == 0)
-- ret = ext3_get_block_handle(handle, inode, iblock,
-+ ret = ext3_get_block_wrap(handle, inode, iblock,
- bh_result, create, 0);
- bh_result->b_size = (1 << inode->i_blkbits);
- return ret;
-@@ -871,7 +882,7 @@
- dummy.b_state = 0;
- dummy.b_blocknr = -1000;
- buffer_trace_init(&dummy.b_history);
-- *errp = ext3_get_block_handle(handle, inode, block, &dummy, create, 1);
-+ *errp = ext3_get_block_wrap(handle, inode, block, &dummy, create, 1);
- if (!*errp && buffer_mapped(&dummy)) {
- struct buffer_head *bh;
- bh = sb_getblk(inode->i_sb, dummy.b_blocknr);
-@@ -1589,7 +1600,7 @@
- * This required during truncate. We need to physically zero the tail end
- * of that block so it doesn't yield old data if the file is later grown.
- */
--static int ext3_block_truncate_page(handle_t *handle, struct page *page,
-+int ext3_block_truncate_page(handle_t *handle, struct page *page,
- struct address_space *mapping, loff_t from)
- {
- unsigned long index = from >> PAGE_CACHE_SHIFT;
-@@ -2087,6 +2098,9 @@
- return;
- }
-
-+ if (EXT3_I(inode)->i_flags & EXT3_EXTENTS_FL)
-+ return ext3_ext_truncate(inode, page);
-+
- handle = start_transaction(inode);
- if (IS_ERR(handle)) {
- if (page) {
-@@ -2814,6 +2828,9 @@
- int indirects = (EXT3_NDIR_BLOCKS % bpp) ? 5 : 3;
- int ret;
-
-+ if (EXT3_I(inode)->i_flags & EXT3_EXTENTS_FL)
-+ return ext3_ext_writepage_trans_blocks(inode, bpp);
-+
- if (ext3_should_journal_data(inode))
- ret = 3 * (bpp + indirects) + 2;
- else
-Index: linux-stage/fs/ext3/Makefile
-===================================================================
---- linux-stage.orig/fs/ext3/Makefile 2005-02-25 14:49:42.168561008 +0200
-+++ linux-stage/fs/ext3/Makefile 2005-02-25 15:39:28.384587168 +0200
-@@ -5,7 +5,8 @@
- obj-$(CONFIG_EXT3_FS) += ext3.o
-
- ext3-y := balloc.o bitmap.o dir.o file.o fsync.o ialloc.o inode.o iopen.o \
-- ioctl.o namei.o super.o symlink.o hash.o resize.o
-+ ioctl.o namei.o super.o symlink.o hash.o resize.o \
-+ extents.o
-
- ext3-$(CONFIG_EXT3_FS_XATTR) += xattr.o xattr_user.o xattr_trusted.o
- ext3-$(CONFIG_EXT3_FS_POSIX_ACL) += acl.o
-Index: linux-stage/fs/ext3/super.c
-===================================================================
---- linux-stage.orig/fs/ext3/super.c 2005-02-25 14:52:33.550506992 +0200
-+++ linux-stage/fs/ext3/super.c 2005-02-25 15:38:10.474431312 +0200
-@@ -394,6 +394,7 @@
- struct ext3_super_block *es = sbi->s_es;
- int i;
-
-+ ext3_ext_release(sb);
- ext3_xattr_put_super(sb);
- journal_destroy(sbi->s_journal);
- if (!(sb->s_flags & MS_RDONLY)) {
-@@ -457,6 +458,8 @@
- #endif
- ei->i_rsv_window.rsv_end = EXT3_RESERVE_WINDOW_NOT_ALLOCATED;
- ei->vfs_inode.i_version = 1;
-+
-+ memset(&ei->i_cached_extent, 0, sizeof(ei->i_cached_extent));
- return &ei->vfs_inode;
- }
-
-@@ -589,6 +594,7 @@
- Opt_jqfmt_vfsold, Opt_jqfmt_vfsv0,
- Opt_ignore, Opt_barrier, Opt_err, Opt_resize,
- Opt_iopen, Opt_noiopen, Opt_iopen_nopriv,
-+ Opt_extents, Opt_noextents, Opt_extdebug,
- };
-
- static match_table_t tokens = {
-@@ -639,6 +644,9 @@
- {Opt_iopen, "iopen"},
- {Opt_noiopen, "noiopen"},
- {Opt_iopen_nopriv, "iopen_nopriv"},
-+ {Opt_extents, "extents"},
-+ {Opt_noextents, "noextents"},
-+ {Opt_extdebug, "extdebug"},
- {Opt_barrier, "barrier=%u"},
- {Opt_err, NULL},
- {Opt_resize, "resize"},
-@@ -943,6 +950,15 @@
- match_int(&args[0], &option);
- *n_blocks_count = option;
- break;
-+ case Opt_extents:
-+ set_opt (sbi->s_mount_opt, EXTENTS);
-+ break;
-+ case Opt_noextents:
-+ clear_opt (sbi->s_mount_opt, EXTENTS);
-+ break;
-+ case Opt_extdebug:
-+ set_opt (sbi->s_mount_opt, EXTDEBUG);
-+ break;
- default:
- printk (KERN_ERR
- "EXT3-fs: Unrecognized mount option \"%s\" "
-@@ -1625,6 +1638,8 @@
- percpu_counter_mod(&sbi->s_dirs_counter,
- ext3_count_dirs(sb));
-
-+ ext3_ext_init(sb);
-+
- return 0;
-
- failed_mount3:
-Index: linux-stage/fs/ext3/ioctl.c
-===================================================================
---- linux-stage.orig/fs/ext3/ioctl.c 2005-02-25 14:37:28.971023976 +0200
-+++ linux-stage/fs/ext3/ioctl.c 2005-02-25 15:33:48.938190864 +0200
-@@ -124,6 +124,10 @@
- err = ext3_change_inode_journal_flag(inode, jflag);
- return err;
- }
-+ case EXT3_IOC_GET_EXTENTS:
-+ case EXT3_IOC_GET_TREE_STATS:
-+ case EXT3_IOC_GET_TREE_DEPTH:
-+ return ext3_ext_ioctl(inode, filp, cmd, arg);
- case EXT3_IOC_GETVERSION:
- case EXT3_IOC_GETVERSION_OLD:
- return put_user(inode->i_generation, (int __user *) arg);
-Index: linux-stage/include/linux/ext3_fs.h
-===================================================================
---- linux-stage.orig/include/linux/ext3_fs.h 2005-02-25 14:53:56.424908168 +0200
-+++ linux-stage/include/linux/ext3_fs.h 2005-02-25 15:39:12.841950008 +0200
-@@ -186,8 +186,9 @@
- #define EXT3_NOTAIL_FL 0x00008000 /* don't merge file tail */
- #define EXT3_DIRSYNC_FL 0x00010000 /* dirsync behaviour (directories only) */
- #define EXT3_TOPDIR_FL 0x00020000 /* Top of directory hierarchies*/
-+#define EXT3_EXTENTS_FL 0x00080000 /* Inode uses extents */
- #define EXT3_RESERVED_FL 0x80000000 /* reserved for ext3 lib */
-
--#define EXT3_FL_USER_VISIBLE 0x0003DFFF /* User visible flags */
-+#define EXT3_FL_USER_VISIBLE 0x000BDFFF /* User visible flags */
- #define EXT3_FL_USER_MODIFIABLE 0x000380FF /* User modifiable flags */
-
-@@ -237,6 +238,9 @@
- #endif
- #define EXT3_IOC_GETRSVSZ _IOR('f', 5, long)
- #define EXT3_IOC_SETRSVSZ _IOW('f', 6, long)
-+#define EXT3_IOC_GET_EXTENTS _IOR('f', 7, long)
-+#define EXT3_IOC_GET_TREE_DEPTH _IOR('f', 8, long)
-+#define EXT3_IOC_GET_TREE_STATS _IOR('f', 9, long)
-
- /*
- * Structure of an inode on the disk
-@@ -359,6 +363,8 @@
- #define EXT3_MOUNT_RESERVATION 0x20000 /* Preallocation */
- #define EXT3_MOUNT_IOPEN 0x80000 /* Allow access via iopen */
- #define EXT3_MOUNT_IOPEN_NOPRIV 0x100000/* Make iopen world-readable */
-+#define EXT3_MOUNT_EXTENTS 0x200000/* Extents support */
-+#define EXT3_MOUNT_EXTDEBUG 0x400000/* Extents debug */
-
- /* Compatibility, for having both ext2_fs.h and ext3_fs.h included at once */
- #ifndef _LINUX_EXT2_FS_H
-@@ -503,11 +509,13 @@
- #define EXT3_FEATURE_INCOMPAT_RECOVER 0x0004 /* Needs recovery */
- #define EXT3_FEATURE_INCOMPAT_JOURNAL_DEV 0x0008 /* Journal device */
- #define EXT3_FEATURE_INCOMPAT_META_BG 0x0010
-+#define EXT3_FEATURE_INCOMPAT_EXTENTS 0x0040 /* extents support */
-
- #define EXT3_FEATURE_COMPAT_SUPP EXT2_FEATURE_COMPAT_EXT_ATTR
- #define EXT3_FEATURE_INCOMPAT_SUPP (EXT3_FEATURE_INCOMPAT_FILETYPE| \
- EXT3_FEATURE_INCOMPAT_RECOVER| \
-- EXT3_FEATURE_INCOMPAT_META_BG)
-+ EXT3_FEATURE_INCOMPAT_META_BG| \
-+ EXT3_FEATURE_INCOMPAT_EXTENTS)
- #define EXT3_FEATURE_RO_COMPAT_SUPP (EXT3_FEATURE_RO_COMPAT_SPARSE_SUPER| \
- EXT3_FEATURE_RO_COMPAT_LARGE_FILE| \
- EXT3_FEATURE_RO_COMPAT_BTREE_DIR)
-@@ -756,6 +763,9 @@
-
-
- /* inode.c */
-+extern int ext3_block_truncate_page(handle_t *, struct page *,
-+ struct address_space *, loff_t);
-+extern int ext3_writepage_trans_blocks(struct inode *inode);
- extern int ext3_forget(handle_t *, int, struct inode *, struct buffer_head *, int);
- extern struct buffer_head * ext3_getblk (handle_t *, struct inode *, long, int, int *);
- extern struct buffer_head * ext3_bread (handle_t *, struct inode *, int, int, int *);
-@@ -836,6 +844,16 @@
- extern struct inode_operations ext3_symlink_inode_operations;
- extern struct inode_operations ext3_fast_symlink_inode_operations;
-
-+/* extents.c */
-+extern int ext3_ext_writepage_trans_blocks(struct inode *, int);
-+extern int ext3_ext_get_block(handle_t *, struct inode *, long,
-+ struct buffer_head *, int, int);
-+extern void ext3_ext_truncate(struct inode *, struct page *);
-+extern void ext3_ext_init(struct super_block *);
-+extern void ext3_ext_release(struct super_block *);
-+extern void ext3_extents_initialize_blockmap(handle_t *, struct inode *);
-+extern int ext3_ext_ioctl(struct inode *inode, struct file *filp,
-+ unsigned int cmd, unsigned long arg);
-
- #endif /* __KERNEL__ */
-
-Index: linux-stage/include/linux/ext3_extents.h
-===================================================================
---- linux-stage.orig/include/linux/ext3_extents.h 2005-02-25 15:33:48.891198008 +0200
-+++ linux-stage/include/linux/ext3_extents.h 2005-02-25 15:33:48.944189952 +0200
-@@ -0,0 +1,262 @@
-+/*
-+ * Copyright (c) 2003, Cluster File Systems, Inc, info@clusterfs.com
-+ * Written by Alex Tomas <alex@clusterfs.com>
-+ *
-+ * This program is free software; you can redistribute it and/or modify
-+ * it under the terms of the GNU General Public License version 2 as
-+ * published by the Free Software Foundation.
-+ *
-+ * This program is distributed in the hope that it will be useful,
-+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
-+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-+ * GNU General Public License for more details.
-+ *
-+ * You should have received a copy of the GNU General Public License
-+ * along with this program; if not, write to the Free Software
-+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-
-+ */
-+
-+#ifndef _LINUX_EXT3_EXTENTS
-+#define _LINUX_EXT3_EXTENTS
-+
-+/*
-+ * with AGRESSIVE_TEST defined, the capacity of index/leaf blocks
-+ * becomes very small, so index splits, in-depth growing and
-+ * other hard changes happen much more often;
-+ * this is for debug purposes only
-+ */
-+#define AGRESSIVE_TEST_
-+
-+/*
-+ * if CHECK_BINSEARCH is defined, then the results of the binary search
-+ * will be checked by a linear search
-+ */
-+#define CHECK_BINSEARCH_
-+
-+/*
-+ * if EXT_DEBUG is defined you can use 'extdebug' mount option
-+ * to get lots of info about what's going on
-+ */
-+#define EXT_DEBUG_
-+#ifdef EXT_DEBUG
-+#define ext_debug(tree,fmt,a...) \
-+do { \
-+ if (test_opt((tree)->inode->i_sb, EXTDEBUG)) \
-+ printk(fmt, ##a); \
-+} while (0)
-+#else
-+#define ext_debug(tree,fmt,a...)
-+#endif
-+
-+/*
-+ * if EXT_STATS is defined then stats numbers are collected
-+ * these numbers will be displayed at umount time
-+ */
-+#define EXT_STATS_
-+
-+
-+#define EXT3_ALLOC_NEEDED 3 /* block bitmap + group desc. + sb */
-+
-+/*
-+ * ext3_inode has i_block array (total 60 bytes)
-+ * first 4 bytes are used to store:
-+ * - tree depth (0 means there is no tree yet; all extents are in the inode)
-+ * - number of alive extents in the inode
-+ */
-+
-+/*
-+ * this is the extent on-disk structure;
-+ * it's used at the bottom of the tree
-+ */
-+struct ext3_extent {
-+ __u32 ee_block; /* first logical block extent covers */
-+ __u16 ee_len; /* number of blocks covered by extent */
-+ __u16 ee_start_hi; /* high 16 bits of physical block */
-+ __u32 ee_start; /* low 32 bits of physical block */
-+};
-+
-+/*
-+ * this is the index on-disk structure;
-+ * it's used at all the levels except the bottom
-+ */
-+struct ext3_extent_idx {
-+ __u32 ei_block; /* index covers logical blocks from 'block' */
-+ __u32 ei_leaf; /* pointer to the physical block of the next *
-+ * level. a leaf or the next index could be here */
-+ __u16 ei_leaf_hi; /* high 16 bits of physical block */
-+ __u16 ei_unused;
-+};
-+
-+/*
-+ * each block (leaves and indexes), even the inode-stored one, has a header
-+ */
-+struct ext3_extent_header {
-+ __u16 eh_magic; /* probably will support different formats */
-+ __u16 eh_entries; /* number of valid entries */
-+ __u16 eh_max; /* capacity of store in entries */
-+ __u16 eh_depth; /* has the tree real underlying blocks? */
-+ __u32 eh_generation; /* flags(8 bits) | generation of the tree */
-+};
-+
-+#define EXT3_EXT_MAGIC 0xf30a
-+
-+/*
-+ * an array of ext3_ext_path contains the path to some extent;
-+ * creation/lookup routines use it for traversal/splitting/etc.
-+ * truncate uses it to simulate recursive walking
-+ */
-+struct ext3_ext_path {
-+ __u32 p_block;
-+ __u16 p_depth;
-+ struct ext3_extent *p_ext;
-+ struct ext3_extent_idx *p_idx;
-+ struct ext3_extent_header *p_hdr;
-+ struct buffer_head *p_bh;
-+};
-+
-+/*
-+ * structure for external API
-+ */
-+
-+/*
-+ * storage for cached extent
-+ */
-+struct ext3_ext_cache {
-+ __u32 ec_start;
-+ __u32 ec_block;
-+ __u32 ec_len;
-+ __u32 ec_type;
-+};
-+
-+#define EXT3_EXT_CACHE_NO 0
-+#define EXT3_EXT_CACHE_GAP 1
-+#define EXT3_EXT_CACHE_EXTENT 2
-+
-+/*
-+ * ext3_extents_tree is used to pass initial information
-+ * to top-level extents API
-+ */
-+struct ext3_extents_helpers;
-+struct ext3_extents_tree {
-+ struct inode *inode; /* inode which tree belongs to */
-+ void *root; /* ptr to data top of tree resides at */
-+ void *buffer; /* will be passed as arg to ^^ routines */
-+ int buffer_len;
-+ void *private;
-+ struct ext3_ext_cache *cex;/* last found extent */
-+ struct ext3_extents_helpers *ops;
-+};
-+
-+struct ext3_extents_helpers {
-+ int (*get_write_access)(handle_t *h, void *buffer);
-+ int (*mark_buffer_dirty)(handle_t *h, void *buffer);
-+ int (*mergable)(struct ext3_extent *ex1, struct ext3_extent *ex2);
-+ int (*remove_extent_credits)(struct ext3_extents_tree *,
-+ struct ext3_extent *, unsigned long,
-+ unsigned long);
-+ int (*remove_extent)(struct ext3_extents_tree *,
-+ struct ext3_extent *, unsigned long,
-+ unsigned long);
-+ int (*new_block)(handle_t *, struct ext3_extents_tree *,
-+ struct ext3_ext_path *, struct ext3_extent *,
-+ int *);
-+};
-+
-+/*
-+ * to be called by ext3_ext_walk_space()
-+ * negative retcode - error
-+ * positive retcode - signal for ext3_ext_walk_space(), see below
-+ * callback must return valid extent (passed or newly created)
-+ */
-+typedef int (*ext_prepare_callback)(struct ext3_extents_tree *,
-+ struct ext3_ext_path *,
-+ struct ext3_ext_cache *);
-+
-+#define EXT_CONTINUE 0
-+#define EXT_BREAK 1
-+#define EXT_REPEAT 2
-+
-+
-+#define EXT_MAX_BLOCK 0xffffffff
-+
-+
-+#define EXT_FIRST_EXTENT(__hdr__) \
-+ ((struct ext3_extent *) (((char *) (__hdr__)) + \
-+ sizeof(struct ext3_extent_header)))
-+#define EXT_FIRST_INDEX(__hdr__) \
-+ ((struct ext3_extent_idx *) (((char *) (__hdr__)) + \
-+ sizeof(struct ext3_extent_header)))
-+#define EXT_HAS_FREE_INDEX(__path__) \
-+ ((__path__)->p_hdr->eh_entries < (__path__)->p_hdr->eh_max)
-+#define EXT_LAST_EXTENT(__hdr__) \
-+ (EXT_FIRST_EXTENT((__hdr__)) + (__hdr__)->eh_entries - 1)
-+#define EXT_LAST_INDEX(__hdr__) \
-+ (EXT_FIRST_INDEX((__hdr__)) + (__hdr__)->eh_entries - 1)
-+#define EXT_MAX_EXTENT(__hdr__) \
-+ (EXT_FIRST_EXTENT((__hdr__)) + (__hdr__)->eh_max - 1)
-+#define EXT_MAX_INDEX(__hdr__) \
-+ (EXT_FIRST_INDEX((__hdr__)) + (__hdr__)->eh_max - 1)
-+#define EXT_HDR_GEN_BITS 24
-+#define EXT_HDR_GEN_MASK ((1 << EXT_HDR_GEN_BITS) - 1)
-+#define EXT_HDR_GEN(__hdr__) ((__hdr__)->eh_generation & EXT_HDR_GEN_MASK)
-+#define EXT_FLAGS(__hdr__) ((__hdr__)->eh_generation >> EXT_HDR_GEN_BITS)
-+#define EXT_FLAGS_CLR_UNKNOWN 0x7 /* Flags cleared on modification */
-+
-+#define EXT_BLOCK_HDR(__bh__) ((struct ext3_extent_header *)(__bh__)->b_data)
-+#define EXT_ROOT_HDR(__tree__) ((struct ext3_extent_header *)(__tree__)->root)
-+#define EXT_DEPTH(__tree__) (EXT_ROOT_HDR(__tree__)->eh_depth)
-+#define EXT_GENERATION(__tree__) EXT_HDR_GEN(EXT_ROOT_HDR(__tree__))
-+
-+#define EXT_ASSERT(__x__) if (!(__x__)) BUG();
-+
-+#define EXT_CHECK_PATH(tree,path) \
-+{ \
-+ int depth = EXT_DEPTH(tree); \
-+ BUG_ON((unsigned long) (path) < __PAGE_OFFSET); \
-+ BUG_ON((unsigned long) (path)[depth].p_idx < \
-+ __PAGE_OFFSET && (path)[depth].p_idx != NULL); \
-+ BUG_ON((unsigned long) (path)[depth].p_ext < \
-+ __PAGE_OFFSET && (path)[depth].p_ext != NULL); \
-+ BUG_ON((unsigned long) (path)[depth].p_hdr < __PAGE_OFFSET); \
-+ BUG_ON((unsigned long) (path)[depth].p_bh < __PAGE_OFFSET \
-+ && depth != 0); \
-+ BUG_ON((path)[0].p_depth != depth); \
-+}
-+
-+
-+/*
-+ * this structure is used to gather extents from the tree via ioctl
-+ */
-+struct ext3_extent_buf {
-+ unsigned long start;
-+ int buflen;
-+ void *buffer;
-+ void *cur;
-+ int err;
-+};
-+
-+/*
-+ * this structure is used to collect stats info about the tree
-+ */
-+struct ext3_extent_tree_stats {
-+ int depth;
-+ int extents_num;
-+ int leaf_num;
-+};
-+
-+extern void ext3_init_tree_desc(struct ext3_extents_tree *, struct inode *);
-+extern int ext3_extent_tree_init(handle_t *, struct ext3_extents_tree *);
-+extern int ext3_ext_calc_credits_for_insert(struct ext3_extents_tree *, struct ext3_ext_path *);
-+extern int ext3_ext_insert_extent(handle_t *, struct ext3_extents_tree *, struct ext3_ext_path *, struct ext3_extent *);
-+extern int ext3_ext_walk_space(struct ext3_extents_tree *, unsigned long, unsigned long, ext_prepare_callback);
-+extern int ext3_ext_remove_space(struct ext3_extents_tree *, unsigned long, unsigned long);
-+extern struct ext3_ext_path * ext3_ext_find_extent(struct ext3_extents_tree *, int, struct ext3_ext_path *);
-+extern int ext3_ext_calc_blockmap_metadata(struct inode *, int);
-+
-+static inline void
-+ext3_ext_invalidate_cache(struct ext3_extents_tree *tree)
-+{
-+ if (tree->cex)
-+ tree->cex->ec_type = EXT3_EXT_CACHE_NO;
-+}
-+#endif /* _LINUX_EXT3_EXTENTS */
-Index: linux-stage/include/linux/ext3_fs_i.h
-===================================================================
---- linux-stage.orig/include/linux/ext3_fs_i.h 2005-02-25 14:50:50.320200384 +0200
-+++ linux-stage/include/linux/ext3_fs_i.h 2005-02-25 15:33:48.945189800 +0200
-@@ -128,6 +128,8 @@
- */
- struct semaphore truncate_sem;
- struct inode vfs_inode;
-+
-+ __u32 i_cached_extent[4];
- };
-
- #endif /* _LINUX_EXT3_FS_I */
+++ /dev/null
-Index: linux-stage/fs/ext3/extents.c
-===================================================================
---- linux-stage.orig/fs/ext3/extents.c 2007-04-17 22:09:19.000000000 -0700
-+++ linux-stage/fs/ext3/extents.c 2007-04-17 22:12:05.000000000 -0700
-@@ -44,26 +44,49 @@
- #include <asm/uaccess.h>
-
-
--static inline int ext3_ext_check_header(struct ext3_extent_header *eh)
--{
-- if (eh->eh_magic != EXT3_EXT_MAGIC) {
-- printk(KERN_ERR "EXT3-fs: invalid magic = 0x%x\n",
-- (unsigned)eh->eh_magic);
-- return -EIO;
-- }
-- if (eh->eh_max == 0) {
-- printk(KERN_ERR "EXT3-fs: invalid eh_max = %u\n",
-- (unsigned)eh->eh_max);
-- return -EIO;
-- }
-- if (eh->eh_entries > eh->eh_max) {
-- printk(KERN_ERR "EXT3-fs: invalid eh_entries = %u\n",
-- (unsigned)eh->eh_entries);
-- return -EIO;
-+static int __ext3_ext_check_header(const char *function, struct inode *inode,
-+ struct ext3_extent_header *eh, int depth,
-+ int max)
-+{
-+ const char *error_msg = NULL;
-+
-+ if (unlikely(eh->eh_magic != EXT3_EXT_MAGIC)) {
-+ error_msg = "invalid magic";
-+ goto corrupted;
-+ }
-+ if (unlikely(eh->eh_depth != depth)) {
-+ error_msg = "unexpected eh_depth";
-+ goto corrupted;
-+ }
-+ if (unlikely(eh->eh_max == 0)) {
-+ error_msg = "too small eh_max";
-+ goto corrupted;
-+ }
-+ if (unlikely(eh->eh_max > max)) {
-+ error_msg = "too large eh_max";
-+ goto corrupted;
-+ }
-+ if (unlikely(eh->eh_entries > eh->eh_max)) {
-+ error_msg = "invalid eh_entries";
-+ goto corrupted;
- }
- return 0;
-+
-+corrupted:
-+ ext3_error(inode->i_sb, function,
-+ "bad header in inode #%lu: %s - magic %x, "
-+ "entries %u, max %u(%u), depth %u(%u)",
-+ inode->i_ino, error_msg, eh->eh_magic,
-+ eh->eh_entries, eh->eh_max, max,
-+ eh->eh_depth, depth);
-+
-+ return -EIO;
- }
-
-+#define ext3_ext_check_header(inode,eh,depth,max) \
-+ __ext3_ext_check_header(__FUNCTION__,inode,eh,depth,max)
-+
-+
- static handle_t *ext3_ext_journal_restart(handle_t *handle, int needed)
- {
- int err;
-@@ -226,6 +249,26 @@
- return size;
- }
-
-+static inline int
-+ext3_ext_max_entries(struct ext3_extents_tree *tree, int root, int depth)
-+{
-+ int max;
-+
-+ if (root) {
-+ if (depth == 0)
-+ max = ext3_ext_space_root(tree);
-+ else
-+ max = ext3_ext_space_root_idx(tree);
-+ } else {
-+ if (depth == 0)
-+ max = ext3_ext_space_block(tree);
-+ else
-+ max = ext3_ext_space_block_idx(tree);
-+ }
-+
-+ return max;
-+}
-+
- static void ext3_ext_show_path(struct ext3_extents_tree *tree,
- struct ext3_ext_path *path)
- {
-@@ -296,10 +339,6 @@
- struct ext3_extent_idx *ix;
- int l = 0, k, r;
-
-- EXT_ASSERT(eh->eh_magic == EXT3_EXT_MAGIC);
-- EXT_ASSERT(eh->eh_entries <= eh->eh_max);
-- EXT_ASSERT(eh->eh_entries > 0);
--
- ext_debug(tree, "binsearch for %d(idx): ", block);
-
- path->p_idx = ix = EXT_FIRST_INDEX(eh);
-@@ -359,9 +398,6 @@
- struct ext3_extent *ex;
- int l = 0, k, r;
-
-- EXT_ASSERT(eh->eh_magic == EXT3_EXT_MAGIC);
-- EXT_ASSERT(eh->eh_entries <= eh->eh_max);
--
- if (eh->eh_entries == 0) {
- /*
- * this leaf is empty yet:
-@@ -436,6 +472,7 @@
- struct ext3_extent_header *eh;
- struct buffer_head *bh;
- int depth, i, ppos = 0;
-+ int max;
-
- EXT_ASSERT(tree);
- EXT_ASSERT(tree->inode);
-@@ -443,17 +480,15 @@
-
- eh = EXT_ROOT_HDR(tree);
- EXT_ASSERT(eh);
-- if (ext3_ext_check_header(eh)) {
-+ i = depth = EXT_DEPTH(tree);
-+ max = ext3_ext_max_entries(tree, 1, i);
-+ if (ext3_ext_check_header(tree->inode, eh, i, max)) {
- /* don't free previously allocated path
- * -- caller should take care */
- path = NULL;
- goto err;
- }
-
-- i = depth = EXT_DEPTH(tree);
-- EXT_ASSERT(eh->eh_max);
-- EXT_ASSERT(eh->eh_magic == EXT3_EXT_MAGIC);
--
- /* account possible depth increase */
- if (!path) {
- path = kmalloc(sizeof(struct ext3_ext_path) * (depth + 2),
-@@ -484,8 +519,10 @@
- path[ppos].p_hdr = eh;
- i--;
-
-- if (ext3_ext_check_header(eh))
-+ max = ext3_ext_max_entries(tree, 0, i);
-+ if (ext3_ext_check_header(tree->inode, eh, i, max))
- goto err;
-+
- }
-
- path[ppos].p_depth = i;
-@@ -493,9 +530,6 @@
- path[ppos].p_ext = NULL;
- path[ppos].p_idx = NULL;
-
-- if (ext3_ext_check_header(eh))
-- goto err;
--
- /* find extent */
- ext3_ext_binsearch(tree, path + ppos, block);
-
-@@ -1545,6 +1579,8 @@
- ext_debug(tree, "remove [%lu:%lu] in leaf\n", start, end);
- if (!path[depth].p_hdr)
- path[depth].p_hdr = EXT_BLOCK_HDR(path[depth].p_bh);
-+
-+ /* the header must be checked already in ext3_ext_remove_space() */
- eh = path[depth].p_hdr;
- EXT_ASSERT(eh);
- EXT_ASSERT(eh->eh_entries <= eh->eh_max);
-@@ -1707,7 +1743,7 @@
- int depth = EXT_DEPTH(tree);
- struct ext3_ext_path *path;
- handle_t *handle;
-- int i = 0, err = 0;
-+ int i = 0, err = 0, max;
-
- ext_debug(tree, "space to be removed: %lu:%lu\n", start, end);
-
-@@ -1730,7 +1766,13 @@
- }
- memset(path, 0, sizeof(struct ext3_ext_path) * (depth + 1));
- path[i].p_hdr = EXT_ROOT_HDR(tree);
--
-+
-+ max = ext3_ext_max_entries(tree, 1, depth);
-+ if (ext3_ext_check_header(inode, path[i].p_hdr, depth, max)) {
-+ err = -EIO;
-+ goto out;
-+ }
-+
- while (i >= 0 && err == 0) {
- if (i == depth) {
- /* this is leaf block */
-@@ -1740,16 +1782,13 @@
- i--;
- continue;
- }
--
-+
- /* this is index block */
- if (!path[i].p_hdr) {
- ext_debug(tree, "initialize header\n");
- path[i].p_hdr = EXT_BLOCK_HDR(path[i].p_bh);
- }
-
-- EXT_ASSERT(path[i].p_hdr->eh_entries <= path[i].p_hdr->eh_max);
-- EXT_ASSERT(path[i].p_hdr->eh_magic == EXT3_EXT_MAGIC);
--
- if (!path[i].p_idx) {
- /* this level hasn't touched yet */
- path[i].p_idx =
-@@ -1776,6 +1815,14 @@
- err = -EIO;
- break;
- }
-+ BUG_ON(i + 1 > depth);
-+ max = ext3_ext_max_entries(tree, 0, depth - i - 1);
-+ if (ext3_ext_check_header(inode,
-+ EXT_BLOCK_HDR(path[i+1].p_bh),
-+ depth - i - 1, max)) {
-+ err = -EIO;
-+ break;
-+ }
- /* put actual number of indexes to know is this
- * number got changed at the next iteration */
- path[i].p_block = path[i].p_hdr->eh_entries;
-@@ -1796,7 +1843,7 @@
- }
-
- /* TODO: flexible tree reduction should be here */
-- if (path->p_hdr->eh_entries == 0) {
-+ if (err == 0 && path->p_hdr->eh_entries == 0) {
- /*
- * truncate to zero freed all the tree
- * so, we need to correct eh_depth
-@@ -1810,6 +1857,7 @@
- }
- ext3_ext_tree_changed(tree);
-
-+out:
- kfree(path);
- ext3_journal_stop(handle);
-
+++ /dev/null
-Signed-off-by: Johann Lombardi <johann.lombardi@bull.net>
-
---- linux-2.6.12.orig/fs/ext3/super.c 2005-06-17 21:48:29.000000000 +0200
-+++ linux-2.6.12/fs/ext3/super.c 2005-11-07 13:37:30.000000000 +0100
-@@ -39,7 +39,8 @@
- #include "xattr.h"
- #include "acl.h"
-
--static int ext3_load_journal(struct super_block *, struct ext3_super_block *);
-+static int ext3_load_journal(struct super_block *, struct ext3_super_block *,
-+ unsigned long journal_devnum);
- static int ext3_create_journal(struct super_block *, struct ext3_super_block *,
- int);
- static void ext3_commit_super (struct super_block * sb,
-@@ -586,7 +587,7 @@ enum {
- Opt_nouid32, Opt_check, Opt_nocheck, Opt_debug, Opt_oldalloc, Opt_orlov,
- Opt_user_xattr, Opt_nouser_xattr, Opt_acl, Opt_noacl,
- Opt_reservation, Opt_noreservation, Opt_noload, Opt_nobh,
-- Opt_commit, Opt_journal_update, Opt_journal_inum,
-+ Opt_commit, Opt_journal_update, Opt_journal_inum, Opt_journal_dev,
- Opt_abort, Opt_data_journal, Opt_data_ordered, Opt_data_writeback,
- Opt_usrjquota, Opt_grpjquota, Opt_offusrjquota, Opt_offgrpjquota,
- Opt_jqfmt_vfsold, Opt_jqfmt_vfsv0,
-@@ -624,6 +625,7 @@ static match_table_t tokens = {
- {Opt_commit, "commit=%u"},
- {Opt_journal_update, "journal=update"},
- {Opt_journal_inum, "journal=%u"},
-+ {Opt_journal_dev, "journal_dev=%u"},
- {Opt_abort, "abort"},
- {Opt_data_journal, "data=journal"},
- {Opt_data_ordered, "data=ordered"},
-@@ -663,8 +665,9 @@ static unsigned long get_sb_block(void *
- return sb_block;
- }
-
--static int parse_options (char * options, struct super_block *sb,
-- unsigned long * inum, unsigned long *n_blocks_count, int is_remount)
-+static int parse_options (char *options, struct super_block *sb,
-+ unsigned long *inum, unsigned long *journal_devnum,
-+ unsigned long *n_blocks_count, int is_remount)
- {
- struct ext3_sb_info *sbi = EXT3_SB(sb);
- char * p;
-@@ -805,6 +808,16 @@ static int parse_options (char * options
- return 0;
- *inum = option;
- break;
-+ case Opt_journal_dev:
-+ if (is_remount) {
-+ printk(KERN_ERR "EXT3-fs: cannot specify "
-+ "journal on remount\n");
-+ return 0;
-+ }
-+ if (match_int(&args[0], &option))
-+ return 0;
-+ *journal_devnum = option;
-+ break;
- case Opt_noload:
- set_opt (sbi->s_mount_opt, NOLOAD);
- break;
-@@ -1250,6 +1263,7 @@ static int ext3_fill_super (struct super
- unsigned long logic_sb_block;
- unsigned long offset = 0;
- unsigned long journal_inum = 0;
-+ unsigned long journal_devnum = 0;
- unsigned long def_mount_opts;
- struct inode *root;
- int blocksize;
-@@ -1330,7 +1344,8 @@ static int ext3_fill_super (struct super
-
- set_opt(sbi->s_mount_opt, RESERVATION);
-
-- if (!parse_options ((char *) data, sb, &journal_inum, NULL, 0))
-+ if (!parse_options ((char *) data, sb, &journal_inum, &journal_devnum,
-+ NULL, 0))
- goto failed_mount;
-
- sb->s_flags = (sb->s_flags & ~MS_POSIXACL) |
-@@ -1541,7 +1556,7 @@ static int ext3_fill_super (struct super
- */
- if (!test_opt(sb, NOLOAD) &&
- EXT3_HAS_COMPAT_FEATURE(sb, EXT3_FEATURE_COMPAT_HAS_JOURNAL)) {
-- if (ext3_load_journal(sb, es))
-+ if (ext3_load_journal(sb, es, journal_devnum))
- goto failed_mount2;
- } else if (journal_inum) {
- if (ext3_create_journal(sb, es, journal_inum))
-@@ -1821,15 +1836,24 @@ out_bdev:
- return NULL;
- }
-
--static int ext3_load_journal(struct super_block * sb,
-- struct ext3_super_block * es)
-+static int ext3_load_journal(struct super_block *sb,
-+ struct ext3_super_block *es,
-+ unsigned long journal_devnum)
- {
- journal_t *journal;
- int journal_inum = le32_to_cpu(es->s_journal_inum);
-- dev_t journal_dev = new_decode_dev(le32_to_cpu(es->s_journal_dev));
-+ dev_t journal_dev;
- int err = 0;
- int really_read_only;
-
-+ if (journal_devnum &&
-+ journal_devnum != le32_to_cpu(es->s_journal_dev)) {
-+ printk(KERN_INFO "EXT3-fs: external journal device major/minor "
-+ "numbers have changed\n");
-+ journal_dev = new_decode_dev(journal_devnum);
-+ } else
-+ journal_dev = new_decode_dev(le32_to_cpu(es->s_journal_dev));
-+
- really_read_only = bdev_read_only(sb->s_bdev);
-
- /*
-@@ -1888,6 +1912,16 @@ static int ext3_load_journal(struct supe
-
- EXT3_SB(sb)->s_journal = journal;
- ext3_clear_journal_err(sb, es);
-+
-+ if (journal_devnum &&
-+ journal_devnum != le32_to_cpu(es->s_journal_dev)) {
-+ es->s_journal_dev = cpu_to_le32(journal_devnum);
-+ sb->s_dirt = 1;
-+
-+ /* Make sure we flush the recovery flag to disk. */
-+ ext3_commit_super(sb, es, 1);
-+ }
-+
- return 0;
- }
-
-@@ -2093,13 +2127,13 @@ static int ext3_remount (struct super_bl
- {
- struct ext3_super_block * es;
- struct ext3_sb_info *sbi = EXT3_SB(sb);
-- unsigned long tmp;
-+ unsigned long tmp1, tmp2;
- unsigned long n_blocks_count = 0;
-
- /*
- * Allow the "check" option to be passed as a remount option.
- */
-- if (!parse_options(data, sb, &tmp, &n_blocks_count, 1))
-+ if (!parse_options(data, sb, &tmp1, &tmp2, &n_blocks_count, 1))
- return -EINVAL;
-
- if (sbi->s_mount_opt & EXT3_MOUNT_ABORT)
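
This patch teaches ext3 a journal_dev=%u mount option so an external
journal can still be located after its block device is renumbered: the
option value overrides es->s_journal_dev, and once the journal loads
successfully the new number is written back through ext3_commit_super().
A rough userspace sketch of that decision, using local stand-ins rather
than the real superblock fields:

/* If the admin passes journal_dev=<encoded dev_t> and it differs from what
 * the superblock remembers, prefer the mount option and flag the superblock
 * for a rewrite.  s_journal_dev here is just a parameter, not the real
 * struct ext3_super_block field. */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

static unsigned long pick_journal_dev(const char *opts,
				      unsigned long s_journal_dev,
				      int *dirty)
{
	const char *p = strstr(opts, "journal_dev=");
	unsigned long optdev;

	*dirty = 0;
	if (p == NULL)
		return s_journal_dev;          /* no override requested */

	optdev = strtoul(p + strlen("journal_dev="), NULL, 0);
	if (optdev != 0 && optdev != s_journal_dev) {
		/* external journal moved: use the new number and remember
		 * to write it back (sb->s_dirt / ext3_commit_super() in
		 * the patch) */
		*dirty = 1;
		return optdev;
	}
	return s_journal_dev;
}

int main(void)
{
	int dirty;
	unsigned long dev = pick_journal_dev("rw,journal_dev=2065", 2064, &dirty);
	printf("journal dev %lu, superblock dirty=%d\n", dev, dirty);
	return 0;
}
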
+++ /dev/null
-Index: linux-2.6.15/include/linux/ext3_fs_i.h
-===================================================================
---- linux-2.6.15.orig/include/linux/ext3_fs_i.h 2006-02-24 15:41:30.000000000 +0300
-+++ linux-2.6.15/include/linux/ext3_fs_i.h 2006-02-24 15:41:31.000000000 +0300
-@@ -135,6 +135,8 @@ struct ext3_inode_info {
- struct inode vfs_inode;
-
- __u32 i_cached_extent[4];
-+
-+ void *i_filterdata;
- };
-
- #endif /* _LINUX_EXT3_FS_I */
-Index: linux-2.6.15/fs/ext3/super.c
-===================================================================
---- linux-2.6.15.orig/fs/ext3/super.c 2006-02-24 15:41:30.000000000 +0300
-+++ linux-2.6.15/fs/ext3/super.c 2006-02-24 15:42:02.000000000 +0300
-@@ -459,6 +459,7 @@ static struct inode *ext3_alloc_inode(st
- ei->vfs_inode.i_version = 1;
-
- memset(&ei->i_cached_extent, 0, sizeof(ei->i_cached_extent));
-+ ei->i_filterdata = NULL;
- return &ei->vfs_inode;
- }
-
+++ /dev/null
-Index: linux-2.6.5-SLES9_SP1_BRANCH_2004111114454891/fs/ext3/namei.c
-===================================================================
---- linux-2.6.5-SLES9_SP1_BRANCH_2004111114454891.orig/fs/ext3/namei.c 2005-04-04 05:06:46.000000000 -0600
-+++ linux-2.6.5-SLES9_SP1_BRANCH_2004111114454891/fs/ext3/namei.c 2005-04-04 05:09:18.000000000 -0600
-@@ -926,8 +926,16 @@
- struct inode *dir = dentry->d_parent->d_inode;
-
- sb = dir->i_sb;
-- if (!(frame = dx_probe(dentry, NULL, &hinfo, frames, err)))
-- return NULL;
-+ /* NFS may look up ".." - look at dx_root directory block */
-+ if (namelen > 2 || name[0] != '.'||(name[1] != '.' && name[1] != '\0')){
-+ if (!(frame = dx_probe(dentry, NULL, &hinfo, frames, err)))
-+ return NULL;
-+ } else {
-+ frame = frames;
-+ frame->bh = NULL; /* for dx_release() */
-+ frame->at = (struct dx_entry *)frames; /* hack for zero entry*/
-+ dx_set_block(frame->at, 0); /* dx_root block is 0 */
-+ }
- hash = hinfo.hash;
- do {
- block = dx_get_block(frame->at);
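
The hunk above stops feeding "." and ".." through dx_probe(): NFS may look
those names up directly, and they are always served from the dx_root block
at logical block 0, so the frame is simply pointed there. A tiny sketch of
the name test behind that branch:

#include <stdio.h>
#include <string.h>

/* Returns 1 for "." or "..", 0 for any other name (mirrors the condition
 * used in the patched ext3_find_entry path, inverted). */
static int is_dot_or_dotdot(const char *name, int len)
{
	if (len > 2 || name[0] != '.')
		return 0;
	return len == 1 || name[1] == '.';
}

int main(void)
{
	const char *names[] = { ".", "..", "..x", "file" };
	for (int i = 0; i < 4; i++)
		printf("%-4s -> %d\n", names[i],
		       is_dot_or_dotdot(names[i], (int)strlen(names[i])));
	return 0;
}
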
+++ /dev/null
-Index: linux-2.6.5-SLES9_SP1_BRANCH_2004111114454891/fs/ext3/ialloc.c
-===================================================================
---- linux-2.6.5-SLES9_SP1_BRANCH_2004111114454891.orig/fs/ext3/ialloc.c 2005-05-16 14:10:54.000000000 -0600
-+++ linux-2.6.5-SLES9_SP1_BRANCH_2004111114454891/fs/ext3/ialloc.c 2005-05-16 14:18:29.000000000 -0600
-@@ -352,13 +352,17 @@
- return -1;
- }
-
--static int find_group_other(struct super_block *sb, struct inode *parent)
-+static int find_group_other(struct super_block *sb, struct inode *parent,
-+ int mode)
- {
- int parent_group = EXT3_I(parent)->i_block_group;
-+ struct ext3_sb_info *sbi = EXT3_SB(sb);
- int ngroups = EXT3_SB(sb)->s_groups_count;
- struct ext3_group_desc *desc;
- struct buffer_head *bh;
- int group, i;
-+ int best_group = -1;
-+ int avefreeb, freeb, best_group_freeb = 0;
-
- /*
- * Try to place the inode in its parent directory
-@@ -366,9 +370,9 @@
- group = parent_group;
- desc = ext3_get_group_desc (sb, group, &bh);
- if (desc && le16_to_cpu(desc->bg_free_inodes_count) &&
-- le16_to_cpu(desc->bg_free_blocks_count))
-+ (!S_ISREG(mode) || le16_to_cpu(desc->bg_free_blocks_count)))
- return group;
--
-+ avefreeb = le32_to_cpu(sbi->s_es->s_free_blocks_count) / ngroups;
- /*
- * We're going to place this inode in a different blockgroup from its
- * parent. We want to cause files in a common directory to all land in
-@@ -381,33 +385,47 @@
- group = (group + parent->i_ino) % ngroups;
-
- /*
-- * Use a quadratic hash to find a group with a free inode and some free
-- * blocks.
-+ * Use a quadratic hash to find a group with a free inode and
-+ * average number of free blocks.
- */
- for (i = 1; i < ngroups; i <<= 1) {
- group += i;
- if (group >= ngroups)
- group -= ngroups;
- desc = ext3_get_group_desc (sb, group, &bh);
-- if (desc && le16_to_cpu(desc->bg_free_inodes_count) &&
-- le16_to_cpu(desc->bg_free_blocks_count))
-+ if (!desc || !desc->bg_free_inodes_count)
-+ continue;
-+ if (!S_ISREG(mode))
-+ return group;
-+ if (le16_to_cpu(desc->bg_free_blocks_count) >= avefreeb)
- return group;
- }
-
- /*
-- * That failed: try linear search for a free inode, even if that group
-- * has no free blocks.
-+	 * That failed: start from the last group used to allocate an inode
-+	 * and try a linear search for a free inode and, preferably,
-+	 * free blocks.
- */
-- group = parent_group;
-+ group = sbi->s_last_alloc_group;
-+ if (group == -1)
-+ group = parent_group;
-+
- for (i = 0; i < ngroups; i++) {
- if (++group >= ngroups)
- group = 0;
- desc = ext3_get_group_desc (sb, group, &bh);
-- if (desc && le16_to_cpu(desc->bg_free_inodes_count))
-- return group;
-+ if (!desc || !desc->bg_free_inodes_count)
-+ continue;
-+ freeb = le16_to_cpu(desc->bg_free_blocks_count);
-+ if (freeb > best_group_freeb) {
-+ best_group_freeb = freeb;
-+ best_group = group;
-+ if (freeb >= avefreeb || !S_ISREG(mode))
-+ break;
-+ }
- }
--
-- return -1;
-+ sbi->s_last_alloc_group = best_group;
-+ return best_group;
- }
-
- /*
-@@ -454,7 +472,7 @@
- else
- group = find_group_orlov(sb, dir);
- } else
-- group = find_group_other(sb, dir);
-+ group = find_group_other(sb, dir, mode);
-
- err = -ENOSPC;
- if (group == -1)
-Index: linux-2.6.5-SLES9_SP1_BRANCH_2004111114454891/fs/ext3/super.c
-===================================================================
---- linux-2.6.5-SLES9_SP1_BRANCH_2004111114454891.orig/fs/ext3/super.c 2005-05-16 14:10:54.000000000 -0600
-+++ linux-2.6.5-SLES9_SP1_BRANCH_2004111114454891/fs/ext3/super.c 2005-05-16 14:17:14.000000000 -0600
-@@ -1297,6 +1297,7 @@
- percpu_counter_init(&sbi->s_dirs_counter);
- bgl_lock_init(&sbi->s_blockgroup_lock);
-
-+ sbi->s_last_alloc_group = -1;
- for (i = 0; i < db_count; i++) {
- block = descriptor_loc(sb, logic_sb_block, i);
- sbi->s_group_desc[i] = sb_bread(sb, block);
-Index: linux-2.6.5-SLES9_SP1_BRANCH_2004111114454891/include/linux/ext3_fs_sb.h
-===================================================================
---- linux-2.6.5-SLES9_SP1_BRANCH_2004111114454891.orig/include/linux/ext3_fs_sb.h 2005-05-16 14:10:54.000000000 -0600
-+++ linux-2.6.5-SLES9_SP1_BRANCH_2004111114454891/include/linux/ext3_fs_sb.h 2005-05-16 14:17:14.000000000 -0600
-@@ -59,6 +59,8 @@
- struct percpu_counter s_freeinodes_counter;
- struct percpu_counter s_dirs_counter;
- struct blockgroup_lock s_blockgroup_lock;
-+ /* Last group used to allocate inode */
-+ int s_last_alloc_group;
-
- /* root of the per fs reservation window tree */
- spinlock_t s_rsv_window_lock;
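
For regular files, the reworked find_group_other() no longer settles for
the first group that merely has a free inode: the quadratic probe also
wants roughly an average number of free blocks, and the linear fallback
starts at the cached sbi->s_last_alloc_group and keeps the group with the
most free blocks instead of taking the first hit. A toy model of that
selection policy; the descriptor arrays are made up for the example:

#include <stdio.h>

#define NGROUPS 8

static int free_inodes[NGROUPS] = { 0, 3, 0, 5, 2, 0, 7, 1 };
static int free_blocks[NGROUPS] = { 0, 10, 0, 900, 40, 0, 2000, 5 };

static int find_group(int parent_group, int avefreeb)
{
	int group = parent_group, best = -1, best_freeb = 0;

	/* quadratic hash: parent + 1, + 2, + 4, ... modulo group count */
	for (int i = 1; i < NGROUPS; i <<= 1) {
		group = (group + i) % NGROUPS;
		if (free_inodes[group] && free_blocks[group] >= avefreeb)
			return group;
	}

	/* fallback: linear scan, keep the group with the most free blocks */
	group = parent_group;
	for (int i = 0; i < NGROUPS; i++) {
		group = (group + 1) % NGROUPS;
		if (!free_inodes[group])
			continue;
		if (free_blocks[group] > best_freeb) {
			best_freeb = free_blocks[group];
			best = group;
			if (best_freeb >= avefreeb)
				break;
		}
	}
	return best;  /* the patch also caches this as s_last_alloc_group */
}

int main(void)
{
	printf("chosen group: %d\n", find_group(0, 100));
	return 0;
}
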
+++ /dev/null
-Index: linux-stage/include/linux/ext3_fs.h
-===================================================================
---- linux-stage.orig/include/linux/ext3_fs.h 2005-02-25 14:53:56.424908168 +0200
-+++ linux-stage/include/linux/ext3_fs.h 2005-02-25 14:53:59.376459464 +0200
-@@ -361,12 +361,13 @@
- #define EXT3_MOUNT_IOPEN_NOPRIV 0x100000/* Make iopen world-readable */
-
- /* Compatibility, for having both ext2_fs.h and ext3_fs.h included at once */
--#ifndef _LINUX_EXT2_FS_H
-+#ifndef clear_opt
- #define clear_opt(o, opt) o &= ~EXT3_MOUNT_##opt
- #define set_opt(o, opt) o |= EXT3_MOUNT_##opt
- #define test_opt(sb, opt) (EXT3_SB(sb)->s_mount_opt & \
- EXT3_MOUNT_##opt)
--#else
-+#endif
-+#ifndef EXT2_MOUNT_NOLOAD
- #define EXT2_MOUNT_NOLOAD EXT3_MOUNT_NOLOAD
- #define EXT2_MOUNT_ABORT EXT3_MOUNT_ABORT
- #define EXT2_MOUNT_DATA_FLAGS EXT3_MOUNT_DATA_FLAGS
+++ /dev/null
-Index: linux-stage/include/linux/ext3_fs.h
-===================================================================
---- linux-stage.orig/include/linux/ext3_fs.h 2004-04-02 16:43:37.000000000 -0500
-+++ linux-stage/include/linux/ext3_fs.h 2004-04-02 16:43:37.000000000 -0500
-@@ -331,12 +331,13 @@
- #define EXT3_MOUNT_IOPEN_NOPRIV 0x100000/* Make iopen world-readable */
-
- /* Compatibility, for having both ext2_fs.h and ext3_fs.h included at once */
--#ifndef _LINUX_EXT2_FS_H
-+#ifndef clear_opt
- #define clear_opt(o, opt) o &= ~EXT3_MOUNT_##opt
- #define set_opt(o, opt) o |= EXT3_MOUNT_##opt
- #define test_opt(sb, opt) (EXT3_SB(sb)->s_mount_opt & \
- EXT3_MOUNT_##opt)
--#else
-+#endif
-+#ifndef EXT2_MOUNT_NOLOAD
- #define EXT2_MOUNT_NOLOAD EXT3_MOUNT_NOLOAD
- #define EXT2_MOUNT_ABORT EXT3_MOUNT_ABORT
- #define EXT2_MOUNT_DATA_FLAGS EXT3_MOUNT_DATA_FLAGS
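
Both of these hunks replace the single #ifndef _LINUX_EXT2_FS_H / #else
split with two independent guards, so the option macros and the EXT2_*
aliases are each defined exactly once no matter which of ext2_fs.h and
ext3_fs.h is included first. A compilable illustration of the guard
pattern; the values are placeholders, not the real header contents:

#include <stdio.h>

#define EXT3_MOUNT_NOLOAD 0x0008

/* first, independent guard: the option helpers */
#ifndef clear_opt
#define clear_opt(o, opt)  ((o) &= ~EXT3_MOUNT_##opt)
#define set_opt(o, opt)    ((o) |=  EXT3_MOUNT_##opt)
#endif

/* second, independent guard: the ext2-compat aliases */
#ifndef EXT2_MOUNT_NOLOAD
#define EXT2_MOUNT_NOLOAD  EXT3_MOUNT_NOLOAD
#endif

int main(void)
{
	unsigned long opts = 0;
	set_opt(opts, NOLOAD);
	printf("opts=%#lx (EXT2 alias=%#x)\n", opts, EXT2_MOUNT_NOLOAD);
	clear_opt(opts, NOLOAD);
	printf("opts=%#lx\n", opts);
	return 0;
}
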
+++ /dev/null
-Index: linux-2.6.9-full/fs/ext3/iopen.c
-===================================================================
---- linux-2.6.9-full.orig/fs/ext3/iopen.c 2006-04-25 08:51:11.000000000 +0400
-+++ linux-2.6.9-full/fs/ext3/iopen.c 2006-05-06 01:21:11.000000000 +0400
-@@ -94,9 +94,12 @@ static struct dentry *iopen_lookup(struc
- assert(!(alternate->d_flags & DCACHE_DISCONNECTED));
- }
-
-- if (!list_empty(&inode->i_dentry)) {
-- alternate = list_entry(inode->i_dentry.next,
-- struct dentry, d_alias);
-+ list_for_each(lp, &inode->i_dentry) {
-+ alternate = list_entry(lp, struct dentry, d_alias);
-+ /* ignore dentries created for ".." to preserve
-+ * proper dcache hierarchy -- bug 10458 */
-+ if (alternate->d_flags & DCACHE_NFSFS_RENAMED)
-+ continue;
- dget_locked(alternate);
- spin_lock(&alternate->d_lock);
- alternate->d_flags |= DCACHE_REFERENCED;
-Index: linux-2.6.9-full/fs/ext3/namei.c
-===================================================================
---- linux-2.6.9-full.orig/fs/ext3/namei.c 2006-05-06 01:21:10.000000000 +0400
-+++ linux-2.6.9-full/fs/ext3/namei.c 2006-05-06 01:29:30.000000000 +0400
-@@ -1003,6 +1003,38 @@ static struct dentry *ext3_lookup(struct
- return ERR_PTR(-EACCES);
- }
-
-+	/* ".." shouldn't go into the dcache, to preserve the dcache hierarchy;
-+	 * otherwise the parent would end up as a child of its own child.
-+	 * see bug 10458 for details -bzzz */
-+ if (inode && (dentry->d_name.name[0] == '.' && (dentry->d_name.len == 1 ||
-+ (dentry->d_name.len == 2 && dentry->d_name.name[1] == '.')))) {
-+ struct dentry *tmp, *goal = NULL;
-+ struct list_head *lp;
-+
-+ /* first, look for an existing dentry - any one is good */
-+ spin_lock(&dcache_lock);
-+ list_for_each(lp, &inode->i_dentry) {
-+ tmp = list_entry(lp, struct dentry, d_alias);
-+ goal = tmp;
-+ dget_locked(goal);
-+ break;
-+ }
-+ if (goal == NULL) {
-+ /* there is no alias, we need to make current dentry:
-+ * a) inaccessible for __d_lookup()
-+ * b) inaccessible for iopen */
-+ J_ASSERT(list_empty(&dentry->d_alias));
-+ dentry->d_flags |= DCACHE_NFSFS_RENAMED;
-+ /* this is d_instantiate() ... */
-+ list_add(&dentry->d_alias, &inode->i_dentry);
-+ dentry->d_inode = inode;
-+ }
-+ spin_unlock(&dcache_lock);
-+ if (goal)
-+ iput(inode);
-+ return goal;
-+ }
-+
- return iopen_connect_dentry(dentry, inode, 1);
- }
-
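
The iopen and lookup hunks cooperate on one rule: when an inode has several
dentry aliases, reuse an existing one, but never one that was created for
".." (those are flagged DCACHE_NFSFS_RENAMED in the patch), so the dcache
keeps a real parent above its children rather than a parent hanging off its
own child. A sketch of that alias selection with a plain linked list
standing in for the dcache:

#include <stdio.h>

struct alias {
	const char  *name;
	int          nfsfs_renamed;  /* set for dentries created for ".." */
	struct alias *next;
};

/* Walk the alias list and return the first usable alias, skipping the
 * ".." placeholders, like the patched iopen_lookup() loop does. */
static struct alias *pick_alias(struct alias *head)
{
	for (struct alias *a = head; a != NULL; a = a->next)
		if (!a->nfsfs_renamed)
			return a;   /* usable alias: reuse it */
	return NULL;                /* none usable: caller instantiates anew */
}

int main(void)
{
	struct alias dotdot = { "..", 1, NULL };
	struct alias real   = { "dir", 0, &dotdot };
	struct alias *a = pick_alias(&real);
	printf("picked: %s\n", a ? a->name : "(none)");
	return 0;
}
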
+++ /dev/null
- fs/ext3/inode.c | 52 ++++++++++++++++++++++++++++++++++++++++++++++++++++
- fs/ext3/super.c | 3 +++
- 2 files changed, 55 insertions(+)
-
-Index: linux-2.6.0/fs/ext3/inode.c
-===================================================================
---- linux-2.6.0.orig/fs/ext3/inode.c 2003-12-31 00:33:49.000000000 +0300
-+++ linux-2.6.0/fs/ext3/inode.c 2003-12-31 01:14:17.000000000 +0300
-@@ -3136,3 +3136,62 @@
- ret = ret2;
- return ret;
- }
-+
-+int ext3_map_inode_page(struct inode *inode, struct page *page,
-+ unsigned long *blocks, int *created, int create)
-+{
-+ unsigned int blocksize, blocks_per_page;
-+ unsigned long iblock;
-+ struct buffer_head dummy;
-+ void *handle;
-+ int i, rc = 0, failed = 0, needed_blocks;
-+
-+ blocksize = inode->i_sb->s_blocksize;
-+ blocks_per_page = PAGE_SIZE >> inode->i_sb->s_blocksize_bits;
-+ iblock = page->index * blocks_per_page;
-+
-+ for (i = 0; i < blocks_per_page; i++, iblock++) {
-+ blocks[i] = ext3_bmap(inode->i_mapping, iblock);
-+ if (blocks[i] == 0) {
-+ failed++;
-+ if (created)
-+ created[i] = -1;
-+ } else if (created) {
-+ created[i] = 0;
-+ }
-+ }
-+
-+ if (failed == 0 || create == 0)
-+ return 0;
-+
-+ needed_blocks = ext3_writepage_trans_blocks(inode);
-+ handle = ext3_journal_start(inode, needed_blocks);
-+ if (IS_ERR(handle))
-+ return PTR_ERR(handle);
-+
-+ iblock = page->index * blocks_per_page;
-+ for (i = 0; i < blocks_per_page; i++, iblock++) {
-+ if (blocks[i] != 0)
-+ continue;
-+
-+ rc = ext3_get_block_handle(handle, inode, iblock, &dummy, 1, 1);
-+ if (rc) {
-+ printk(KERN_INFO "ext3_map_inode_page: error reading "
-+ "block %ld\n", iblock);
-+ goto out;
-+ }
-+ /* Unmap any metadata buffers from the block mapping, to avoid
-+ * data corruption due to direct-write from Lustre being
-+ * clobbered by a later flush of the blockdev metadata buffer.*/
-+ if (buffer_new(&dummy))
-+ unmap_underlying_metadata(dummy.b_bdev,
-+ dummy.b_blocknr);
-+ blocks[i] = dummy.b_blocknr;
-+ if (created)
-+ created[i] = 1;
-+ }
-+
-+ out:
-+ ext3_journal_stop(handle);
-+ return rc;
-+}
-Index: linux-2.6.0/fs/ext3/super.c
-===================================================================
---- linux-2.6.0.orig/fs/ext3/super.c 2003-12-31 00:33:49.000000000 +0300
-+++ linux-2.6.0/fs/ext3/super.c 2003-12-31 01:10:40.000000000 +0300
-@@ -2051,6 +2051,10 @@
- int nblocks, loff_t newsize);
- EXPORT_SYMBOL(ext3_prep_san_write);
-
-+int ext3_map_inode_page(struct inode *inode, struct page *page,
-+ unsigned long *blocks, int *created, int create);
-+EXPORT_SYMBOL(ext3_map_inode_page);
-+
- MODULE_AUTHOR("Remy Card, Stephen Tweedie, Andrew Morton, Andreas Dilger, Theodore Ts'o and others");
- MODULE_DESCRIPTION("Second Extended Filesystem with journaling extensions");
- MODULE_LICENSE("GPL");
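
ext3_map_inode_page(), added above, works in two passes: it first bmap()s
every block backing the page and only opens a journal handle if some of
those blocks are still holes (and create is set), then allocates the
missing blocks and unmaps any stale metadata buffers for them. A userspace
caricature of that two-pass structure; lookup_block() and allocate_block()
are stand-ins, not kernel interfaces:

#include <stdio.h>

#define BLOCKS_PER_PAGE 4

static unsigned long lookup_block(unsigned long iblock)
{
	return (iblock % 2) ? 1000 + iblock : 0;  /* pretend odd blocks exist */
}

static unsigned long allocate_block(unsigned long iblock)
{
	return 2000 + iblock;                     /* pretend we allocated one */
}

static int map_page_blocks(unsigned long index, unsigned long *blocks,
			   int create)
{
	unsigned long iblock = index * BLOCKS_PER_PAGE;
	int holes = 0;

	/* pass 1: look up every block, count the holes */
	for (int i = 0; i < BLOCKS_PER_PAGE; i++)
		if ((blocks[i] = lookup_block(iblock + i)) == 0)
			holes++;

	if (holes == 0 || !create)
		return 0;          /* nothing to allocate: no transaction */

	/* the real code starts a journal handle here, sized by
	 * ext3_writepage_trans_blocks(), before filling the holes */
	for (int i = 0; i < BLOCKS_PER_PAGE; i++)
		if (blocks[i] == 0)
			blocks[i] = allocate_block(iblock + i);
	return 0;
}

int main(void)
{
	unsigned long blocks[BLOCKS_PER_PAGE];
	map_page_blocks(3, blocks, 1);
	for (int i = 0; i < BLOCKS_PER_PAGE; i++)
		printf("block %d -> %lu\n", i, blocks[i]);
	return 0;
}
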
+++ /dev/null
-Index: linux-2.6.16.i686/include/linux/ext3_fs.h
-===================================================================
---- linux-2.6.16.i686.orig/include/linux/ext3_fs.h 2006-05-30 22:55:32.000000000 +0800
-+++ linux-2.6.16.i686/include/linux/ext3_fs.h 2006-05-30 23:02:59.000000000 +0800
-@@ -57,6 +57,14 @@
- #define ext3_debug(f, a...) do {} while (0)
- #endif
-
-+#define EXT3_MULTIBLOCK_ALLOCATOR 1
-+
-+#define EXT3_MB_HINT_MERGE 1
-+#define EXT3_MB_HINT_RESERVED 2
-+#define EXT3_MB_HINT_METADATA 4
-+#define EXT3_MB_HINT_FIRST 8
-+#define EXT3_MB_HINT_BEST 16
-+
- /*
- * Special inodes numbers
- */
-@@ -383,6 +391,7 @@
- #define EXT3_MOUNT_IOPEN_NOPRIV 0x800000/* Make iopen world-readable */
- #define EXT3_MOUNT_EXTENTS 0x1000000/* Extents support */
- #define EXT3_MOUNT_EXTDEBUG 0x2000000/* Extents debug */
-+#define EXT3_MOUNT_MBALLOC 0x4000000/* Buddy allocation support */
-
- /* Compatibility, for having both ext2_fs.h and ext3_fs.h included at once */
- #ifndef clear_opt
-@@ -404,6 +413,14 @@
- #define ext3_find_first_zero_bit ext2_find_first_zero_bit
- #define ext3_find_next_zero_bit ext2_find_next_zero_bit
-
-+#ifndef ext2_find_next_le_bit
-+#ifdef __LITTLE_ENDIAN
-+#define ext2_find_next_le_bit(addr, size, off) find_next_bit((addr), (size), (off))
-+#else
-+#error "mballoc needs a patch for big-endian systems - CFS bug 10634"
-+#endif /* __LITTLE_ENDIAN */
-+#endif /* !ext2_find_next_le_bit */
-+
- /*
- * Maximal mount counts between two filesystem checks
- */
-@@ -744,7 +753,9 @@
- extern unsigned long ext3_bg_num_gdb(struct super_block *sb, int group);
- extern int ext3_new_block (handle_t *, struct inode *, unsigned long, int *);
- extern void ext3_free_blocks (handle_t *, struct inode *, unsigned long,
-- unsigned long);
-+ unsigned long, int);
-+extern int ext3_new_block_old(handle_t *handle, struct inode *inode,
-+ unsigned long goal, int *errp);
- extern void ext3_free_blocks_sb (handle_t *, struct super_block *,
- unsigned long, unsigned long, int *);
- extern unsigned long ext3_count_free_blocks (struct super_block *);
-@@ -865,6 +874,17 @@
- extern int ext3_ext_ioctl(struct inode *inode, struct file *filp,
- unsigned int cmd, unsigned long arg);
-
-+/* mballoc.c */
-+extern long ext3_mb_stats;
-+extern long ext3_mb_max_to_scan;
-+extern int ext3_mb_init(struct super_block *, int);
-+extern int ext3_mb_release(struct super_block *);
-+extern int ext3_mb_new_blocks(handle_t *, struct inode *, unsigned long, int *, int, int *);
-+extern int ext3_mb_reserve_blocks(struct super_block *, int);
-+extern void ext3_mb_release_blocks(struct super_block *, int);
-+int __init init_ext3_proc(void);
-+void exit_ext3_proc(void);
-+
- #endif /* __KERNEL__ */
-
- /* EXT3_IOC_CREATE_INUM at bottom of file (visible to kernel and user). */
-Index: linux-2.6.16.i686/include/linux/ext3_fs_sb.h
-===================================================================
---- linux-2.6.16.i686.orig/include/linux/ext3_fs_sb.h 2006-03-20 13:53:29.000000000 +0800
-+++ linux-2.6.16.i686/include/linux/ext3_fs_sb.h 2006-05-30 23:02:59.000000000 +0800
-@@ -21,8 +21,14 @@
- #include <linux/wait.h>
- #include <linux/blockgroup_lock.h>
- #include <linux/percpu_counter.h>
-+#include <linux/list.h>
- #endif
- #include <linux/rbtree.h>
-+#include <linux/proc_fs.h>
-+
-+struct ext3_buddy_group_blocks;
-+struct ext3_mb_history;
-+#define EXT3_BB_MAX_BLOCKS
-
- /*
- * third extended-fs super-block data in memory
-@@ -78,6 +84,43 @@
- char *s_qf_names[MAXQUOTAS]; /* Names of quota files with journalled quota */
- int s_jquota_fmt; /* Format of quota to use */
- #endif
-+
-+ /* for buddy allocator */
-+ struct ext3_group_info ***s_group_info;
-+ struct inode *s_buddy_cache;
-+ long s_blocks_reserved;
-+ spinlock_t s_reserve_lock;
-+ struct list_head s_active_transaction;
-+ struct list_head s_closed_transaction;
-+ struct list_head s_committed_transaction;
-+ spinlock_t s_md_lock;
-+ tid_t s_last_transaction;
-+ int s_mb_factor;
-+ unsigned short *s_mb_offsets, *s_mb_maxs;
-+ unsigned long s_stripe;
-+
-+ /* history to debug policy */
-+ struct ext3_mb_history *s_mb_history;
-+ int s_mb_history_cur;
-+ int s_mb_history_max;
-+ struct proc_dir_entry *s_mb_proc;
-+ spinlock_t s_mb_history_lock;
-+
-+ /* stats for buddy allocator */
-+ atomic_t s_bal_reqs; /* number of reqs with len > 1 */
-+ atomic_t s_bal_success; /* we found long enough chunks */
-+ atomic_t s_bal_allocated; /* in blocks */
-+ atomic_t s_bal_ex_scanned; /* total extents scanned */
-+ atomic_t s_bal_goals; /* goal hits */
-+ atomic_t s_bal_breaks; /* too long searches */
-+ atomic_t s_bal_2orders; /* 2^order hits */
-+ spinlock_t s_bal_lock;
-+ unsigned long s_mb_buddies_generated;
-+ unsigned long long s_mb_generation_time;
- };
-+
-+#define EXT3_GROUP_INFO(sb, group) \
-+ EXT3_SB(sb)->s_group_info[(group) >> EXT3_DESC_PER_BLOCK_BITS(sb)] \
-+ [(group) & (EXT3_DESC_PER_BLOCK(sb) - 1)]
-
- #endif /* _LINUX_EXT3_FS_SB */
-Index: linux-2.6.16.i686/fs/ext3/super.c
-===================================================================
---- linux-2.6.16.i686.orig/fs/ext3/super.c 2006-05-30 22:55:32.000000000 +0800
-+++ linux-2.6.16.i686/fs/ext3/super.c 2006-05-30 23:02:59.000000000 +0800
-@@ -392,6 +392,7 @@
- struct ext3_super_block *es = sbi->s_es;
- int i;
-
-+ ext3_mb_release(sb);
- ext3_ext_release(sb);
- ext3_xattr_put_super(sb);
- journal_destroy(sbi->s_journal);
-@@ -640,6 +641,7 @@
- Opt_ignore, Opt_barrier, Opt_err, Opt_resize, Opt_usrquota,
- Opt_iopen, Opt_noiopen, Opt_iopen_nopriv,
- Opt_extents, Opt_noextents, Opt_extdebug,
-+ Opt_mballoc, Opt_nomballoc, Opt_stripe,
- Opt_grpquota
- };
-
-@@ -694,6 +695,9 @@
- {Opt_extents, "extents"},
- {Opt_noextents, "noextents"},
- {Opt_extdebug, "extdebug"},
-+ {Opt_mballoc, "mballoc"},
-+ {Opt_nomballoc, "nomballoc"},
-+ {Opt_stripe, "stripe=%u"},
- {Opt_barrier, "barrier=%u"},
- {Opt_err, NULL},
- {Opt_resize, "resize"},
-@@ -1041,6 +1043,19 @@
- case Opt_extdebug:
- set_opt (sbi->s_mount_opt, EXTDEBUG);
- break;
-+ case Opt_mballoc:
-+ set_opt(sbi->s_mount_opt, MBALLOC);
-+ break;
-+ case Opt_nomballoc:
-+ clear_opt(sbi->s_mount_opt, MBALLOC);
-+ break;
-+ case Opt_stripe:
-+ if (match_int(&args[0], &option))
-+ return 0;
-+ if (option < 0)
-+ return 0;
-+ sbi->s_stripe = option;
-+ break;
- default:
- printk (KERN_ERR
- "EXT3-fs: Unrecognized mount option \"%s\" "
-@@ -1766,6 +1771,7 @@
- ext3_count_dirs(sb));
-
- ext3_ext_init(sb);
-+ ext3_mb_init(sb, needs_recovery);
- lock_kernel();
- return 0;
-
-@@ -2699,7 +2705,13 @@
-
- static int __init init_ext3_fs(void)
- {
-- int err = init_ext3_xattr();
-+ int err;
-+
-+ err = init_ext3_proc();
-+ if (err)
-+ return err;
-+
-+ err = init_ext3_xattr();
- if (err)
- return err;
- err = init_inodecache();
-@@ -2721,6 +2733,7 @@
- unregister_filesystem(&ext3_fs_type);
- destroy_inodecache();
- exit_ext3_xattr();
-+ exit_ext3_proc();
- }
-
- int ext3_prep_san_write(struct inode *inode, long *blocks,
-Index: linux-2.6.16.i686/fs/ext3/extents.c
-===================================================================
---- linux-2.6.16.i686.orig/fs/ext3/extents.c 2006-05-30 22:55:32.000000000 +0800
-+++ linux-2.6.16.i686/fs/ext3/extents.c 2006-05-30 23:02:59.000000000 +0800
-@@ -771,7 +771,7 @@
- for (i = 0; i < depth; i++) {
- if (!ablocks[i])
- continue;
-- ext3_free_blocks(handle, tree->inode, ablocks[i], 1);
-+ ext3_free_blocks(handle, tree->inode, ablocks[i], 1, 1);
- }
- }
- kfree(ablocks);
-@@ -1428,7 +1428,7 @@
- path->p_idx->ei_leaf);
- bh = sb_find_get_block(tree->inode->i_sb, path->p_idx->ei_leaf);
- ext3_forget(handle, 1, tree->inode, bh, path->p_idx->ei_leaf);
-- ext3_free_blocks(handle, tree->inode, path->p_idx->ei_leaf, 1);
-+ ext3_free_blocks(handle, tree->inode, path->p_idx->ei_leaf, 1, 1);
- return err;
- }
-
-@@ -1913,10 +1913,12 @@
- int needed = ext3_remove_blocks_credits(tree, ex, from, to);
- handle_t *handle = ext3_journal_start(tree->inode, needed);
- struct buffer_head *bh;
-- int i;
-+ int i, metadata = 0;
-
- if (IS_ERR(handle))
- return PTR_ERR(handle);
-+ if (S_ISDIR(tree->inode->i_mode) || S_ISLNK(tree->inode->i_mode))
-+ metadata = 1;
- if (from >= ex->ee_block && to == ex->ee_block + ex->ee_len - 1) {
- /* tail removal */
- unsigned long num, start;
-@@ -1928,7 +1930,7 @@
- bh = sb_find_get_block(tree->inode->i_sb, start + i);
- ext3_forget(handle, 0, tree->inode, bh, start + i);
- }
-- ext3_free_blocks(handle, tree->inode, start, num);
-+ ext3_free_blocks(handle, tree->inode, start, num, metadata);
- } else if (from == ex->ee_block && to <= ex->ee_block + ex->ee_len - 1) {
- printk("strange request: removal %lu-%lu from %u:%u\n",
- from, to, ex->ee_block, ex->ee_len);
-Index: linux-2.6.16.i686/fs/ext3/inode.c
-===================================================================
---- linux-2.6.16.i686.orig/fs/ext3/inode.c 2006-05-30 22:55:32.000000000 +0800
-+++ linux-2.6.16.i686/fs/ext3/inode.c 2006-05-30 23:02:59.000000000 +0800
-@@ -568,7 +568,7 @@
- ext3_journal_forget(handle, branch[i].bh);
- }
- for (i = 0; i < keys; i++)
-- ext3_free_blocks(handle, inode, le32_to_cpu(branch[i].key), 1);
-+ ext3_free_blocks(handle, inode, le32_to_cpu(branch[i].key), 1, 1);
- return err;
- }
-
-@@ -1862,7 +1862,7 @@
- }
- }
-
-- ext3_free_blocks(handle, inode, block_to_free, count);
-+ ext3_free_blocks(handle, inode, block_to_free, count, 1);
- }
-
- /**
-@@ -2035,7 +2035,7 @@
- ext3_journal_test_restart(handle, inode);
- }
-
-- ext3_free_blocks(handle, inode, nr, 1);
-+ ext3_free_blocks(handle, inode, nr, 1, 1);
-
- if (parent_bh) {
- /*
-Index: linux-2.6.16.i686/fs/ext3/balloc.c
-===================================================================
---- linux-2.6.16.i686.orig/fs/ext3/balloc.c 2006-03-20 13:53:29.000000000 +0800
-+++ linux-2.6.16.i686/fs/ext3/balloc.c 2006-05-30 23:02:59.000000000 +0800
-@@ -80,7 +80,7 @@
- *
- * Return buffer_head on success or NULL in case of failure.
- */
--static struct buffer_head *
-+struct buffer_head *
- read_block_bitmap(struct super_block *sb, unsigned int block_group)
- {
- struct ext3_group_desc * desc;
-@@ -491,24 +491,6 @@
- return;
- }
-
--/* Free given blocks, update quota and i_blocks field */
--void ext3_free_blocks(handle_t *handle, struct inode *inode,
-- unsigned long block, unsigned long count)
--{
-- struct super_block * sb;
-- int dquot_freed_blocks;
--
-- sb = inode->i_sb;
-- if (!sb) {
-- printk ("ext3_free_blocks: nonexistent device");
-- return;
-- }
-- ext3_free_blocks_sb(handle, sb, block, count, &dquot_freed_blocks);
-- if (dquot_freed_blocks)
-- DQUOT_FREE_BLOCK(inode, dquot_freed_blocks);
-- return;
--}
--
- /*
- * For ext3 allocations, we must not reuse any blocks which are
- * allocated in the bitmap buffer's "last committed data" copy. This
-@@ -1154,7 +1136,7 @@
- * bitmap, and then for any free bit if that fails.
- * This function also updates quota and i_blocks field.
- */
--int ext3_new_block(handle_t *handle, struct inode *inode,
-+int ext3_new_block_old(handle_t *handle, struct inode *inode,
- unsigned long goal, int *errp)
- {
- struct buffer_head *bitmap_bh = NULL;
-Index: linux-2.6.16.i686/fs/ext3/xattr.c
-===================================================================
---- linux-2.6.16.i686.orig/fs/ext3/xattr.c 2006-03-20 13:53:29.000000000 +0800
-+++ linux-2.6.16.i686/fs/ext3/xattr.c 2006-05-30 23:02:59.000000000 +0800
-@@ -484,7 +484,7 @@
- ea_bdebug(bh, "refcount now=0; freeing");
- if (ce)
- mb_cache_entry_free(ce);
-- ext3_free_blocks(handle, inode, bh->b_blocknr, 1);
-+ ext3_free_blocks(handle, inode, bh->b_blocknr, 1, 1);
- get_bh(bh);
- ext3_forget(handle, 1, inode, bh, bh->b_blocknr);
- } else {
-@@ -804,7 +804,7 @@
- new_bh = sb_getblk(sb, block);
- if (!new_bh) {
- getblk_failed:
-- ext3_free_blocks(handle, inode, block, 1);
-+ ext3_free_blocks(handle, inode, block, 1, 1);
- error = -EIO;
- goto cleanup;
- }
-Index: linux-2.6.16.i686/fs/ext3/mballoc.c
-===================================================================
---- linux-2.6.16.i686.orig/fs/ext3/mballoc.c 2006-05-31 04:14:15.752410384 +0800
-+++ linux-2.6.16.i686/fs/ext3/mballoc.c 2006-05-30 23:03:38.000000000 +0800
-@@ -0,0 +1,2725 @@
-+/*
-+ * Copyright (c) 2003-2005, Cluster File Systems, Inc, info@clusterfs.com
-+ * Written by Alex Tomas <alex@clusterfs.com>
-+ *
-+ * This program is free software; you can redistribute it and/or modify
-+ * it under the terms of the GNU General Public License version 2 as
-+ * published by the Free Software Foundation.
-+ *
-+ * This program is distributed in the hope that it will be useful,
-+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
-+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-+ * GNU General Public License for more details.
-+ *
-+ * You should have received a copy of the GNU General Public License
-+ * along with this program; if not, write to the Free Software
-+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-
-+ */
-+
-+
-+/*
-+ * mballoc.c contains the multiblocks allocation routines
-+ */
-+
-+#include <linux/config.h>
-+#include <linux/time.h>
-+#include <linux/fs.h>
-+#include <linux/namei.h>
-+#include <linux/jbd.h>
-+#include <linux/ext3_fs.h>
-+#include <linux/ext3_jbd.h>
-+#include <linux/quotaops.h>
-+#include <linux/buffer_head.h>
-+#include <linux/module.h>
-+#include <linux/swap.h>
-+#include <linux/proc_fs.h>
-+#include <linux/pagemap.h>
-+#include <linux/seq_file.h>
-+
-+/*
-+ * TODO:
-+ * - bitmap read-ahead (proposed by Oleg Drokin aka green)
-+ * - track min/max extents in each group for better group selection
-+ * - mb_mark_used() may allocate chunk right after splitting buddy
-+ * - special flag to advice allocator to look for requested + N blocks
-+ * this may improve interaction between extents and mballoc
-+ * - tree of groups sorted by number of free blocks
-+ * - percpu reservation code (hotpath)
-+ * - error handling
-+ */
-+
-+/*
-+ * with AGGRESSIVE_CHECK the allocator runs consistency checks over
-+ * its structures. These checks slow things down a lot.
-+ */
-+#define AGGRESSIVE_CHECK__
-+
-+/*
-+ */
-+#define MB_DEBUG__
-+#ifdef MB_DEBUG
-+#define mb_debug(fmt,a...) printk(fmt, ##a)
-+#else
-+#define mb_debug(fmt,a...)
-+#endif
-+
-+/*
-+ * with EXT3_MB_HISTORY mballoc stores last N allocations in memory
-+ * and you can monitor it in /proc/fs/ext3/<dev>/mb_history
-+ */
-+#define EXT3_MB_HISTORY
-+
-+/*
-+ * How long mballoc can look for a best extent (in found extents)
-+ */
-+long ext3_mb_max_to_scan = 500;
-+
-+/*
-+ * How long mballoc must look for a best extent
-+ */
-+long ext3_mb_min_to_scan = 30;
-+
-+/*
-+ * with 'ext3_mb_stats' the allocator will collect stats that will be
-+ * shown at umount. Collecting them has a cost, though!
-+ */
-+
-+long ext3_mb_stats = 1;
-+
-+/*
-+ * for which requests use 2^N search using buddies
-+ */
-+long ext3_mb_order2_reqs = 8;
-+
-+#ifdef EXT3_BB_MAX_BLOCKS
-+#undef EXT3_BB_MAX_BLOCKS
-+#endif
-+#define EXT3_BB_MAX_BLOCKS 30
-+
-+struct ext3_free_metadata {
-+ unsigned short group;
-+ unsigned short num;
-+ unsigned short blocks[EXT3_BB_MAX_BLOCKS];
-+ struct list_head list;
-+};
-+
-+struct ext3_group_info {
-+ unsigned long bb_state;
-+ unsigned long bb_tid;
-+ struct ext3_free_metadata *bb_md_cur;
-+ unsigned short bb_first_free;
-+ unsigned short bb_free;
-+ unsigned short bb_fragments;
-+ unsigned short bb_counters[];
-+};
-+
-+
-+#define EXT3_GROUP_INFO_NEED_INIT_BIT 0
-+#define EXT3_GROUP_INFO_LOCKED_BIT 1
-+
-+#define EXT3_MB_GRP_NEED_INIT(grp) \
-+ (test_bit(EXT3_GROUP_INFO_NEED_INIT_BIT, &(grp)->bb_state))
-+
-+struct ext3_free_extent {
-+ __u16 fe_start;
-+ __u16 fe_len;
-+ __u16 fe_group;
-+};
-+
-+struct ext3_allocation_context {
-+ struct super_block *ac_sb;
-+
-+ /* search goals */
-+ struct ext3_free_extent ac_g_ex;
-+
-+ /* the best found extent */
-+ struct ext3_free_extent ac_b_ex;
-+
-+ /* number of iterations done. we have to track to limit searching */
-+ unsigned long ac_ex_scanned;
-+ __u16 ac_groups_scanned;
-+ __u16 ac_found;
-+ __u16 ac_tail;
-+ __u16 ac_buddy;
-+ __u8 ac_status;
-+ __u8 ac_flags; /* allocation hints */
-+ __u8 ac_criteria;
-+ __u8 ac_repeats;
-+ __u8 ac_2order; /* if request is to allocate 2^N blocks and
-+ * N > 0, the field stores N, otherwise 0 */
-+
-+ struct page *ac_buddy_page;
-+ struct page *ac_bitmap_page;
-+};
-+
-+#define AC_STATUS_CONTINUE 1
-+#define AC_STATUS_FOUND 2
-+#define AC_STATUS_BREAK 3
-+
-+struct ext3_mb_history {
-+ struct ext3_free_extent goal; /* goal allocation */
-+ struct ext3_free_extent result; /* result allocation */
-+ unsigned pid;
-+ unsigned ino;
-+ __u16 found; /* how many extents have been found */
-+ __u16 groups; /* how many groups have been scanned */
-+ __u16 tail; /* what tail broke some buddy */
-+ __u16 buddy; /* buddy the tail ^^^ broke */
-+ __u8 cr; /* which phase the result extent was found at */
-+ __u8 merged;
-+};
-+
-+struct ext3_buddy {
-+ struct page *bd_buddy_page;
-+ void *bd_buddy;
-+ struct page *bd_bitmap_page;
-+ void *bd_bitmap;
-+ struct ext3_group_info *bd_info;
-+ struct super_block *bd_sb;
-+ __u16 bd_blkbits;
-+ __u16 bd_group;
-+};
-+#define EXT3_MB_BITMAP(e3b) ((e3b)->bd_bitmap)
-+#define EXT3_MB_BUDDY(e3b) ((e3b)->bd_buddy)
-+
-+#ifndef EXT3_MB_HISTORY
-+#define ext3_mb_store_history(sb,ino,ac)
-+#else
-+static void ext3_mb_store_history(struct super_block *, unsigned ino,
-+ struct ext3_allocation_context *ac);
-+#endif
-+
-+#define in_range(b, first, len) ((b) >= (first) && (b) <= (first) + (len) - 1)
-+
-+static struct proc_dir_entry *proc_root_ext3;
-+
-+struct buffer_head *read_block_bitmap(struct super_block *, unsigned int);
-+void ext3_mb_poll_new_transaction(struct super_block *, handle_t *);
-+void ext3_mb_free_committed_blocks(struct super_block *);
-+
-+#if BITS_PER_LONG == 64
-+#define mb_correct_addr_and_bit(bit,addr) \
-+{ \
-+ bit += ((unsigned long) addr & 7UL) << 3; \
-+ addr = (void *) ((unsigned long) addr & ~7UL); \
-+}
-+#elif BITS_PER_LONG == 32
-+#define mb_correct_addr_and_bit(bit,addr) \
-+{ \
-+ bit += ((unsigned long) addr & 3UL) << 3; \
-+ addr = (void *) ((unsigned long) addr & ~3UL); \
-+}
-+#else
-+#error "how many bits you are?!"
-+#endif
-+
-+static inline int mb_test_bit(int bit, void *addr)
-+{
-+ mb_correct_addr_and_bit(bit,addr);
-+ return ext2_test_bit(bit, addr);
-+}
-+
-+static inline void mb_set_bit(int bit, void *addr)
-+{
-+ mb_correct_addr_and_bit(bit,addr);
-+ ext2_set_bit(bit, addr);
-+}
-+
-+static inline void mb_set_bit_atomic(int bit, void *addr)
-+{
-+ mb_correct_addr_and_bit(bit,addr);
-+ ext2_set_bit_atomic(NULL, bit, addr);
-+}
-+
-+static inline void mb_clear_bit(int bit, void *addr)
-+{
-+ mb_correct_addr_and_bit(bit,addr);
-+ ext2_clear_bit(bit, addr);
-+}
-+
-+static inline void mb_clear_bit_atomic(int bit, void *addr)
-+{
-+ mb_correct_addr_and_bit(bit,addr);
-+ ext2_clear_bit_atomic(NULL, bit, addr);
-+}
-+
-+static inline int mb_find_next_zero_bit(void *addr, int max, int start)
-+{
-+ int fix;
-+#if BITS_PER_LONG == 64
-+ fix = ((unsigned long) addr & 7UL) << 3;
-+ addr = (void *) ((unsigned long) addr & ~7UL);
-+#elif BITS_PER_LONG == 32
-+ fix = ((unsigned long) addr & 3UL) << 3;
-+ addr = (void *) ((unsigned long) addr & ~3UL);
-+#else
-+#error "how many bits you are?!"
-+#endif
-+ max += fix;
-+ start += fix;
-+ return ext2_find_next_zero_bit(addr, max, start) - fix;
-+}
-+
-+static inline void *mb_find_buddy(struct ext3_buddy *e3b, int order, int *max)
-+{
-+ char *bb;
-+
-+ J_ASSERT(EXT3_MB_BITMAP(e3b) != EXT3_MB_BUDDY(e3b));
-+ J_ASSERT(max != NULL);
-+
-+ if (order > e3b->bd_blkbits + 1) {
-+ *max = 0;
-+ return NULL;
-+ }
-+
-+ /* at order 0 we see each particular block */
-+ *max = 1 << (e3b->bd_blkbits + 3);
-+ if (order == 0)
-+ return EXT3_MB_BITMAP(e3b);
-+
-+ bb = EXT3_MB_BUDDY(e3b) + EXT3_SB(e3b->bd_sb)->s_mb_offsets[order];
-+ *max = EXT3_SB(e3b->bd_sb)->s_mb_maxs[order];
-+
-+ return bb;
-+}
-+
-+#ifdef AGGRESSIVE_CHECK
-+
-+static void mb_check_buddy(struct ext3_buddy *e3b)
-+{
-+ int order = e3b->bd_blkbits + 1;
-+ int max, max2, i, j, k, count;
-+ int fragments = 0, fstart;
-+ void *buddy, *buddy2;
-+
-+ if (!test_opt(e3b->bd_sb, MBALLOC))
-+ return;
-+
-+ {
-+ static int mb_check_counter = 0;
-+ if (mb_check_counter++ % 300 != 0)
-+ return;
-+ }
-+
-+ while (order > 1) {
-+ buddy = mb_find_buddy(e3b, order, &max);
-+ J_ASSERT(buddy);
-+ buddy2 = mb_find_buddy(e3b, order - 1, &max2);
-+ J_ASSERT(buddy2);
-+ J_ASSERT(buddy != buddy2);
-+ J_ASSERT(max * 2 == max2);
-+
-+ count = 0;
-+ for (i = 0; i < max; i++) {
-+
-+ if (mb_test_bit(i, buddy)) {
-+ /* only single bit in buddy2 may be 1 */
-+ if (!mb_test_bit(i << 1, buddy2))
-+ J_ASSERT(mb_test_bit((i<<1)+1, buddy2));
-+ else if (!mb_test_bit((i << 1) + 1, buddy2))
-+ J_ASSERT(mb_test_bit(i << 1, buddy2));
-+ continue;
-+ }
-+
-+ /* both bits in buddy2 must be 0 */
-+ J_ASSERT(mb_test_bit(i << 1, buddy2));
-+ J_ASSERT(mb_test_bit((i << 1) + 1, buddy2));
-+
-+ for (j = 0; j < (1 << order); j++) {
-+ k = (i * (1 << order)) + j;
-+ J_ASSERT(!mb_test_bit(k, EXT3_MB_BITMAP(e3b)));
-+ }
-+ count++;
-+ }
-+ J_ASSERT(e3b->bd_info->bb_counters[order] == count);
-+ order--;
-+ }
-+
-+ fstart = -1;
-+ buddy = mb_find_buddy(e3b, 0, &max);
-+ for (i = 0; i < max; i++) {
-+ if (!mb_test_bit(i, buddy)) {
-+ J_ASSERT(i >= e3b->bd_info->bb_first_free);
-+ if (fstart == -1) {
-+ fragments++;
-+ fstart = i;
-+ }
-+ continue;
-+ }
-+ fstart = -1;
-+ /* check used bits only */
-+ for (j = 0; j < e3b->bd_blkbits + 1; j++) {
-+ buddy2 = mb_find_buddy(e3b, j, &max2);
-+ k = i >> j;
-+ J_ASSERT(k < max2);
-+ J_ASSERT(mb_test_bit(k, buddy2));
-+ }
-+ }
-+ J_ASSERT(!EXT3_MB_GRP_NEED_INIT(e3b->bd_info));
-+ J_ASSERT(e3b->bd_info->bb_fragments == fragments);
-+}
-+
-+#else
-+#define mb_check_buddy(e3b)
-+#endif
-+
-+/* find most significant bit */
-+static int inline fmsb(unsigned short word)
-+{
-+ int order;
-+
-+ if (word > 255) {
-+ order = 7;
-+ word >>= 8;
-+ } else {
-+ order = -1;
-+ }
-+
-+ do {
-+ order++;
-+ word >>= 1;
-+ } while (word != 0);
-+
-+ return order;
-+}
-+
-+static void inline
-+ext3_mb_mark_free_simple(struct super_block *sb, void *buddy, unsigned first,
-+ int len, struct ext3_group_info *grp)
-+{
-+ struct ext3_sb_info *sbi = EXT3_SB(sb);
-+ unsigned short min, max, chunk, border;
-+
-+ mb_debug("mark %u/%u free\n", first, len);
-+ J_ASSERT(len < EXT3_BLOCKS_PER_GROUP(sb));
-+
-+ border = 2 << sb->s_blocksize_bits;
-+
-+ while (len > 0) {
-+ /* find how many blocks can be covered since this position */
-+ max = ffs(first | border) - 1;
-+
-+ /* find how many blocks of power 2 we need to mark */
-+ min = fmsb(len);
-+
-+ mb_debug(" %u/%u -> max %u, min %u\n",
-+ first & ((2 << sb->s_blocksize_bits) - 1),
-+ len, max, min);
-+
-+ if (max < min)
-+ min = max;
-+ chunk = 1 << min;
-+
-+ /* mark multiblock chunks only */
-+ grp->bb_counters[min]++;
-+ if (min > 0) {
-+ mb_debug(" set %u at %u \n", first >> min,
-+ sbi->s_mb_offsets[min]);
-+ mb_clear_bit(first >> min, buddy + sbi->s_mb_offsets[min]);
-+ }
-+
-+ len -= chunk;
-+ first += chunk;
-+ }
-+}
-+
-+static void
-+ext3_mb_generate_buddy(struct super_block *sb, void *buddy, void *bitmap,
-+ int group)
-+{
-+ struct ext3_group_info *grp = EXT3_GROUP_INFO(sb, group);
-+ unsigned short max = EXT3_BLOCKS_PER_GROUP(sb);
-+ unsigned short i = 0, first, len;
-+ unsigned free = 0, fragments = 0;
-+ unsigned long long period = get_cycles();
-+
-+ i = mb_find_next_zero_bit(bitmap, max, 0);
-+ grp->bb_first_free = i;
-+ while (i < max) {
-+ fragments++;
-+ first = i;
-+ i = ext2_find_next_le_bit(bitmap, max, i);
-+ len = i - first;
-+ free += len;
-+ if (len > 1)
-+ ext3_mb_mark_free_simple(sb, buddy, first, len, grp);
-+ else
-+ grp->bb_counters[0]++;
-+ if (i < max)
-+ i = mb_find_next_zero_bit(bitmap, max, i);
-+ }
-+ grp->bb_fragments = fragments;
-+
-+	/* bb_state shouldn't be modified because all
-+	 * others wait for init completion on the page lock */
-+ clear_bit(EXT3_GROUP_INFO_NEED_INIT_BIT, &grp->bb_state);
-+ if (free != grp->bb_free) {
-+ printk("EXT3-fs: group %u: %u blocks in bitmap, %u in gd\n",
-+ group, free, grp->bb_free);
-+ grp->bb_free = free;
-+ }
-+
-+ period = get_cycles() - period;
-+ spin_lock(&EXT3_SB(sb)->s_bal_lock);
-+ EXT3_SB(sb)->s_mb_buddies_generated++;
-+ EXT3_SB(sb)->s_mb_generation_time += period;
-+ spin_unlock(&EXT3_SB(sb)->s_bal_lock);
-+}
-+
-+static int ext3_mb_init_cache(struct page *page)
-+{
-+ int blocksize, blocks_per_page, groups_per_page;
-+ int err = 0, i, first_group, first_block;
-+ struct super_block *sb;
-+ struct buffer_head *bhs;
-+ struct buffer_head **bh;
-+ struct inode *inode;
-+ char *data, *bitmap;
-+
-+ mb_debug("init page %lu\n", page->index);
-+
-+ inode = page->mapping->host;
-+ sb = inode->i_sb;
-+ blocksize = 1 << inode->i_blkbits;
-+ blocks_per_page = PAGE_CACHE_SIZE / blocksize;
-+
-+ groups_per_page = blocks_per_page >> 1;
-+ if (groups_per_page == 0)
-+ groups_per_page = 1;
-+
-+ /* allocate buffer_heads to read bitmaps */
-+ if (groups_per_page > 1) {
-+ err = -ENOMEM;
-+ i = sizeof(struct buffer_head *) * groups_per_page;
-+ bh = kmalloc(i, GFP_NOFS);
-+ if (bh == NULL)
-+ goto out;
-+ memset(bh, 0, i);
-+ } else
-+ bh = &bhs;
-+
-+ first_group = page->index * blocks_per_page / 2;
-+
-+ /* read all groups the page covers into the cache */
-+ for (i = 0; i < groups_per_page; i++) {
-+ struct ext3_group_desc * desc;
-+
-+ if (first_group + i >= EXT3_SB(sb)->s_groups_count)
-+ break;
-+
-+ err = -EIO;
-+ desc = ext3_get_group_desc(sb, first_group + i, NULL);
-+ if (desc == NULL)
-+ goto out;
-+
-+ err = -ENOMEM;
-+ bh[i] = sb_getblk(sb, le32_to_cpu(desc->bg_block_bitmap));
-+ if (bh[i] == NULL)
-+ goto out;
-+
-+ if (buffer_uptodate(bh[i]))
-+ continue;
-+
-+ lock_buffer(bh[i]);
-+ if (buffer_uptodate(bh[i])) {
-+ unlock_buffer(bh[i]);
-+ continue;
-+ }
-+
-+ get_bh(bh[i]);
-+ bh[i]->b_end_io = end_buffer_read_sync;
-+ submit_bh(READ, bh[i]);
-+ mb_debug("read bitmap for group %u\n", first_group + i);
-+ }
-+
-+ /* wait for I/O completion */
-+ for (i = 0; i < groups_per_page && bh[i]; i++)
-+ wait_on_buffer(bh[i]);
-+
-+ err = -EIO;
-+ for (i = 0; i < groups_per_page && bh[i]; i++)
-+ if (!buffer_uptodate(bh[i]))
-+ goto out;
-+
-+ first_block = page->index * blocks_per_page;
-+ for (i = 0; i < blocks_per_page; i++) {
-+ int group;
-+
-+ group = (first_block + i) >> 1;
-+ if (group >= EXT3_SB(sb)->s_groups_count)
-+ break;
-+
-+ data = page_address(page) + (i * blocksize);
-+ bitmap = bh[group - first_group]->b_data;
-+
-+ if ((first_block + i) & 1) {
-+ /* this is block of buddy */
-+ mb_debug("put buddy for group %u in page %lu/%x\n",
-+ group, page->index, i * blocksize);
-+ memset(data, 0xff, blocksize);
-+ EXT3_GROUP_INFO(sb, group)->bb_fragments = 0;
-+ memset(EXT3_GROUP_INFO(sb, group)->bb_counters, 0,
-+ sizeof(unsigned short)*(sb->s_blocksize_bits+2));
-+ ext3_mb_generate_buddy(sb, data, bitmap, group);
-+ } else {
-+ /* this is block of bitmap */
-+ mb_debug("put bitmap for group %u in page %lu/%x\n",
-+ group, page->index, i * blocksize);
-+ memcpy(data, bitmap, blocksize);
-+ }
-+ }
-+ SetPageUptodate(page);
-+
-+out:
-+ if (bh) {
-+ for (i = 0; i < groups_per_page && bh[i]; i++)
-+ brelse(bh[i]);
-+ if (bh != &bhs)
-+ kfree(bh);
-+ }
-+ return err;
-+}
-+
-+static int ext3_mb_load_buddy(struct super_block *sb, int group,
-+ struct ext3_buddy *e3b)
-+{
-+ struct ext3_sb_info *sbi = EXT3_SB(sb);
-+ struct inode *inode = sbi->s_buddy_cache;
-+ int blocks_per_page, block, pnum, poff;
-+ struct page *page;
-+
-+ mb_debug("load group %u\n", group);
-+
-+ blocks_per_page = PAGE_CACHE_SIZE / sb->s_blocksize;
-+
-+ e3b->bd_blkbits = sb->s_blocksize_bits;
-+ e3b->bd_info = EXT3_GROUP_INFO(sb, group);
-+ e3b->bd_sb = sb;
-+ e3b->bd_group = group;
-+ e3b->bd_buddy_page = NULL;
-+ e3b->bd_bitmap_page = NULL;
-+
-+ block = group * 2;
-+ pnum = block / blocks_per_page;
-+ poff = block % blocks_per_page;
-+
-+	/* we could use find_or_create_page(), but it locks the page,
-+	 * which we'd like to avoid in the fast path ... */
-+ page = find_get_page(inode->i_mapping, pnum);
-+ if (page == NULL || !PageUptodate(page)) {
-+ if (page)
-+ page_cache_release(page);
-+ page = find_or_create_page(inode->i_mapping, pnum, GFP_NOFS);
-+ if (page) {
-+ BUG_ON(page->mapping != inode->i_mapping);
-+ if (!PageUptodate(page))
-+ ext3_mb_init_cache(page);
-+ unlock_page(page);
-+ }
-+ }
-+ if (page == NULL || !PageUptodate(page))
-+ goto err;
-+ e3b->bd_bitmap_page = page;
-+ e3b->bd_bitmap = page_address(page) + (poff * sb->s_blocksize);
-+ mark_page_accessed(page);
-+
-+ block++;
-+ pnum = block / blocks_per_page;
-+ poff = block % blocks_per_page;
-+
-+ page = find_get_page(inode->i_mapping, pnum);
-+ if (page == NULL || !PageUptodate(page)) {
-+ if (page)
-+ page_cache_release(page);
-+ page = find_or_create_page(inode->i_mapping, pnum, GFP_NOFS);
-+ if (page) {
-+ BUG_ON(page->mapping != inode->i_mapping);
-+ if (!PageUptodate(page))
-+ ext3_mb_init_cache(page);
-+ unlock_page(page);
-+ }
-+ }
-+ if (page == NULL || !PageUptodate(page))
-+ goto err;
-+ e3b->bd_buddy_page = page;
-+ e3b->bd_buddy = page_address(page) + (poff * sb->s_blocksize);
-+ mark_page_accessed(page);
-+
-+ J_ASSERT(e3b->bd_bitmap_page != NULL);
-+ J_ASSERT(e3b->bd_buddy_page != NULL);
-+
-+ return 0;
-+
-+err:
-+ if (e3b->bd_bitmap_page)
-+ page_cache_release(e3b->bd_bitmap_page);
-+ if (e3b->bd_buddy_page)
-+ page_cache_release(e3b->bd_buddy_page);
-+ e3b->bd_buddy = NULL;
-+ e3b->bd_bitmap = NULL;
-+ return -EIO;
-+}
-+
-+static void ext3_mb_release_desc(struct ext3_buddy *e3b)
-+{
-+ if (e3b->bd_bitmap_page)
-+ page_cache_release(e3b->bd_bitmap_page);
-+ if (e3b->bd_buddy_page)
-+ page_cache_release(e3b->bd_buddy_page);
-+}
-+
-+
-+static inline void
-+ext3_lock_group(struct super_block *sb, int group)
-+{
-+ bit_spin_lock(EXT3_GROUP_INFO_LOCKED_BIT,
-+ &EXT3_GROUP_INFO(sb, group)->bb_state);
-+}
-+
-+static inline void
-+ext3_unlock_group(struct super_block *sb, int group)
-+{
-+ bit_spin_unlock(EXT3_GROUP_INFO_LOCKED_BIT,
-+ &EXT3_GROUP_INFO(sb, group)->bb_state);
-+}
-+
-+static int mb_find_order_for_block(struct ext3_buddy *e3b, int block)
-+{
-+ int order = 1;
-+ void *bb;
-+
-+ J_ASSERT(EXT3_MB_BITMAP(e3b) != EXT3_MB_BUDDY(e3b));
-+ J_ASSERT(block < (1 << (e3b->bd_blkbits + 3)));
-+
-+ bb = EXT3_MB_BUDDY(e3b);
-+ while (order <= e3b->bd_blkbits + 1) {
-+ block = block >> 1;
-+ if (!mb_test_bit(block, bb)) {
-+ /* this block is part of buddy of order 'order' */
-+ return order;
-+ }
-+ bb += 1 << (e3b->bd_blkbits - order);
-+ order++;
-+ }
-+ return 0;
-+}
-+
-+static inline void mb_clear_bits(void *bm, int cur, int len)
-+{
-+ __u32 *addr;
-+
-+ len = cur + len;
-+ while (cur < len) {
-+ if ((cur & 31) == 0 && (len - cur) >= 32) {
-+ /* fast path: clear whole word at once */
-+ addr = bm + (cur >> 3);
-+ *addr = 0;
-+ cur += 32;
-+ continue;
-+ }
-+ mb_clear_bit_atomic(cur, bm);
-+ cur++;
-+ }
-+}
-+
-+static inline void mb_set_bits(void *bm, int cur, int len)
-+{
-+ __u32 *addr;
-+
-+ len = cur + len;
-+ while (cur < len) {
-+ if ((cur & 31) == 0 && (len - cur) >= 32) {
-+			/* fast path: set whole word at once */
-+ addr = bm + (cur >> 3);
-+ *addr = 0xffffffff;
-+ cur += 32;
-+ continue;
-+ }
-+ mb_set_bit_atomic(cur, bm);
-+ cur++;
-+ }
-+}
-+
-+static int mb_free_blocks(struct ext3_buddy *e3b, int first, int count)
-+{
-+ int block = 0, max = 0, order;
-+ void *buddy, *buddy2;
-+
-+ mb_check_buddy(e3b);
-+
-+ e3b->bd_info->bb_free += count;
-+ if (first < e3b->bd_info->bb_first_free)
-+ e3b->bd_info->bb_first_free = first;
-+
-+ /* let's maintain fragments counter */
-+ if (first != 0)
-+ block = !mb_test_bit(first - 1, EXT3_MB_BITMAP(e3b));
-+ if (first + count < EXT3_SB(e3b->bd_sb)->s_mb_maxs[0])
-+ max = !mb_test_bit(first + count, EXT3_MB_BITMAP(e3b));
-+ if (block && max)
-+ e3b->bd_info->bb_fragments--;
-+ else if (!block && !max)
-+ e3b->bd_info->bb_fragments++;
-+
-+ /* let's maintain buddy itself */
-+ while (count-- > 0) {
-+ block = first++;
-+ order = 0;
-+
-+ J_ASSERT(mb_test_bit(block, EXT3_MB_BITMAP(e3b)));
-+ mb_clear_bit(block, EXT3_MB_BITMAP(e3b));
-+ e3b->bd_info->bb_counters[order]++;
-+
-+ /* start of the buddy */
-+ buddy = mb_find_buddy(e3b, order, &max);
-+
-+ do {
-+ block &= ~1UL;
-+ if (mb_test_bit(block, buddy) ||
-+ mb_test_bit(block + 1, buddy))
-+ break;
-+
-+ /* both the buddies are free, try to coalesce them */
-+ buddy2 = mb_find_buddy(e3b, order + 1, &max);
-+
-+ if (!buddy2)
-+ break;
-+
-+ if (order > 0) {
-+ /* for special purposes, we don't set
-+ * free bits in bitmap */
-+ mb_set_bit(block, buddy);
-+ mb_set_bit(block + 1, buddy);
-+ }
-+ e3b->bd_info->bb_counters[order]--;
-+ e3b->bd_info->bb_counters[order]--;
-+
-+ block = block >> 1;
-+ order++;
-+ e3b->bd_info->bb_counters[order]++;
-+
-+ mb_clear_bit(block, buddy2);
-+ buddy = buddy2;
-+ } while (1);
-+ }
-+ mb_check_buddy(e3b);
-+
-+ return 0;
-+}
-+
-+static int mb_find_extent(struct ext3_buddy *e3b, int order, int block,
-+ int needed, struct ext3_free_extent *ex)
-+{
-+ int next = block, max, ord;
-+ void *buddy;
-+
-+ J_ASSERT(ex != NULL);
-+
-+ buddy = mb_find_buddy(e3b, order, &max);
-+ J_ASSERT(buddy);
-+ J_ASSERT(block < max);
-+ if (mb_test_bit(block, buddy)) {
-+ ex->fe_len = 0;
-+ ex->fe_start = 0;
-+ ex->fe_group = 0;
-+ return 0;
-+ }
-+
-+ if (likely(order == 0)) {
-+ /* find actual order */
-+ order = mb_find_order_for_block(e3b, block);
-+ block = block >> order;
-+ }
-+
-+ ex->fe_len = 1 << order;
-+ ex->fe_start = block << order;
-+ ex->fe_group = e3b->bd_group;
-+
-+ /* calc difference from given start */
-+ next = next - ex->fe_start;
-+ ex->fe_len -= next;
-+ ex->fe_start += next;
-+
-+ while (needed > ex->fe_len && (buddy = mb_find_buddy(e3b, order, &max))) {
-+
-+ if (block + 1 >= max)
-+ break;
-+
-+ next = (block + 1) * (1 << order);
-+ if (mb_test_bit(next, EXT3_MB_BITMAP(e3b)))
-+ break;
-+
-+ ord = mb_find_order_for_block(e3b, next);
-+
-+ order = ord;
-+ block = next >> order;
-+ ex->fe_len += 1 << order;
-+ }
-+
-+ J_ASSERT(ex->fe_start + ex->fe_len <= (1 << (e3b->bd_blkbits + 3)));
-+ return ex->fe_len;
-+}
-+
-+static int mb_mark_used(struct ext3_buddy *e3b, struct ext3_free_extent *ex)
-+{
-+ int ord, mlen = 0, max = 0, cur;
-+ int start = ex->fe_start;
-+ int len = ex->fe_len;
-+ unsigned ret = 0;
-+ int len0 = len;
-+ void *buddy;
-+
-+ mb_check_buddy(e3b);
-+
-+ e3b->bd_info->bb_free -= len;
-+ if (e3b->bd_info->bb_first_free == start)
-+ e3b->bd_info->bb_first_free += len;
-+
-+ /* let's maintain fragments counter */
-+ if (start != 0)
-+ mlen = !mb_test_bit(start - 1, EXT3_MB_BITMAP(e3b));
-+ if (start + len < EXT3_SB(e3b->bd_sb)->s_mb_maxs[0])
-+ max = !mb_test_bit(start + len, EXT3_MB_BITMAP(e3b));
-+ if (mlen && max)
-+ e3b->bd_info->bb_fragments++;
-+ else if (!mlen && !max)
-+ e3b->bd_info->bb_fragments--;
-+
-+ /* let's maintain buddy itself */
-+ while (len) {
-+ ord = mb_find_order_for_block(e3b, start);
-+
-+ if (((start >> ord) << ord) == start && len >= (1 << ord)) {
-+ /* the whole chunk may be allocated at once! */
-+ mlen = 1 << ord;
-+ buddy = mb_find_buddy(e3b, ord, &max);
-+ J_ASSERT((start >> ord) < max);
-+ mb_set_bit(start >> ord, buddy);
-+ e3b->bd_info->bb_counters[ord]--;
-+ start += mlen;
-+ len -= mlen;
-+ J_ASSERT(len >= 0);
-+ continue;
-+ }
-+
-+ /* store for history */
-+ if (ret == 0)
-+ ret = len | (ord << 16);
-+
-+ /* we have to split large buddy */
-+ J_ASSERT(ord > 0);
-+ buddy = mb_find_buddy(e3b, ord, &max);
-+ mb_set_bit(start >> ord, buddy);
-+ e3b->bd_info->bb_counters[ord]--;
-+
-+ ord--;
-+ cur = (start >> ord) & ~1U;
-+ buddy = mb_find_buddy(e3b, ord, &max);
-+ mb_clear_bit(cur, buddy);
-+ mb_clear_bit(cur + 1, buddy);
-+ e3b->bd_info->bb_counters[ord]++;
-+ e3b->bd_info->bb_counters[ord]++;
-+ }
-+
-+ /* now drop all the bits in bitmap */
-+ mb_set_bits(EXT3_MB_BITMAP(e3b), ex->fe_start, len0);
-+
-+ mb_check_buddy(e3b);
-+
-+ return ret;
-+}
-+
-+/*
-+ * Must be called under group lock!
-+ */
-+static void ext3_mb_use_best_found(struct ext3_allocation_context *ac,
-+ struct ext3_buddy *e3b)
-+{
-+ unsigned long ret;
-+
-+ ac->ac_b_ex.fe_len = min(ac->ac_b_ex.fe_len, ac->ac_g_ex.fe_len);
-+ ret = mb_mark_used(e3b, &ac->ac_b_ex);
-+
-+ ac->ac_status = AC_STATUS_FOUND;
-+ ac->ac_tail = ret & 0xffff;
-+ ac->ac_buddy = ret >> 16;
-+
-+ /* hold in-core structures until allocated
-+ * blocks are marked non-free in on-disk bitmap */
-+ ac->ac_buddy_page = e3b->bd_buddy_page;
-+ page_cache_get(e3b->bd_buddy_page);
-+ ac->ac_bitmap_page = e3b->bd_bitmap_page;
-+ page_cache_get(e3b->bd_bitmap_page);
-+}
-+
-+/*
-+ * The routine checks whether the found extent is good enough. If it is,
-+ * the extent gets marked used and a flag is set in the context to
-+ * stop scanning. Otherwise, the extent is compared with the
-+ * previously found extent and, if the new one is better, it's stored
-+ * in the context. Later, the best found extent will be used if
-+ * mballoc can't find a good enough extent.
-+ *
-+ * FIXME: real allocation policy is to be designed yet!
-+ */
-+static void ext3_mb_measure_extent(struct ext3_allocation_context *ac,
-+ struct ext3_free_extent *ex,
-+ struct ext3_buddy *e3b)
-+{
-+ struct ext3_free_extent *bex = &ac->ac_b_ex;
-+ struct ext3_free_extent *gex = &ac->ac_g_ex;
-+
-+ J_ASSERT(ex->fe_len > 0);
-+ J_ASSERT(ex->fe_len < (1 << ac->ac_sb->s_blocksize_bits) * 8);
-+ J_ASSERT(ex->fe_start < (1 << ac->ac_sb->s_blocksize_bits) * 8);
-+
-+ ac->ac_found++;
-+
-+ /*
-+ * The special case - take what you catch first
-+ */
-+ if (unlikely(ac->ac_flags & EXT3_MB_HINT_FIRST)) {
-+ *bex = *ex;
-+ ext3_mb_use_best_found(ac, e3b);
-+ return;
-+ }
-+
-+ /*
-+ * Let's check whether the chunk is good enough
-+ */
-+ if (ex->fe_len == gex->fe_len) {
-+ *bex = *ex;
-+ ext3_mb_use_best_found(ac, e3b);
-+ return;
-+ }
-+
-+ /*
-+ * If this is first found extent, just store it in the context
-+ */
-+ if (bex->fe_len == 0) {
-+ *bex = *ex;
-+ return;
-+ }
-+
-+ /*
-+ * If new found extent is better, store it in the context
-+ */
-+ if (bex->fe_len < gex->fe_len) {
-+ /* if the request isn't satisfied, any found extent
-+ * larger than previous best one is better */
-+ if (ex->fe_len > bex->fe_len)
-+ *bex = *ex;
-+ } else if (ex->fe_len > gex->fe_len) {
-+		/* if the request is satisfied, then we try to find
-+		 * an extent that still satisfies the request, but is
-+		 * smaller than the previous one */
-+ *bex = *ex;
-+ }
-+
-+ /*
-+	 * Let's scan at least a few extents and not pick the first one
-+ */
-+ if (bex->fe_len > gex->fe_len && ac->ac_found > ext3_mb_min_to_scan)
-+ ac->ac_status = AC_STATUS_BREAK;
-+
-+ /*
-+ * We don't want to scan for a whole year
-+ */
-+ if (ac->ac_found > ext3_mb_max_to_scan)
-+ ac->ac_status = AC_STATUS_BREAK;
-+}
-+
-+static int ext3_mb_try_best_found(struct ext3_allocation_context *ac,
-+ struct ext3_buddy *e3b)
-+{
-+ struct ext3_free_extent ex = ac->ac_b_ex;
-+ int group = ex.fe_group, max, err;
-+
-+ J_ASSERT(ex.fe_len > 0);
-+ err = ext3_mb_load_buddy(ac->ac_sb, group, e3b);
-+ if (err)
-+ return err;
-+
-+ ext3_lock_group(ac->ac_sb, group);
-+ max = mb_find_extent(e3b, 0, ex.fe_start, ex.fe_len, &ex);
-+
-+ if (max > 0) {
-+ ac->ac_b_ex = ex;
-+ ext3_mb_use_best_found(ac, e3b);
-+ }
-+
-+ ext3_unlock_group(ac->ac_sb, group);
-+
-+ ext3_mb_release_desc(e3b);
-+
-+ return 0;
-+}
-+
-+static int ext3_mb_find_by_goal(struct ext3_allocation_context *ac,
-+ struct ext3_buddy *e3b)
-+{
-+ int group = ac->ac_g_ex.fe_group, max, err;
-+ struct ext3_sb_info *sbi = EXT3_SB(ac->ac_sb);
-+ struct ext3_super_block *es = sbi->s_es;
-+ struct ext3_free_extent ex;
-+
-+ err = ext3_mb_load_buddy(ac->ac_sb, group, e3b);
-+ if (err)
-+ return err;
-+
-+ ext3_lock_group(ac->ac_sb, group);
-+ max = mb_find_extent(e3b, 0, ac->ac_g_ex.fe_start,
-+ ac->ac_g_ex.fe_len, &ex);
-+
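-+	/* Use the extent at the goal if (a) it covers a stripe-sized,
-+	 * stripe-aligned request, (b) it fully satisfies the request, or
-+	 * (c) it is non-empty and the caller asked to merge with an
-+	 * existing extent (EXT3_MB_HINT_MERGE) */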
-+ if (max >= ac->ac_g_ex.fe_len && ac->ac_g_ex.fe_len == sbi->s_stripe) {
-+ unsigned long start;
-+ start = (e3b->bd_group * EXT3_BLOCKS_PER_GROUP(ac->ac_sb) +
-+ ex.fe_start + le32_to_cpu(es->s_first_data_block));
-+ if (start % sbi->s_stripe == 0) {
-+ ac->ac_found++;
-+ ac->ac_b_ex = ex;
-+ ext3_mb_use_best_found(ac, e3b);
-+ }
-+ } else if (max >= ac->ac_g_ex.fe_len) {
-+ J_ASSERT(ex.fe_len > 0);
-+ J_ASSERT(ex.fe_group == ac->ac_g_ex.fe_group);
-+ J_ASSERT(ex.fe_start == ac->ac_g_ex.fe_start);
-+ ac->ac_found++;
-+ ac->ac_b_ex = ex;
-+ ext3_mb_use_best_found(ac, e3b);
-+ } else if (max > 0 && (ac->ac_flags & EXT3_MB_HINT_MERGE)) {
-+		/* Sometimes the caller may want to merge even a small
-+		 * number of blocks into an existing extent */
-+ J_ASSERT(ex.fe_len > 0);
-+ J_ASSERT(ex.fe_group == ac->ac_g_ex.fe_group);
-+ J_ASSERT(ex.fe_start == ac->ac_g_ex.fe_start);
-+ ac->ac_found++;
-+ ac->ac_b_ex = ex;
-+ ext3_mb_use_best_found(ac, e3b);
-+ }
-+ ext3_unlock_group(ac->ac_sb, group);
-+
-+ ext3_mb_release_desc(e3b);
-+
-+ return 0;
-+}
-+
-+/*
-+ * The routine scans the buddy structures (not the bitmap!) from the given
-+ * order up to the maximum order and tries to find a big enough chunk to
-+ * satisfy the request
-+ */
-+static void ext3_mb_simple_scan_group(struct ext3_allocation_context *ac,
-+ struct ext3_buddy *e3b)
-+{
-+ struct super_block *sb = ac->ac_sb;
-+ struct ext3_group_info *grp = e3b->bd_info;
-+ void *buddy;
-+ int i, k, max;
-+
-+ J_ASSERT(ac->ac_2order > 0);
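-+	/* walk the per-order free counters upwards from the requested
-+	 * order; the first non-empty order has a free buddy at least as
-+	 * large as the request, and ext3_mb_use_best_found() trims it
-+	 * down to the goal length */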
-+ for (i = ac->ac_2order; i <= sb->s_blocksize_bits + 1; i++) {
-+ if (grp->bb_counters[i] == 0)
-+ continue;
-+
-+ buddy = mb_find_buddy(e3b, i, &max);
-+ if (buddy == NULL) {
-+ printk(KERN_ALERT "looking for wrong order?\n");
-+ break;
-+ }
-+
-+ k = mb_find_next_zero_bit(buddy, max, 0);
-+ J_ASSERT(k < max);
-+
-+ ac->ac_found++;
-+
-+ ac->ac_b_ex.fe_len = 1 << i;
-+ ac->ac_b_ex.fe_start = k << i;
-+ ac->ac_b_ex.fe_group = e3b->bd_group;
-+
-+ ext3_mb_use_best_found(ac, e3b);
-+ J_ASSERT(ac->ac_b_ex.fe_len == ac->ac_g_ex.fe_len);
-+
-+ if (unlikely(ext3_mb_stats))
-+ atomic_inc(&EXT3_SB(sb)->s_bal_2orders);
-+
-+ break;
-+ }
-+}
-+
-+/*
-+ * The routine scans the group and measures all found extents.
-+ * To bound the scan it uses the free-block count from the in-core
-+ * group info as an upper limit.
-+ */
-+static void ext3_mb_complex_scan_group(struct ext3_allocation_context *ac,
-+ struct ext3_buddy *e3b)
-+{
-+ struct super_block *sb = ac->ac_sb;
-+ void *bitmap = EXT3_MB_BITMAP(e3b);
-+ struct ext3_free_extent ex;
-+ int i, free;
-+
-+ free = e3b->bd_info->bb_free;
-+ J_ASSERT(free > 0);
-+
-+ i = e3b->bd_info->bb_first_free;
-+
-+ while (free && ac->ac_status == AC_STATUS_CONTINUE) {
-+ i = mb_find_next_zero_bit(bitmap, sb->s_blocksize * 8, i);
-+ if (i >= sb->s_blocksize * 8) {
-+ J_ASSERT(free == 0);
-+ break;
-+ }
-+
-+ mb_find_extent(e3b, 0, i, ac->ac_g_ex.fe_len, &ex);
-+ J_ASSERT(ex.fe_len > 0);
-+ J_ASSERT(free >= ex.fe_len);
-+
-+ ext3_mb_measure_extent(ac, &ex, e3b);
-+
-+ i += ex.fe_len;
-+ free -= ex.fe_len;
-+ }
-+}
-+
-+/*
-+ * This is a special case for storage like RAID5: for stripe-sized
-+ * requests we try to find stripe-aligned chunks
-+ */
-+static void ext3_mb_scan_aligned(struct ext3_allocation_context *ac,
-+ struct ext3_buddy *e3b)
-+{
-+ struct super_block *sb = ac->ac_sb;
-+ struct ext3_sb_info *sbi = EXT3_SB(sb);
-+ void *bitmap = EXT3_MB_BITMAP(e3b);
-+ struct ext3_free_extent ex;
-+ unsigned long i, max;
-+
-+ J_ASSERT(sbi->s_stripe != 0);
-+
-+ /* find first stripe-aligned block */
-+ i = e3b->bd_group * EXT3_BLOCKS_PER_GROUP(sb)
-+ + le32_to_cpu(sbi->s_es->s_first_data_block);
-+ i = ((i + sbi->s_stripe - 1) / sbi->s_stripe) * sbi->s_stripe;
-+ i = (i - le32_to_cpu(sbi->s_es->s_first_data_block))
-+ % EXT3_BLOCKS_PER_GROUP(sb);
-+
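-+	/* 'i' is now the group-relative offset of the first block in this
-+	 * group whose global block number is a multiple of the stripe
-+	 * size; walk the group stripe by stripe */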
-+ while (i < sb->s_blocksize * 8) {
-+ if (!mb_test_bit(i, bitmap)) {
-+ max = mb_find_extent(e3b, 0, i, sbi->s_stripe, &ex);
-+ if (max >= sbi->s_stripe) {
-+ ac->ac_found++;
-+ ac->ac_b_ex = ex;
-+ ext3_mb_use_best_found(ac, e3b);
-+ break;
-+ }
-+ }
-+ i += sbi->s_stripe;
-+ }
-+}
-+
-+static int ext3_mb_good_group(struct ext3_allocation_context *ac,
-+ int group, int cr)
-+{
-+ struct ext3_group_info *grp = EXT3_GROUP_INFO(ac->ac_sb, group);
-+ unsigned free, fragments, i, bits;
-+
-+ J_ASSERT(cr >= 0 && cr < 4);
-+ J_ASSERT(!EXT3_MB_GRP_NEED_INIT(grp));
-+
-+ free = grp->bb_free;
-+ fragments = grp->bb_fragments;
-+ if (free == 0)
-+ return 0;
-+ if (fragments == 0)
-+ return 0;
-+
-+ switch (cr) {
-+ case 0:
-+ J_ASSERT(ac->ac_2order != 0);
-+ bits = ac->ac_sb->s_blocksize_bits + 1;
-+ for (i = ac->ac_2order; i <= bits; i++)
-+ if (grp->bb_counters[i] > 0)
-+ return 1;
-+ break;
-+ case 1:
-+ if ((free / fragments) >= ac->ac_g_ex.fe_len)
-+ return 1;
-+ break;
-+ case 2:
-+ if (free >= ac->ac_g_ex.fe_len)
-+ return 1;
-+ break;
-+ case 3:
-+ return 1;
-+ default:
-+ BUG();
-+ }
-+
-+ return 0;
-+}
-+
-+int ext3_mb_new_blocks(handle_t *handle, struct inode *inode,
-+ unsigned long goal, int *len, int flags, int *errp)
-+{
-+ struct buffer_head *bitmap_bh = NULL;
-+ struct ext3_allocation_context ac;
-+ int i, group, block, cr, err = 0;
-+ struct ext3_group_desc *gdp;
-+ struct ext3_super_block *es;
-+ struct buffer_head *gdp_bh;
-+ struct ext3_sb_info *sbi;
-+ struct super_block *sb;
-+ struct ext3_buddy e3b;
-+
-+ J_ASSERT(len != NULL);
-+ J_ASSERT(*len > 0);
-+
-+ sb = inode->i_sb;
-+ if (!sb) {
-+		printk("ext3_mb_new_blocks: nonexistent device");
-+ return 0;
-+ }
-+
-+ if (!test_opt(sb, MBALLOC)) {
-+ static int ext3_mballoc_warning = 0;
-+ if (ext3_mballoc_warning == 0) {
-+ printk(KERN_ERR "EXT3-fs: multiblock request with "
-+ "mballoc disabled!\n");
-+ ext3_mballoc_warning++;
-+ }
-+ *len = 1;
-+ err = ext3_new_block_old(handle, inode, goal, errp);
-+ return err;
-+ }
-+
-+ ext3_mb_poll_new_transaction(sb, handle);
-+
-+ sbi = EXT3_SB(sb);
-+ es = EXT3_SB(sb)->s_es;
-+
-+ /*
-+ * We can't allocate > group size
-+ */
-+ if (*len >= EXT3_BLOCKS_PER_GROUP(sb) - 10)
-+ *len = EXT3_BLOCKS_PER_GROUP(sb) - 10;
-+
-+ if (!(flags & EXT3_MB_HINT_RESERVED)) {
-+ /* someone asks for non-reserved blocks */
-+ BUG_ON(*len > 1);
-+ err = ext3_mb_reserve_blocks(sb, 1);
-+ if (err) {
-+ *errp = err;
-+ return 0;
-+ }
-+ }
-+
-+ ac.ac_buddy_page = NULL;
-+ ac.ac_bitmap_page = NULL;
-+
-+ /*
-+	 * Check the quota for allocation of these blocks.
-+ */
-+ while (*len && DQUOT_ALLOC_BLOCK(inode, *len))
-+ *len -= 1;
-+ if (*len == 0) {
-+ *errp = -EDQUOT;
-+ block = 0;
-+ goto out;
-+ }
-+
-+ /* start searching from the goal */
-+ if (goal < le32_to_cpu(es->s_first_data_block) ||
-+ goal >= le32_to_cpu(es->s_blocks_count))
-+ goal = le32_to_cpu(es->s_first_data_block);
-+ group = (goal - le32_to_cpu(es->s_first_data_block)) /
-+ EXT3_BLOCKS_PER_GROUP(sb);
-+ block = ((goal - le32_to_cpu(es->s_first_data_block)) %
-+ EXT3_BLOCKS_PER_GROUP(sb));
-+
-+ /* set up allocation goals */
-+ ac.ac_b_ex.fe_group = 0;
-+ ac.ac_b_ex.fe_start = 0;
-+ ac.ac_b_ex.fe_len = 0;
-+ ac.ac_status = AC_STATUS_CONTINUE;
-+ ac.ac_groups_scanned = 0;
-+ ac.ac_ex_scanned = 0;
-+ ac.ac_found = 0;
-+ ac.ac_sb = inode->i_sb;
-+ ac.ac_g_ex.fe_group = group;
-+ ac.ac_g_ex.fe_start = block;
-+ ac.ac_g_ex.fe_len = *len;
-+ ac.ac_flags = flags;
-+ ac.ac_2order = 0;
-+ ac.ac_criteria = 0;
-+
-+ if (*len == 1 && sbi->s_stripe) {
-+		/* looks like metadata; use a dirty hack for RAID5: put all
-+		 * metadata in the first groups in the hope of hitting cached
-+		 * sectors and thus avoiding read-modify-write cycles */
-+ ac.ac_g_ex.fe_group = group = 0;
-+ }
-+
-+	/* if the request length is an exact power of two and large enough
-+	 * (see ext3_mb_order2_reqs), remember its order so the fast buddy
-+	 * scan (criterion 0) can be used */
-+ i = ffs(*len);
-+ if (i >= ext3_mb_order2_reqs) {
-+ i--;
-+ if ((*len & (~(1 << i))) == 0)
-+ ac.ac_2order = i;
-+ }
-+
-+ /* first, try the goal */
-+ err = ext3_mb_find_by_goal(&ac, &e3b);
-+ if (err)
-+ goto out_err;
-+ if (ac.ac_status == AC_STATUS_FOUND)
-+ goto found;
-+
-+	/* Let's just scan groups to find more or less suitable blocks */
-+ cr = ac.ac_2order ? 0 : 1;
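-+	/* the criteria get progressively weaker: cr=0 needs a free buddy
-+	 * of the right order, cr=1 a large enough average fragment,
-+	 * cr=2 merely enough free blocks, cr=3 accepts any group with
-+	 * free space (see ext3_mb_good_group()) */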
-+repeat:
-+ for (; cr < 4 && ac.ac_status == AC_STATUS_CONTINUE; cr++) {
-+ ac.ac_criteria = cr;
-+ for (i = 0; i < EXT3_SB(sb)->s_groups_count; group++, i++) {
-+ if (group == EXT3_SB(sb)->s_groups_count)
-+ group = 0;
-+
-+ if (EXT3_MB_GRP_NEED_INIT(EXT3_GROUP_INFO(sb, group))) {
-+ /* we need full data about the group
-+ * to make a good selection */
-+ err = ext3_mb_load_buddy(ac.ac_sb, group, &e3b);
-+ if (err)
-+ goto out_err;
-+ ext3_mb_release_desc(&e3b);
-+ }
-+
-+			/* check whether the group is good for our criteria */
-+ if (!ext3_mb_good_group(&ac, group, cr))
-+ continue;
-+
-+ err = ext3_mb_load_buddy(ac.ac_sb, group, &e3b);
-+ if (err)
-+ goto out_err;
-+
-+ ext3_lock_group(sb, group);
-+ if (!ext3_mb_good_group(&ac, group, cr)) {
-+				/* someone else allocated from this group meanwhile */
-+ ext3_unlock_group(sb, group);
-+ ext3_mb_release_desc(&e3b);
-+ continue;
-+ }
-+
-+ ac.ac_groups_scanned++;
-+ if (cr == 0)
-+ ext3_mb_simple_scan_group(&ac, &e3b);
-+ else if (cr == 1 && *len == sbi->s_stripe)
-+ ext3_mb_scan_aligned(&ac, &e3b);
-+ else
-+ ext3_mb_complex_scan_group(&ac, &e3b);
-+
-+ ext3_unlock_group(sb, group);
-+
-+ ext3_mb_release_desc(&e3b);
-+
-+ if (ac.ac_status != AC_STATUS_CONTINUE)
-+ break;
-+ }
-+ }
-+
-+ if (ac.ac_b_ex.fe_len > 0 && ac.ac_status != AC_STATUS_FOUND &&
-+ !(ac.ac_flags & EXT3_MB_HINT_FIRST)) {
-+ /*
-+ * We've been searching too long. Let's try to allocate
-+ * the best chunk we've found so far
-+ */
-+
-+ /*if (ac.ac_found > ext3_mb_max_to_scan)
-+ printk(KERN_DEBUG "EXT3-fs: too long searching at "
-+ "%u (%d/%d)\n", cr, ac.ac_b_ex.fe_len,
-+ ac.ac_g_ex.fe_len);*/
-+ ext3_mb_try_best_found(&ac, &e3b);
-+ if (ac.ac_status != AC_STATUS_FOUND) {
-+			/*
-+			 * Someone luckier has already allocated it.
-+			 * The only thing we can do is just take the first
-+			 * block(s) we find
-+			printk(KERN_DEBUG "EXT3-fs: someone won our chunk\n");
-+			 */
-+ ac.ac_b_ex.fe_group = 0;
-+ ac.ac_b_ex.fe_start = 0;
-+ ac.ac_b_ex.fe_len = 0;
-+ ac.ac_status = AC_STATUS_CONTINUE;
-+ ac.ac_flags |= EXT3_MB_HINT_FIRST;
-+ cr = 3;
-+ goto repeat;
-+ }
-+ }
-+
-+ if (ac.ac_status != AC_STATUS_FOUND) {
-+ /*
-+		 * We definitely weren't lucky
-+ */
-+ DQUOT_FREE_BLOCK(inode, *len);
-+ *errp = -ENOSPC;
-+ block = 0;
-+#if 1
-+ printk(KERN_ERR "EXT3-fs: can't allocate: status %d flags %d\n",
-+ ac.ac_status, ac.ac_flags);
-+ printk(KERN_ERR "EXT3-fs: goal %d, best found %d/%d/%d cr %d\n",
-+ ac.ac_g_ex.fe_len, ac.ac_b_ex.fe_group,
-+ ac.ac_b_ex.fe_start, ac.ac_b_ex.fe_len, cr);
-+ printk(KERN_ERR "EXT3-fs: %lu block reserved, %d found\n",
-+ sbi->s_blocks_reserved, ac.ac_found);
-+ printk("EXT3-fs: groups: ");
-+ for (i = 0; i < EXT3_SB(sb)->s_groups_count; i++)
-+ printk("%d: %d ", i, EXT3_GROUP_INFO(sb, i)->bb_free);
-+ printk("\n");
-+#endif
-+ goto out;
-+ }
-+
-+found:
-+ J_ASSERT(ac.ac_b_ex.fe_len > 0);
-+
-+ /* good news - free block(s) have been found. now it's time
-+ * to mark block(s) in good old journaled bitmap */
-+ block = ac.ac_b_ex.fe_group * EXT3_BLOCKS_PER_GROUP(sb)
-+ + ac.ac_b_ex.fe_start
-+ + le32_to_cpu(es->s_first_data_block);
-+
-+	/* we made a decision; now mark the chosen blocks in the good old
-+	 * on-disk bitmap, which is journaled */
-+
-+	ext3_debug("using block group %u(%d)\n",
-+			ac.ac_b_ex.fe_group,
-+			EXT3_GROUP_INFO(sb, ac.ac_b_ex.fe_group)->bb_free);
-+
-+ bitmap_bh = read_block_bitmap(sb, ac.ac_b_ex.fe_group);
-+ if (!bitmap_bh) {
-+ *errp = -EIO;
-+ goto out_err;
-+ }
-+
-+ err = ext3_journal_get_write_access(handle, bitmap_bh);
-+ if (err) {
-+ *errp = err;
-+ goto out_err;
-+ }
-+
-+ gdp = ext3_get_group_desc(sb, ac.ac_b_ex.fe_group, &gdp_bh);
-+ if (!gdp) {
-+ *errp = -EIO;
-+ goto out_err;
-+ }
-+
-+ err = ext3_journal_get_write_access(handle, gdp_bh);
-+ if (err)
-+ goto out_err;
-+
-+ block = ac.ac_b_ex.fe_group * EXT3_BLOCKS_PER_GROUP(sb)
-+ + ac.ac_b_ex.fe_start
-+ + le32_to_cpu(es->s_first_data_block);
-+
-+ if (block == le32_to_cpu(gdp->bg_block_bitmap) ||
-+ block == le32_to_cpu(gdp->bg_inode_bitmap) ||
-+ in_range(block, le32_to_cpu(gdp->bg_inode_table),
-+ EXT3_SB(sb)->s_itb_per_group))
-+ ext3_error(sb, "ext3_new_block",
-+ "Allocating block in system zone - "
-+ "block = %u", block);
-+#ifdef AGGRESSIVE_CHECK
-+ for (i = 0; i < ac.ac_b_ex.fe_len; i++)
-+ J_ASSERT(!mb_test_bit(ac.ac_b_ex.fe_start + i, bitmap_bh->b_data));
-+#endif
-+ mb_set_bits(bitmap_bh->b_data, ac.ac_b_ex.fe_start, ac.ac_b_ex.fe_len);
-+
-+ spin_lock(sb_bgl_lock(sbi, ac.ac_b_ex.fe_group));
-+ gdp->bg_free_blocks_count =
-+ cpu_to_le16(le16_to_cpu(gdp->bg_free_blocks_count)
-+ - ac.ac_b_ex.fe_len);
-+ spin_unlock(sb_bgl_lock(sbi, ac.ac_b_ex.fe_group));
-+ percpu_counter_mod(&sbi->s_freeblocks_counter, - ac.ac_b_ex.fe_len);
-+
-+ err = ext3_journal_dirty_metadata(handle, bitmap_bh);
-+ if (err)
-+ goto out_err;
-+ err = ext3_journal_dirty_metadata(handle, gdp_bh);
-+ if (err)
-+ goto out_err;
-+
-+ sb->s_dirt = 1;
-+ *errp = 0;
-+ brelse(bitmap_bh);
-+
-+	/* release the quota charged for blocks we did not allocate */
-+ J_ASSERT(*len >= ac.ac_b_ex.fe_len);
-+ DQUOT_FREE_BLOCK(inode, *len - ac.ac_b_ex.fe_len);
-+
-+ *len = ac.ac_b_ex.fe_len;
-+ J_ASSERT(*len > 0);
-+ J_ASSERT(block != 0);
-+ goto out;
-+
-+out_err:
-+ /* if we've already allocated something, roll it back */
-+ if (ac.ac_status == AC_STATUS_FOUND) {
-+ /* FIXME: free blocks here */
-+ }
-+
-+ DQUOT_FREE_BLOCK(inode, *len);
-+ brelse(bitmap_bh);
-+ *errp = err;
-+ block = 0;
-+out:
-+ if (ac.ac_buddy_page)
-+ page_cache_release(ac.ac_buddy_page);
-+ if (ac.ac_bitmap_page)
-+ page_cache_release(ac.ac_bitmap_page);
-+
-+ if (!(flags & EXT3_MB_HINT_RESERVED)) {
-+		/* the block wasn't reserved before this allocation; we
-+		 * reserved it at the start. whether we allocated anything
-+		 * or failed, it's time to release the reservation.
-+		 * NOTE: multiblock requests are expected only from the
-+		 * delayed allocation path, so a single block is always
-+		 * reserved here */
-+ ext3_mb_release_blocks(sb, 1);
-+ }
-+
-+ if (unlikely(ext3_mb_stats) && ac.ac_g_ex.fe_len > 1) {
-+ atomic_inc(&sbi->s_bal_reqs);
-+ atomic_add(*len, &sbi->s_bal_allocated);
-+ if (*len >= ac.ac_g_ex.fe_len)
-+ atomic_inc(&sbi->s_bal_success);
-+ atomic_add(ac.ac_found, &sbi->s_bal_ex_scanned);
-+ if (ac.ac_g_ex.fe_start == ac.ac_b_ex.fe_start &&
-+ ac.ac_g_ex.fe_group == ac.ac_b_ex.fe_group)
-+ atomic_inc(&sbi->s_bal_goals);
-+ if (ac.ac_found > ext3_mb_max_to_scan)
-+ atomic_inc(&sbi->s_bal_breaks);
-+ }
-+
-+ ext3_mb_store_history(sb, inode->i_ino, &ac);
-+
-+ return block;
-+}
-+EXPORT_SYMBOL(ext3_mb_new_blocks);
-+
-+#ifdef EXT3_MB_HISTORY
-+struct ext3_mb_proc_session {
-+ struct ext3_mb_history *history;
-+ struct super_block *sb;
-+ int start;
-+ int max;
-+};
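-+
-+/* The allocation history is kept in a ring buffer; a /proc reader copies
-+ * a snapshot of it and iterates from the current position, skipping
-+ * unused slots. */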
-+
-+static void *ext3_mb_history_skip_empty(struct ext3_mb_proc_session *s,
-+ struct ext3_mb_history *hs,
-+ int first)
-+{
-+ if (hs == s->history + s->max)
-+ hs = s->history;
-+ if (!first && hs == s->history + s->start)
-+ return NULL;
-+ while (hs->goal.fe_len == 0) {
-+ hs++;
-+ if (hs == s->history + s->max)
-+ hs = s->history;
-+ if (hs == s->history + s->start)
-+ return NULL;
-+ }
-+ return hs;
-+}
-+
-+static void *ext3_mb_seq_history_start(struct seq_file *seq, loff_t *pos)
-+{
-+ struct ext3_mb_proc_session *s = seq->private;
-+ struct ext3_mb_history *hs;
-+ int l = *pos;
-+
-+ if (l == 0)
-+ return SEQ_START_TOKEN;
-+ hs = ext3_mb_history_skip_empty(s, s->history + s->start, 1);
-+ if (!hs)
-+ return NULL;
-+ while (--l && (hs = ext3_mb_history_skip_empty(s, ++hs, 0)) != NULL);
-+ return hs;
-+}
-+
-+static void *ext3_mb_seq_history_next(struct seq_file *seq, void *v, loff_t *pos)
-+{
-+ struct ext3_mb_proc_session *s = seq->private;
-+ struct ext3_mb_history *hs = v;
-+
-+ ++*pos;
-+ if (v == SEQ_START_TOKEN)
-+ return ext3_mb_history_skip_empty(s, s->history + s->start, 1);
-+ else
-+ return ext3_mb_history_skip_empty(s, ++hs, 0);
-+}
-+
-+static int ext3_mb_seq_history_show(struct seq_file *seq, void *v)
-+{
-+ struct ext3_mb_history *hs = v;
-+ char buf[20], buf2[20];
-+
-+ if (v == SEQ_START_TOKEN) {
-+ seq_printf(seq, "%-5s %-8s %-17s %-17s %-5s %-5s %-2s %-5s %-5s %-6s\n",
-+ "pid", "inode", "goal", "result", "found", "grps", "cr",
-+ "merge", "tail", "broken");
-+ return 0;
-+ }
-+
-+ sprintf(buf, "%u/%u/%u", hs->goal.fe_group,
-+ hs->goal.fe_start, hs->goal.fe_len);
-+ sprintf(buf2, "%u/%u/%u", hs->result.fe_group,
-+ hs->result.fe_start, hs->result.fe_len);
-+ seq_printf(seq, "%-5u %-8u %-17s %-17s %-5u %-5u %-2u %-5s %-5u %-6u\n",
-+ hs->pid, hs->ino, buf, buf2, hs->found, hs->groups,
-+ hs->cr, hs->merged ? "M" : "", hs->tail,
-+ hs->buddy ? 1 << hs->buddy : 0);
-+ return 0;
-+}
-+
-+static void ext3_mb_seq_history_stop(struct seq_file *seq, void *v)
-+{
-+}
-+
-+static struct seq_operations ext3_mb_seq_history_ops = {
-+ .start = ext3_mb_seq_history_start,
-+ .next = ext3_mb_seq_history_next,
-+ .stop = ext3_mb_seq_history_stop,
-+ .show = ext3_mb_seq_history_show,
-+};
-+
-+static int ext3_mb_seq_history_open(struct inode *inode, struct file *file)
-+{
-+ struct super_block *sb = PDE(inode)->data;
-+ struct ext3_sb_info *sbi = EXT3_SB(sb);
-+ struct ext3_mb_proc_session *s;
-+ int rc, size;
-+
-+ s = kmalloc(sizeof(*s), GFP_KERNEL);
-+ if (s == NULL)
-+ return -EIO;
-+ size = sizeof(struct ext3_mb_history) * sbi->s_mb_history_max;
-+ s->history = kmalloc(size, GFP_KERNEL);
-+	if (s->history == NULL) {
-+ kfree(s);
-+ return -EIO;
-+ }
-+
-+ spin_lock(&sbi->s_mb_history_lock);
-+ memcpy(s->history, sbi->s_mb_history, size);
-+ s->max = sbi->s_mb_history_max;
-+ s->start = sbi->s_mb_history_cur % s->max;
-+ spin_unlock(&sbi->s_mb_history_lock);
-+
-+ rc = seq_open(file, &ext3_mb_seq_history_ops);
-+ if (rc == 0) {
-+ struct seq_file *m = (struct seq_file *)file->private_data;
-+ m->private = s;
-+ } else {
-+ kfree(s->history);
-+ kfree(s);
-+ }
-+ return rc;
-+
-+}
-+
-+static int ext3_mb_seq_history_release(struct inode *inode, struct file *file)
-+{
-+ struct seq_file *seq = (struct seq_file *)file->private_data;
-+ struct ext3_mb_proc_session *s = seq->private;
-+ kfree(s->history);
-+ kfree(s);
-+ return seq_release(inode, file);
-+}
-+
-+static struct file_operations ext3_mb_seq_history_fops = {
-+ .owner = THIS_MODULE,
-+ .open = ext3_mb_seq_history_open,
-+ .read = seq_read,
-+ .llseek = seq_lseek,
-+ .release = ext3_mb_seq_history_release,
-+};
-+
-+static void *ext3_mb_seq_groups_start(struct seq_file *seq, loff_t *pos)
-+{
-+ struct super_block *sb = seq->private;
-+ struct ext3_sb_info *sbi = EXT3_SB(sb);
-+ long group;
-+
-+ if (*pos < 0 || *pos >= sbi->s_groups_count)
-+ return NULL;
-+
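-+	/* group numbers are stored +1 so that group 0 is not returned as
-+	 * NULL (which would end the sequence); the show routine subtracts
-+	 * one back */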
-+ group = *pos + 1;
-+ return (void *) group;
-+}
-+
-+static void *ext3_mb_seq_groups_next(struct seq_file *seq, void *v, loff_t *pos)
-+{
-+ struct super_block *sb = seq->private;
-+ struct ext3_sb_info *sbi = EXT3_SB(sb);
-+ long group;
-+
-+ ++*pos;
-+ if (*pos < 0 || *pos >= sbi->s_groups_count)
-+ return NULL;
-+ group = *pos + 1;
-+	return (void *) group;
-+}
-+
-+static int ext3_mb_seq_groups_show(struct seq_file *seq, void *v)
-+{
-+ struct super_block *sb = seq->private;
-+ long group = (long) v, i;
-+ struct sg {
-+ struct ext3_group_info info;
-+ unsigned short counters[16];
-+ } sg;
-+
-+ group--;
-+ if (group == 0)
-+ seq_printf(seq, "#%-5s: %-5s %-5s %-5s [ %-5s %-5s %-5s %-5s %-5s %-5s %-5s %-5s %-5s %-5s %-5s %-5s %-5s %-5s ]\n",
-+ "group", "free", "frags", "first", "2^0", "2^1", "2^2",
-+ "2^3", "2^4", "2^5", "2^6", "2^7", "2^8", "2^9", "2^10",
-+ "2^11", "2^12", "2^13");
-+
-+ i = (sb->s_blocksize_bits + 2) * sizeof(sg.info.bb_counters[0]) +
-+ sizeof(struct ext3_group_info);
-+ ext3_lock_group(sb, group);
-+ memcpy(&sg, EXT3_GROUP_INFO(sb, group), i);
-+ ext3_unlock_group(sb, group);
-+
-+ if (EXT3_MB_GRP_NEED_INIT(&sg.info))
-+ return 0;
-+
-+ seq_printf(seq, "#%-5lu: %-5u %-5u %-5u [", group, sg.info.bb_free,
-+ sg.info.bb_fragments, sg.info.bb_first_free);
-+ for (i = 0; i <= 13; i++)
-+ seq_printf(seq, " %-5u", i <= sb->s_blocksize_bits + 1 ?
-+ sg.info.bb_counters[i] : 0);
-+ seq_printf(seq, " ]\n");
-+
-+ return 0;
-+}
-+
-+static void ext3_mb_seq_groups_stop(struct seq_file *seq, void *v)
-+{
-+}
-+
-+static struct seq_operations ext3_mb_seq_groups_ops = {
-+ .start = ext3_mb_seq_groups_start,
-+ .next = ext3_mb_seq_groups_next,
-+ .stop = ext3_mb_seq_groups_stop,
-+ .show = ext3_mb_seq_groups_show,
-+};
-+
-+static int ext3_mb_seq_groups_open(struct inode *inode, struct file *file)
-+{
-+ struct super_block *sb = PDE(inode)->data;
-+ int rc;
-+
-+ rc = seq_open(file, &ext3_mb_seq_groups_ops);
-+ if (rc == 0) {
-+ struct seq_file *m = (struct seq_file *)file->private_data;
-+ m->private = sb;
-+ }
-+ return rc;
-+
-+}
-+
-+static struct file_operations ext3_mb_seq_groups_fops = {
-+ .owner = THIS_MODULE,
-+ .open = ext3_mb_seq_groups_open,
-+ .read = seq_read,
-+ .llseek = seq_lseek,
-+ .release = seq_release,
-+};
-+
-+static void ext3_mb_history_release(struct super_block *sb)
-+{
-+ struct ext3_sb_info *sbi = EXT3_SB(sb);
-+ char name[64];
-+
-+ snprintf(name, sizeof(name) - 1, "%s", bdevname(sb->s_bdev, name));
-+ remove_proc_entry("mb_groups", sbi->s_mb_proc);
-+ remove_proc_entry("mb_history", sbi->s_mb_proc);
-+ remove_proc_entry(name, proc_root_ext3);
-+
-+ if (sbi->s_mb_history)
-+ kfree(sbi->s_mb_history);
-+}
-+
-+static void ext3_mb_history_init(struct super_block *sb)
-+{
-+ struct ext3_sb_info *sbi = EXT3_SB(sb);
-+ char name[64];
-+ int i;
-+
-+ snprintf(name, sizeof(name) - 1, "%s", bdevname(sb->s_bdev, name));
-+ sbi->s_mb_proc = proc_mkdir(name, proc_root_ext3);
-+ if (sbi->s_mb_proc != NULL) {
-+ struct proc_dir_entry *p;
-+ p = create_proc_entry("mb_history", S_IRUGO, sbi->s_mb_proc);
-+ if (p) {
-+ p->proc_fops = &ext3_mb_seq_history_fops;
-+ p->data = sb;
-+ }
-+ p = create_proc_entry("mb_groups", S_IRUGO, sbi->s_mb_proc);
-+ if (p) {
-+ p->proc_fops = &ext3_mb_seq_groups_fops;
-+ p->data = sb;
-+ }
-+ }
-+
-+ sbi->s_mb_history_max = 1000;
-+ sbi->s_mb_history_cur = 0;
-+ spin_lock_init(&sbi->s_mb_history_lock);
-+ i = sbi->s_mb_history_max * sizeof(struct ext3_mb_history);
-+	sbi->s_mb_history = kmalloc(i, GFP_KERNEL);
-+	if (sbi->s_mb_history != NULL)
-+		memset(sbi->s_mb_history, 0, i);
-+	/* if we can't allocate the history, we simply won't use it */
-+}
-+
-+static void
-+ext3_mb_store_history(struct super_block *sb, unsigned ino,
-+ struct ext3_allocation_context *ac)
-+{
-+ struct ext3_sb_info *sbi = EXT3_SB(sb);
-+ struct ext3_mb_history h;
-+
-+	if (unlikely(sbi->s_mb_history == NULL))
-+ return;
-+
-+ h.pid = current->pid;
-+ h.ino = ino;
-+ h.goal = ac->ac_g_ex;
-+ h.result = ac->ac_b_ex;
-+ h.found = ac->ac_found;
-+ h.cr = ac->ac_criteria;
-+ h.groups = ac->ac_groups_scanned;
-+ h.tail = ac->ac_tail;
-+ h.buddy = ac->ac_buddy;
-+ h.merged = 0;
-+ if (ac->ac_g_ex.fe_start == ac->ac_b_ex.fe_start &&
-+ ac->ac_g_ex.fe_group == ac->ac_b_ex.fe_group)
-+ h.merged = 1;
-+
-+ spin_lock(&sbi->s_mb_history_lock);
-+ memcpy(sbi->s_mb_history + sbi->s_mb_history_cur, &h, sizeof(h));
-+ if (++sbi->s_mb_history_cur >= sbi->s_mb_history_max)
-+ sbi->s_mb_history_cur = 0;
-+ spin_unlock(&sbi->s_mb_history_lock);
-+}
-+
-+#else
-+#define ext3_mb_history_release(sb)
-+#define ext3_mb_history_init(sb)
-+#endif
-+
-+int ext3_mb_init_backend(struct super_block *sb)
-+{
-+ struct ext3_sb_info *sbi = EXT3_SB(sb);
-+ int i, j, len, metalen;
-+ int num_meta_group_infos =
-+ (sbi->s_groups_count + EXT3_DESC_PER_BLOCK(sb) - 1) >>
-+ EXT3_DESC_PER_BLOCK_BITS(sb);
-+ struct ext3_group_info **meta_group_info;
-+
-+ /* An 8TB filesystem with 64-bit pointers requires a 4096 byte
-+ * kmalloc. A 128kb malloc should suffice for a 256TB filesystem.
-+ * So a two level scheme suffices for now. */
-+ sbi->s_group_info = kmalloc(sizeof(*sbi->s_group_info) *
-+ num_meta_group_infos, GFP_KERNEL);
-+ if (sbi->s_group_info == NULL) {
-+ printk(KERN_ERR "EXT3-fs: can't allocate buddy meta group\n");
-+ return -ENOMEM;
-+ }
-+ sbi->s_buddy_cache = new_inode(sb);
-+ if (sbi->s_buddy_cache == NULL) {
-+ printk(KERN_ERR "EXT3-fs: can't get new inode\n");
-+ goto err_freesgi;
-+ }
-+
-+ metalen = sizeof(*meta_group_info) << EXT3_DESC_PER_BLOCK_BITS(sb);
-+ for (i = 0; i < num_meta_group_infos; i++) {
-+ if ((i + 1) == num_meta_group_infos)
-+ metalen = sizeof(*meta_group_info) *
-+ (sbi->s_groups_count -
-+ (i << EXT3_DESC_PER_BLOCK_BITS(sb)));
-+ meta_group_info = kmalloc(metalen, GFP_KERNEL);
-+ if (meta_group_info == NULL) {
-+ printk(KERN_ERR "EXT3-fs: can't allocate mem for a "
-+ "buddy group\n");
-+ goto err_freemeta;
-+ }
-+ sbi->s_group_info[i] = meta_group_info;
-+ }
-+
-+	/*
-+	 * calculate the needed size; if the size of bb_counters changes,
-+	 * don't forget to update ext3_mb_generate_buddy()
-+	 */
-+ len = sizeof(struct ext3_group_info);
-+ len += sizeof(unsigned short) * (sb->s_blocksize_bits + 2);
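-+	/* 'len' covers one ext3_group_info plus per-order free-extent
-+	 * counters for orders 0 through s_blocksize_bits + 1 */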
-+ for (i = 0; i < sbi->s_groups_count; i++) {
-+ struct ext3_group_desc * desc;
-+
-+ meta_group_info =
-+ sbi->s_group_info[i >> EXT3_DESC_PER_BLOCK_BITS(sb)];
-+ j = i & (EXT3_DESC_PER_BLOCK(sb) - 1);
-+
-+ meta_group_info[j] = kmalloc(len, GFP_KERNEL);
-+ if (meta_group_info[j] == NULL) {
-+ printk(KERN_ERR "EXT3-fs: can't allocate buddy mem\n");
-+ i--;
-+ goto err_freebuddy;
-+ }
-+ desc = ext3_get_group_desc(sb, i, NULL);
-+ if (desc == NULL) {
-+ printk(KERN_ERR"EXT3-fs: can't read descriptor %u\n",i);
-+ goto err_freebuddy;
-+ }
-+ memset(meta_group_info[j], 0, len);
-+ set_bit(EXT3_GROUP_INFO_NEED_INIT_BIT,
-+ &meta_group_info[j]->bb_state);
-+ meta_group_info[j]->bb_free =
-+ le16_to_cpu(desc->bg_free_blocks_count);
-+ }
-+
-+ return 0;
-+
-+err_freebuddy:
-+ while (i >= 0) {
-+ kfree(EXT3_GROUP_INFO(sb, i));
-+ i--;
-+ }
-+ i = num_meta_group_infos;
-+err_freemeta:
-+ while (--i >= 0)
-+ kfree(sbi->s_group_info[i]);
-+ iput(sbi->s_buddy_cache);
-+err_freesgi:
-+ kfree(sbi->s_group_info);
-+ return -ENOMEM;
-+}
-+
-+int ext3_mb_init(struct super_block *sb, int needs_recovery)
-+{
-+ struct ext3_sb_info *sbi = EXT3_SB(sb);
-+ struct inode *root = sb->s_root->d_inode;
-+ unsigned i, offset, max;
-+ struct dentry *dentry;
-+
-+ if (!test_opt(sb, MBALLOC))
-+ return 0;
-+
-+ i = (sb->s_blocksize_bits + 2) * sizeof(unsigned short);
-+
-+ sbi->s_mb_offsets = kmalloc(i, GFP_KERNEL);
-+ if (sbi->s_mb_offsets == NULL) {
-+ clear_opt(sbi->s_mount_opt, MBALLOC);
-+ return -ENOMEM;
-+ }
-+ sbi->s_mb_maxs = kmalloc(i, GFP_KERNEL);
-+ if (sbi->s_mb_maxs == NULL) {
-+ clear_opt(sbi->s_mount_opt, MBALLOC);
-+		kfree(sbi->s_mb_offsets);
-+ return -ENOMEM;
-+ }
-+
-+ /* order 0 is regular bitmap */
-+ sbi->s_mb_maxs[0] = sb->s_blocksize << 3;
-+ sbi->s_mb_offsets[0] = 0;
-+
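-+	/* for each higher order, record where that order's buddy bitmap
-+	 * starts within the buddy block and how many bits it holds */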
-+ i = 1;
-+ offset = 0;
-+ max = sb->s_blocksize << 2;
-+ do {
-+ sbi->s_mb_offsets[i] = offset;
-+ sbi->s_mb_maxs[i] = max;
-+ offset += 1 << (sb->s_blocksize_bits - i);
-+ max = max >> 1;
-+ i++;
-+ } while (i <= sb->s_blocksize_bits + 1);
-+
-+ /* init file for buddy data */
-+ if ((i = ext3_mb_init_backend(sb))) {
-+ clear_opt(sbi->s_mount_opt, MBALLOC);
-+ kfree(sbi->s_mb_offsets);
-+ kfree(sbi->s_mb_maxs);
-+ return i;
-+ }
-+
-+ spin_lock_init(&sbi->s_reserve_lock);
-+ spin_lock_init(&sbi->s_md_lock);
-+ INIT_LIST_HEAD(&sbi->s_active_transaction);
-+ INIT_LIST_HEAD(&sbi->s_closed_transaction);
-+ INIT_LIST_HEAD(&sbi->s_committed_transaction);
-+ spin_lock_init(&sbi->s_bal_lock);
-+
-+ /* remove old on-disk buddy file */
-+ mutex_lock(&root->i_mutex);
-+	dentry = lookup_one_len(".buddy", sb->s_root, strlen(".buddy"));
-+	if (!IS_ERR(dentry)) {
-+		if (dentry->d_inode != NULL) {
-+			i = vfs_unlink(root, dentry);
-+			if (i != 0)
-+				printk("EXT3-fs: can't remove .buddy file: %d\n", i);
-+		}
-+		dput(dentry);
-+	}
-+ mutex_unlock(&root->i_mutex);
-+
-+ ext3_mb_history_init(sb);
-+
-+ printk("EXT3-fs: mballoc enabled\n");
-+ return 0;
-+}
-+
-+int ext3_mb_release(struct super_block *sb)
-+{
-+ struct ext3_sb_info *sbi = EXT3_SB(sb);
-+ int i, num_meta_group_infos;
-+
-+ if (!test_opt(sb, MBALLOC))
-+ return 0;
-+
-+ /* release freed, non-committed blocks */
-+ spin_lock(&sbi->s_md_lock);
-+ list_splice_init(&sbi->s_closed_transaction,
-+ &sbi->s_committed_transaction);
-+ list_splice_init(&sbi->s_active_transaction,
-+ &sbi->s_committed_transaction);
-+ spin_unlock(&sbi->s_md_lock);
-+ ext3_mb_free_committed_blocks(sb);
-+
-+ if (sbi->s_group_info) {
-+ for (i = 0; i < sbi->s_groups_count; i++)
-+ kfree(EXT3_GROUP_INFO(sb, i));
-+ num_meta_group_infos = (sbi->s_groups_count +
-+ EXT3_DESC_PER_BLOCK(sb) - 1) >>
-+ EXT3_DESC_PER_BLOCK_BITS(sb);
-+ for (i = 0; i < num_meta_group_infos; i++)
-+ kfree(sbi->s_group_info[i]);
-+ kfree(sbi->s_group_info);
-+ }
-+ if (sbi->s_mb_offsets)
-+ kfree(sbi->s_mb_offsets);
-+ if (sbi->s_mb_maxs)
-+ kfree(sbi->s_mb_maxs);
-+ if (sbi->s_buddy_cache)
-+ iput(sbi->s_buddy_cache);
-+ if (sbi->s_blocks_reserved)
-+ printk("ext3-fs: %ld blocks being reserved at umount!\n",
-+ sbi->s_blocks_reserved);
-+ if (ext3_mb_stats) {
-+ printk("EXT3-fs: mballoc: %u blocks %u reqs (%u success)\n",
-+ atomic_read(&sbi->s_bal_allocated),
-+ atomic_read(&sbi->s_bal_reqs),
-+ atomic_read(&sbi->s_bal_success));
-+ printk("EXT3-fs: mballoc: %u extents scanned, %u goal hits, "
-+ "%u 2^N hits, %u breaks\n",
-+ atomic_read(&sbi->s_bal_ex_scanned),
-+ atomic_read(&sbi->s_bal_goals),
-+ atomic_read(&sbi->s_bal_2orders),
-+ atomic_read(&sbi->s_bal_breaks));
-+		printk("EXT3-fs: mballoc: %lu buddies generated and it took %Lu\n",
-+			sbi->s_mb_buddies_generated,
-+			sbi->s_mb_generation_time);
-+ }
-+
-+ ext3_mb_history_release(sb);
-+
-+ return 0;
-+}
-+
-+void ext3_mb_free_committed_blocks(struct super_block *sb)
-+{
-+ struct ext3_sb_info *sbi = EXT3_SB(sb);
-+ int err, i, count = 0, count2 = 0;
-+ struct ext3_free_metadata *md;
-+ struct ext3_buddy e3b;
-+
-+ if (list_empty(&sbi->s_committed_transaction))
-+ return;
-+
-+	/* there are committed blocks still waiting to be freed */
-+ do {
-+ /* get next array of blocks */
-+ md = NULL;
-+ spin_lock(&sbi->s_md_lock);
-+ if (!list_empty(&sbi->s_committed_transaction)) {
-+ md = list_entry(sbi->s_committed_transaction.next,
-+ struct ext3_free_metadata, list);
-+ list_del(&md->list);
-+ }
-+ spin_unlock(&sbi->s_md_lock);
-+
-+ if (md == NULL)
-+ break;
-+
-+ mb_debug("gonna free %u blocks in group %u (0x%p):",
-+ md->num, md->group, md);
-+
-+ err = ext3_mb_load_buddy(sb, md->group, &e3b);
-+ /* we expect to find existing buddy because it's pinned */
-+ BUG_ON(err != 0);
-+
-+ /* there are blocks to put in buddy to make them really free */
-+ count += md->num;
-+ count2++;
-+ ext3_lock_group(sb, md->group);
-+ for (i = 0; i < md->num; i++) {
-+ mb_debug(" %u", md->blocks[i]);
-+ mb_free_blocks(&e3b, md->blocks[i], 1);
-+ }
-+ mb_debug("\n");
-+ ext3_unlock_group(sb, md->group);
-+
-+ /* balance refcounts from ext3_mb_free_metadata() */
-+ page_cache_release(e3b.bd_buddy_page);
-+ page_cache_release(e3b.bd_bitmap_page);
-+
-+ kfree(md);
-+ ext3_mb_release_desc(&e3b);
-+
-+ } while (md);
-+ mb_debug("freed %u blocks in %u structures\n", count, count2);
-+}
-+
-+void ext3_mb_poll_new_transaction(struct super_block *sb, handle_t *handle)
-+{
-+ struct ext3_sb_info *sbi = EXT3_SB(sb);
-+
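-+	/* unlocked fast path: if we have already seen this transaction
-+	 * there is nothing to do; the check is repeated under s_md_lock */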
-+ if (sbi->s_last_transaction == handle->h_transaction->t_tid)
-+ return;
-+
-+	/* a new transaction! time to close the last one and free blocks of
-+	 * the committed transaction. we know that only one transaction can
-+	 * be active at a time, so the previous one may still be committing
-+	 * while the transaction before the previous one is known to be
-+	 * committed already. this means we may now free blocks freed in
-+	 * all transactions before the previous one. hope I'm clear
-+	 * enough ... */
-+
-+ spin_lock(&sbi->s_md_lock);
-+ if (sbi->s_last_transaction != handle->h_transaction->t_tid) {
-+ mb_debug("new transaction %lu, old %lu\n",
-+ (unsigned long) handle->h_transaction->t_tid,
-+ (unsigned long) sbi->s_last_transaction);
-+ list_splice_init(&sbi->s_closed_transaction,
-+ &sbi->s_committed_transaction);
-+ list_splice_init(&sbi->s_active_transaction,
-+ &sbi->s_closed_transaction);
-+ sbi->s_last_transaction = handle->h_transaction->t_tid;
-+ }
-+ spin_unlock(&sbi->s_md_lock);
-+
-+ ext3_mb_free_committed_blocks(sb);
-+}
-+
-+int ext3_mb_free_metadata(handle_t *handle, struct ext3_buddy *e3b,
-+ int group, int block, int count)
-+{
-+ struct ext3_group_info *db = e3b->bd_info;
-+ struct super_block *sb = e3b->bd_sb;
-+ struct ext3_sb_info *sbi = EXT3_SB(sb);
-+ struct ext3_free_metadata *md;
-+ int i;
-+
-+ J_ASSERT(e3b->bd_bitmap_page != NULL);
-+ J_ASSERT(e3b->bd_buddy_page != NULL);
-+
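-+	/* append each freed block to the group's current metadata
-+	 * container; allocate and queue a new container if the group has
-+	 * none for this transaction or the current one is full */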
-+ ext3_lock_group(sb, group);
-+ for (i = 0; i < count; i++) {
-+ md = db->bb_md_cur;
-+ if (md && db->bb_tid != handle->h_transaction->t_tid) {
-+ db->bb_md_cur = NULL;
-+ md = NULL;
-+ }
-+
-+ if (md == NULL) {
-+ ext3_unlock_group(sb, group);
-+ md = kmalloc(sizeof(*md), GFP_KERNEL);
-+ if (md == NULL)
-+ return -ENOMEM;
-+ md->num = 0;
-+ md->group = group;
-+
-+ ext3_lock_group(sb, group);
-+ if (db->bb_md_cur == NULL) {
-+ spin_lock(&sbi->s_md_lock);
-+ list_add(&md->list, &sbi->s_active_transaction);
-+ spin_unlock(&sbi->s_md_lock);
-+ /* protect buddy cache from being freed,
-+ * otherwise we'll refresh it from
-+ * on-disk bitmap and lose not-yet-available
-+ * blocks */
-+ page_cache_get(e3b->bd_buddy_page);
-+ page_cache_get(e3b->bd_bitmap_page);
-+ db->bb_md_cur = md;
-+ db->bb_tid = handle->h_transaction->t_tid;
-+ mb_debug("new md 0x%p for group %u\n",
-+ md, md->group);
-+ } else {
-+ kfree(md);
-+ md = db->bb_md_cur;
-+ }
-+ }
-+
-+ BUG_ON(md->num >= EXT3_BB_MAX_BLOCKS);
-+ md->blocks[md->num] = block + i;
-+ md->num++;
-+ if (md->num == EXT3_BB_MAX_BLOCKS) {
-+			/* container is full; detach it so a new one gets
-+			 * allocated next time (it already sits on the
-+			 * sb's transaction list) */
-+ db->bb_md_cur = NULL;
-+ }
-+ }
-+ ext3_unlock_group(sb, group);
-+ return 0;
-+}
-+
-+void ext3_mb_free_blocks(handle_t *handle, struct inode *inode,
-+ unsigned long block, unsigned long count,
-+ int metadata, int *freed)
-+{
-+ struct buffer_head *bitmap_bh = NULL;
-+ struct ext3_group_desc *gdp;
-+ struct ext3_super_block *es;
-+ unsigned long bit, overflow;
-+ struct buffer_head *gd_bh;
-+ unsigned long block_group;
-+ struct ext3_sb_info *sbi;
-+ struct super_block *sb;
-+ struct ext3_buddy e3b;
-+ int err = 0, ret;
-+
-+ *freed = 0;
-+ sb = inode->i_sb;
-+ if (!sb) {
-+ printk ("ext3_free_blocks: nonexistent device");
-+ return;
-+ }
-+
-+ ext3_mb_poll_new_transaction(sb, handle);
-+
-+ sbi = EXT3_SB(sb);
-+ es = EXT3_SB(sb)->s_es;
-+ if (block < le32_to_cpu(es->s_first_data_block) ||
-+ block + count < block ||
-+ block + count > le32_to_cpu(es->s_blocks_count)) {
-+ ext3_error (sb, "ext3_free_blocks",
-+ "Freeing blocks not in datazone - "
-+ "block = %lu, count = %lu", block, count);
-+ goto error_return;
-+ }
-+
-+ ext3_debug("freeing block %lu\n", block);
-+
-+do_more:
-+ overflow = 0;
-+ block_group = (block - le32_to_cpu(es->s_first_data_block)) /
-+ EXT3_BLOCKS_PER_GROUP(sb);
-+ bit = (block - le32_to_cpu(es->s_first_data_block)) %
-+ EXT3_BLOCKS_PER_GROUP(sb);
-+ /*
-+ * Check to see if we are freeing blocks across a group
-+ * boundary.
-+ */
-+ if (bit + count > EXT3_BLOCKS_PER_GROUP(sb)) {
-+ overflow = bit + count - EXT3_BLOCKS_PER_GROUP(sb);
-+ count -= overflow;
-+ }
-+ brelse(bitmap_bh);
-+ bitmap_bh = read_block_bitmap(sb, block_group);
-+ if (!bitmap_bh)
-+ goto error_return;
-+ gdp = ext3_get_group_desc (sb, block_group, &gd_bh);
-+ if (!gdp)
-+ goto error_return;
-+
-+ if (in_range (le32_to_cpu(gdp->bg_block_bitmap), block, count) ||
-+ in_range (le32_to_cpu(gdp->bg_inode_bitmap), block, count) ||
-+ in_range (block, le32_to_cpu(gdp->bg_inode_table),
-+ EXT3_SB(sb)->s_itb_per_group) ||
-+ in_range (block + count - 1, le32_to_cpu(gdp->bg_inode_table),
-+ EXT3_SB(sb)->s_itb_per_group))
-+ ext3_error (sb, "ext3_free_blocks",
-+ "Freeing blocks in system zones - "
-+ "Block = %lu, count = %lu",
-+ block, count);
-+
-+ BUFFER_TRACE(bitmap_bh, "getting write access");
-+ err = ext3_journal_get_write_access(handle, bitmap_bh);
-+ if (err)
-+ goto error_return;
-+
-+ /*
-+ * We are about to modify some metadata. Call the journal APIs
-+ * to unshare ->b_data if a currently-committing transaction is
-+ * using it
-+ */
-+ BUFFER_TRACE(gd_bh, "get_write_access");
-+ err = ext3_journal_get_write_access(handle, gd_bh);
-+ if (err)
-+ goto error_return;
-+
-+ err = ext3_mb_load_buddy(sb, block_group, &e3b);
-+ if (err)
-+ goto error_return;
-+
-+#ifdef AGGRESSIVE_CHECK
-+ {
-+ int i;
-+ for (i = 0; i < count; i++)
-+ J_ASSERT(mb_test_bit(bit + i, bitmap_bh->b_data));
-+ }
-+#endif
-+ mb_clear_bits(bitmap_bh->b_data, bit, count);
-+
-+ /* We dirtied the bitmap block */
-+ BUFFER_TRACE(bitmap_bh, "dirtied bitmap block");
-+ err = ext3_journal_dirty_metadata(handle, bitmap_bh);
-+
-+ if (metadata) {
-+ /* blocks being freed are metadata. these blocks shouldn't
-+ * be used until this transaction is committed */
-+ ext3_mb_free_metadata(handle, &e3b, block_group, bit, count);
-+ } else {
-+ ext3_lock_group(sb, block_group);
-+ mb_free_blocks(&e3b, bit, count);
-+ ext3_unlock_group(sb, block_group);
-+ }
-+
-+ spin_lock(sb_bgl_lock(sbi, block_group));
-+ gdp->bg_free_blocks_count =
-+ cpu_to_le16(le16_to_cpu(gdp->bg_free_blocks_count) + count);
-+ spin_unlock(sb_bgl_lock(sbi, block_group));
-+ percpu_counter_mod(&sbi->s_freeblocks_counter, count);
-+
-+ ext3_mb_release_desc(&e3b);
-+
-+ *freed = count;
-+
-+ /* And the group descriptor block */
-+ BUFFER_TRACE(gd_bh, "dirtied group descriptor block");
-+ ret = ext3_journal_dirty_metadata(handle, gd_bh);
-+ if (!err) err = ret;
-+
-+ if (overflow && !err) {
-+ block += count;
-+ count = overflow;
-+ goto do_more;
-+ }
-+ sb->s_dirt = 1;
-+error_return:
-+ brelse(bitmap_bh);
-+ ext3_std_error(sb, err);
-+ return;
-+}
-+
-+int ext3_mb_reserve_blocks(struct super_block *sb, int blocks)
-+{
-+ struct ext3_sb_info *sbi = EXT3_SB(sb);
-+ int free, ret = -ENOSPC;
-+
-+ BUG_ON(blocks < 0);
-+ spin_lock(&sbi->s_reserve_lock);
-+ free = percpu_counter_read_positive(&sbi->s_freeblocks_counter);
-+ if (blocks <= free - sbi->s_blocks_reserved) {
-+ sbi->s_blocks_reserved += blocks;
-+ ret = 0;
-+ }
-+ spin_unlock(&sbi->s_reserve_lock);
-+ return ret;
-+}
-+
-+void ext3_mb_release_blocks(struct super_block *sb, int blocks)
-+{
-+ struct ext3_sb_info *sbi = EXT3_SB(sb);
-+
-+ BUG_ON(blocks < 0);
-+ spin_lock(&sbi->s_reserve_lock);
-+ sbi->s_blocks_reserved -= blocks;
-+ WARN_ON(sbi->s_blocks_reserved < 0);
-+ if (sbi->s_blocks_reserved < 0)
-+ sbi->s_blocks_reserved = 0;
-+ spin_unlock(&sbi->s_reserve_lock);
-+}
-+
-+int ext3_new_block(handle_t *handle, struct inode *inode,
-+ unsigned long goal, int *errp)
-+{
-+ int ret, len;
-+
-+ if (!test_opt(inode->i_sb, MBALLOC)) {
-+ ret = ext3_new_block_old(handle, inode, goal, errp);
-+ goto out;
-+ }
-+ len = 1;
-+ ret = ext3_mb_new_blocks(handle, inode, goal, &len, 0, errp);
-+out:
-+ return ret;
-+}
-+
-+
-+void ext3_free_blocks(handle_t *handle, struct inode * inode,
-+ unsigned long block, unsigned long count, int metadata)
-+{
-+ struct super_block *sb;
-+ int freed;
-+
-+ sb = inode->i_sb;
-+ if (!test_opt(sb, MBALLOC) || !EXT3_SB(sb)->s_group_info)
-+ ext3_free_blocks_sb(handle, sb, block, count, &freed);
-+ else
-+ ext3_mb_free_blocks(handle, inode, block, count, metadata, &freed);
-+ if (freed)
-+ DQUOT_FREE_BLOCK(inode, freed);
-+ return;
-+}
-+
-+#define EXT3_ROOT "ext3"
-+#define EXT3_MB_STATS_NAME "mb_stats"
-+#define EXT3_MB_MAX_TO_SCAN_NAME "mb_max_to_scan"
-+#define EXT3_MB_MIN_TO_SCAN_NAME "mb_min_to_scan"
-+#define EXT3_MB_ORDER2_REQ "mb_order2_req"
-+
-+static int ext3_mb_stats_read(char *page, char **start, off_t off,
-+ int count, int *eof, void *data)
-+{
-+ int len;
-+
-+ *eof = 1;
-+ if (off != 0)
-+ return 0;
-+
-+ len = sprintf(page, "%ld\n", ext3_mb_stats);
-+ *start = page;
-+ return len;
-+}
-+
-+static int ext3_mb_stats_write(struct file *file, const char *buffer,
-+ unsigned long count, void *data)
-+{
-+ char str[32];
-+
-+ if (count >= sizeof(str)) {
-+ printk(KERN_ERR "EXT3-fs: %s string too long, max %u bytes\n",
-+ EXT3_MB_STATS_NAME, (int)sizeof(str));
-+ return -EOVERFLOW;
-+ }
-+
-+ if (copy_from_user(str, buffer, count))
-+ return -EFAULT;
-+
-+ /* Only set to 0 or 1 respectively; zero->0; non-zero->1 */
-+ ext3_mb_stats = (simple_strtol(str, NULL, 0) != 0);
-+ return count;
-+}
-+
-+static int ext3_mb_max_to_scan_read(char *page, char **start, off_t off,
-+ int count, int *eof, void *data)
-+{
-+ int len;
-+
-+ *eof = 1;
-+ if (off != 0)
-+ return 0;
-+
-+ len = sprintf(page, "%ld\n", ext3_mb_max_to_scan);
-+ *start = page;
-+ return len;
-+}
-+
-+static int ext3_mb_max_to_scan_write(struct file *file, const char *buffer,
-+ unsigned long count, void *data)
-+{
-+ char str[32];
-+ long value;
-+
-+ if (count >= sizeof(str)) {
-+ printk(KERN_ERR "EXT3-fs: %s string too long, max %u bytes\n",
-+ EXT3_MB_MAX_TO_SCAN_NAME, (int)sizeof(str));
-+ return -EOVERFLOW;
-+ }
-+
-+ if (copy_from_user(str, buffer, count))
-+ return -EFAULT;
-+
-+	/* the value must be a positive integer */
-+ value = simple_strtol(str, NULL, 0);
-+ if (value <= 0)
-+ return -ERANGE;
-+
-+ ext3_mb_max_to_scan = value;
-+
-+ return count;
-+}
-+
-+static int ext3_mb_min_to_scan_read(char *page, char **start, off_t off,
-+ int count, int *eof, void *data)
-+{
-+ int len;
-+
-+ *eof = 1;
-+ if (off != 0)
-+ return 0;
-+
-+ len = sprintf(page, "%ld\n", ext3_mb_min_to_scan);
-+ *start = page;
-+ return len;
-+}
-+
-+static int ext3_mb_min_to_scan_write(struct file *file, const char *buffer,
-+ unsigned long count, void *data)
-+{
-+ char str[32];
-+ long value;
-+
-+ if (count >= sizeof(str)) {
-+ printk(KERN_ERR "EXT3-fs: %s string too long, max %u bytes\n",
-+ EXT3_MB_MIN_TO_SCAN_NAME, (int)sizeof(str));
-+ return -EOVERFLOW;
-+ }
-+
-+ if (copy_from_user(str, buffer, count))
-+ return -EFAULT;
-+
-+	/* the value must be a positive integer */
-+ value = simple_strtol(str, NULL, 0);
-+ if (value <= 0)
-+ return -ERANGE;
-+
-+ ext3_mb_min_to_scan = value;
-+
-+ return count;
-+}
-+
-+static int ext3_mb_order2_req_read(char *page, char **start, off_t off,
-+ int count, int *eof, void *data)
-+{
-+ int len;
-+
-+ *eof = 1;
-+ if (off != 0)
-+ return 0;
-+
-+ len = sprintf(page, "%ld\n", ext3_mb_order2_reqs);
-+ *start = page;
-+ return len;
-+}
-+
-+static int ext3_mb_order2_req_write(struct file *file, const char *buffer,
-+ unsigned long count, void *data)
-+{
-+ char str[32];
-+ long value;
-+
-+ if (count >= sizeof(str)) {
-+ printk(KERN_ERR "EXT3-fs: %s string too long, max %u bytes\n",
-+			EXT3_MB_ORDER2_REQ, (int)sizeof(str));
-+ return -EOVERFLOW;
-+ }
-+
-+ if (copy_from_user(str, buffer, count))
-+ return -EFAULT;
-+
-+	/* the value must be a positive integer */
-+ value = simple_strtol(str, NULL, 0);
-+ if (value <= 0)
-+ return -ERANGE;
-+
-+ ext3_mb_order2_reqs = value;
-+
-+ return count;
-+}
-+
-+int __init init_ext3_proc(void)
-+{
-+ struct proc_dir_entry *proc_ext3_mb_stats;
-+ struct proc_dir_entry *proc_ext3_mb_max_to_scan;
-+ struct proc_dir_entry *proc_ext3_mb_min_to_scan;
-+ struct proc_dir_entry *proc_ext3_mb_order2_req;
-+
-+ proc_root_ext3 = proc_mkdir(EXT3_ROOT, proc_root_fs);
-+ if (proc_root_ext3 == NULL) {
-+ printk(KERN_ERR "EXT3-fs: Unable to create %s\n", EXT3_ROOT);
-+ return -EIO;
-+ }
-+
-+ /* Initialize EXT3_MB_STATS_NAME */
-+ proc_ext3_mb_stats = create_proc_entry(EXT3_MB_STATS_NAME,
-+ S_IFREG | S_IRUGO | S_IWUSR, proc_root_ext3);
-+ if (proc_ext3_mb_stats == NULL) {
-+ printk(KERN_ERR "EXT3-fs: Unable to create %s\n",
-+ EXT3_MB_STATS_NAME);
-+ remove_proc_entry(EXT3_ROOT, proc_root_fs);
-+ return -EIO;
-+ }
-+
-+ proc_ext3_mb_stats->data = NULL;
-+ proc_ext3_mb_stats->read_proc = ext3_mb_stats_read;
-+ proc_ext3_mb_stats->write_proc = ext3_mb_stats_write;
-+
-+ /* Initialize EXT3_MAX_TO_SCAN_NAME */
-+ proc_ext3_mb_max_to_scan = create_proc_entry(
-+ EXT3_MB_MAX_TO_SCAN_NAME,
-+ S_IFREG | S_IRUGO | S_IWUSR, proc_root_ext3);
-+ if (proc_ext3_mb_max_to_scan == NULL) {
-+ printk(KERN_ERR "EXT3-fs: Unable to create %s\n",
-+ EXT3_MB_MAX_TO_SCAN_NAME);
-+ remove_proc_entry(EXT3_MB_STATS_NAME, proc_root_ext3);
-+ remove_proc_entry(EXT3_ROOT, proc_root_fs);
-+ return -EIO;
-+ }
-+
-+ proc_ext3_mb_max_to_scan->data = NULL;
-+ proc_ext3_mb_max_to_scan->read_proc = ext3_mb_max_to_scan_read;
-+ proc_ext3_mb_max_to_scan->write_proc = ext3_mb_max_to_scan_write;
-+
-+ /* Initialize EXT3_MIN_TO_SCAN_NAME */
-+ proc_ext3_mb_min_to_scan = create_proc_entry(
-+ EXT3_MB_MIN_TO_SCAN_NAME,
-+ S_IFREG | S_IRUGO | S_IWUSR, proc_root_ext3);
-+ if (proc_ext3_mb_min_to_scan == NULL) {
-+ printk(KERN_ERR "EXT3-fs: Unable to create %s\n",
-+ EXT3_MB_MIN_TO_SCAN_NAME);
-+ remove_proc_entry(EXT3_MB_MAX_TO_SCAN_NAME, proc_root_ext3);
-+ remove_proc_entry(EXT3_MB_STATS_NAME, proc_root_ext3);
-+ remove_proc_entry(EXT3_ROOT, proc_root_fs);
-+ return -EIO;
-+ }
-+
-+ proc_ext3_mb_min_to_scan->data = NULL;
-+ proc_ext3_mb_min_to_scan->read_proc = ext3_mb_min_to_scan_read;
-+ proc_ext3_mb_min_to_scan->write_proc = ext3_mb_min_to_scan_write;
-+
-+ /* Initialize EXT3_ORDER2_REQ */
-+ proc_ext3_mb_order2_req = create_proc_entry(
-+ EXT3_MB_ORDER2_REQ,
-+ S_IFREG | S_IRUGO | S_IWUSR, proc_root_ext3);
-+ if (proc_ext3_mb_order2_req == NULL) {
-+ printk(KERN_ERR "EXT3-fs: Unable to create %s\n",
-+ EXT3_MB_ORDER2_REQ);
-+ remove_proc_entry(EXT3_MB_MIN_TO_SCAN_NAME, proc_root_ext3);
-+ remove_proc_entry(EXT3_MB_MAX_TO_SCAN_NAME, proc_root_ext3);
-+ remove_proc_entry(EXT3_MB_STATS_NAME, proc_root_ext3);
-+ remove_proc_entry(EXT3_ROOT, proc_root_fs);
-+ return -EIO;
-+ }
-+
-+ proc_ext3_mb_order2_req->data = NULL;
-+ proc_ext3_mb_order2_req->read_proc = ext3_mb_order2_req_read;
-+ proc_ext3_mb_order2_req->write_proc = ext3_mb_order2_req_write;
-+
-+ return 0;
-+}
-+
-+void exit_ext3_proc(void)
-+{
-+ remove_proc_entry(EXT3_MB_STATS_NAME, proc_root_ext3);
-+ remove_proc_entry(EXT3_MB_MAX_TO_SCAN_NAME, proc_root_ext3);
-+ remove_proc_entry(EXT3_MB_MIN_TO_SCAN_NAME, proc_root_ext3);
-+ remove_proc_entry(EXT3_MB_ORDER2_REQ, proc_root_ext3);
-+ remove_proc_entry(EXT3_ROOT, proc_root_fs);
-+}
-Index: linux-2.6.16.i686/fs/ext3/Makefile
-===================================================================
---- linux-2.6.16.i686.orig/fs/ext3/Makefile 2006-05-30 22:55:32.000000000 +0800
-+++ linux-2.6.16.i686/fs/ext3/Makefile 2006-05-30 23:02:59.000000000 +0800
-@@ -6,7 +6,7 @@
-
- ext3-y := balloc.o bitmap.o dir.o file.o fsync.o ialloc.o inode.o iopen.o \
- ioctl.o namei.o super.o symlink.o hash.o resize.o \
-- extents.o
-+ extents.o mballoc.o
-
- ext3-$(CONFIG_EXT3_FS_XATTR) += xattr.o xattr_user.o xattr_trusted.o
- ext3-$(CONFIG_EXT3_FS_POSIX_ACL) += acl.o
+++ /dev/null
-Index: linux-2.6.5-7.282-full/include/linux/ext3_fs.h
-===================================================================
---- linux-2.6.5-7.282-full.orig/include/linux/ext3_fs.h 2006-10-24 22:18:28.000000000 +0400
-+++ linux-2.6.5-7.282-full/include/linux/ext3_fs.h 2006-10-24 22:18:28.000000000 +0400
-@@ -57,6 +57,14 @@ struct statfs;
- #define ext3_debug(f, a...) do {} while (0)
- #endif
-
-+#define EXT3_MULTIBLOCK_ALLOCATOR 1
-+
-+#define EXT3_MB_HINT_MERGE 1
-+#define EXT3_MB_HINT_RESERVED 2
-+#define EXT3_MB_HINT_METADATA 4
-+#define EXT3_MB_HINT_FIRST 8
-+#define EXT3_MB_HINT_BEST 16
-+
- /*
- * Special inodes numbers
- */
-@@ -339,6 +347,7 @@ struct ext3_inode {
- #define EXT3_MOUNT_IOPEN_NOPRIV 0x100000/* Make iopen world-readable */
- #define EXT3_MOUNT_EXTENTS 0x200000/* Extents support */
- #define EXT3_MOUNT_EXTDEBUG 0x400000/* Extents debug */
-+#define EXT3_MOUNT_MBALLOC 0x800000/* Buddy allocation support */
-
- /* Compatibility, for having both ext2_fs.h and ext3_fs.h included at once */
- #ifndef clear_opt
-@@ -361,6 +370,14 @@ struct ext3_inode {
- #define ext3_find_first_zero_bit ext2_find_first_zero_bit
- #define ext3_find_next_zero_bit ext2_find_next_zero_bit
-
-+#ifndef ext2_find_next_le_bit
-+#ifdef __LITTLE_ENDIAN
-+#define ext2_find_next_le_bit(addr, size, off) find_next_bit((addr), (size), (off))
-+#else
-+#error "mballoc needs a patch for big-endian systems - CFS bug 10634"
-+#endif /* __LITTLE_ENDIAN */
-+#endif /* !ext2_find_next_le_bit */
-+
- /*
- * Maximal mount counts between two filesystem checks
- */
-@@ -700,7 +717,10 @@ extern int ext3_bg_has_super(struct supe
- extern unsigned long ext3_bg_num_gdb(struct super_block *sb, int group);
- extern int ext3_new_block (handle_t *, struct inode *, unsigned long, int *);
- extern void ext3_free_blocks (handle_t *, struct inode *, unsigned long,
-- unsigned long);
-+ unsigned long, int);
-+extern int ext3_new_block_old(handle_t *, struct inode *, unsigned long, int *);
-+extern void ext3_free_blocks_old(handle_t *, struct inode *, unsigned long,
-+ unsigned long);
- extern unsigned long ext3_count_free_blocks (struct super_block *);
- extern void ext3_check_blocks_bitmap (struct super_block *);
- extern struct ext3_group_desc * ext3_get_group_desc(struct super_block * sb,
-@@ -824,6 +843,17 @@ extern void ext3_extents_initialize_bloc
- extern int ext3_ext_ioctl(struct inode *inode, struct file *filp,
- unsigned int cmd, unsigned long arg);
-
-+/* mballoc.c */
-+extern long ext3_mb_stats;
-+extern long ext3_mb_max_to_scan;
-+extern int ext3_mb_init(struct super_block *, int);
-+extern int ext3_mb_release(struct super_block *);
-+extern int ext3_mb_new_blocks(handle_t *, struct inode *, unsigned long, int *, int, int *);
-+extern int ext3_mb_reserve_blocks(struct super_block *, int);
-+extern void ext3_mb_release_blocks(struct super_block *, int);
-+int __init init_ext3_proc(void);
-+void exit_ext3_proc(void);
-+
- #endif /* __KERNEL__ */
-
- #define EXT3_IOC_CREATE_INUM _IOW('f', 5, long)
-Index: linux-2.6.5-7.282-full/include/linux/ext3_fs_sb.h
-===================================================================
---- linux-2.6.5-7.282-full.orig/include/linux/ext3_fs_sb.h 2006-10-24 22:18:28.000000000 +0400
-+++ linux-2.6.5-7.282-full/include/linux/ext3_fs_sb.h 2006-10-24 22:18:28.000000000 +0400
-@@ -23,9 +23,15 @@
- #define EXT_INCLUDE
- #include <linux/blockgroup_lock.h>
- #include <linux/percpu_counter.h>
-+#include <linux/list.h>
- #endif
- #endif
- #include <linux/rbtree.h>
-+#include <linux/proc_fs.h>
-+
-+struct ext3_buddy_group_blocks;
-+struct ext3_mb_history;
-+#define EXT3_BB_MAX_BLOCKS
-
- /*
- * third extended-fs super-block data in memory
-@@ -78,6 +84,43 @@ struct ext3_sb_info {
- struct timer_list turn_ro_timer; /* For turning read-only (crash simulation) */
- wait_queue_head_t ro_wait_queue; /* For people waiting for the fs to go read-only */
- #endif
-+
-+ /* for buddy allocator */
-+ struct ext3_group_info ***s_group_info;
-+ struct inode *s_buddy_cache;
-+ long s_blocks_reserved;
-+ spinlock_t s_reserve_lock;
-+ struct list_head s_active_transaction;
-+ struct list_head s_closed_transaction;
-+ struct list_head s_committed_transaction;
-+ spinlock_t s_md_lock;
-+ tid_t s_last_transaction;
-+ int s_mb_factor;
-+ unsigned short *s_mb_offsets, *s_mb_maxs;
-+ unsigned long s_stripe;
-+
-+ /* history to debug policy */
-+ struct ext3_mb_history *s_mb_history;
-+ int s_mb_history_cur;
-+ int s_mb_history_max;
-+ struct proc_dir_entry *s_mb_proc;
-+ spinlock_t s_mb_history_lock;
-+
-+ /* stats for buddy allocator */
-+ atomic_t s_bal_reqs; /* number of reqs with len > 1 */
-+ atomic_t s_bal_success; /* we found long enough chunks */
-+ atomic_t s_bal_allocated; /* in blocks */
-+ atomic_t s_bal_ex_scanned; /* total extents scanned */
-+ atomic_t s_bal_goals; /* goal hits */
-+ atomic_t s_bal_breaks; /* too long searches */
-+ atomic_t s_bal_2orders; /* 2^order hits */
-+ spinlock_t s_bal_lock;
-+ unsigned long s_mb_buddies_generated;
-+ unsigned long long s_mb_generation_time;
- };
-
-+#define EXT3_GROUP_INFO(sb, group) \
-+ EXT3_SB(sb)->s_group_info[(group) >> EXT3_DESC_PER_BLOCK_BITS(sb)] \
-+ [(group) & (EXT3_DESC_PER_BLOCK(sb) - 1)]
-+
- #endif /* _LINUX_EXT3_FS_SB */
-Index: linux-2.6.5-7.282-full/fs/ext3/super.c
-===================================================================
---- linux-2.6.5-7.282-full.orig/fs/ext3/super.c 2006-10-24 22:18:28.000000000 +0400
-+++ linux-2.6.5-7.282-full/fs/ext3/super.c 2006-10-24 22:18:28.000000000 +0400
-@@ -389,6 +389,7 @@ void ext3_put_super (struct super_block
- struct ext3_super_block *es = sbi->s_es;
- int i;
-
-+ ext3_mb_release(sb);
- ext3_ext_release(sb);
- ext3_xattr_put_super(sb);
- journal_destroy(sbi->s_journal);
-@@ -588,6 +589,7 @@ enum {
- Opt_err,
- Opt_iopen, Opt_noiopen, Opt_iopen_nopriv,
- Opt_extents, Opt_noextents, Opt_extdebug,
-+ Opt_mballoc, Opt_nomballoc, Opt_stripe,
- };
-
- static match_table_t tokens = {
-@@ -634,6 +636,9 @@ static match_table_t tokens = {
- {Opt_extents, "extents"},
- {Opt_noextents, "noextents"},
- {Opt_extdebug, "extdebug"},
-+ {Opt_mballoc, "mballoc"},
-+ {Opt_nomballoc, "nomballoc"},
-+ {Opt_stripe, "stripe=%u"},
- {Opt_barrier, "barrier=%u"},
- {Opt_err, NULL}
- };
-@@ -859,6 +864,19 @@ static int parse_options (char * options
- case Opt_extdebug:
- set_opt (sbi->s_mount_opt, EXTDEBUG);
- break;
-+ case Opt_mballoc:
-+ set_opt(sbi->s_mount_opt, MBALLOC);
-+ break;
-+ case Opt_nomballoc:
-+ clear_opt(sbi->s_mount_opt, MBALLOC);
-+ break;
-+ case Opt_stripe:
-+ if (match_int(&args[0], &option))
-+ return 0;
-+ if (option < 0)
-+ return 0;
-+ sbi->s_stripe = option;
-+ break;
- default:
- printk (KERN_ERR
- "EXT3-fs: Unrecognized mount option \"%s\" "
-@@ -1512,6 +1530,7 @@ static int ext3_fill_super (struct super
- ext3_count_dirs(sb));
-
- ext3_ext_init(sb);
-+ ext3_mb_init(sb, needs_recovery);
-
- return 0;
-
-@@ -2160,7 +2179,13 @@ static struct file_system_type ext3_fs_t
-
- static int __init init_ext3_fs(void)
- {
-- int err = init_ext3_xattr();
-+ int err;
-+
-+ err = init_ext3_proc();
-+ if (err)
-+ return err;
-+
-+ err = init_ext3_xattr();
- if (err)
- return err;
- err = init_inodecache();
-@@ -2189,6 +2214,7 @@ static void __exit exit_ext3_fs(void)
- unregister_filesystem(&ext3_fs_type);
- destroy_inodecache();
- exit_ext3_xattr();
-+ exit_ext3_proc();
- }
-
- int ext3_prep_san_write(struct inode *inode, long *blocks,
-Index: linux-2.6.5-7.282-full/fs/ext3/extents.c
-===================================================================
---- linux-2.6.5-7.282-full.orig/fs/ext3/extents.c 2006-10-24 22:18:28.000000000 +0400
-+++ linux-2.6.5-7.282-full/fs/ext3/extents.c 2006-10-24 22:18:28.000000000 +0400
-@@ -779,7 +779,7 @@ cleanup:
- for (i = 0; i < depth; i++) {
- if (!ablocks[i])
- continue;
-- ext3_free_blocks(handle, tree->inode, ablocks[i], 1);
-+ ext3_free_blocks(handle, tree->inode, ablocks[i], 1, 1);
- }
- }
- kfree(ablocks);
-@@ -1438,7 +1438,7 @@ int ext3_ext_rm_idx(handle_t *handle, st
- path->p_idx->ei_leaf);
- bh = sb_find_get_block(tree->inode->i_sb, path->p_idx->ei_leaf);
- ext3_forget(handle, 1, tree->inode, bh, path->p_idx->ei_leaf);
-- ext3_free_blocks(handle, tree->inode, path->p_idx->ei_leaf, 1);
-+ ext3_free_blocks(handle, tree->inode, path->p_idx->ei_leaf, 1, 1);
- return err;
- }
-
-@@ -1923,10 +1923,12 @@ ext3_remove_blocks(struct ext3_extents_t
- int needed = ext3_remove_blocks_credits(tree, ex, from, to);
- handle_t *handle = ext3_journal_start(tree->inode, needed);
- struct buffer_head *bh;
-- int i;
-+ int i, metadata = 0;
-
- if (IS_ERR(handle))
- return PTR_ERR(handle);
-+ if (S_ISDIR(tree->inode->i_mode) || S_ISLNK(tree->inode->i_mode))
-+ metadata = 1;
- if (from >= ex->ee_block && to == ex->ee_block + ex->ee_len - 1) {
- /* tail removal */
- unsigned long num, start;
-@@ -1938,7 +1940,7 @@ ext3_remove_blocks(struct ext3_extents_t
- bh = sb_find_get_block(tree->inode->i_sb, start + i);
- ext3_forget(handle, 0, tree->inode, bh, start + i);
- }
-- ext3_free_blocks(handle, tree->inode, start, num);
-+ ext3_free_blocks(handle, tree->inode, start, num, metadata);
- } else if (from == ex->ee_block && to <= ex->ee_block + ex->ee_len - 1) {
- printk("strange request: removal %lu-%lu from %u:%u\n",
- from, to, ex->ee_block, ex->ee_len);
-Index: linux-2.6.5-7.282-full/fs/ext3/inode.c
-===================================================================
---- linux-2.6.5-7.282-full.orig/fs/ext3/inode.c 2006-10-24 22:18:28.000000000 +0400
-+++ linux-2.6.5-7.282-full/fs/ext3/inode.c 2006-10-24 22:18:28.000000000 +0400
-@@ -574,7 +574,7 @@ static int ext3_alloc_branch(handle_t *h
- ext3_journal_forget(handle, branch[i].bh);
- }
- for (i = 0; i < keys; i++)
-- ext3_free_blocks(handle, inode, le32_to_cpu(branch[i].key), 1);
-+ ext3_free_blocks(handle, inode, le32_to_cpu(branch[i].key), 1, 1);
- return err;
- }
-
-@@ -675,7 +675,7 @@ err_out:
- if (err == -EAGAIN)
- for (i = 0; i < num; i++)
- ext3_free_blocks(handle, inode,
-- le32_to_cpu(where[i].key), 1);
-+ le32_to_cpu(where[i].key), 1, 1);
- return err;
- }
-
-@@ -1837,7 +1837,7 @@ ext3_clear_blocks(handle_t *handle, stru
- }
- }
-
-- ext3_free_blocks(handle, inode, block_to_free, count);
-+ ext3_free_blocks(handle, inode, block_to_free, count, 1);
- }
-
- /**
-@@ -2008,7 +2008,7 @@ static void ext3_free_branches(handle_t
- ext3_journal_test_restart(handle, inode);
- }
-
-- ext3_free_blocks(handle, inode, nr, 1);
-+ ext3_free_blocks(handle, inode, nr, 1, 1);
-
- if (parent_bh) {
- /*
-Index: linux-2.6.5-7.282-full/fs/ext3/balloc.c
-===================================================================
---- linux-2.6.5-7.282-full.orig/fs/ext3/balloc.c 2006-08-30 18:12:13.000000000 +0400
-+++ linux-2.6.5-7.282-full/fs/ext3/balloc.c 2006-10-24 22:18:28.000000000 +0400
-@@ -78,7 +78,7 @@ struct ext3_group_desc * ext3_get_group_
- *
- * Return buffer_head on success or NULL in case of failure.
- */
--static struct buffer_head *
-+struct buffer_head *
- read_block_bitmap(struct super_block *sb, unsigned int block_group)
- {
- struct ext3_group_desc * desc;
-@@ -274,7 +274,7 @@ void ext3_discard_reservation(struct ino
- }
-
- /* Free given blocks, update quota and i_blocks field */
--void ext3_free_blocks(handle_t *handle, struct inode *inode,
-+void ext3_free_blocks_old(handle_t *handle, struct inode *inode,
- unsigned long block, unsigned long count)
- {
- struct buffer_head *bitmap_bh = NULL;
-@@ -1142,7 +1142,7 @@ int ext3_should_retry_alloc(struct super
- * bitmap, and then for any free bit if that fails.
- * This function also updates quota and i_blocks field.
- */
--int ext3_new_block(handle_t *handle, struct inode *inode,
-+int ext3_new_block_old(handle_t *handle, struct inode *inode,
- unsigned long goal, int *errp)
- {
- struct buffer_head *bitmap_bh = NULL;
-Index: linux-2.6.5-7.282-full/fs/ext3/xattr.c
-===================================================================
---- linux-2.6.5-7.282-full.orig/fs/ext3/xattr.c 2006-10-24 22:18:28.000000000 +0400
-+++ linux-2.6.5-7.282-full/fs/ext3/xattr.c 2006-10-24 22:18:28.000000000 +0400
-@@ -1371,7 +1371,7 @@ ext3_xattr_set_handle2(handle_t *handle,
- new_bh = sb_getblk(sb, block);
- if (!new_bh) {
- getblk_failed:
-- ext3_free_blocks(handle, inode, block, 1);
-+ ext3_free_blocks(handle, inode, block, 1, 1);
- error = -EIO;
- goto cleanup;
- }
-@@ -1411,7 +1411,7 @@ getblk_failed:
- if (HDR(old_bh)->h_refcount == cpu_to_le32(1)) {
- /* Free the old block. */
- ea_bdebug(old_bh, "freeing");
-- ext3_free_blocks(handle, inode, old_bh->b_blocknr, 1);
-+ ext3_free_blocks(handle, inode, old_bh->b_blocknr, 1, 1);
-
- /* ext3_forget() calls bforget() for us, but we
- let our caller release old_bh, so we need to
-@@ -1519,7 +1519,7 @@ ext3_xattr_delete_inode(handle_t *handle
- mb_cache_entry_free(ce);
- ce = NULL;
- }
-- ext3_free_blocks(handle, inode, EXT3_I(inode)->i_file_acl, 1);
-+ ext3_free_blocks(handle, inode, EXT3_I(inode)->i_file_acl, 1, 1);
- get_bh(bh);
- ext3_forget(handle, 1, inode, bh, EXT3_I(inode)->i_file_acl);
- } else {
-Index: linux-2.6.5-7.282-full/fs/ext3/mballoc.c
-===================================================================
---- linux-2.6.5-7.282-full.orig/fs/ext3/mballoc.c 2006-10-23 18:07:54.821533176 +0400
-+++ linux-2.6.5-7.282-full/fs/ext3/mballoc.c 2006-10-24 22:20:45.000000000 +0400
-@@ -0,0 +1,2726 @@
-+/*
-+ * Copyright (c) 2003-2005, Cluster File Systems, Inc, info@clusterfs.com
-+ * Written by Alex Tomas <alex@clusterfs.com>
-+ *
-+ * This program is free software; you can redistribute it and/or modify
-+ * it under the terms of the GNU General Public License version 2 as
-+ * published by the Free Software Foundation.
-+ *
-+ * This program is distributed in the hope that it will be useful,
-+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
-+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-+ * GNU General Public License for more details.
-+ *
-+ * You should have received a copy of the GNU General Public License
-+ * along with this program; if not, write to the Free Software
-+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-
-+ */
-+
-+
-+/*
-+ * mballoc.c contains the multiblocks allocation routines
-+ */
-+
-+#include <linux/config.h>
-+#include <linux/time.h>
-+#include <linux/fs.h>
-+#include <linux/namei.h>
-+#include <linux/jbd.h>
-+#include <linux/ext3_fs.h>
-+#include <linux/ext3_jbd.h>
-+#include <linux/quotaops.h>
-+#include <linux/buffer_head.h>
-+#include <linux/module.h>
-+#include <linux/swap.h>
-+#include <linux/proc_fs.h>
-+#include <linux/pagemap.h>
-+#include <linux/seq_file.h>
-+
-+/*
-+ * TODO:
-+ * - bitmap read-ahead (proposed by Oleg Drokin aka green)
-+ * - track min/max extents in each group for better group selection
-+ * - mb_mark_used() may allocate chunk right after splitting buddy
-+ * - special flag to advice allocator to look for requested + N blocks
-+ * this may improve interaction between extents and mballoc
-+ * - tree of groups sorted by number of free blocks
-+ * - percpu reservation code (hotpath)
-+ * - error handling
-+ */
-+
-+/*
-+ * with AGGRESSIVE_CHECK the allocator runs consistency checks over
-+ * its structures. These checks slow things down a lot.
-+ */
-+#define AGGRESSIVE_CHECK__
-+
-+/*
-+ * with MB_DEBUG the allocator prints verbose debugging messages via mb_debug()
-+ */
-+#define MB_DEBUG__
-+#ifdef MB_DEBUG
-+#define mb_debug(fmt,a...) printk(fmt, ##a)
-+#else
-+#define mb_debug(fmt,a...)
-+#endif
-+
-+/*
-+ * with EXT3_MB_HISTORY mballoc stores last N allocations in memory
-+ * and you can monitor it in /proc/fs/ext3/<dev>/mb_history
-+ */
-+#define EXT3_MB_HISTORY
-+
-+/*
-+ * How many found extents mballoc may examine at most while looking for the best one
-+ */
-+long ext3_mb_max_to_scan = 500;
-+
-+/*
-+ * How many found extents mballoc must examine before settling on a good-enough one
-+ */
-+long ext3_mb_min_to_scan = 30;
-+
-+/*
-+ * with 'ext3_mb_stats' the allocator collects statistics that are
-+ * shown at umount. Collecting them has a cost, though!
-+ */
-+
-+long ext3_mb_stats = 1;
-+
-+/*
-+ * minimum request size (as a power of two) for which the 2^N buddy search is used
-+ */
-+long ext3_mb_order2_reqs = 8;
-+
-+#ifdef EXT3_BB_MAX_BLOCKS
-+#undef EXT3_BB_MAX_BLOCKS
-+#endif
-+#define EXT3_BB_MAX_BLOCKS 30
-+
-+struct ext3_free_metadata {
-+ unsigned short group;
-+ unsigned short num;
-+ unsigned short blocks[EXT3_BB_MAX_BLOCKS];
-+ struct list_head list;
-+};
-+
-+struct ext3_group_info {
-+ unsigned long bb_state;
-+ unsigned long bb_tid;
-+ struct ext3_free_metadata *bb_md_cur;
-+ unsigned short bb_first_free;
-+ unsigned short bb_free;
-+ unsigned short bb_fragments;
-+ unsigned short bb_counters[];
-+};
-+
-+
-+#define EXT3_GROUP_INFO_NEED_INIT_BIT 0
-+#define EXT3_GROUP_INFO_LOCKED_BIT 1
-+
-+#define EXT3_MB_GRP_NEED_INIT(grp) \
-+ (test_bit(EXT3_GROUP_INFO_NEED_INIT_BIT, &(grp)->bb_state))
-+
-+struct ext3_free_extent {
-+ __u16 fe_start;
-+ __u16 fe_len;
-+ __u16 fe_group;
-+};
-+
-+struct ext3_allocation_context {
-+ struct super_block *ac_sb;
-+
-+ /* search goals */
-+ struct ext3_free_extent ac_g_ex;
-+
-+ /* the best found extent */
-+ struct ext3_free_extent ac_b_ex;
-+
-+ /* number of iterations done. we have to track to limit searching */
-+ unsigned long ac_ex_scanned;
-+ __u16 ac_groups_scanned;
-+ __u16 ac_found;
-+ __u16 ac_tail;
-+ __u16 ac_buddy;
-+ __u8 ac_status;
-+ __u8 ac_flags; /* allocation hints */
-+ __u8 ac_criteria;
-+ __u8 ac_repeats;
-+ __u8 ac_2order; /* if request is to allocate 2^N blocks and
-+ * N > 0, the field stores N, otherwise 0 */
-+
-+ struct page *ac_buddy_page;
-+ struct page *ac_bitmap_page;
-+};
-+
-+#define AC_STATUS_CONTINUE 1
-+#define AC_STATUS_FOUND 2
-+#define AC_STATUS_BREAK 3
-+
-+struct ext3_mb_history {
-+ struct ext3_free_extent goal; /* goal allocation */
-+ struct ext3_free_extent result; /* result allocation */
-+ unsigned pid;
-+ unsigned ino;
-+ __u16 found; /* how many extents have been found */
-+ __u16 groups; /* how many groups have been scanned */
-+ __u16 tail; /* what tail broke some buddy */
-+ __u16 buddy; /* buddy the tail ^^^ broke */
-+ __u8 cr; /* which phase the result extent was found at */
-+ __u8 merged;
-+};
-+
-+struct ext3_buddy {
-+ struct page *bd_buddy_page;
-+ void *bd_buddy;
-+ struct page *bd_bitmap_page;
-+ void *bd_bitmap;
-+ struct ext3_group_info *bd_info;
-+ struct super_block *bd_sb;
-+ __u16 bd_blkbits;
-+ __u16 bd_group;
-+};
-+#define EXT3_MB_BITMAP(e3b) ((e3b)->bd_bitmap)
-+#define EXT3_MB_BUDDY(e3b) ((e3b)->bd_buddy)
-+
-+#ifndef EXT3_MB_HISTORY
-+#define ext3_mb_store_history(sb,ino,ac)
-+#else
-+static void ext3_mb_store_history(struct super_block *, unsigned ino,
-+ struct ext3_allocation_context *ac);
-+#endif
-+
-+#define in_range(b, first, len) ((b) >= (first) && (b) <= (first) + (len) - 1)
-+
-+static struct proc_dir_entry *proc_root_ext3;
-+
-+struct buffer_head * read_block_bitmap(struct super_block *, unsigned int);
-+void ext3_mb_poll_new_transaction(struct super_block *, handle_t *);
-+void ext3_mb_free_committed_blocks(struct super_block *);
-+
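-+/*
-+ * mb_correct_addr_and_bit() folds any byte misalignment of the bitmap
-+ * address into the bit number and rounds the address down to a long
-+ * boundary before the ext2_*_bit() helpers are called on it.
-+ */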
-+#if BITS_PER_LONG == 64
-+#define mb_correct_addr_and_bit(bit,addr) \
-+{ \
-+ bit += ((unsigned long) addr & 7UL) << 3; \
-+ addr = (void *) ((unsigned long) addr & ~7UL); \
-+}
-+#elif BITS_PER_LONG == 32
-+#define mb_correct_addr_and_bit(bit,addr) \
-+{ \
-+ bit += ((unsigned long) addr & 3UL) << 3; \
-+ addr = (void *) ((unsigned long) addr & ~3UL); \
-+}
-+#else
-+#error "how many bits you are?!"
-+#endif
-+
-+static inline int mb_test_bit(int bit, void *addr)
-+{
-+ mb_correct_addr_and_bit(bit,addr);
-+ return ext2_test_bit(bit, addr);
-+}
-+
-+static inline void mb_set_bit(int bit, void *addr)
-+{
-+ mb_correct_addr_and_bit(bit,addr);
-+ ext2_set_bit(bit, addr);
-+}
-+
-+static inline void mb_set_bit_atomic(int bit, void *addr)
-+{
-+ mb_correct_addr_and_bit(bit,addr);
-+ ext2_set_bit_atomic(NULL, bit, addr);
-+}
-+
-+static inline void mb_clear_bit(int bit, void *addr)
-+{
-+ mb_correct_addr_and_bit(bit,addr);
-+ ext2_clear_bit(bit, addr);
-+}
-+
-+static inline void mb_clear_bit_atomic(int bit, void *addr)
-+{
-+ mb_correct_addr_and_bit(bit,addr);
-+ ext2_clear_bit_atomic(NULL, bit, addr);
-+}
-+
-+static inline int mb_find_next_zero_bit(void *addr, int max, int start)
-+{
-+ int fix;
-+#if BITS_PER_LONG == 64
-+ fix = ((unsigned long) addr & 7UL) << 3;
-+ addr = (void *) ((unsigned long) addr & ~7UL);
-+#elif BITS_PER_LONG == 32
-+ fix = ((unsigned long) addr & 3UL) << 3;
-+ addr = (void *) ((unsigned long) addr & ~3UL);
-+#else
-+#error "how many bits you are?!"
-+#endif
-+ max += fix;
-+ start += fix;
-+ return ext2_find_next_zero_bit(addr, max, start) - fix;
-+}
-+
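-+/*
-+ * mb_find_buddy() returns a pointer to the order-'order' buddy bitmap and
-+ * its size in bits via *max.  Order 0 is the plain block bitmap; higher
-+ * orders are packed one after another in the buddy block at the byte
-+ * offset s_mb_offsets[order].
-+ */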
-+static inline void *mb_find_buddy(struct ext3_buddy *e3b, int order, int *max)
-+{
-+ char *bb;
-+
-+ J_ASSERT(EXT3_MB_BITMAP(e3b) != EXT3_MB_BUDDY(e3b));
-+ J_ASSERT(max != NULL);
-+
-+ if (order > e3b->bd_blkbits + 1) {
-+ *max = 0;
-+ return NULL;
-+ }
-+
-+ /* at order 0 we see each particular block */
-+ *max = 1 << (e3b->bd_blkbits + 3);
-+ if (order == 0)
-+ return EXT3_MB_BITMAP(e3b);
-+
-+ bb = EXT3_MB_BUDDY(e3b) + EXT3_SB(e3b->bd_sb)->s_mb_offsets[order];
-+ *max = EXT3_SB(e3b->bd_sb)->s_mb_maxs[order];
-+
-+ return bb;
-+}
-+
-+#ifdef AGGRESSIVE_CHECK
-+
-+static void mb_check_buddy(struct ext3_buddy *e3b)
-+{
-+ int order = e3b->bd_blkbits + 1;
-+ int max, max2, i, j, k, count;
-+ int fragments = 0, fstart;
-+ void *buddy, *buddy2;
-+
-+ if (!test_opt(e3b->bd_sb, MBALLOC))
-+ return;
-+
-+ {
-+ static int mb_check_counter = 0;
-+ if (mb_check_counter++ % 300 != 0)
-+ return;
-+ }
-+
-+ while (order > 1) {
-+ buddy = mb_find_buddy(e3b, order, &max);
-+ J_ASSERT(buddy);
-+ buddy2 = mb_find_buddy(e3b, order - 1, &max2);
-+ J_ASSERT(buddy2);
-+ J_ASSERT(buddy != buddy2);
-+ J_ASSERT(max * 2 == max2);
-+
-+ count = 0;
-+ for (i = 0; i < max; i++) {
-+
-+ if (mb_test_bit(i, buddy)) {
-+ /* only single bit in buddy2 may be 1 */
-+ if (!mb_test_bit(i << 1, buddy2))
-+ J_ASSERT(mb_test_bit((i<<1)+1, buddy2));
-+ else if (!mb_test_bit((i << 1) + 1, buddy2))
-+ J_ASSERT(mb_test_bit(i << 1, buddy2));
-+ continue;
-+ }
-+
-+ /* both bits in buddy2 must be 0 */
-+ J_ASSERT(mb_test_bit(i << 1, buddy2));
-+ J_ASSERT(mb_test_bit((i << 1) + 1, buddy2));
-+
-+ for (j = 0; j < (1 << order); j++) {
-+ k = (i * (1 << order)) + j;
-+ J_ASSERT(!mb_test_bit(k, EXT3_MB_BITMAP(e3b)));
-+ }
-+ count++;
-+ }
-+ J_ASSERT(e3b->bd_info->bb_counters[order] == count);
-+ order--;
-+ }
-+
-+ fstart = -1;
-+ buddy = mb_find_buddy(e3b, 0, &max);
-+ for (i = 0; i < max; i++) {
-+ if (!mb_test_bit(i, buddy)) {
-+ J_ASSERT(i >= e3b->bd_info->bb_first_free);
-+ if (fstart == -1) {
-+ fragments++;
-+ fstart = i;
-+ }
-+ continue;
-+ }
-+ fstart = -1;
-+ /* check used bits only */
-+ for (j = 0; j < e3b->bd_blkbits + 1; j++) {
-+ buddy2 = mb_find_buddy(e3b, j, &max2);
-+ k = i >> j;
-+ J_ASSERT(k < max2);
-+ J_ASSERT(mb_test_bit(k, buddy2));
-+ }
-+ }
-+ J_ASSERT(!EXT3_MB_GRP_NEED_INIT(e3b->bd_info));
-+ J_ASSERT(e3b->bd_info->bb_fragments == fragments);
-+}
-+
-+#else
-+#define mb_check_buddy(e3b)
-+#endif
-+
-+/* find most significant bit */
-+static int inline fmsb(unsigned short word)
-+{
-+ int order;
-+
-+ if (word > 255) {
-+ order = 7;
-+ word >>= 8;
-+ } else {
-+ order = -1;
-+ }
-+
-+ do {
-+ order++;
-+ word >>= 1;
-+ } while (word != 0);
-+
-+ return order;
-+}
-+
-+static void inline
-+ext3_mb_mark_free_simple(struct super_block *sb, void *buddy, unsigned first,
-+ int len, struct ext3_group_info *grp)
-+{
-+ struct ext3_sb_info *sbi = EXT3_SB(sb);
-+ unsigned short min, max, chunk, border;
-+
-+ mb_debug("mark %u/%u free\n", first, len);
-+ J_ASSERT(len < EXT3_BLOCKS_PER_GROUP(sb));
-+
-+ border = 2 << sb->s_blocksize_bits;
-+
-+ while (len > 0) {
-+ /* find how many blocks can be covered since this position */
-+ max = ffs(first | border) - 1;
-+
-+ /* find how many blocks of power 2 we need to mark */
-+ min = fmsb(len);
-+
-+ mb_debug(" %u/%u -> max %u, min %u\n",
-+ first & ((2 << sb->s_blocksize_bits) - 1),
-+ len, max, min);
-+
-+ if (max < min)
-+ min = max;
-+ chunk = 1 << min;
-+
-+ /* mark multiblock chunks only */
-+ grp->bb_counters[min]++;
-+ if (min > 0) {
-+ mb_debug(" set %u at %u \n", first >> min,
-+ sbi->s_mb_offsets[min]);
-+ mb_clear_bit(first >> min, buddy + sbi->s_mb_offsets[min]);
-+ }
-+
-+ len -= chunk;
-+ first += chunk;
-+ }
-+}
-+
-+static void
-+ext3_mb_generate_buddy(struct super_block *sb, void *buddy, void *bitmap,
-+ int group)
-+{
-+ struct ext3_group_info *grp = EXT3_GROUP_INFO(sb, group);
-+ unsigned short max = EXT3_BLOCKS_PER_GROUP(sb);
-+ unsigned short i = 0, first, len;
-+ unsigned free = 0, fragments = 0;
-+ unsigned long long period = get_cycles();
-+
-+ i = mb_find_next_zero_bit(bitmap, max, 0);
-+ grp->bb_first_free = i;
-+ while (i < max) {
-+ fragments++;
-+ first = i;
-+ i = ext2_find_next_le_bit(bitmap, max, i);
-+ len = i - first;
-+ free += len;
-+ if (len > 1)
-+ ext3_mb_mark_free_simple(sb, buddy, first, len, grp);
-+ else
-+ grp->bb_counters[0]++;
-+ if (i < max)
-+ i = mb_find_next_zero_bit(bitmap, max, i);
-+ }
-+ grp->bb_fragments = fragments;
-+
-+	/* bb_state should not be modified here because all
-+	 * others wait for init completion on the page lock */
-+ clear_bit(EXT3_GROUP_INFO_NEED_INIT_BIT, &grp->bb_state);
-+ if (free != grp->bb_free) {
-+ printk("EXT3-fs: group %u: %u blocks in bitmap, %u in gd\n",
-+ group, free, grp->bb_free);
-+ grp->bb_free = free;
-+ }
-+
-+ period = get_cycles() - period;
-+ spin_lock(&EXT3_SB(sb)->s_bal_lock);
-+ EXT3_SB(sb)->s_mb_buddies_generated++;
-+ EXT3_SB(sb)->s_mb_generation_time += period;
-+ spin_unlock(&EXT3_SB(sb)->s_bal_lock);
-+}
-+
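-+/*
-+ * Populate one page of the buddy-cache inode.  Each group owns two
-+ * blocks there: block 2*group holds a copy of the on-disk block bitmap,
-+ * block 2*group+1 holds the buddy data generated from it.
-+ */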
-+static int ext3_mb_init_cache(struct page *page)
-+{
-+ int blocksize, blocks_per_page, groups_per_page;
-+ int err = 0, i, first_group, first_block;
-+ struct super_block *sb;
-+ struct buffer_head *bhs;
-+ struct buffer_head **bh;
-+ struct inode *inode;
-+ char *data, *bitmap;
-+
-+ mb_debug("init page %lu\n", page->index);
-+
-+ inode = page->mapping->host;
-+ sb = inode->i_sb;
-+ blocksize = 1 << inode->i_blkbits;
-+ blocks_per_page = PAGE_CACHE_SIZE / blocksize;
-+
-+ groups_per_page = blocks_per_page >> 1;
-+ if (groups_per_page == 0)
-+ groups_per_page = 1;
-+
-+ /* allocate buffer_heads to read bitmaps */
-+ if (groups_per_page > 1) {
-+ err = -ENOMEM;
-+ i = sizeof(struct buffer_head *) * groups_per_page;
-+ bh = kmalloc(i, GFP_NOFS);
-+ if (bh == NULL)
-+ goto out;
-+ memset(bh, 0, i);
-+ } else
-+ bh = &bhs;
-+
-+ first_group = page->index * blocks_per_page / 2;
-+
-+ /* read all groups the page covers into the cache */
-+ for (i = 0; i < groups_per_page; i++) {
-+ struct ext3_group_desc * desc;
-+
-+ if (first_group + i >= EXT3_SB(sb)->s_groups_count)
-+ break;
-+
-+ err = -EIO;
-+ desc = ext3_get_group_desc(sb, first_group + i, NULL);
-+ if (desc == NULL)
-+ goto out;
-+
-+ err = -ENOMEM;
-+ bh[i] = sb_getblk(sb, le32_to_cpu(desc->bg_block_bitmap));
-+ if (bh[i] == NULL)
-+ goto out;
-+
-+ if (buffer_uptodate(bh[i]))
-+ continue;
-+
-+ lock_buffer(bh[i]);
-+ if (buffer_uptodate(bh[i])) {
-+ unlock_buffer(bh[i]);
-+ continue;
-+ }
-+
-+ get_bh(bh[i]);
-+ bh[i]->b_end_io = end_buffer_read_sync;
-+ submit_bh(READ, bh[i]);
-+ mb_debug("read bitmap for group %u\n", first_group + i);
-+ }
-+
-+ /* wait for I/O completion */
-+ for (i = 0; i < groups_per_page && bh[i]; i++)
-+ wait_on_buffer(bh[i]);
-+
-+ err = -EIO;
-+ for (i = 0; i < groups_per_page && bh[i]; i++)
-+ if (!buffer_uptodate(bh[i]))
-+ goto out;
-+
-+ first_block = page->index * blocks_per_page;
-+ for (i = 0; i < blocks_per_page; i++) {
-+ int group;
-+
-+ group = (first_block + i) >> 1;
-+ if (group >= EXT3_SB(sb)->s_groups_count)
-+ break;
-+
-+ data = page_address(page) + (i * blocksize);
-+ bitmap = bh[group - first_group]->b_data;
-+
-+ if ((first_block + i) & 1) {
-+ /* this is block of buddy */
-+ mb_debug("put buddy for group %u in page %lu/%x\n",
-+ group, page->index, i * blocksize);
-+ memset(data, 0xff, blocksize);
-+ EXT3_GROUP_INFO(sb, group)->bb_fragments = 0;
-+ memset(EXT3_GROUP_INFO(sb, group)->bb_counters, 0,
-+ sizeof(unsigned short)*(sb->s_blocksize_bits+2));
-+ ext3_mb_generate_buddy(sb, data, bitmap, group);
-+ } else {
-+ /* this is block of bitmap */
-+ mb_debug("put bitmap for group %u in page %lu/%x\n",
-+ group, page->index, i * blocksize);
-+ memcpy(data, bitmap, blocksize);
-+ }
-+ }
-+ SetPageUptodate(page);
-+
-+out:
-+ if (bh) {
-+ for (i = 0; i < groups_per_page && bh[i]; i++)
-+ brelse(bh[i]);
-+ if (bh != &bhs)
-+ kfree(bh);
-+ }
-+ return err;
-+}
-+
-+static int ext3_mb_load_buddy(struct super_block *sb, int group,
-+ struct ext3_buddy *e3b)
-+{
-+ struct ext3_sb_info *sbi = EXT3_SB(sb);
-+ struct inode *inode = sbi->s_buddy_cache;
-+ int blocks_per_page, block, pnum, poff;
-+ struct page *page;
-+
-+ mb_debug("load group %u\n", group);
-+
-+ blocks_per_page = PAGE_CACHE_SIZE / sb->s_blocksize;
-+
-+ e3b->bd_blkbits = sb->s_blocksize_bits;
-+ e3b->bd_info = EXT3_GROUP_INFO(sb, group);
-+ e3b->bd_sb = sb;
-+ e3b->bd_group = group;
-+ e3b->bd_buddy_page = NULL;
-+ e3b->bd_bitmap_page = NULL;
-+
-+ block = group * 2;
-+ pnum = block / blocks_per_page;
-+ poff = block % blocks_per_page;
-+
-+	/* we could use find_or_create_page(), but it locks the page,
-+	 * which we'd like to avoid in the fast path ... */
-+ page = find_get_page(inode->i_mapping, pnum);
-+ if (page == NULL || !PageUptodate(page)) {
-+ if (page)
-+ page_cache_release(page);
-+ page = find_or_create_page(inode->i_mapping, pnum, GFP_NOFS);
-+ if (page) {
-+ BUG_ON(page->mapping != inode->i_mapping);
-+ if (!PageUptodate(page))
-+ ext3_mb_init_cache(page);
-+ unlock_page(page);
-+ }
-+ }
-+ if (page == NULL || !PageUptodate(page))
-+ goto err;
-+ e3b->bd_bitmap_page = page;
-+ e3b->bd_bitmap = page_address(page) + (poff * sb->s_blocksize);
-+ mark_page_accessed(page);
-+
-+ block++;
-+ pnum = block / blocks_per_page;
-+ poff = block % blocks_per_page;
-+
-+ page = find_get_page(inode->i_mapping, pnum);
-+ if (page == NULL || !PageUptodate(page)) {
-+ if (page)
-+ page_cache_release(page);
-+ page = find_or_create_page(inode->i_mapping, pnum, GFP_NOFS);
-+ if (page) {
-+ BUG_ON(page->mapping != inode->i_mapping);
-+ if (!PageUptodate(page))
-+ ext3_mb_init_cache(page);
-+ unlock_page(page);
-+ }
-+ }
-+ if (page == NULL || !PageUptodate(page))
-+ goto err;
-+ e3b->bd_buddy_page = page;
-+ e3b->bd_buddy = page_address(page) + (poff * sb->s_blocksize);
-+ mark_page_accessed(page);
-+
-+ J_ASSERT(e3b->bd_bitmap_page != NULL);
-+ J_ASSERT(e3b->bd_buddy_page != NULL);
-+
-+ return 0;
-+
-+err:
-+ if (e3b->bd_bitmap_page)
-+ page_cache_release(e3b->bd_bitmap_page);
-+ if (e3b->bd_buddy_page)
-+ page_cache_release(e3b->bd_buddy_page);
-+ e3b->bd_buddy = NULL;
-+ e3b->bd_bitmap = NULL;
-+ return -EIO;
-+}
-+
-+static void ext3_mb_release_desc(struct ext3_buddy *e3b)
-+{
-+ if (e3b->bd_bitmap_page)
-+ page_cache_release(e3b->bd_bitmap_page);
-+ if (e3b->bd_buddy_page)
-+ page_cache_release(e3b->bd_buddy_page);
-+}
-+
-+
-+static inline void
-+ext3_lock_group(struct super_block *sb, int group)
-+{
-+ bit_spin_lock(EXT3_GROUP_INFO_LOCKED_BIT,
-+ &EXT3_GROUP_INFO(sb, group)->bb_state);
-+}
-+
-+static inline void
-+ext3_unlock_group(struct super_block *sb, int group)
-+{
-+ bit_spin_unlock(EXT3_GROUP_INFO_LOCKED_BIT,
-+ &EXT3_GROUP_INFO(sb, group)->bb_state);
-+}
-+
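-+/*
-+ * Find the order of the free buddy chunk that contains 'block' by walking
-+ * the per-order buddy bitmaps: a clear bit at order N means the whole
-+ * 2^N-block chunk is free.  Returns 0 if no such chunk is found.
-+ */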
-+static int mb_find_order_for_block(struct ext3_buddy *e3b, int block)
-+{
-+ int order = 1;
-+ void *bb;
-+
-+ J_ASSERT(EXT3_MB_BITMAP(e3b) != EXT3_MB_BUDDY(e3b));
-+ J_ASSERT(block < (1 << (e3b->bd_blkbits + 3)));
-+
-+ bb = EXT3_MB_BUDDY(e3b);
-+ while (order <= e3b->bd_blkbits + 1) {
-+ block = block >> 1;
-+ if (!mb_test_bit(block, bb)) {
-+ /* this block is part of buddy of order 'order' */
-+ return order;
-+ }
-+ bb += 1 << (e3b->bd_blkbits - order);
-+ order++;
-+ }
-+ return 0;
-+}
-+
-+static inline void mb_clear_bits(void *bm, int cur, int len)
-+{
-+ __u32 *addr;
-+
-+ len = cur + len;
-+ while (cur < len) {
-+ if ((cur & 31) == 0 && (len - cur) >= 32) {
-+ /* fast path: clear whole word at once */
-+ addr = bm + (cur >> 3);
-+ *addr = 0;
-+ cur += 32;
-+ continue;
-+ }
-+ mb_clear_bit_atomic(cur, bm);
-+ cur++;
-+ }
-+}
-+
-+static inline void mb_set_bits(void *bm, int cur, int len)
-+{
-+ __u32 *addr;
-+
-+ len = cur + len;
-+ while (cur < len) {
-+ if ((cur & 31) == 0 && (len - cur) >= 32) {
-+			/* fast path: set whole word at once */
-+ addr = bm + (cur >> 3);
-+ *addr = 0xffffffff;
-+ cur += 32;
-+ continue;
-+ }
-+ mb_set_bit_atomic(cur, bm);
-+ cur++;
-+ }
-+}
-+
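-+/*
-+ * Free 'count' blocks starting at 'first' in the buddy data: clear their
-+ * bits in the order-0 bitmap and merge each freed block with its buddy
-+ * as far up the orders as possible, keeping bb_free, bb_first_free and
-+ * bb_fragments up to date.
-+ */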
-+static int mb_free_blocks(struct ext3_buddy *e3b, int first, int count)
-+{
-+ int block = 0, max = 0, order;
-+ void *buddy, *buddy2;
-+
-+ mb_check_buddy(e3b);
-+
-+ e3b->bd_info->bb_free += count;
-+ if (first < e3b->bd_info->bb_first_free)
-+ e3b->bd_info->bb_first_free = first;
-+
-+ /* let's maintain fragments counter */
-+ if (first != 0)
-+ block = !mb_test_bit(first - 1, EXT3_MB_BITMAP(e3b));
-+ if (first + count < EXT3_SB(e3b->bd_sb)->s_mb_maxs[0])
-+ max = !mb_test_bit(first + count, EXT3_MB_BITMAP(e3b));
-+ if (block && max)
-+ e3b->bd_info->bb_fragments--;
-+ else if (!block && !max)
-+ e3b->bd_info->bb_fragments++;
-+
-+ /* let's maintain buddy itself */
-+ while (count-- > 0) {
-+ block = first++;
-+ order = 0;
-+
-+ J_ASSERT(mb_test_bit(block, EXT3_MB_BITMAP(e3b)));
-+ mb_clear_bit(block, EXT3_MB_BITMAP(e3b));
-+ e3b->bd_info->bb_counters[order]++;
-+
-+ /* start of the buddy */
-+ buddy = mb_find_buddy(e3b, order, &max);
-+
-+ do {
-+ block &= ~1UL;
-+ if (mb_test_bit(block, buddy) ||
-+ mb_test_bit(block + 1, buddy))
-+ break;
-+
-+ /* both the buddies are free, try to coalesce them */
-+ buddy2 = mb_find_buddy(e3b, order + 1, &max);
-+
-+ if (!buddy2)
-+ break;
-+
-+ if (order > 0) {
-+ /* for special purposes, we don't set
-+ * free bits in bitmap */
-+ mb_set_bit(block, buddy);
-+ mb_set_bit(block + 1, buddy);
-+ }
-+ e3b->bd_info->bb_counters[order]--;
-+ e3b->bd_info->bb_counters[order]--;
-+
-+ block = block >> 1;
-+ order++;
-+ e3b->bd_info->bb_counters[order]++;
-+
-+ mb_clear_bit(block, buddy2);
-+ buddy = buddy2;
-+ } while (1);
-+ }
-+ mb_check_buddy(e3b);
-+
-+ return 0;
-+}
-+
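-+/*
-+ * Describe the free extent that starts at 'block' (at the given order):
-+ * fill *ex with its group, start and length, extending over adjacent
-+ * free chunks until 'needed' blocks are covered or a used block is hit.
-+ * Returns the extent length, or 0 if 'block' is in use.
-+ */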
-+static int mb_find_extent(struct ext3_buddy *e3b, int order, int block,
-+ int needed, struct ext3_free_extent *ex)
-+{
-+ int next = block, max, ord;
-+ void *buddy;
-+
-+ J_ASSERT(ex != NULL);
-+
-+ buddy = mb_find_buddy(e3b, order, &max);
-+ J_ASSERT(buddy);
-+ J_ASSERT(block < max);
-+ if (mb_test_bit(block, buddy)) {
-+ ex->fe_len = 0;
-+ ex->fe_start = 0;
-+ ex->fe_group = 0;
-+ return 0;
-+ }
-+
-+ if (likely(order == 0)) {
-+ /* find actual order */
-+ order = mb_find_order_for_block(e3b, block);
-+ block = block >> order;
-+ }
-+
-+ ex->fe_len = 1 << order;
-+ ex->fe_start = block << order;
-+ ex->fe_group = e3b->bd_group;
-+
-+ /* calc difference from given start */
-+ next = next - ex->fe_start;
-+ ex->fe_len -= next;
-+ ex->fe_start += next;
-+
-+ while (needed > ex->fe_len && (buddy = mb_find_buddy(e3b, order, &max))) {
-+
-+ if (block + 1 >= max)
-+ break;
-+
-+ next = (block + 1) * (1 << order);
-+ if (mb_test_bit(next, EXT3_MB_BITMAP(e3b)))
-+ break;
-+
-+ ord = mb_find_order_for_block(e3b, next);
-+
-+ order = ord;
-+ block = next >> order;
-+ ex->fe_len += 1 << order;
-+ }
-+
-+ J_ASSERT(ex->fe_start + ex->fe_len <= (1 << (e3b->bd_blkbits + 3)));
-+ return ex->fe_len;
-+}
-+
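-+/*
-+ * Mark the extent 'ex' as in use: split higher-order buddies as needed,
-+ * update the per-order counters and finally set the bits in the order-0
-+ * bitmap.  The return value packs the tail length and the order of the
-+ * first buddy that had to be broken, for the allocation history.
-+ */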
-+static int mb_mark_used(struct ext3_buddy *e3b, struct ext3_free_extent *ex)
-+{
-+ int ord, mlen = 0, max = 0, cur;
-+ int start = ex->fe_start;
-+ int len = ex->fe_len;
-+ unsigned ret = 0;
-+ int len0 = len;
-+ void *buddy;
-+
-+ mb_check_buddy(e3b);
-+
-+ e3b->bd_info->bb_free -= len;
-+ if (e3b->bd_info->bb_first_free == start)
-+ e3b->bd_info->bb_first_free += len;
-+
-+ /* let's maintain fragments counter */
-+ if (start != 0)
-+ mlen = !mb_test_bit(start - 1, EXT3_MB_BITMAP(e3b));
-+ if (start + len < EXT3_SB(e3b->bd_sb)->s_mb_maxs[0])
-+ max = !mb_test_bit(start + len, EXT3_MB_BITMAP(e3b));
-+ if (mlen && max)
-+ e3b->bd_info->bb_fragments++;
-+ else if (!mlen && !max)
-+ e3b->bd_info->bb_fragments--;
-+
-+ /* let's maintain buddy itself */
-+ while (len) {
-+ ord = mb_find_order_for_block(e3b, start);
-+
-+ if (((start >> ord) << ord) == start && len >= (1 << ord)) {
-+ /* the whole chunk may be allocated at once! */
-+ mlen = 1 << ord;
-+ buddy = mb_find_buddy(e3b, ord, &max);
-+ J_ASSERT((start >> ord) < max);
-+ mb_set_bit(start >> ord, buddy);
-+ e3b->bd_info->bb_counters[ord]--;
-+ start += mlen;
-+ len -= mlen;
-+ J_ASSERT(len >= 0);
-+ continue;
-+ }
-+
-+ /* store for history */
-+ if (ret == 0)
-+ ret = len | (ord << 16);
-+
-+ /* we have to split large buddy */
-+ J_ASSERT(ord > 0);
-+ buddy = mb_find_buddy(e3b, ord, &max);
-+ mb_set_bit(start >> ord, buddy);
-+ e3b->bd_info->bb_counters[ord]--;
-+
-+ ord--;
-+ cur = (start >> ord) & ~1U;
-+ buddy = mb_find_buddy(e3b, ord, &max);
-+ mb_clear_bit(cur, buddy);
-+ mb_clear_bit(cur + 1, buddy);
-+ e3b->bd_info->bb_counters[ord]++;
-+ e3b->bd_info->bb_counters[ord]++;
-+ }
-+
-+ /* now drop all the bits in bitmap */
-+ mb_set_bits(EXT3_MB_BITMAP(e3b), ex->fe_start, len0);
-+
-+ mb_check_buddy(e3b);
-+
-+ return ret;
-+}
-+
-+/*
-+ * Must be called under group lock!
-+ */
-+static void ext3_mb_use_best_found(struct ext3_allocation_context *ac,
-+ struct ext3_buddy *e3b)
-+{
-+ unsigned long ret;
-+
-+ ac->ac_b_ex.fe_len = min(ac->ac_b_ex.fe_len, ac->ac_g_ex.fe_len);
-+ ret = mb_mark_used(e3b, &ac->ac_b_ex);
-+
-+ ac->ac_status = AC_STATUS_FOUND;
-+ ac->ac_tail = ret & 0xffff;
-+ ac->ac_buddy = ret >> 16;
-+
-+ /* hold in-core structures until allocated
-+ * blocks are marked non-free in on-disk bitmap */
-+ ac->ac_buddy_page = e3b->bd_buddy_page;
-+ page_cache_get(e3b->bd_buddy_page);
-+ ac->ac_bitmap_page = e3b->bd_bitmap_page;
-+ page_cache_get(e3b->bd_bitmap_page);
-+}
-+
-+/*
-+ * The routine checks whether the found extent is good enough. If it is,
-+ * the extent gets marked used and a flag is set in the context
-+ * to stop scanning. Otherwise, the extent is compared with the
-+ * previously found extent and, if the new one is better, it is stored
-+ * in the context. Later, the best found extent will be used if
-+ * mballoc can't find a good enough extent.
-+ *
-+ * FIXME: real allocation policy is to be designed yet!
-+ */
-+static void ext3_mb_measure_extent(struct ext3_allocation_context *ac,
-+ struct ext3_free_extent *ex,
-+ struct ext3_buddy *e3b)
-+{
-+ struct ext3_free_extent *bex = &ac->ac_b_ex;
-+ struct ext3_free_extent *gex = &ac->ac_g_ex;
-+
-+ J_ASSERT(ex->fe_len > 0);
-+ J_ASSERT(ex->fe_len < (1 << ac->ac_sb->s_blocksize_bits) * 8);
-+ J_ASSERT(ex->fe_start < (1 << ac->ac_sb->s_blocksize_bits) * 8);
-+
-+ ac->ac_found++;
-+
-+ /*
-+ * The special case - take what you catch first
-+ */
-+ if (unlikely(ac->ac_flags & EXT3_MB_HINT_FIRST)) {
-+ *bex = *ex;
-+ ext3_mb_use_best_found(ac, e3b);
-+ return;
-+ }
-+
-+ /*
-+ * Let's check whether the chunk is good enough
-+ */
-+ if (ex->fe_len == gex->fe_len) {
-+ *bex = *ex;
-+ ext3_mb_use_best_found(ac, e3b);
-+ return;
-+ }
-+
-+ /*
-+ * If this is first found extent, just store it in the context
-+ */
-+ if (bex->fe_len == 0) {
-+ *bex = *ex;
-+ return;
-+ }
-+
-+ /*
-+ * If new found extent is better, store it in the context
-+ */
-+ if (bex->fe_len < gex->fe_len) {
-+ /* if the request isn't satisfied, any found extent
-+ * larger than previous best one is better */
-+ if (ex->fe_len > bex->fe_len)
-+ *bex = *ex;
-+ } else if (ex->fe_len > gex->fe_len) {
-+		/* if the request is satisfied, then we try to find
-+		 * an extent that still satisfies the request, but is
-+		 * smaller than the previous one */
-+ *bex = *ex;
-+ }
-+
-+ /*
-+	 * Let's scan at least a few extents and not pick up the first one we find
-+ */
-+ if (bex->fe_len > gex->fe_len && ac->ac_found > ext3_mb_min_to_scan)
-+ ac->ac_status = AC_STATUS_BREAK;
-+
-+ /*
-+ * We don't want to scan for a whole year
-+ */
-+ if (ac->ac_found > ext3_mb_max_to_scan)
-+ ac->ac_status = AC_STATUS_BREAK;
-+}
-+
-+static int ext3_mb_try_best_found(struct ext3_allocation_context *ac,
-+ struct ext3_buddy *e3b)
-+{
-+ struct ext3_free_extent ex = ac->ac_b_ex;
-+ int group = ex.fe_group, max, err;
-+
-+ J_ASSERT(ex.fe_len > 0);
-+ err = ext3_mb_load_buddy(ac->ac_sb, group, e3b);
-+ if (err)
-+ return err;
-+
-+ ext3_lock_group(ac->ac_sb, group);
-+ max = mb_find_extent(e3b, 0, ex.fe_start, ex.fe_len, &ex);
-+
-+ if (max > 0) {
-+ ac->ac_b_ex = ex;
-+ ext3_mb_use_best_found(ac, e3b);
-+ }
-+
-+ ext3_unlock_group(ac->ac_sb, group);
-+
-+ ext3_mb_release_desc(e3b);
-+
-+ return 0;
-+}
-+
-+static int ext3_mb_find_by_goal(struct ext3_allocation_context *ac,
-+ struct ext3_buddy *e3b)
-+{
-+ int group = ac->ac_g_ex.fe_group, max, err;
-+ struct ext3_sb_info *sbi = EXT3_SB(ac->ac_sb);
-+ struct ext3_super_block *es = sbi->s_es;
-+ struct ext3_free_extent ex;
-+
-+ err = ext3_mb_load_buddy(ac->ac_sb, group, e3b);
-+ if (err)
-+ return err;
-+
-+ ext3_lock_group(ac->ac_sb, group);
-+ max = mb_find_extent(e3b, 0, ac->ac_g_ex.fe_start,
-+ ac->ac_g_ex.fe_len, &ex);
-+
-+ if (max >= ac->ac_g_ex.fe_len && ac->ac_g_ex.fe_len == sbi->s_stripe) {
-+ unsigned long start;
-+ start = (e3b->bd_group * EXT3_BLOCKS_PER_GROUP(ac->ac_sb) +
-+ ex.fe_start + le32_to_cpu(es->s_first_data_block));
-+ if (start % sbi->s_stripe == 0) {
-+ ac->ac_found++;
-+ ac->ac_b_ex = ex;
-+ ext3_mb_use_best_found(ac, e3b);
-+ }
-+ } else if (max >= ac->ac_g_ex.fe_len) {
-+ J_ASSERT(ex.fe_len > 0);
-+ J_ASSERT(ex.fe_group == ac->ac_g_ex.fe_group);
-+ J_ASSERT(ex.fe_start == ac->ac_g_ex.fe_start);
-+ ac->ac_found++;
-+ ac->ac_b_ex = ex;
-+ ext3_mb_use_best_found(ac, e3b);
-+ } else if (max > 0 && (ac->ac_flags & EXT3_MB_HINT_MERGE)) {
-+		/* Sometimes, the caller may want to merge even a small
-+		 * number of blocks into an existing extent */
-+ J_ASSERT(ex.fe_len > 0);
-+ J_ASSERT(ex.fe_group == ac->ac_g_ex.fe_group);
-+ J_ASSERT(ex.fe_start == ac->ac_g_ex.fe_start);
-+ ac->ac_found++;
-+ ac->ac_b_ex = ex;
-+ ext3_mb_use_best_found(ac, e3b);
-+ }
-+ ext3_unlock_group(ac->ac_sb, group);
-+
-+ ext3_mb_release_desc(e3b);
-+
-+ return 0;
-+}
-+
-+/*
-+ * The routine scans the buddy structures (not the bitmap!) from the given
-+ * order up to the max order and tries to find a chunk big enough to
-+ * satisfy the request
-+ */
-+static void ext3_mb_simple_scan_group(struct ext3_allocation_context *ac,
-+ struct ext3_buddy *e3b)
-+{
-+ struct super_block *sb = ac->ac_sb;
-+ struct ext3_group_info *grp = e3b->bd_info;
-+ void *buddy;
-+ int i, k, max;
-+
-+ J_ASSERT(ac->ac_2order > 0);
-+ for (i = ac->ac_2order; i <= sb->s_blocksize_bits + 1; i++) {
-+ if (grp->bb_counters[i] == 0)
-+ continue;
-+
-+ buddy = mb_find_buddy(e3b, i, &max);
-+ if (buddy == NULL) {
-+ printk(KERN_ALERT "looking for wrong order?\n");
-+ break;
-+ }
-+
-+ k = mb_find_next_zero_bit(buddy, max, 0);
-+ J_ASSERT(k < max);
-+
-+ ac->ac_found++;
-+
-+ ac->ac_b_ex.fe_len = 1 << i;
-+ ac->ac_b_ex.fe_start = k << i;
-+ ac->ac_b_ex.fe_group = e3b->bd_group;
-+
-+ ext3_mb_use_best_found(ac, e3b);
-+ J_ASSERT(ac->ac_b_ex.fe_len == ac->ac_g_ex.fe_len);
-+
-+ if (unlikely(ext3_mb_stats))
-+ atomic_inc(&EXT3_SB(sb)->s_bal_2orders);
-+
-+ break;
-+ }
-+}
-+
-+/*
-+ * The routine scans the group and measures all found extents.
-+ * To bound the scan, the group's free-block count (taken from the
-+ * group info) is used as an upper limit.
-+ */
-+static void ext3_mb_complex_scan_group(struct ext3_allocation_context *ac,
-+ struct ext3_buddy *e3b)
-+{
-+ struct super_block *sb = ac->ac_sb;
-+ void *bitmap = EXT3_MB_BITMAP(e3b);
-+ struct ext3_free_extent ex;
-+ int i, free;
-+
-+ free = e3b->bd_info->bb_free;
-+ J_ASSERT(free > 0);
-+
-+ i = e3b->bd_info->bb_first_free;
-+
-+ while (free && ac->ac_status == AC_STATUS_CONTINUE) {
-+ i = mb_find_next_zero_bit(bitmap, sb->s_blocksize * 8, i);
-+ if (i >= sb->s_blocksize * 8) {
-+ J_ASSERT(free == 0);
-+ break;
-+ }
-+
-+ mb_find_extent(e3b, 0, i, ac->ac_g_ex.fe_len, &ex);
-+ J_ASSERT(ex.fe_len > 0);
-+ J_ASSERT(free >= ex.fe_len);
-+
-+ ext3_mb_measure_extent(ac, &ex, e3b);
-+
-+ i += ex.fe_len;
-+ free -= ex.fe_len;
-+ }
-+}
-+
-+/*
-+ * This is a special case for storages like raid5
-+ * we try to find stripe-aligned chunks for stripe-size requests
-+ */
-+static void ext3_mb_scan_aligned(struct ext3_allocation_context *ac,
-+ struct ext3_buddy *e3b)
-+{
-+ struct super_block *sb = ac->ac_sb;
-+ struct ext3_sb_info *sbi = EXT3_SB(sb);
-+ void *bitmap = EXT3_MB_BITMAP(e3b);
-+ struct ext3_free_extent ex;
-+ unsigned long i, max;
-+
-+ J_ASSERT(sbi->s_stripe != 0);
-+
-+ /* find first stripe-aligned block */
-+ i = e3b->bd_group * EXT3_BLOCKS_PER_GROUP(sb)
-+ + le32_to_cpu(sbi->s_es->s_first_data_block);
-+ i = ((i + sbi->s_stripe - 1) / sbi->s_stripe) * sbi->s_stripe;
-+ i = (i - le32_to_cpu(sbi->s_es->s_first_data_block))
-+ % EXT3_BLOCKS_PER_GROUP(sb);
-+
-+ while (i < sb->s_blocksize * 8) {
-+ if (!mb_test_bit(i, bitmap)) {
-+ max = mb_find_extent(e3b, 0, i, sbi->s_stripe, &ex);
-+ if (max >= sbi->s_stripe) {
-+ ac->ac_found++;
-+ ac->ac_b_ex = ex;
-+ ext3_mb_use_best_found(ac, e3b);
-+ break;
-+ }
-+ }
-+ i += sbi->s_stripe;
-+ }
-+}
-+
-+static int ext3_mb_good_group(struct ext3_allocation_context *ac,
-+ int group, int cr)
-+{
-+ struct ext3_group_info *grp = EXT3_GROUP_INFO(ac->ac_sb, group);
-+ unsigned free, fragments, i, bits;
-+
-+ J_ASSERT(cr >= 0 && cr < 4);
-+ J_ASSERT(!EXT3_MB_GRP_NEED_INIT(grp));
-+
-+ free = grp->bb_free;
-+ fragments = grp->bb_fragments;
-+ if (free == 0)
-+ return 0;
-+ if (fragments == 0)
-+ return 0;
-+
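-+	/*
-+	 * cr 0: a free chunk of exactly the requested power-of-two order
-+	 *       must be present in this group
-+	 * cr 1: the average free fragment is large enough for the request
-+	 * cr 2: the group merely has enough free blocks in total
-+	 * cr 3: accept any group with free space
-+	 */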
-+ switch (cr) {
-+ case 0:
-+ J_ASSERT(ac->ac_2order != 0);
-+ bits = ac->ac_sb->s_blocksize_bits + 1;
-+ for (i = ac->ac_2order; i <= bits; i++)
-+ if (grp->bb_counters[i] > 0)
-+ return 1;
-+ break;
-+ case 1:
-+ if ((free / fragments) >= ac->ac_g_ex.fe_len)
-+ return 1;
-+ break;
-+ case 2:
-+ if (free >= ac->ac_g_ex.fe_len)
-+ return 1;
-+ break;
-+ case 3:
-+ return 1;
-+ default:
-+ BUG();
-+ }
-+
-+ return 0;
-+}
-+
-+int ext3_mb_new_blocks(handle_t *handle, struct inode *inode,
-+ unsigned long goal, int *len, int flags, int *errp)
-+{
-+ struct buffer_head *bitmap_bh = NULL;
-+ struct ext3_allocation_context ac;
-+ int i, group, block, cr, err = 0;
-+ struct ext3_group_desc *gdp;
-+ struct ext3_super_block *es;
-+ struct buffer_head *gdp_bh;
-+ struct ext3_sb_info *sbi;
-+ struct super_block *sb;
-+ struct ext3_buddy e3b;
-+
-+ J_ASSERT(len != NULL);
-+ J_ASSERT(*len > 0);
-+
-+ sb = inode->i_sb;
-+ if (!sb) {
-+ printk("ext3_mb_new_nblocks: nonexistent device");
-+ return 0;
-+ }
-+
-+ if (!test_opt(sb, MBALLOC)) {
-+ static int ext3_mballoc_warning = 0;
-+ if (ext3_mballoc_warning == 0) {
-+ printk(KERN_ERR "EXT3-fs: multiblock request with "
-+ "mballoc disabled!\n");
-+ ext3_mballoc_warning++;
-+ }
-+ *len = 1;
-+ err = ext3_new_block_old(handle, inode, goal, errp);
-+ return err;
-+ }
-+
-+ ext3_mb_poll_new_transaction(sb, handle);
-+
-+ sbi = EXT3_SB(sb);
-+ es = EXT3_SB(sb)->s_es;
-+
-+ /*
-+ * We can't allocate > group size
-+ */
-+ if (*len >= EXT3_BLOCKS_PER_GROUP(sb) - 10)
-+ *len = EXT3_BLOCKS_PER_GROUP(sb) - 10;
-+
-+ if (!(flags & EXT3_MB_HINT_RESERVED)) {
-+ /* someone asks for non-reserved blocks */
-+ BUG_ON(*len > 1);
-+ err = ext3_mb_reserve_blocks(sb, 1);
-+ if (err) {
-+ *errp = err;
-+ return 0;
-+ }
-+ }
-+
-+ ac.ac_buddy_page = NULL;
-+ ac.ac_bitmap_page = NULL;
-+
-+ /*
-+	 * Check quota for allocation of these blocks.
-+ */
-+ while (*len && DQUOT_ALLOC_BLOCK(inode, *len))
-+ *len -= 1;
-+ if (*len == 0) {
-+ *errp = -EDQUOT;
-+ block = 0;
-+ goto out;
-+ }
-+
-+ /* start searching from the goal */
-+ if (goal < le32_to_cpu(es->s_first_data_block) ||
-+ goal >= le32_to_cpu(es->s_blocks_count))
-+ goal = le32_to_cpu(es->s_first_data_block);
-+ group = (goal - le32_to_cpu(es->s_first_data_block)) /
-+ EXT3_BLOCKS_PER_GROUP(sb);
-+ block = ((goal - le32_to_cpu(es->s_first_data_block)) %
-+ EXT3_BLOCKS_PER_GROUP(sb));
-+
-+ /* set up allocation goals */
-+ ac.ac_b_ex.fe_group = 0;
-+ ac.ac_b_ex.fe_start = 0;
-+ ac.ac_b_ex.fe_len = 0;
-+ ac.ac_status = AC_STATUS_CONTINUE;
-+ ac.ac_groups_scanned = 0;
-+ ac.ac_ex_scanned = 0;
-+ ac.ac_found = 0;
-+ ac.ac_sb = inode->i_sb;
-+ ac.ac_g_ex.fe_group = group;
-+ ac.ac_g_ex.fe_start = block;
-+ ac.ac_g_ex.fe_len = *len;
-+ ac.ac_flags = flags;
-+ ac.ac_2order = 0;
-+ ac.ac_criteria = 0;
-+
-+ if (*len == 1 && sbi->s_stripe) {
-+		/* looks like metadata; as a dirty hack for raid5, place
-+		 * all metadata in the first groups in the hope of hitting
-+		 * cached sectors and avoiding read-modify-write cycles */
-+ ac.ac_g_ex.fe_group = group = 0;
-+ }
-+
-+ /* probably, the request is for 2^8+ blocks (1/2/3/... MB) */
-+ i = ffs(*len);
-+ if (i >= ext3_mb_order2_reqs) {
-+ i--;
-+ if ((*len & (~(1 << i))) == 0)
-+ ac.ac_2order = i;
-+ }
-+
-+ /* first, try the goal */
-+ err = ext3_mb_find_by_goal(&ac, &e3b);
-+ if (err)
-+ goto out_err;
-+ if (ac.ac_status == AC_STATUS_FOUND)
-+ goto found;
-+
-+	/* Let's just scan groups to find more or less suitable blocks */
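-+	/*
-+	 * Scan the groups in rounds of decreasing strictness (cr 0..3, see
-+	 * ext3_mb_good_group()); each round starts at the goal group and
-+	 * wraps around once over all groups.
-+	 */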
-+ cr = ac.ac_2order ? 0 : 1;
-+repeat:
-+ for (; cr < 4 && ac.ac_status == AC_STATUS_CONTINUE; cr++) {
-+ ac.ac_criteria = cr;
-+ for (i = 0; i < EXT3_SB(sb)->s_groups_count; group++, i++) {
-+ if (group == EXT3_SB(sb)->s_groups_count)
-+ group = 0;
-+
-+ if (EXT3_MB_GRP_NEED_INIT(EXT3_GROUP_INFO(sb, group))) {
-+ /* we need full data about the group
-+ * to make a good selection */
-+ err = ext3_mb_load_buddy(ac.ac_sb, group, &e3b);
-+ if (err)
-+ goto out_err;
-+ ext3_mb_release_desc(&e3b);
-+ }
-+
-+			/* check whether the group is good for our criteria */
-+ if (!ext3_mb_good_group(&ac, group, cr))
-+ continue;
-+
-+ err = ext3_mb_load_buddy(ac.ac_sb, group, &e3b);
-+ if (err)
-+ goto out_err;
-+
-+ ext3_lock_group(sb, group);
-+ if (!ext3_mb_good_group(&ac, group, cr)) {
-+				/* someone allocated from this group in the meantime */
-+ ext3_unlock_group(sb, group);
-+ ext3_mb_release_desc(&e3b);
-+ continue;
-+ }
-+
-+ ac.ac_groups_scanned++;
-+ if (cr == 0)
-+ ext3_mb_simple_scan_group(&ac, &e3b);
-+ else if (cr == 1 && *len == sbi->s_stripe)
-+ ext3_mb_scan_aligned(&ac, &e3b);
-+ else
-+ ext3_mb_complex_scan_group(&ac, &e3b);
-+
-+ ext3_unlock_group(sb, group);
-+
-+ ext3_mb_release_desc(&e3b);
-+
-+ if (ac.ac_status != AC_STATUS_CONTINUE)
-+ break;
-+ }
-+ }
-+
-+ if (ac.ac_b_ex.fe_len > 0 && ac.ac_status != AC_STATUS_FOUND &&
-+ !(ac.ac_flags & EXT3_MB_HINT_FIRST)) {
-+ /*
-+ * We've been searching too long. Let's try to allocate
-+ * the best chunk we've found so far
-+ */
-+
-+ /*if (ac.ac_found > ext3_mb_max_to_scan)
-+ printk(KERN_DEBUG "EXT3-fs: too long searching at "
-+ "%u (%d/%d)\n", cr, ac.ac_b_ex.fe_len,
-+ ac.ac_g_ex.fe_len);*/
-+ ext3_mb_try_best_found(&ac, &e3b);
-+ if (ac.ac_status != AC_STATUS_FOUND) {
-+ /*
-+			 * Someone luckier has already allocated it.
-+			 * The only thing we can do is just take the first
-+			 * found block(s).
-+ printk(KERN_DEBUG "EXT3-fs: someone won our chunk\n");
-+ */
-+ ac.ac_b_ex.fe_group = 0;
-+ ac.ac_b_ex.fe_start = 0;
-+ ac.ac_b_ex.fe_len = 0;
-+ ac.ac_status = AC_STATUS_CONTINUE;
-+ ac.ac_flags |= EXT3_MB_HINT_FIRST;
-+ cr = 3;
-+ goto repeat;
-+ }
-+ }
-+
-+ if (ac.ac_status != AC_STATUS_FOUND) {
-+ /*
-+		 * We are definitely out of luck
-+ */
-+ DQUOT_FREE_BLOCK(inode, *len);
-+ *errp = -ENOSPC;
-+ block = 0;
-+#if 1
-+ printk(KERN_ERR "EXT3-fs: can't allocate: status %d flags %d\n",
-+ ac.ac_status, ac.ac_flags);
-+ printk(KERN_ERR "EXT3-fs: goal %d, best found %d/%d/%d cr %d\n",
-+ ac.ac_g_ex.fe_len, ac.ac_b_ex.fe_group,
-+ ac.ac_b_ex.fe_start, ac.ac_b_ex.fe_len, cr);
-+ printk(KERN_ERR "EXT3-fs: %lu block reserved, %d found\n",
-+ sbi->s_blocks_reserved, ac.ac_found);
-+ printk("EXT3-fs: groups: ");
-+ for (i = 0; i < EXT3_SB(sb)->s_groups_count; i++)
-+ printk("%d: %d ", i, EXT3_GROUP_INFO(sb, i)->bb_free);
-+ printk("\n");
-+#endif
-+ goto out;
-+ }
-+
-+found:
-+ J_ASSERT(ac.ac_b_ex.fe_len > 0);
-+
-+ /* good news - free block(s) have been found. now it's time
-+ * to mark block(s) in good old journaled bitmap */
-+ block = ac.ac_b_ex.fe_group * EXT3_BLOCKS_PER_GROUP(sb)
-+ + ac.ac_b_ex.fe_start
-+ + le32_to_cpu(es->s_first_data_block);
-+
-+	/* we made a decision, now mark the found blocks in the good old
-+	 * bitmap to be journaled */
-+
-+ ext3_debug("using block group %d(%d)\n",
-+ ac.ac_b_group.group, gdp->bg_free_blocks_count);
-+
-+ bitmap_bh = read_block_bitmap(sb, ac.ac_b_ex.fe_group);
-+ if (!bitmap_bh) {
-+ *errp = -EIO;
-+ goto out_err;
-+ }
-+
-+ err = ext3_journal_get_write_access(handle, bitmap_bh);
-+ if (err) {
-+ *errp = err;
-+ goto out_err;
-+ }
-+
-+ gdp = ext3_get_group_desc(sb, ac.ac_b_ex.fe_group, &gdp_bh);
-+ if (!gdp) {
-+ *errp = -EIO;
-+ goto out_err;
-+ }
-+
-+ err = ext3_journal_get_write_access(handle, gdp_bh);
-+ if (err)
-+ goto out_err;
-+
-+ block = ac.ac_b_ex.fe_group * EXT3_BLOCKS_PER_GROUP(sb)
-+ + ac.ac_b_ex.fe_start
-+ + le32_to_cpu(es->s_first_data_block);
-+
-+ if (block == le32_to_cpu(gdp->bg_block_bitmap) ||
-+ block == le32_to_cpu(gdp->bg_inode_bitmap) ||
-+ in_range(block, le32_to_cpu(gdp->bg_inode_table),
-+ EXT3_SB(sb)->s_itb_per_group))
-+ ext3_error(sb, "ext3_new_block",
-+ "Allocating block in system zone - "
-+ "block = %u", block);
-+#ifdef AGGRESSIVE_CHECK
-+ for (i = 0; i < ac.ac_b_ex.fe_len; i++)
-+ J_ASSERT(!mb_test_bit(ac.ac_b_ex.fe_start + i, bitmap_bh->b_data));
-+#endif
-+ mb_set_bits(bitmap_bh->b_data, ac.ac_b_ex.fe_start, ac.ac_b_ex.fe_len);
-+
-+ spin_lock(sb_bgl_lock(sbi, ac.ac_b_ex.fe_group));
-+ gdp->bg_free_blocks_count =
-+ cpu_to_le16(le16_to_cpu(gdp->bg_free_blocks_count)
-+ - ac.ac_b_ex.fe_len);
-+ spin_unlock(sb_bgl_lock(sbi, ac.ac_b_ex.fe_group));
-+ percpu_counter_mod(&sbi->s_freeblocks_counter, - ac.ac_b_ex.fe_len);
-+
-+ err = ext3_journal_dirty_metadata(handle, bitmap_bh);
-+ if (err)
-+ goto out_err;
-+ err = ext3_journal_dirty_metadata(handle, gdp_bh);
-+ if (err)
-+ goto out_err;
-+
-+ sb->s_dirt = 1;
-+ *errp = 0;
-+ brelse(bitmap_bh);
-+
-+ /* drop non-allocated, but dquote'd blocks */
-+ J_ASSERT(*len >= ac.ac_b_ex.fe_len);
-+ DQUOT_FREE_BLOCK(inode, *len - ac.ac_b_ex.fe_len);
-+
-+ *len = ac.ac_b_ex.fe_len;
-+ J_ASSERT(*len > 0);
-+ J_ASSERT(block != 0);
-+ goto out;
-+
-+out_err:
-+ /* if we've already allocated something, roll it back */
-+ if (ac.ac_status == AC_STATUS_FOUND) {
-+ /* FIXME: free blocks here */
-+ }
-+
-+ DQUOT_FREE_BLOCK(inode, *len);
-+ brelse(bitmap_bh);
-+ *errp = err;
-+ block = 0;
-+out:
-+ if (ac.ac_buddy_page)
-+ page_cache_release(ac.ac_buddy_page);
-+ if (ac.ac_bitmap_page)
-+ page_cache_release(ac.ac_bitmap_page);
-+
-+ if (!(flags & EXT3_MB_HINT_RESERVED)) {
-+		/* the block wasn't reserved before and we reserved it
-+		 * at the beginning of allocation. it doesn't matter
-+		 * whether we allocated anything or we failed: it is time
-+		 * to release the reservation. NOTE: because I expect
-+		 * multiblock requests from the delayed allocation
-+		 * path only, a single block is always released here */
-+ ext3_mb_release_blocks(sb, 1);
-+ }
-+
-+ if (unlikely(ext3_mb_stats) && ac.ac_g_ex.fe_len > 1) {
-+ atomic_inc(&sbi->s_bal_reqs);
-+ atomic_add(*len, &sbi->s_bal_allocated);
-+ if (*len >= ac.ac_g_ex.fe_len)
-+ atomic_inc(&sbi->s_bal_success);
-+ atomic_add(ac.ac_found, &sbi->s_bal_ex_scanned);
-+ if (ac.ac_g_ex.fe_start == ac.ac_b_ex.fe_start &&
-+ ac.ac_g_ex.fe_group == ac.ac_b_ex.fe_group)
-+ atomic_inc(&sbi->s_bal_goals);
-+ if (ac.ac_found > ext3_mb_max_to_scan)
-+ atomic_inc(&sbi->s_bal_breaks);
-+ }
-+
-+ ext3_mb_store_history(sb, inode->i_ino, &ac);
-+
-+ return block;
-+}
-+EXPORT_SYMBOL(ext3_mb_new_blocks);
-+
-+#ifdef EXT3_MB_HISTORY
-+struct ext3_mb_proc_session {
-+ struct ext3_mb_history *history;
-+ struct super_block *sb;
-+ int start;
-+ int max;
-+};
-+
-+static void *ext3_mb_history_skip_empty(struct ext3_mb_proc_session *s,
-+ struct ext3_mb_history *hs,
-+ int first)
-+{
-+ if (hs == s->history + s->max)
-+ hs = s->history;
-+ if (!first && hs == s->history + s->start)
-+ return NULL;
-+ while (hs->goal.fe_len == 0) {
-+ hs++;
-+ if (hs == s->history + s->max)
-+ hs = s->history;
-+ if (hs == s->history + s->start)
-+ return NULL;
-+ }
-+ return hs;
-+}
-+
-+static void *ext3_mb_seq_history_start(struct seq_file *seq, loff_t *pos)
-+{
-+ struct ext3_mb_proc_session *s = seq->private;
-+ struct ext3_mb_history *hs;
-+ int l = *pos;
-+
-+ if (l == 0)
-+ return SEQ_START_TOKEN;
-+ hs = ext3_mb_history_skip_empty(s, s->history + s->start, 1);
-+ if (!hs)
-+ return NULL;
-+ while (--l && (hs = ext3_mb_history_skip_empty(s, ++hs, 0)) != NULL);
-+ return hs;
-+}
-+
-+static void *ext3_mb_seq_history_next(struct seq_file *seq, void *v, loff_t *pos)
-+{
-+ struct ext3_mb_proc_session *s = seq->private;
-+ struct ext3_mb_history *hs = v;
-+
-+ ++*pos;
-+ if (v == SEQ_START_TOKEN)
-+ return ext3_mb_history_skip_empty(s, s->history + s->start, 1);
-+ else
-+ return ext3_mb_history_skip_empty(s, ++hs, 0);
-+}
-+
-+static int ext3_mb_seq_history_show(struct seq_file *seq, void *v)
-+{
-+ struct ext3_mb_history *hs = v;
-+ char buf[20], buf2[20];
-+
-+ if (v == SEQ_START_TOKEN) {
-+ seq_printf(seq, "%-5s %-8s %-17s %-17s %-5s %-5s %-2s %-5s %-5s %-6s\n",
-+ "pid", "inode", "goal", "result", "found", "grps", "cr",
-+ "merge", "tail", "broken");
-+ return 0;
-+ }
-+
-+ sprintf(buf, "%u/%u/%u", hs->goal.fe_group,
-+ hs->goal.fe_start, hs->goal.fe_len);
-+ sprintf(buf2, "%u/%u/%u", hs->result.fe_group,
-+ hs->result.fe_start, hs->result.fe_len);
-+ seq_printf(seq, "%-5u %-8u %-17s %-17s %-5u %-5u %-2u %-5s %-5u %-6u\n",
-+ hs->pid, hs->ino, buf, buf2, hs->found, hs->groups,
-+ hs->cr, hs->merged ? "M" : "", hs->tail,
-+ hs->buddy ? 1 << hs->buddy : 0);
-+ return 0;
-+}
-+
-+static void ext3_mb_seq_history_stop(struct seq_file *seq, void *v)
-+{
-+}
-+
-+static struct seq_operations ext3_mb_seq_history_ops = {
-+ .start = ext3_mb_seq_history_start,
-+ .next = ext3_mb_seq_history_next,
-+ .stop = ext3_mb_seq_history_stop,
-+ .show = ext3_mb_seq_history_show,
-+};
-+
-+static int ext3_mb_seq_history_open(struct inode *inode, struct file *file)
-+{
-+ struct super_block *sb = PDE(inode)->data;
-+ struct ext3_sb_info *sbi = EXT3_SB(sb);
-+ struct ext3_mb_proc_session *s;
-+ int rc, size;
-+
-+ s = kmalloc(sizeof(*s), GFP_KERNEL);
-+ if (s == NULL)
-+ return -EIO;
-+ size = sizeof(struct ext3_mb_history) * sbi->s_mb_history_max;
-+ s->history = kmalloc(size, GFP_KERNEL);
-+	if (s->history == NULL) {
-+ kfree(s);
-+ return -EIO;
-+ }
-+
-+ spin_lock(&sbi->s_mb_history_lock);
-+ memcpy(s->history, sbi->s_mb_history, size);
-+ s->max = sbi->s_mb_history_max;
-+ s->start = sbi->s_mb_history_cur % s->max;
-+ spin_unlock(&sbi->s_mb_history_lock);
-+
-+ rc = seq_open(file, &ext3_mb_seq_history_ops);
-+ if (rc == 0) {
-+ struct seq_file *m = (struct seq_file *)file->private_data;
-+ m->private = s;
-+ } else {
-+ kfree(s->history);
-+ kfree(s);
-+ }
-+ return rc;
-+
-+}
-+
-+static int ext3_mb_seq_history_release(struct inode *inode, struct file *file)
-+{
-+ struct seq_file *seq = (struct seq_file *)file->private_data;
-+ struct ext3_mb_proc_session *s = seq->private;
-+ kfree(s->history);
-+ kfree(s);
-+ return seq_release(inode, file);
-+}
-+
-+static struct file_operations ext3_mb_seq_history_fops = {
-+ .owner = THIS_MODULE,
-+ .open = ext3_mb_seq_history_open,
-+ .read = seq_read,
-+ .llseek = seq_lseek,
-+ .release = ext3_mb_seq_history_release,
-+};
-+
-+static void *ext3_mb_seq_groups_start(struct seq_file *seq, loff_t *pos)
-+{
-+ struct super_block *sb = seq->private;
-+ struct ext3_sb_info *sbi = EXT3_SB(sb);
-+ long group;
-+
-+ if (*pos < 0 || *pos >= sbi->s_groups_count)
-+ return NULL;
-+
-+ group = *pos + 1;
-+ return (void *) group;
-+}
-+
-+static void *ext3_mb_seq_groups_next(struct seq_file *seq, void *v, loff_t *pos)
-+{
-+ struct super_block *sb = seq->private;
-+ struct ext3_sb_info *sbi = EXT3_SB(sb);
-+ long group;
-+
-+ ++*pos;
-+ if (*pos < 0 || *pos >= sbi->s_groups_count)
-+ return NULL;
-+ group = *pos + 1;
-+	return (void *) group;
-+}
-+
-+static int ext3_mb_seq_groups_show(struct seq_file *seq, void *v)
-+{
-+ struct super_block *sb = seq->private;
-+ long group = (long) v, i;
-+ struct sg {
-+ struct ext3_group_info info;
-+ unsigned short counters[16];
-+ } sg;
-+
-+ group--;
-+ if (group == 0)
-+ seq_printf(seq, "#%-5s: %-5s %-5s %-5s [ %-5s %-5s %-5s %-5s %-5s %-5s %-5s %-5s %-5s %-5s %-5s %-5s %-5s %-5s ]\n",
-+ "group", "free", "frags", "first", "2^0", "2^1", "2^2",
-+ "2^3", "2^4", "2^5", "2^6", "2^7", "2^8", "2^9", "2^10",
-+ "2^11", "2^12", "2^13");
-+
-+ i = (sb->s_blocksize_bits + 2) * sizeof(sg.info.bb_counters[0]) +
-+ sizeof(struct ext3_group_info);
-+ ext3_lock_group(sb, group);
-+ memcpy(&sg, EXT3_GROUP_INFO(sb, group), i);
-+ ext3_unlock_group(sb, group);
-+
-+ if (EXT3_MB_GRP_NEED_INIT(&sg.info))
-+ return 0;
-+
-+ seq_printf(seq, "#%-5lu: %-5u %-5u %-5u [", group, sg.info.bb_free,
-+ sg.info.bb_fragments, sg.info.bb_first_free);
-+ for (i = 0; i <= 13; i++)
-+ seq_printf(seq, " %-5u", i <= sb->s_blocksize_bits + 1 ?
-+ sg.info.bb_counters[i] : 0);
-+ seq_printf(seq, " ]\n");
-+
-+ return 0;
-+}
-+
-+static void ext3_mb_seq_groups_stop(struct seq_file *seq, void *v)
-+{
-+}
-+
-+static struct seq_operations ext3_mb_seq_groups_ops = {
-+ .start = ext3_mb_seq_groups_start,
-+ .next = ext3_mb_seq_groups_next,
-+ .stop = ext3_mb_seq_groups_stop,
-+ .show = ext3_mb_seq_groups_show,
-+};
-+
-+static int ext3_mb_seq_groups_open(struct inode *inode, struct file *file)
-+{
-+ struct super_block *sb = PDE(inode)->data;
-+ int rc;
-+
-+ rc = seq_open(file, &ext3_mb_seq_groups_ops);
-+ if (rc == 0) {
-+ struct seq_file *m = (struct seq_file *)file->private_data;
-+ m->private = sb;
-+ }
-+ return rc;
-+
-+}
-+
-+static struct file_operations ext3_mb_seq_groups_fops = {
-+ .owner = THIS_MODULE,
-+ .open = ext3_mb_seq_groups_open,
-+ .read = seq_read,
-+ .llseek = seq_lseek,
-+ .release = seq_release,
-+};
-+
-+static void ext3_mb_history_release(struct super_block *sb)
-+{
-+ struct ext3_sb_info *sbi = EXT3_SB(sb);
-+ char name[64];
-+
-+ snprintf(name, sizeof(name) - 1, "%s", bdevname(sb->s_bdev, name));
-+ remove_proc_entry("mb_groups", sbi->s_mb_proc);
-+ remove_proc_entry("mb_history", sbi->s_mb_proc);
-+ remove_proc_entry(name, proc_root_ext3);
-+
-+ if (sbi->s_mb_history)
-+ kfree(sbi->s_mb_history);
-+}
-+
-+static void ext3_mb_history_init(struct super_block *sb)
-+{
-+ struct ext3_sb_info *sbi = EXT3_SB(sb);
-+ char name[64];
-+ int i;
-+
-+ snprintf(name, sizeof(name) - 1, "%s", bdevname(sb->s_bdev, name));
-+ sbi->s_mb_proc = proc_mkdir(name, proc_root_ext3);
-+ if (sbi->s_mb_proc != NULL) {
-+ struct proc_dir_entry *p;
-+ p = create_proc_entry("mb_history", S_IRUGO, sbi->s_mb_proc);
-+ if (p) {
-+ p->proc_fops = &ext3_mb_seq_history_fops;
-+ p->data = sb;
-+ }
-+ p = create_proc_entry("mb_groups", S_IRUGO, sbi->s_mb_proc);
-+ if (p) {
-+ p->proc_fops = &ext3_mb_seq_groups_fops;
-+ p->data = sb;
-+ }
-+ }
-+
-+ sbi->s_mb_history_max = 1000;
-+ sbi->s_mb_history_cur = 0;
-+ spin_lock_init(&sbi->s_mb_history_lock);
-+ i = sbi->s_mb_history_max * sizeof(struct ext3_mb_history);
-+	sbi->s_mb_history = kmalloc(i, GFP_KERNEL);
-+	if (sbi->s_mb_history != NULL)
-+		memset(sbi->s_mb_history, 0, i);
-+	/* if we can't allocate the history buffer, we simply won't use it */
-+}
-+
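-+/*
-+ * Append one record to the fixed-size circular mb_history buffer; once
-+ * s_mb_history_max entries have been written the oldest ones are
-+ * overwritten.
-+ */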
-+static void
-+ext3_mb_store_history(struct super_block *sb, unsigned ino,
-+ struct ext3_allocation_context *ac)
-+{
-+ struct ext3_sb_info *sbi = EXT3_SB(sb);
-+ struct ext3_mb_history h;
-+
-+	if (unlikely(sbi->s_mb_history == NULL))
-+ return;
-+
-+ h.pid = current->pid;
-+ h.ino = ino;
-+ h.goal = ac->ac_g_ex;
-+ h.result = ac->ac_b_ex;
-+ h.found = ac->ac_found;
-+ h.cr = ac->ac_criteria;
-+ h.groups = ac->ac_groups_scanned;
-+ h.tail = ac->ac_tail;
-+ h.buddy = ac->ac_buddy;
-+ h.merged = 0;
-+ if (ac->ac_g_ex.fe_start == ac->ac_b_ex.fe_start &&
-+ ac->ac_g_ex.fe_group == ac->ac_b_ex.fe_group)
-+ h.merged = 1;
-+
-+ spin_lock(&sbi->s_mb_history_lock);
-+ memcpy(sbi->s_mb_history + sbi->s_mb_history_cur, &h, sizeof(h));
-+ if (++sbi->s_mb_history_cur >= sbi->s_mb_history_max)
-+ sbi->s_mb_history_cur = 0;
-+ spin_unlock(&sbi->s_mb_history_lock);
-+}
-+
-+#else
-+#define ext3_mb_history_release(sb)
-+#define ext3_mb_history_init(sb)
-+#endif
-+
-+int ext3_mb_init_backend(struct super_block *sb)
-+{
-+ struct ext3_sb_info *sbi = EXT3_SB(sb);
-+ int i, j, len, metalen;
-+ int num_meta_group_infos =
-+ (sbi->s_groups_count + EXT3_DESC_PER_BLOCK(sb) - 1) >>
-+ EXT3_DESC_PER_BLOCK_BITS(sb);
-+ struct ext3_group_info **meta_group_info;
-+
-+ /* An 8TB filesystem with 64-bit pointers requires a 4096 byte
-+ * kmalloc. A 128kb malloc should suffice for a 256TB filesystem.
-+ * So a two level scheme suffices for now. */
-+ sbi->s_group_info = kmalloc(sizeof(*sbi->s_group_info) *
-+ num_meta_group_infos, GFP_KERNEL);
-+ if (sbi->s_group_info == NULL) {
-+ printk(KERN_ERR "EXT3-fs: can't allocate buddy meta group\n");
-+ return -ENOMEM;
-+ }
-+ sbi->s_buddy_cache = new_inode(sb);
-+ if (sbi->s_buddy_cache == NULL) {
-+ printk(KERN_ERR "EXT3-fs: can't get new inode\n");
-+ goto err_freesgi;
-+ }
-+
-+ metalen = sizeof(*meta_group_info) << EXT3_DESC_PER_BLOCK_BITS(sb);
-+ for (i = 0; i < num_meta_group_infos; i++) {
-+ if ((i + 1) == num_meta_group_infos)
-+ metalen = sizeof(*meta_group_info) *
-+ (sbi->s_groups_count -
-+ (i << EXT3_DESC_PER_BLOCK_BITS(sb)));
-+ meta_group_info = kmalloc(metalen, GFP_KERNEL);
-+ if (meta_group_info == NULL) {
-+ printk(KERN_ERR "EXT3-fs: can't allocate mem for a "
-+ "buddy group\n");
-+ goto err_freemeta;
-+ }
-+ sbi->s_group_info[i] = meta_group_info;
-+ }
-+
-+ /*
-+	 * calculate the needed size. if you change the size of bb_counters,
-+	 * don't forget about ext3_mb_generate_buddy()
-+ */
-+ len = sizeof(struct ext3_group_info);
-+ len += sizeof(unsigned short) * (sb->s_blocksize_bits + 2);
-+ for (i = 0; i < sbi->s_groups_count; i++) {
-+ struct ext3_group_desc * desc;
-+
-+ meta_group_info =
-+ sbi->s_group_info[i >> EXT3_DESC_PER_BLOCK_BITS(sb)];
-+ j = i & (EXT3_DESC_PER_BLOCK(sb) - 1);
-+
-+ meta_group_info[j] = kmalloc(len, GFP_KERNEL);
-+ if (meta_group_info[j] == NULL) {
-+ printk(KERN_ERR "EXT3-fs: can't allocate buddy mem\n");
-+ i--;
-+ goto err_freebuddy;
-+ }
-+ desc = ext3_get_group_desc(sb, i, NULL);
-+ if (desc == NULL) {
-+ printk(KERN_ERR"EXT3-fs: can't read descriptor %u\n",i);
-+ goto err_freebuddy;
-+ }
-+ memset(meta_group_info[j], 0, len);
-+ set_bit(EXT3_GROUP_INFO_NEED_INIT_BIT,
-+ &meta_group_info[j]->bb_state);
-+ meta_group_info[j]->bb_free =
-+ le16_to_cpu(desc->bg_free_blocks_count);
-+ }
-+
-+ return 0;
-+
-+err_freebuddy:
-+ while (i >= 0) {
-+ kfree(EXT3_GROUP_INFO(sb, i));
-+ i--;
-+ }
-+ i = num_meta_group_infos;
-+err_freemeta:
-+ while (--i >= 0)
-+ kfree(sbi->s_group_info[i]);
-+ iput(sbi->s_buddy_cache);
-+err_freesgi:
-+ kfree(sbi->s_group_info);
-+ return -ENOMEM;
-+}
-+
-+int ext3_mb_init(struct super_block *sb, int needs_recovery)
-+{
-+ struct ext3_sb_info *sbi = EXT3_SB(sb);
-+ struct inode *root = sb->s_root->d_inode;
-+ unsigned i, offset, max;
-+ struct dentry *dentry;
-+
-+ if (!test_opt(sb, MBALLOC))
-+ return 0;
-+
-+ i = (sb->s_blocksize_bits + 2) * sizeof(unsigned short);
-+
-+ sbi->s_mb_offsets = kmalloc(i, GFP_KERNEL);
-+ if (sbi->s_mb_offsets == NULL) {
-+ clear_opt(sbi->s_mount_opt, MBALLOC);
-+ return -ENOMEM;
-+ }
-+ sbi->s_mb_maxs = kmalloc(i, GFP_KERNEL);
-+ if (sbi->s_mb_maxs == NULL) {
-+ clear_opt(sbi->s_mount_opt, MBALLOC);
-+ kfree(sbi->s_mb_offsets);
-+ return -ENOMEM;
-+ }
-+
-+ /* order 0 is regular bitmap */
-+ sbi->s_mb_maxs[0] = sb->s_blocksize << 3;
-+ sbi->s_mb_offsets[0] = 0;
-+
-+ i = 1;
-+ offset = 0;
-+ max = sb->s_blocksize << 2;
-+ do {
-+ sbi->s_mb_offsets[i] = offset;
-+ sbi->s_mb_maxs[i] = max;
-+ offset += 1 << (sb->s_blocksize_bits - i);
-+ max = max >> 1;
-+ i++;
-+ } while (i <= sb->s_blocksize_bits + 1);
-+
-+ /* init file for buddy data */
-+ if ((i = ext3_mb_init_backend(sb))) {
-+ clear_opt(sbi->s_mount_opt, MBALLOC);
-+ kfree(sbi->s_mb_offsets);
-+ kfree(sbi->s_mb_maxs);
-+ return i;
-+ }
-+
-+ spin_lock_init(&sbi->s_reserve_lock);
-+ spin_lock_init(&sbi->s_md_lock);
-+ INIT_LIST_HEAD(&sbi->s_active_transaction);
-+ INIT_LIST_HEAD(&sbi->s_closed_transaction);
-+ INIT_LIST_HEAD(&sbi->s_committed_transaction);
-+ spin_lock_init(&sbi->s_bal_lock);
-+
-+ /* remove old on-disk buddy file */
-+ down(&root->i_sem);
-+ dentry = lookup_one_len(".buddy", sb->s_root, strlen(".buddy"));
-+ if (dentry->d_inode != NULL) {
-+ i = vfs_unlink(root, dentry);
-+ if (i != 0)
-+ printk("EXT3-fs: can't remove .buddy file: %d\n", i);
-+ }
-+ dput(dentry);
-+ up(&root->i_sem);
-+
-+ ext3_mb_history_init(sb);
-+
-+ printk("EXT3-fs: mballoc enabled\n");
-+ return 0;
-+}
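ext3_mb_init() above precomputes, for each buddy order, the byte offset of that order's bitmap inside the one-block buddy area (s_mb_offsets) and the number of bits it holds (s_mb_maxs). The standalone sketch below prints that layout for a given block size; it guards the final iteration so the shift count never goes negative, which the in-kernel loop tolerates only because the result is unused:

#include <stdio.h>

/* Print, per buddy order, the byte offset and bit count inside the buddy block. */
static void print_buddy_layout(unsigned blocksize)
{
	unsigned blkbits = 0, i, offset = 0, max;

	while ((1u << blkbits) < blocksize)
		blkbits++;

	/* order 0 is the block bitmap itself, one bit per block */
	printf("order  0: the block bitmap, %u bits\n", blocksize << 3);

	max = blocksize << 2;                   /* order-1 map covers half the bits */
	for (i = 1; i <= blkbits + 1; i++) {
		printf("order %2u: offset %4u bytes, %5u bits\n", i, offset, max);
		if (i <= blkbits)               /* avoid a negative shift on the last order */
			offset += 1u << (blkbits - i);
		max >>= 1;
	}
}

int main(void)
{
	print_buddy_layout(4096);               /* 4 KB blocks: orders 1..13 fit in one block */
	return 0;
}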
-+
-+int ext3_mb_release(struct super_block *sb)
-+{
-+ struct ext3_sb_info *sbi = EXT3_SB(sb);
-+ int i, num_meta_group_infos;
-+
-+ if (!test_opt(sb, MBALLOC))
-+ return 0;
-+
-+ /* release freed, non-committed blocks */
-+ spin_lock(&sbi->s_md_lock);
-+ list_splice_init(&sbi->s_closed_transaction,
-+ &sbi->s_committed_transaction);
-+ list_splice_init(&sbi->s_active_transaction,
-+ &sbi->s_committed_transaction);
-+ spin_unlock(&sbi->s_md_lock);
-+ ext3_mb_free_committed_blocks(sb);
-+
-+ if (sbi->s_group_info) {
-+ for (i = 0; i < sbi->s_groups_count; i++)
-+ kfree(EXT3_GROUP_INFO(sb, i));
-+ num_meta_group_infos = (sbi->s_groups_count +
-+ EXT3_DESC_PER_BLOCK(sb) - 1) >>
-+ EXT3_DESC_PER_BLOCK_BITS(sb);
-+ for (i = 0; i < num_meta_group_infos; i++)
-+ kfree(sbi->s_group_info[i]);
-+ kfree(sbi->s_group_info);
-+ }
-+ if (sbi->s_mb_offsets)
-+ kfree(sbi->s_mb_offsets);
-+ if (sbi->s_mb_maxs)
-+ kfree(sbi->s_mb_maxs);
-+ if (sbi->s_buddy_cache)
-+ iput(sbi->s_buddy_cache);
-+ if (sbi->s_blocks_reserved)
-+ printk("ext3-fs: %ld blocks being reserved at umount!\n",
-+ sbi->s_blocks_reserved);
-+ if (ext3_mb_stats) {
-+ printk("EXT3-fs: mballoc: %u blocks %u reqs (%u success)\n",
-+ atomic_read(&sbi->s_bal_allocated),
-+ atomic_read(&sbi->s_bal_reqs),
-+ atomic_read(&sbi->s_bal_success));
-+ printk("EXT3-fs: mballoc: %u extents scanned, %u goal hits, "
-+ "%u 2^N hits, %u breaks\n",
-+ atomic_read(&sbi->s_bal_ex_scanned),
-+ atomic_read(&sbi->s_bal_goals),
-+ atomic_read(&sbi->s_bal_2orders),
-+ atomic_read(&sbi->s_bal_breaks));
-+ printk("EXT3-fs: mballoc: %lu generated and it took %Lu\n",
-+ sbi->s_mb_buddies_generated++,
-+ sbi->s_mb_generation_time);
-+ }
-+
-+ ext3_mb_history_release(sb);
-+
-+ return 0;
-+}
-+
-+void ext3_mb_free_committed_blocks(struct super_block *sb)
-+{
-+ struct ext3_sb_info *sbi = EXT3_SB(sb);
-+ int err, i, count = 0, count2 = 0;
-+ struct ext3_free_metadata *md;
-+ struct ext3_buddy e3b;
-+
-+ if (list_empty(&sbi->s_committed_transaction))
-+ return;
-+
-+ /* there are committed blocks still to be freed */
-+ do {
-+ /* get next array of blocks */
-+ md = NULL;
-+ spin_lock(&sbi->s_md_lock);
-+ if (!list_empty(&sbi->s_committed_transaction)) {
-+ md = list_entry(sbi->s_committed_transaction.next,
-+ struct ext3_free_metadata, list);
-+ list_del(&md->list);
-+ }
-+ spin_unlock(&sbi->s_md_lock);
-+
-+ if (md == NULL)
-+ break;
-+
-+ mb_debug("gonna free %u blocks in group %u (0x%p):",
-+ md->num, md->group, md);
-+
-+ err = ext3_mb_load_buddy(sb, md->group, &e3b);
-+ /* we expect to find existing buddy because it's pinned */
-+ BUG_ON(err != 0);
-+
-+ /* there are blocks to put in buddy to make them really free */
-+ count += md->num;
-+ count2++;
-+ ext3_lock_group(sb, md->group);
-+ for (i = 0; i < md->num; i++) {
-+ mb_debug(" %u", md->blocks[i]);
-+ mb_free_blocks(&e3b, md->blocks[i], 1);
-+ }
-+ mb_debug("\n");
-+ ext3_unlock_group(sb, md->group);
-+
-+ /* balance refcounts from ext3_mb_free_metadata() */
-+ page_cache_release(e3b.bd_buddy_page);
-+ page_cache_release(e3b.bd_bitmap_page);
-+
-+ kfree(md);
-+ ext3_mb_release_desc(&e3b);
-+
-+ } while (md);
-+ mb_debug("freed %u blocks in %u structures\n", count, count2);
-+}
-+
-+void ext3_mb_poll_new_transaction(struct super_block *sb, handle_t *handle)
-+{
-+ struct ext3_sb_info *sbi = EXT3_SB(sb);
-+
-+ if (sbi->s_last_transaction == handle->h_transaction->t_tid)
-+ return;
-+
-+ /* a new transaction has started: time to close the last one and free
-+ * blocks from the committed transaction. only one transaction can be
-+ * active, so the previous transaction may still be being logged, and
-+ * the transaction before that one is known to be logged already. this
-+ * means we may now free blocks that were freed in all transactions
-+ * before the previous one. */
-+
-+ spin_lock(&sbi->s_md_lock);
-+ if (sbi->s_last_transaction != handle->h_transaction->t_tid) {
-+ mb_debug("new transaction %lu, old %lu\n",
-+ (unsigned long) handle->h_transaction->t_tid,
-+ (unsigned long) sbi->s_last_transaction);
-+ list_splice_init(&sbi->s_closed_transaction,
-+ &sbi->s_committed_transaction);
-+ list_splice_init(&sbi->s_active_transaction,
-+ &sbi->s_closed_transaction);
-+ sbi->s_last_transaction = handle->h_transaction->t_tid;
-+ }
-+ spin_unlock(&sbi->s_md_lock);
-+
-+ ext3_mb_free_committed_blocks(sb);
-+}
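The polling function above ages freed-block containers through three lists: on the first operation in a new transaction, the closed list (whose transaction is now on stable storage) is spliced onto the committed list, and the active list becomes the new closed list. A toy userspace model of that aging, using plain counters in place of the block lists (all names are illustrative):

#include <stdio.h>

struct aging {
	unsigned long last_tid;
	unsigned long active;           /* freed in the current transaction     */
	unsigned long closed;           /* freed in the previous (logging) one  */
	unsigned long committed;        /* safe to hand out again               */
};

static void poll_new_transaction(struct aging *a, unsigned long tid)
{
	if (a->last_tid == tid)
		return;                 /* still inside the same transaction */
	a->committed += a->closed;      /* the closed transaction is on disk now */
	a->closed     = a->active;      /* the previous transaction is being logged */
	a->active     = 0;
	a->last_tid   = tid;
}

int main(void)
{
	struct aging a = { 1, 0, 0, 0 };

	a.active = 10;                  /* 10 blocks freed during transaction 1 */
	poll_new_transaction(&a, 2);
	a.active = 5;                   /* 5 blocks freed during transaction 2 */
	poll_new_transaction(&a, 3);
	printf("reusable now: %lu\n", a.committed);     /* prints 10 */
	return 0;
}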
-+
-+int ext3_mb_free_metadata(handle_t *handle, struct ext3_buddy *e3b,
-+ int group, int block, int count)
-+{
-+ struct ext3_group_info *db = e3b->bd_info;
-+ struct super_block *sb = e3b->bd_sb;
-+ struct ext3_sb_info *sbi = EXT3_SB(sb);
-+ struct ext3_free_metadata *md;
-+ int i;
-+
-+ J_ASSERT(e3b->bd_bitmap_page != NULL);
-+ J_ASSERT(e3b->bd_buddy_page != NULL);
-+
-+ ext3_lock_group(sb, group);
-+ for (i = 0; i < count; i++) {
-+ md = db->bb_md_cur;
-+ if (md && db->bb_tid != handle->h_transaction->t_tid) {
-+ db->bb_md_cur = NULL;
-+ md = NULL;
-+ }
-+
-+ if (md == NULL) {
-+ ext3_unlock_group(sb, group);
-+ md = kmalloc(sizeof(*md), GFP_KERNEL);
-+ if (md == NULL)
-+ return -ENOMEM;
-+ md->num = 0;
-+ md->group = group;
-+
-+ ext3_lock_group(sb, group);
-+ if (db->bb_md_cur == NULL) {
-+ spin_lock(&sbi->s_md_lock);
-+ list_add(&md->list, &sbi->s_active_transaction);
-+ spin_unlock(&sbi->s_md_lock);
-+ /* protect buddy cache from being freed,
-+ * otherwise we'll refresh it from
-+ * on-disk bitmap and lose not-yet-available
-+ * blocks */
-+ page_cache_get(e3b->bd_buddy_page);
-+ page_cache_get(e3b->bd_bitmap_page);
-+ db->bb_md_cur = md;
-+ db->bb_tid = handle->h_transaction->t_tid;
-+ mb_debug("new md 0x%p for group %u\n",
-+ md, md->group);
-+ } else {
-+ kfree(md);
-+ md = db->bb_md_cur;
-+ }
-+ }
-+
-+ BUG_ON(md->num >= EXT3_BB_MAX_BLOCKS);
-+ md->blocks[md->num] = block + i;
-+ md->num++;
-+ if (md->num == EXT3_BB_MAX_BLOCKS) {
-+ /* no more space, put full container on a sb's list */
-+ db->bb_md_cur = NULL;
-+ }
-+ }
-+ ext3_unlock_group(sb, group);
-+ return 0;
-+}
-+
-+void ext3_mb_free_blocks(handle_t *handle, struct inode *inode,
-+ unsigned long block, unsigned long count,
-+ int metadata, int *freed)
-+{
-+ struct buffer_head *bitmap_bh = NULL;
-+ struct ext3_group_desc *gdp;
-+ struct ext3_super_block *es;
-+ unsigned long bit, overflow;
-+ struct buffer_head *gd_bh;
-+ unsigned long block_group;
-+ struct ext3_sb_info *sbi;
-+ struct super_block *sb;
-+ struct ext3_buddy e3b;
-+ int err = 0, ret;
-+
-+ *freed = 0;
-+ sb = inode->i_sb;
-+ if (!sb) {
-+ printk ("ext3_free_blocks: nonexistent device");
-+ return;
-+ }
-+
-+ ext3_mb_poll_new_transaction(sb, handle);
-+
-+ sbi = EXT3_SB(sb);
-+ es = EXT3_SB(sb)->s_es;
-+ if (block < le32_to_cpu(es->s_first_data_block) ||
-+ block + count < block ||
-+ block + count > le32_to_cpu(es->s_blocks_count)) {
-+ ext3_error (sb, "ext3_free_blocks",
-+ "Freeing blocks not in datazone - "
-+ "block = %lu, count = %lu", block, count);
-+ goto error_return;
-+ }
-+
-+ ext3_debug("freeing block %lu\n", block);
-+
-+do_more:
-+ overflow = 0;
-+ block_group = (block - le32_to_cpu(es->s_first_data_block)) /
-+ EXT3_BLOCKS_PER_GROUP(sb);
-+ bit = (block - le32_to_cpu(es->s_first_data_block)) %
-+ EXT3_BLOCKS_PER_GROUP(sb);
-+ /*
-+ * Check to see if we are freeing blocks across a group
-+ * boundary.
-+ */
-+ if (bit + count > EXT3_BLOCKS_PER_GROUP(sb)) {
-+ overflow = bit + count - EXT3_BLOCKS_PER_GROUP(sb);
-+ count -= overflow;
-+ }
-+ brelse(bitmap_bh);
-+ bitmap_bh = read_block_bitmap(sb, block_group);
-+ if (!bitmap_bh)
-+ goto error_return;
-+ gdp = ext3_get_group_desc (sb, block_group, &gd_bh);
-+ if (!gdp)
-+ goto error_return;
-+
-+ if (in_range (le32_to_cpu(gdp->bg_block_bitmap), block, count) ||
-+ in_range (le32_to_cpu(gdp->bg_inode_bitmap), block, count) ||
-+ in_range (block, le32_to_cpu(gdp->bg_inode_table),
-+ EXT3_SB(sb)->s_itb_per_group) ||
-+ in_range (block + count - 1, le32_to_cpu(gdp->bg_inode_table),
-+ EXT3_SB(sb)->s_itb_per_group))
-+ ext3_error (sb, "ext3_free_blocks",
-+ "Freeing blocks in system zones - "
-+ "Block = %lu, count = %lu",
-+ block, count);
-+
-+ BUFFER_TRACE(bitmap_bh, "getting write access");
-+ err = ext3_journal_get_write_access(handle, bitmap_bh);
-+ if (err)
-+ goto error_return;
-+
-+ /*
-+ * We are about to modify some metadata. Call the journal APIs
-+ * to unshare ->b_data if a currently-committing transaction is
-+ * using it
-+ */
-+ BUFFER_TRACE(gd_bh, "get_write_access");
-+ err = ext3_journal_get_write_access(handle, gd_bh);
-+ if (err)
-+ goto error_return;
-+
-+ err = ext3_mb_load_buddy(sb, block_group, &e3b);
-+ if (err)
-+ goto error_return;
-+
-+#ifdef AGGRESSIVE_CHECK
-+ {
-+ int i;
-+ for (i = 0; i < count; i++)
-+ J_ASSERT(mb_test_bit(bit + i, bitmap_bh->b_data));
-+ }
-+#endif
-+ mb_clear_bits(bitmap_bh->b_data, bit, count);
-+
-+ /* We dirtied the bitmap block */
-+ BUFFER_TRACE(bitmap_bh, "dirtied bitmap block");
-+ err = ext3_journal_dirty_metadata(handle, bitmap_bh);
-+
-+ if (metadata) {
-+ /* blocks being freed are metadata. these blocks shouldn't
-+ * be used until this transaction is committed */
-+ ext3_mb_free_metadata(handle, &e3b, block_group, bit, count);
-+ } else {
-+ ext3_lock_group(sb, block_group);
-+ mb_free_blocks(&e3b, bit, count);
-+ ext3_unlock_group(sb, block_group);
-+ }
-+
-+ spin_lock(sb_bgl_lock(sbi, block_group));
-+ gdp->bg_free_blocks_count =
-+ cpu_to_le16(le16_to_cpu(gdp->bg_free_blocks_count) + count);
-+ spin_unlock(sb_bgl_lock(sbi, block_group));
-+ percpu_counter_mod(&sbi->s_freeblocks_counter, count);
-+
-+ ext3_mb_release_desc(&e3b);
-+
-+ *freed = count;
-+
-+ /* And the group descriptor block */
-+ BUFFER_TRACE(gd_bh, "dirtied group descriptor block");
-+ ret = ext3_journal_dirty_metadata(handle, gd_bh);
-+ if (!err) err = ret;
-+
-+ if (overflow && !err) {
-+ block += count;
-+ count = overflow;
-+ goto do_more;
-+ }
-+ sb->s_dirt = 1;
-+error_return:
-+ brelse(bitmap_bh);
-+ ext3_std_error(sb, err);
-+ return;
-+}
-+
-+int ext3_mb_reserve_blocks(struct super_block *sb, int blocks)
-+{
-+ struct ext3_sb_info *sbi = EXT3_SB(sb);
-+ int free, ret = -ENOSPC;
-+
-+ BUG_ON(blocks < 0);
-+ spin_lock(&sbi->s_reserve_lock);
-+ free = percpu_counter_read_positive(&sbi->s_freeblocks_counter);
-+ if (blocks <= free - sbi->s_blocks_reserved) {
-+ sbi->s_blocks_reserved += blocks;
-+ ret = 0;
-+ }
-+ spin_unlock(&sbi->s_reserve_lock);
-+ return ret;
-+}
-+
-+void ext3_mb_release_blocks(struct super_block *sb, int blocks)
-+{
-+ struct ext3_sb_info *sbi = EXT3_SB(sb);
-+
-+ BUG_ON(blocks < 0);
-+ spin_lock(&sbi->s_reserve_lock);
-+ sbi->s_blocks_reserved -= blocks;
-+ WARN_ON(sbi->s_blocks_reserved < 0);
-+ if (sbi->s_blocks_reserved < 0)
-+ sbi->s_blocks_reserved = 0;
-+ spin_unlock(&sbi->s_reserve_lock);
-+}
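The reserve/release pair above keeps a count of blocks promised to callers and refuses a reservation that would exceed the free blocks not already spoken for. A userspace sketch of the same pattern, with a pthread mutex standing in for the spinlock and a caller-maintained free count standing in for the percpu counter:

#include <pthread.h>

struct reserve {
	pthread_mutex_t lock;
	long free_blocks;               /* blocks currently free on "disk" */
	long reserved;                  /* blocks promised but not yet allocated */
};

/* Returns 0 on success, -1 (standing in for -ENOSPC) when too few blocks remain. */
static int reserve_blocks(struct reserve *r, long blocks)
{
	int ret = -1;

	pthread_mutex_lock(&r->lock);
	if (blocks <= r->free_blocks - r->reserved) {
		r->reserved += blocks;
		ret = 0;
	}
	pthread_mutex_unlock(&r->lock);
	return ret;
}

static void release_blocks(struct reserve *r, long blocks)
{
	pthread_mutex_lock(&r->lock);
	r->reserved -= blocks;
	if (r->reserved < 0)            /* defensive clamp, as in the patch */
		r->reserved = 0;
	pthread_mutex_unlock(&r->lock);
}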
-+
-+int ext3_new_block(handle_t *handle, struct inode *inode,
-+ unsigned long goal, int *errp)
-+{
-+ int ret, len;
-+
-+ if (!test_opt(inode->i_sb, MBALLOC)) {
-+ ret = ext3_new_block_old(handle, inode, goal, errp);
-+ goto out;
-+ }
-+ len = 1;
-+ ret = ext3_mb_new_blocks(handle, inode, goal, &len, 0, errp);
-+out:
-+ return ret;
-+}
-+
-+
-+void ext3_free_blocks(handle_t *handle, struct inode * inode,
-+ unsigned long block, unsigned long count, int metadata)
-+{
-+ struct super_block *sb;
-+ int freed;
-+
-+ sb = inode->i_sb;
-+ if (!test_opt(sb, MBALLOC) || !EXT3_SB(sb)->s_group_info)
-+ ext3_free_blocks_old(handle, inode, block, count);
-+ else {
-+ ext3_mb_free_blocks(handle, inode, block, count, metadata, &freed);
-+ if (freed)
-+ DQUOT_FREE_BLOCK(inode, freed);
-+ }
-+ return;
-+}
-+
-+#define EXT3_ROOT "ext3"
-+#define EXT3_MB_STATS_NAME "mb_stats"
-+#define EXT3_MB_MAX_TO_SCAN_NAME "mb_max_to_scan"
-+#define EXT3_MB_MIN_TO_SCAN_NAME "mb_min_to_scan"
-+#define EXT3_MB_ORDER2_REQ "mb_order2_req"
-+
-+static int ext3_mb_stats_read(char *page, char **start, off_t off,
-+ int count, int *eof, void *data)
-+{
-+ int len;
-+
-+ *eof = 1;
-+ if (off != 0)
-+ return 0;
-+
-+ len = sprintf(page, "%ld\n", ext3_mb_stats);
-+ *start = page;
-+ return len;
-+}
-+
-+static int ext3_mb_stats_write(struct file *file, const char *buffer,
-+ unsigned long count, void *data)
-+{
-+ char str[32];
-+
-+ if (count >= sizeof(str)) {
-+ printk(KERN_ERR "EXT3-fs: %s string too long, max %u bytes\n",
-+ EXT3_MB_STATS_NAME, (int)sizeof(str));
-+ return -EOVERFLOW;
-+ }
-+
-+ if (copy_from_user(str, buffer, count))
-+ return -EFAULT;
-+ str[count] = '\0';
-+
-+ /* normalize the input: zero disables stats, any non-zero value enables them */
-+ ext3_mb_stats = (simple_strtol(str, NULL, 0) != 0);
-+ return count;
-+}
-+
-+static int ext3_mb_max_to_scan_read(char *page, char **start, off_t off,
-+ int count, int *eof, void *data)
-+{
-+ int len;
-+
-+ *eof = 1;
-+ if (off != 0)
-+ return 0;
-+
-+ len = sprintf(page, "%ld\n", ext3_mb_max_to_scan);
-+ *start = page;
-+ return len;
-+}
-+
-+static int ext3_mb_max_to_scan_write(struct file *file, const char *buffer,
-+ unsigned long count, void *data)
-+{
-+ char str[32];
-+ long value;
-+
-+ if (count >= sizeof(str)) {
-+ printk(KERN_ERR "EXT3-fs: %s string too long, max %u bytes\n",
-+ EXT3_MB_MAX_TO_SCAN_NAME, (int)sizeof(str));
-+ return -EOVERFLOW;
-+ }
-+
-+ if (copy_from_user(str, buffer, count))
-+ return -EFAULT;
-+ str[count] = '\0';
-+
-+ /* parse the new value and reject non-positive settings */
-+ value = simple_strtol(str, NULL, 0);
-+ if (value <= 0)
-+ return -ERANGE;
-+
-+ ext3_mb_max_to_scan = value;
-+
-+ return count;
-+}
-+
-+static int ext3_mb_min_to_scan_read(char *page, char **start, off_t off,
-+ int count, int *eof, void *data)
-+{
-+ int len;
-+
-+ *eof = 1;
-+ if (off != 0)
-+ return 0;
-+
-+ len = sprintf(page, "%ld\n", ext3_mb_min_to_scan);
-+ *start = page;
-+ return len;
-+}
-+
-+static int ext3_mb_min_to_scan_write(struct file *file, const char *buffer,
-+ unsigned long count, void *data)
-+{
-+ char str[32];
-+ long value;
-+
-+ if (count >= sizeof(str)) {
-+ printk(KERN_ERR "EXT3-fs: %s string too long, max %u bytes\n",
-+ EXT3_MB_MIN_TO_SCAN_NAME, (int)sizeof(str));
-+ return -EOVERFLOW;
-+ }
-+
-+ if (copy_from_user(str, buffer, count))
-+ return -EFAULT;
-+ str[count] = '\0';
-+
-+ /* parse the new value and reject non-positive settings */
-+ value = simple_strtol(str, NULL, 0);
-+ if (value <= 0)
-+ return -ERANGE;
-+
-+ ext3_mb_min_to_scan = value;
-+
-+ return count;
-+}
-+
-+static int ext3_mb_order2_req_read(char *page, char **start, off_t off,
-+ int count, int *eof, void *data)
-+{
-+ int len;
-+
-+ *eof = 1;
-+ if (off != 0)
-+ return 0;
-+
-+ len = sprintf(page, "%ld\n", ext3_mb_order2_reqs);
-+ *start = page;
-+ return len;
-+}
-+
-+static int ext3_mb_order2_req_write(struct file *file, const char *buffer,
-+ unsigned long count, void *data)
-+{
-+ char str[32];
-+ long value;
-+
-+ if (count >= sizeof(str)) {
-+ printk(KERN_ERR "EXT3-fs: %s string too long, max %u bytes\n",
-+ EXT3_MB_MIN_TO_SCAN_NAME, (int)sizeof(str));
-+ return -EOVERFLOW;
-+ }
-+
-+ if (copy_from_user(str, buffer, count))
-+ return -EFAULT;
-+ str[count] = '\0';
-+
-+ /* parse the new value and reject non-positive settings */
-+ value = simple_strtol(str, NULL, 0);
-+ if (value <= 0)
-+ return -ERANGE;
-+
-+ ext3_mb_order2_reqs = value;
-+
-+ return count;
-+}
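All four proc write handlers above follow one pattern: reject input larger than a small stack buffer, copy it in, terminate it, parse it as a number and range-check the result. A userspace sketch of that pattern (the function name and the use of negative errno constants are illustrative only):

#include <errno.h>
#include <stdlib.h>
#include <string.h>

static int parse_tunable(const char *buf, size_t count, long *out)
{
	char str[32];
	long value;

	if (count >= sizeof(str))
		return -EOVERFLOW;      /* input longer than the scratch buffer */

	memcpy(str, buf, count);
	str[count] = '\0';              /* make sure the parser sees a terminated string */

	value = strtol(str, NULL, 0);
	if (value <= 0)
		return -ERANGE;         /* only positive values make sense for these knobs */

	*out = value;
	return 0;
}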
-+
-+int __init init_ext3_proc(void)
-+{
-+ struct proc_dir_entry *proc_ext3_mb_stats;
-+ struct proc_dir_entry *proc_ext3_mb_max_to_scan;
-+ struct proc_dir_entry *proc_ext3_mb_min_to_scan;
-+ struct proc_dir_entry *proc_ext3_mb_order2_req;
-+
-+ proc_root_ext3 = proc_mkdir(EXT3_ROOT, proc_root_fs);
-+ if (proc_root_ext3 == NULL) {
-+ printk(KERN_ERR "EXT3-fs: Unable to create %s\n", EXT3_ROOT);
-+ return -EIO;
-+ }
-+
-+ /* Initialize EXT3_MB_STATS_NAME */
-+ proc_ext3_mb_stats = create_proc_entry(EXT3_MB_STATS_NAME,
-+ S_IFREG | S_IRUGO | S_IWUSR, proc_root_ext3);
-+ if (proc_ext3_mb_stats == NULL) {
-+ printk(KERN_ERR "EXT3-fs: Unable to create %s\n",
-+ EXT3_MB_STATS_NAME);
-+ remove_proc_entry(EXT3_ROOT, proc_root_fs);
-+ return -EIO;
-+ }
-+
-+ proc_ext3_mb_stats->data = NULL;
-+ proc_ext3_mb_stats->read_proc = ext3_mb_stats_read;
-+ proc_ext3_mb_stats->write_proc = ext3_mb_stats_write;
-+
-+ /* Initialize EXT3_MAX_TO_SCAN_NAME */
-+ proc_ext3_mb_max_to_scan = create_proc_entry(
-+ EXT3_MB_MAX_TO_SCAN_NAME,
-+ S_IFREG | S_IRUGO | S_IWUSR, proc_root_ext3);
-+ if (proc_ext3_mb_max_to_scan == NULL) {
-+ printk(KERN_ERR "EXT3-fs: Unable to create %s\n",
-+ EXT3_MB_MAX_TO_SCAN_NAME);
-+ remove_proc_entry(EXT3_MB_STATS_NAME, proc_root_ext3);
-+ remove_proc_entry(EXT3_ROOT, proc_root_fs);
-+ return -EIO;
-+ }
-+
-+ proc_ext3_mb_max_to_scan->data = NULL;
-+ proc_ext3_mb_max_to_scan->read_proc = ext3_mb_max_to_scan_read;
-+ proc_ext3_mb_max_to_scan->write_proc = ext3_mb_max_to_scan_write;
-+
-+ /* Initialize EXT3_MIN_TO_SCAN_NAME */
-+ proc_ext3_mb_min_to_scan = create_proc_entry(
-+ EXT3_MB_MIN_TO_SCAN_NAME,
-+ S_IFREG | S_IRUGO | S_IWUSR, proc_root_ext3);
-+ if (proc_ext3_mb_min_to_scan == NULL) {
-+ printk(KERN_ERR "EXT3-fs: Unable to create %s\n",
-+ EXT3_MB_MIN_TO_SCAN_NAME);
-+ remove_proc_entry(EXT3_MB_MAX_TO_SCAN_NAME, proc_root_ext3);
-+ remove_proc_entry(EXT3_MB_STATS_NAME, proc_root_ext3);
-+ remove_proc_entry(EXT3_ROOT, proc_root_fs);
-+ return -EIO;
-+ }
-+
-+ proc_ext3_mb_min_to_scan->data = NULL;
-+ proc_ext3_mb_min_to_scan->read_proc = ext3_mb_min_to_scan_read;
-+ proc_ext3_mb_min_to_scan->write_proc = ext3_mb_min_to_scan_write;
-+
-+ /* Initialize EXT3_ORDER2_REQ */
-+ proc_ext3_mb_order2_req = create_proc_entry(
-+ EXT3_MB_ORDER2_REQ,
-+ S_IFREG | S_IRUGO | S_IWUSR, proc_root_ext3);
-+ if (proc_ext3_mb_order2_req == NULL) {
-+ printk(KERN_ERR "EXT3-fs: Unable to create %s\n",
-+ EXT3_MB_ORDER2_REQ);
-+ remove_proc_entry(EXT3_MB_MIN_TO_SCAN_NAME, proc_root_ext3);
-+ remove_proc_entry(EXT3_MB_MAX_TO_SCAN_NAME, proc_root_ext3);
-+ remove_proc_entry(EXT3_MB_STATS_NAME, proc_root_ext3);
-+ remove_proc_entry(EXT3_ROOT, proc_root_fs);
-+ return -EIO;
-+ }
-+
-+ proc_ext3_mb_order2_req->data = NULL;
-+ proc_ext3_mb_order2_req->read_proc = ext3_mb_order2_req_read;
-+ proc_ext3_mb_order2_req->write_proc = ext3_mb_order2_req_write;
-+
-+ return 0;
-+}
-+
-+void exit_ext3_proc(void)
-+{
-+ remove_proc_entry(EXT3_MB_STATS_NAME, proc_root_ext3);
-+ remove_proc_entry(EXT3_MB_MAX_TO_SCAN_NAME, proc_root_ext3);
-+ remove_proc_entry(EXT3_MB_MIN_TO_SCAN_NAME, proc_root_ext3);
-+ remove_proc_entry(EXT3_MB_ORDER2_REQ, proc_root_ext3);
-+ remove_proc_entry(EXT3_ROOT, proc_root_fs);
-+}
-Index: linux-2.6.5-7.282-full/fs/ext3/Makefile
-===================================================================
---- linux-2.6.5-7.282-full.orig/fs/ext3/Makefile 2006-10-24 22:18:28.000000000 +0400
-+++ linux-2.6.5-7.282-full/fs/ext3/Makefile 2006-10-24 22:18:28.000000000 +0400
-@@ -6,7 +6,7 @@ obj-$(CONFIG_EXT3_FS) += ext3.o
-
- ext3-y := balloc.o bitmap.o dir.o file.o fsync.o ialloc.o inode.o iopen.o \
- ioctl.o namei.o super.o symlink.o hash.o \
-- extents.o
-+ extents.o mballoc.o
-
- ext3-$(CONFIG_EXT3_FS_XATTR) += xattr.o xattr_user.o xattr_trusted.o
- ext3-$(CONFIG_EXT3_FS_POSIX_ACL) += acl.o
+++ /dev/null
-Index: linux-2.6.12.6-bull/include/linux/ext3_fs.h
-===================================================================
---- linux-2.6.12.6-bull.orig/include/linux/ext3_fs.h 2006-04-29 20:39:09.000000000 +0400
-+++ linux-2.6.12.6-bull/include/linux/ext3_fs.h 2006-04-29 20:39:10.000000000 +0400
-@@ -57,6 +57,14 @@ struct statfs;
- #define ext3_debug(f, a...) do {} while (0)
- #endif
-
-+#define EXT3_MULTIBLOCK_ALLOCATOR 1
-+
-+#define EXT3_MB_HINT_MERGE 1
-+#define EXT3_MB_HINT_RESERVED 2
-+#define EXT3_MB_HINT_METADATA 4
-+#define EXT3_MB_HINT_FIRST 8
-+#define EXT3_MB_HINT_BEST 16
-+
- /*
- * Special inodes numbers
- */
-@@ -366,6 +374,7 @@ struct ext3_inode {
- #define EXT3_MOUNT_IOPEN_NOPRIV 0x100000/* Make iopen world-readable */
- #define EXT3_MOUNT_EXTENTS 0x200000/* Extents support */
- #define EXT3_MOUNT_EXTDEBUG 0x400000/* Extents debug */
-+#define EXT3_MOUNT_MBALLOC 0x800000/* Buddy allocation support */
-
- /* Compatibility, for having both ext2_fs.h and ext3_fs.h included at once */
- #ifndef clear_opt
-@@ -387,6 +396,14 @@ struct ext3_inode {
- #define ext3_find_first_zero_bit ext2_find_first_zero_bit
- #define ext3_find_next_zero_bit ext2_find_next_zero_bit
-
-+#ifndef ext2_find_next_le_bit
-+#ifdef __LITTLE_ENDIAN
-+#define ext2_find_next_le_bit(addr, size, off) find_next_bit((addr), (size), (off))
-+#else
-+#error "mballoc needs a patch for big-endian systems - CFS bug 10634"
-+#endif /* __LITTLE_ENDIAN */
-+#endif /* !ext2_find_next_le_bit */
-+
- /*
- * Maximal mount counts between two filesystem checks
- */
-@@ -727,7 +736,8 @@ extern int ext3_bg_has_super(struct supe
- extern unsigned long ext3_bg_num_gdb(struct super_block *sb, int group);
- extern int ext3_new_block (handle_t *, struct inode *, unsigned long, int *);
- extern void ext3_free_blocks (handle_t *, struct inode *, unsigned long,
-- unsigned long);
-+ unsigned long, int);
-+extern int ext3_new_block_old(handle_t *, struct inode *, unsigned long, int *);
- extern void ext3_free_blocks_sb (handle_t *, struct super_block *,
- unsigned long, unsigned long, int *);
- extern unsigned long ext3_count_free_blocks (struct super_block *);
-@@ -848,6 +857,17 @@ extern void ext3_extents_initialize_bloc
- extern int ext3_ext_ioctl(struct inode *inode, struct file *filp,
- unsigned int cmd, unsigned long arg);
-
-+/* mballoc.c */
-+extern long ext3_mb_stats;
-+extern long ext3_mb_max_to_scan;
-+extern int ext3_mb_init(struct super_block *, int);
-+extern int ext3_mb_release(struct super_block *);
-+extern int ext3_mb_new_blocks(handle_t *, struct inode *, unsigned long, int *, int, int *);
-+extern int ext3_mb_reserve_blocks(struct super_block *, int);
-+extern void ext3_mb_release_blocks(struct super_block *, int);
-+int __init init_ext3_proc(void);
-+void exit_ext3_proc(void);
-+
- #endif /* __KERNEL__ */
-
- /* EXT3_IOC_CREATE_INUM at bottom of file (visible to kernel and user). */
-Index: linux-2.6.12.6-bull/include/linux/ext3_fs_sb.h
-===================================================================
---- linux-2.6.12.6-bull.orig/include/linux/ext3_fs_sb.h 2005-08-29 20:55:27.000000000 +0400
-+++ linux-2.6.12.6-bull/include/linux/ext3_fs_sb.h 2006-04-29 20:39:10.000000000 +0400
-@@ -21,8 +21,14 @@
- #include <linux/wait.h>
- #include <linux/blockgroup_lock.h>
- #include <linux/percpu_counter.h>
-+#include <linux/list.h>
- #endif
- #include <linux/rbtree.h>
-+#include <linux/proc_fs.h>
-+
-+struct ext3_buddy_group_blocks;
-+struct ext3_mb_history;
-+#define EXT3_BB_MAX_BLOCKS
-
- /*
- * third extended-fs super-block data in memory
-@@ -78,6 +84,43 @@ struct ext3_sb_info {
- char *s_qf_names[MAXQUOTAS]; /* Names of quota files with journalled quota */
- int s_jquota_fmt; /* Format of quota to use */
- #endif
-+
-+ /* for buddy allocator */
-+ struct ext3_group_info ***s_group_info;
-+ struct inode *s_buddy_cache;
-+ long s_blocks_reserved;
-+ spinlock_t s_reserve_lock;
-+ struct list_head s_active_transaction;
-+ struct list_head s_closed_transaction;
-+ struct list_head s_committed_transaction;
-+ spinlock_t s_md_lock;
-+ tid_t s_last_transaction;
-+ int s_mb_factor;
-+ unsigned short *s_mb_offsets, *s_mb_maxs;
-+ unsigned long s_stripe;
-+
-+ /* history to debug policy */
-+ struct ext3_mb_history *s_mb_history;
-+ int s_mb_history_cur;
-+ int s_mb_history_max;
-+ struct proc_dir_entry *s_mb_proc;
-+ spinlock_t s_mb_history_lock;
-+
-+ /* stats for buddy allocator */
-+ atomic_t s_bal_reqs; /* number of reqs with len > 1 */
-+ atomic_t s_bal_success; /* we found long enough chunks */
-+ atomic_t s_bal_allocated; /* in blocks */
-+ atomic_t s_bal_ex_scanned; /* total extents scanned */
-+ atomic_t s_bal_goals; /* goal hits */
-+ atomic_t s_bal_breaks; /* too long searches */
-+ atomic_t s_bal_2orders; /* 2^order hits */
-+ spinlock_t s_bal_lock;
-+ unsigned long s_mb_buddies_generated;
-+ unsigned long long s_mb_generation_time;
- };
-+
-+#define EXT3_GROUP_INFO(sb, group) \
-+ EXT3_SB(sb)->s_group_info[(group) >> EXT3_DESC_PER_BLOCK_BITS(sb)] \
-+ [(group) & (EXT3_DESC_PER_BLOCK(sb) - 1)]
-
- #endif /* _LINUX_EXT3_FS_SB */
-Index: linux-2.6.12.6-bull/fs/ext3/super.c
-===================================================================
---- linux-2.6.12.6-bull.orig/fs/ext3/super.c 2006-04-29 20:39:09.000000000 +0400
-+++ linux-2.6.12.6-bull/fs/ext3/super.c 2006-04-29 20:39:10.000000000 +0400
-@@ -387,6 +387,7 @@ static void ext3_put_super (struct super
- struct ext3_super_block *es = sbi->s_es;
- int i;
-
-+ ext3_mb_release(sb);
- ext3_ext_release(sb);
- ext3_xattr_put_super(sb);
- journal_destroy(sbi->s_journal);
-@@ -597,6 +598,7 @@ enum {
- Opt_ignore, Opt_barrier, Opt_err, Opt_resize,
- Opt_iopen, Opt_noiopen, Opt_iopen_nopriv,
- Opt_extents, Opt_noextents, Opt_extdebug,
-+ Opt_mballoc, Opt_nomballoc, Opt_stripe,
- };
-
- static match_table_t tokens = {
-@@ -650,6 +651,9 @@ static match_table_t tokens = {
- {Opt_extents, "extents"},
- {Opt_noextents, "noextents"},
- {Opt_extdebug, "extdebug"},
-+ {Opt_mballoc, "mballoc"},
-+ {Opt_nomballoc, "nomballoc"},
-+ {Opt_stripe, "stripe=%u"},
- {Opt_barrier, "barrier=%u"},
- {Opt_err, NULL},
- {Opt_resize, "resize"},
-@@ -965,6 +967,19 @@ clear_qf_name:
- case Opt_extdebug:
- set_opt (sbi->s_mount_opt, EXTDEBUG);
- break;
-+ case Opt_mballoc:
-+ set_opt(sbi->s_mount_opt, MBALLOC);
-+ break;
-+ case Opt_nomballoc:
-+ clear_opt(sbi->s_mount_opt, MBALLOC);
-+ break;
-+ case Opt_stripe:
-+ if (match_int(&args[0], &option))
-+ return 0;
-+ if (option < 0)
-+ return 0;
-+ sbi->s_stripe = option;
-+ break;
- default:
- printk (KERN_ERR
- "EXT3-fs: Unrecognized mount option \"%s\" "
-@@ -1670,6 +1675,7 @@ static int ext3_fill_super (struct super
- ext3_count_dirs(sb));
-
- ext3_ext_init(sb);
-+ ext3_mb_init(sb, needs_recovery);
- lock_kernel();
- return 0;
-
-@@ -2549,7 +2555,13 @@ static struct file_system_type ext3_fs_t
-
- static int __init init_ext3_fs(void)
- {
-- int err = init_ext3_xattr();
-+ int err;
-+
-+ err = init_ext3_proc();
-+ if (err)
-+ return err;
-+
-+ err = init_ext3_xattr();
- if (err)
- return err;
- err = init_inodecache();
-@@ -2571,6 +2583,7 @@ static void __exit exit_ext3_fs(void)
- unregister_filesystem(&ext3_fs_type);
- destroy_inodecache();
- exit_ext3_xattr();
-+ exit_ext3_proc();
- }
-
- int ext3_prep_san_write(struct inode *inode, long *blocks,
-Index: linux-2.6.12.6-bull/fs/ext3/extents.c
-===================================================================
---- linux-2.6.12.6-bull.orig/fs/ext3/extents.c 2006-04-29 20:39:09.000000000 +0400
-+++ linux-2.6.12.6-bull/fs/ext3/extents.c 2006-04-29 20:39:10.000000000 +0400
-@@ -777,7 +777,7 @@ cleanup:
- for (i = 0; i < depth; i++) {
- if (!ablocks[i])
- continue;
-- ext3_free_blocks(handle, tree->inode, ablocks[i], 1);
-+ ext3_free_blocks(handle, tree->inode, ablocks[i], 1, 1);
- }
- }
- kfree(ablocks);
-@@ -1434,7 +1434,7 @@ int ext3_ext_rm_idx(handle_t *handle, st
- path->p_idx->ei_leaf);
- bh = sb_find_get_block(tree->inode->i_sb, path->p_idx->ei_leaf);
- ext3_forget(handle, 1, tree->inode, bh, path->p_idx->ei_leaf);
-- ext3_free_blocks(handle, tree->inode, path->p_idx->ei_leaf, 1);
-+ ext3_free_blocks(handle, tree->inode, path->p_idx->ei_leaf, 1, 1);
- return err;
- }
-
-@@ -1919,10 +1919,12 @@ ext3_remove_blocks(struct ext3_extents_t
- int needed = ext3_remove_blocks_credits(tree, ex, from, to);
- handle_t *handle = ext3_journal_start(tree->inode, needed);
- struct buffer_head *bh;
-- int i;
-+ int i, metadata = 0;
-
- if (IS_ERR(handle))
- return PTR_ERR(handle);
-+ if (S_ISDIR(tree->inode->i_mode) || S_ISLNK(tree->inode->i_mode))
-+ metadata = 1;
- if (from >= ex->ee_block && to == ex->ee_block + ex->ee_len - 1) {
- /* tail removal */
- unsigned long num, start;
-@@ -1934,7 +1936,7 @@ ext3_remove_blocks(struct ext3_extents_t
- bh = sb_find_get_block(tree->inode->i_sb, start + i);
- ext3_forget(handle, 0, tree->inode, bh, start + i);
- }
-- ext3_free_blocks(handle, tree->inode, start, num);
-+ ext3_free_blocks(handle, tree->inode, start, num, metadata);
- } else if (from == ex->ee_block && to <= ex->ee_block + ex->ee_len - 1) {
- printk("strange request: removal %lu-%lu from %u:%u\n",
- from, to, ex->ee_block, ex->ee_len);
-Index: linux-2.6.12.6-bull/fs/ext3/inode.c
-===================================================================
---- linux-2.6.12.6-bull.orig/fs/ext3/inode.c 2006-04-29 20:39:09.000000000 +0400
-+++ linux-2.6.12.6-bull/fs/ext3/inode.c 2006-04-29 20:39:10.000000000 +0400
-@@ -564,7 +564,7 @@ static int ext3_alloc_branch(handle_t *h
- ext3_journal_forget(handle, branch[i].bh);
- }
- for (i = 0; i < keys; i++)
-- ext3_free_blocks(handle, inode, le32_to_cpu(branch[i].key), 1);
-+ ext3_free_blocks(handle, inode, le32_to_cpu(branch[i].key), 1, 1);
- return err;
- }
-
-@@ -1850,7 +1850,7 @@ ext3_clear_blocks(handle_t *handle, stru
- }
- }
-
-- ext3_free_blocks(handle, inode, block_to_free, count);
-+ ext3_free_blocks(handle, inode, block_to_free, count, 1);
- }
-
- /**
-@@ -2023,7 +2023,7 @@ static void ext3_free_branches(handle_t
- ext3_journal_test_restart(handle, inode);
- }
-
-- ext3_free_blocks(handle, inode, nr, 1);
-+ ext3_free_blocks(handle, inode, nr, 1, 1);
-
- if (parent_bh) {
- /*
-Index: linux-2.6.12.6-bull/fs/ext3/balloc.c
-===================================================================
---- linux-2.6.12.6-bull.orig/fs/ext3/balloc.c 2005-08-29 20:55:27.000000000 +0400
-+++ linux-2.6.12.6-bull/fs/ext3/balloc.c 2006-04-29 20:39:10.000000000 +0400
-@@ -79,7 +79,7 @@ struct ext3_group_desc * ext3_get_group_
- *
- * Return buffer_head on success or NULL in case of failure.
- */
--static struct buffer_head *
-+struct buffer_head *
- read_block_bitmap(struct super_block *sb, unsigned int block_group)
- {
- struct ext3_group_desc * desc;
-@@ -490,24 +490,6 @@ error_return:
- return;
- }
-
--/* Free given blocks, update quota and i_blocks field */
--void ext3_free_blocks(handle_t *handle, struct inode *inode,
-- unsigned long block, unsigned long count)
--{
-- struct super_block * sb;
-- int dquot_freed_blocks;
--
-- sb = inode->i_sb;
-- if (!sb) {
-- printk ("ext3_free_blocks: nonexistent device");
-- return;
-- }
-- ext3_free_blocks_sb(handle, sb, block, count, &dquot_freed_blocks);
-- if (dquot_freed_blocks)
-- DQUOT_FREE_BLOCK(inode, dquot_freed_blocks);
-- return;
--}
--
- /*
- * For ext3 allocations, we must not reuse any blocks which are
- * allocated in the bitmap buffer's "last committed data" copy. This
-@@ -1162,7 +1144,7 @@ int ext3_should_retry_alloc(struct super
- * bitmap, and then for any free bit if that fails.
- * This function also updates quota and i_blocks field.
- */
--int ext3_new_block(handle_t *handle, struct inode *inode,
-+int ext3_new_block_old(handle_t *handle, struct inode *inode,
- unsigned long goal, int *errp)
- {
- struct buffer_head *bitmap_bh = NULL;
-Index: linux-2.6.12.6-bull/fs/ext3/xattr.c
-===================================================================
---- linux-2.6.12.6-bull.orig/fs/ext3/xattr.c 2005-08-29 20:55:27.000000000 +0400
-+++ linux-2.6.12.6-bull/fs/ext3/xattr.c 2006-04-29 20:39:10.000000000 +0400
-@@ -484,7 +484,7 @@ ext3_xattr_release_block(handle_t *handl
- ea_bdebug(bh, "refcount now=0; freeing");
- if (ce)
- mb_cache_entry_free(ce);
-- ext3_free_blocks(handle, inode, bh->b_blocknr, 1);
-+ ext3_free_blocks(handle, inode, bh->b_blocknr, 1, 1);
- get_bh(bh);
- ext3_forget(handle, 1, inode, bh, bh->b_blocknr);
- } else {
-@@ -804,7 +804,7 @@ inserted:
- new_bh = sb_getblk(sb, block);
- if (!new_bh) {
- getblk_failed:
-- ext3_free_blocks(handle, inode, block, 1);
-+ ext3_free_blocks(handle, inode, block, 1, 1);
- error = -EIO;
- goto cleanup;
- }
-Index: linux-2.6.12.6-bull/fs/ext3/mballoc.c
-===================================================================
---- linux-2.6.12.6-bull.orig/fs/ext3/mballoc.c 2006-04-22 17:31:47.543334750 +0400
-+++ linux-2.6.12.6-bull/fs/ext3/mballoc.c 2006-04-30 01:24:11.000000000 +0400
-@@ -0,0 +1,2725 @@
-+/*
-+ * Copyright (c) 2003-2005, Cluster File Systems, Inc, info@clusterfs.com
-+ * Written by Alex Tomas <alex@clusterfs.com>
-+ *
-+ * This program is free software; you can redistribute it and/or modify
-+ * it under the terms of the GNU General Public License version 2 as
-+ * published by the Free Software Foundation.
-+ *
-+ * This program is distributed in the hope that it will be useful,
-+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
-+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-+ * GNU General Public License for more details.
-+ *
-+ * You should have received a copy of the GNU General Public License
-+ * along with this program; if not, write to the Free Software
-+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
-+ */
-+
-+
-+/*
-+ * mballoc.c contains the multiblocks allocation routines
-+ */
-+
-+#include <linux/config.h>
-+#include <linux/time.h>
-+#include <linux/fs.h>
-+#include <linux/namei.h>
-+#include <linux/jbd.h>
-+#include <linux/ext3_fs.h>
-+#include <linux/ext3_jbd.h>
-+#include <linux/quotaops.h>
-+#include <linux/buffer_head.h>
-+#include <linux/module.h>
-+#include <linux/swap.h>
-+#include <linux/proc_fs.h>
-+#include <linux/pagemap.h>
-+#include <linux/seq_file.h>
-+
-+/*
-+ * TODO:
-+ * - bitmap read-ahead (proposed by Oleg Drokin aka green)
-+ * - track min/max extents in each group for better group selection
-+ * - mb_mark_used() may allocate chunk right after splitting buddy
-+ * - special flag to advise the allocator to look for requested + N blocks
-+ * this may improve interaction between extents and mballoc
-+ * - tree of groups sorted by number of free blocks
-+ * - percpu reservation code (hotpath)
-+ * - error handling
-+ */
-+
-+/*
-+ * with AGGRESSIVE_CHECK the allocator runs consistency checks over
-+ * its structures. these checks slow things down a lot
-+ */
-+#define AGGRESSIVE_CHECK__
-+
-+/*
-+ * with MB_DEBUG mballoc prints verbose debugging via mb_debug()
-+ */
-+#define MB_DEBUG__
-+#ifdef MB_DEBUG
-+#define mb_debug(fmt,a...) printk(fmt, ##a)
-+#else
-+#define mb_debug(fmt,a...)
-+#endif
-+
-+/*
-+ * with EXT3_MB_HISTORY mballoc stores last N allocations in memory
-+ * and you can monitor it in /proc/fs/ext3/<dev>/mb_history
-+ */
-+#define EXT3_MB_HISTORY
-+
-+/*
-+ * Maximum number of found free extents mballoc examines while searching for the best one
-+ */
-+long ext3_mb_max_to_scan = 500;
-+
-+/*
-+ * Minimum number of free extents mballoc must examine before settling for the best one found
-+ */
-+long ext3_mb_min_to_scan = 30;
-+
-+/*
-+ * with 'ext3_mb_stats' the allocator collects statistics that are
-+ * shown at umount. collecting them has a cost, though!
-+ */
-+
-+long ext3_mb_stats = 1;
-+
-+/*
-+ * minimum request size (in blocks) for which the 2^N search over buddies is used
-+ */
-+long ext3_mb_order2_reqs = 8;
-+
-+#ifdef EXT3_BB_MAX_BLOCKS
-+#undef EXT3_BB_MAX_BLOCKS
-+#endif
-+#define EXT3_BB_MAX_BLOCKS 30
-+
-+struct ext3_free_metadata {
-+ unsigned short group;
-+ unsigned short num;
-+ unsigned short blocks[EXT3_BB_MAX_BLOCKS];
-+ struct list_head list;
-+};
-+
-+struct ext3_group_info {
-+ unsigned long bb_state;
-+ unsigned long bb_tid;
-+ struct ext3_free_metadata *bb_md_cur;
-+ unsigned short bb_first_free;
-+ unsigned short bb_free;
-+ unsigned short bb_fragments;
-+ unsigned short bb_counters[];
-+};
-+
-+
-+#define EXT3_GROUP_INFO_NEED_INIT_BIT 0
-+#define EXT3_GROUP_INFO_LOCKED_BIT 1
-+
-+#define EXT3_MB_GRP_NEED_INIT(grp) \
-+ (test_bit(EXT3_GROUP_INFO_NEED_INIT_BIT, &(grp)->bb_state))
-+
-+struct ext3_free_extent {
-+ __u16 fe_start;
-+ __u16 fe_len;
-+ __u16 fe_group;
-+};
-+
-+struct ext3_allocation_context {
-+ struct super_block *ac_sb;
-+
-+ /* search goals */
-+ struct ext3_free_extent ac_g_ex;
-+
-+ /* the best found extent */
-+ struct ext3_free_extent ac_b_ex;
-+
-+ /* number of iterations done. we have to track to limit searching */
-+ unsigned long ac_ex_scanned;
-+ __u16 ac_groups_scanned;
-+ __u16 ac_found;
-+ __u16 ac_tail;
-+ __u16 ac_buddy;
-+ __u8 ac_status;
-+ __u8 ac_flags; /* allocation hints */
-+ __u8 ac_criteria;
-+ __u8 ac_repeats;
-+ __u8 ac_2order; /* if request is to allocate 2^N blocks and
-+ * N > 0, the field stores N, otherwise 0 */
-+
-+ struct page *ac_buddy_page;
-+ struct page *ac_bitmap_page;
-+};
-+
-+#define AC_STATUS_CONTINUE 1
-+#define AC_STATUS_FOUND 2
-+#define AC_STATUS_BREAK 3
-+
-+struct ext3_mb_history {
-+ struct ext3_free_extent goal; /* goal allocation */
-+ struct ext3_free_extent result; /* result allocation */
-+ unsigned pid;
-+ unsigned ino;
-+ __u16 found; /* how many extents have been found */
-+ __u16 groups; /* how many groups have been scanned */
-+ __u16 tail; /* what tail broke some buddy */
-+ __u16 buddy; /* buddy the tail ^^^ broke */
-+ __u8 cr; /* which phase the result extent was found at */
-+ __u8 merged;
-+};
-+
-+struct ext3_buddy {
-+ struct page *bd_buddy_page;
-+ void *bd_buddy;
-+ struct page *bd_bitmap_page;
-+ void *bd_bitmap;
-+ struct ext3_group_info *bd_info;
-+ struct super_block *bd_sb;
-+ __u16 bd_blkbits;
-+ __u16 bd_group;
-+};
-+#define EXT3_MB_BITMAP(e3b) ((e3b)->bd_bitmap)
-+#define EXT3_MB_BUDDY(e3b) ((e3b)->bd_buddy)
-+
-+#ifndef EXT3_MB_HISTORY
-+#define ext3_mb_store_history(sb,ino,ac)
-+#else
-+static void ext3_mb_store_history(struct super_block *, unsigned ino,
-+ struct ext3_allocation_context *ac);
-+#endif
-+
-+#define in_range(b, first, len) ((b) >= (first) && (b) <= (first) + (len) - 1)
-+
-+static struct proc_dir_entry *proc_root_ext3;
-+
-+struct buffer_head * read_block_bitmap(struct super_block *, unsigned int);
-+void ext3_mb_poll_new_transaction(struct super_block *, handle_t *);
-+void ext3_mb_free_committed_blocks(struct super_block *);
-+
-+#if BITS_PER_LONG == 64
-+#define mb_correct_addr_and_bit(bit,addr) \
-+{ \
-+ bit += ((unsigned long) addr & 7UL) << 3; \
-+ addr = (void *) ((unsigned long) addr & ~7UL); \
-+}
-+#elif BITS_PER_LONG == 32
-+#define mb_correct_addr_and_bit(bit,addr) \
-+{ \
-+ bit += ((unsigned long) addr & 3UL) << 3; \
-+ addr = (void *) ((unsigned long) addr & ~3UL); \
-+}
-+#else
-+#error "how many bits you are?!"
-+#endif
-+
-+static inline int mb_test_bit(int bit, void *addr)
-+{
-+ mb_correct_addr_and_bit(bit,addr);
-+ return ext2_test_bit(bit, addr);
-+}
-+
-+static inline void mb_set_bit(int bit, void *addr)
-+{
-+ mb_correct_addr_and_bit(bit,addr);
-+ ext2_set_bit(bit, addr);
-+}
-+
-+static inline void mb_set_bit_atomic(int bit, void *addr)
-+{
-+ mb_correct_addr_and_bit(bit,addr);
-+ ext2_set_bit_atomic(NULL, bit, addr);
-+}
-+
-+static inline void mb_clear_bit(int bit, void *addr)
-+{
-+ mb_correct_addr_and_bit(bit,addr);
-+ ext2_clear_bit(bit, addr);
-+}
-+
-+static inline void mb_clear_bit_atomic(int bit, void *addr)
-+{
-+ mb_correct_addr_and_bit(bit,addr);
-+ ext2_clear_bit_atomic(NULL, bit, addr);
-+}
-+
-+static inline int mb_find_next_zero_bit(void *addr, int max, int start)
-+{
-+ int fix;
-+#if BITS_PER_LONG == 64
-+ fix = ((unsigned long) addr & 7UL) << 3;
-+ addr = (void *) ((unsigned long) addr & ~7UL);
-+#elif BITS_PER_LONG == 32
-+ fix = ((unsigned long) addr & 3UL) << 3;
-+ addr = (void *) ((unsigned long) addr & ~3UL);
-+#else
-+#error "how many bits you are?!"
-+#endif
-+ max += fix;
-+ start += fix;
-+ return ext2_find_next_zero_bit(addr, max, start) - fix;
-+}
-+
-+static inline void *mb_find_buddy(struct ext3_buddy *e3b, int order, int *max)
-+{
-+ char *bb;
-+
-+ J_ASSERT(EXT3_MB_BITMAP(e3b) != EXT3_MB_BUDDY(e3b));
-+ J_ASSERT(max != NULL);
-+
-+ if (order > e3b->bd_blkbits + 1) {
-+ *max = 0;
-+ return NULL;
-+ }
-+
-+ /* at order 0 we see each particular block */
-+ *max = 1 << (e3b->bd_blkbits + 3);
-+ if (order == 0)
-+ return EXT3_MB_BITMAP(e3b);
-+
-+ bb = EXT3_MB_BUDDY(e3b) + EXT3_SB(e3b->bd_sb)->s_mb_offsets[order];
-+ *max = EXT3_SB(e3b->bd_sb)->s_mb_maxs[order];
-+
-+ return bb;
-+}
-+
-+#ifdef AGGRESSIVE_CHECK
-+
-+static void mb_check_buddy(struct ext3_buddy *e3b)
-+{
-+ int order = e3b->bd_blkbits + 1;
-+ int max, max2, i, j, k, count;
-+ int fragments = 0, fstart;
-+ void *buddy, *buddy2;
-+
-+ if (!test_opt(e3b->bd_sb, MBALLOC))
-+ return;
-+
-+ {
-+ static int mb_check_counter = 0;
-+ if (mb_check_counter++ % 300 != 0)
-+ return;
-+ }
-+
-+ while (order > 1) {
-+ buddy = mb_find_buddy(e3b, order, &max);
-+ J_ASSERT(buddy);
-+ buddy2 = mb_find_buddy(e3b, order - 1, &max2);
-+ J_ASSERT(buddy2);
-+ J_ASSERT(buddy != buddy2);
-+ J_ASSERT(max * 2 == max2);
-+
-+ count = 0;
-+ for (i = 0; i < max; i++) {
-+
-+ if (mb_test_bit(i, buddy)) {
-+ /* only a single bit in buddy2 may be 1 */
-+ if (!mb_test_bit(i << 1, buddy2))
-+ J_ASSERT(mb_test_bit((i<<1)+1, buddy2));
-+ else if (!mb_test_bit((i << 1) + 1, buddy2))
-+ J_ASSERT(mb_test_bit(i << 1, buddy2));
-+ continue;
-+ }
-+
-+ /* both bits in buddy2 must be 0 */
-+ J_ASSERT(mb_test_bit(i << 1, buddy2));
-+ J_ASSERT(mb_test_bit((i << 1) + 1, buddy2));
-+
-+ for (j = 0; j < (1 << order); j++) {
-+ k = (i * (1 << order)) + j;
-+ J_ASSERT(!mb_test_bit(k, EXT3_MB_BITMAP(e3b)));
-+ }
-+ count++;
-+ }
-+ J_ASSERT(e3b->bd_info->bb_counters[order] == count);
-+ order--;
-+ }
-+
-+ fstart = -1;
-+ buddy = mb_find_buddy(e3b, 0, &max);
-+ for (i = 0; i < max; i++) {
-+ if (!mb_test_bit(i, buddy)) {
-+ J_ASSERT(i >= e3b->bd_info->bb_first_free);
-+ if (fstart == -1) {
-+ fragments++;
-+ fstart = i;
-+ }
-+ continue;
-+ }
-+ fstart = -1;
-+ /* check used bits only */
-+ for (j = 0; j < e3b->bd_blkbits + 1; j++) {
-+ buddy2 = mb_find_buddy(e3b, j, &max2);
-+ k = i >> j;
-+ J_ASSERT(k < max2);
-+ J_ASSERT(mb_test_bit(k, buddy2));
-+ }
-+ }
-+ J_ASSERT(!EXT3_MB_GRP_NEED_INIT(e3b->bd_info));
-+ J_ASSERT(e3b->bd_info->bb_fragments == fragments);
-+}
-+
-+#else
-+#define mb_check_buddy(e3b)
-+#endif
-+
-+/* find most significant bit */
-+static inline int fmsb(unsigned short word)
-+{
-+ int order;
-+
-+ if (word > 255) {
-+ order = 7;
-+ word >>= 8;
-+ } else {
-+ order = -1;
-+ }
-+
-+ do {
-+ order++;
-+ word >>= 1;
-+ } while (word != 0);
-+
-+ return order;
-+}
-+
-+static inline void
-+ext3_mb_mark_free_simple(struct super_block *sb, void *buddy, unsigned first,
-+ int len, struct ext3_group_info *grp)
-+{
-+ struct ext3_sb_info *sbi = EXT3_SB(sb);
-+ unsigned short min, max, chunk, border;
-+
-+ mb_debug("mark %u/%u free\n", first, len);
-+ J_ASSERT(len < EXT3_BLOCKS_PER_GROUP(sb));
-+
-+ border = 2 << sb->s_blocksize_bits;
-+
-+ while (len > 0) {
-+ /* find how many blocks can be covered from this position */
-+ max = ffs(first | border) - 1;
-+
-+ /* find how many blocks of power 2 we need to mark */
-+ min = fmsb(len);
-+
-+ mb_debug(" %u/%u -> max %u, min %u\n",
-+ first & ((2 << sb->s_blocksize_bits) - 1),
-+ len, max, min);
-+
-+ if (max < min)
-+ min = max;
-+ chunk = 1 << min;
-+
-+ /* mark multiblock chunks only */
-+ grp->bb_counters[min]++;
-+ if (min > 0) {
-+ mb_debug(" set %u at %u \n", first >> min,
-+ sbi->s_mb_offsets[min]);
-+ mb_clear_bit(first >> min, buddy + sbi->s_mb_offsets[min]);
-+ }
-+
-+ len -= chunk;
-+ first += chunk;
-+ }
-+}
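ext3_mb_mark_free_simple() above decomposes a free range into the largest power-of-two chunks allowed by both the remaining length (fmsb) and the alignment of the current position (ffs). The standalone sketch below performs the same walk and prints each chunk; 'border' stands in for 2 << s_blocksize_bits, the size of the largest buddy chunk:

#include <stdio.h>
#include <strings.h>                    /* ffs() */

/* Most significant set bit, i.e. floor(log2(v)); mirrors fmsb() above. */
static int msb(unsigned v)
{
	int order = -1;

	while (v) {
		order++;
		v >>= 1;
	}
	return order;
}

static void decompose(unsigned first, unsigned len, unsigned border)
{
	while (len > 0) {
		int max = ffs(first | border) - 1;      /* limit from alignment */
		int min = msb(len);                     /* limit from remaining length */
		unsigned chunk;

		if (max < min)
			min = max;
		chunk = 1u << min;
		printf("chunk of %5u blocks at %5u (order %2d)\n", chunk, first, min);
		len   -= chunk;
		first += chunk;
	}
}

int main(void)
{
	decompose(5, 1000, 1u << 13);   /* a 1000-block run starting at block 5, 4 KB blocks */
	return 0;
}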
-+
-+static void
-+ext3_mb_generate_buddy(struct super_block *sb, void *buddy, void *bitmap,
-+ int group)
-+{
-+ struct ext3_group_info *grp = EXT3_GROUP_INFO(sb, group);
-+ unsigned short max = EXT3_BLOCKS_PER_GROUP(sb);
-+ unsigned short i = 0, first, len;
-+ unsigned free = 0, fragments = 0;
-+ unsigned long long period = get_cycles();
-+
-+ i = mb_find_next_zero_bit(bitmap, max, 0);
-+ grp->bb_first_free = i;
-+ while (i < max) {
-+ fragments++;
-+ first = i;
-+ i = ext2_find_next_le_bit(bitmap, max, i);
-+ len = i - first;
-+ free += len;
-+ if (len > 1)
-+ ext3_mb_mark_free_simple(sb, buddy, first, len, grp);
-+ else
-+ grp->bb_counters[0]++;
-+ if (i < max)
-+ i = mb_find_next_zero_bit(bitmap, max, i);
-+ }
-+ grp->bb_fragments = fragments;
-+
-+ /* bb_state shouldn't be modified here because everyone
-+ * else waits for init completion on the page lock */
-+ clear_bit(EXT3_GROUP_INFO_NEED_INIT_BIT, &grp->bb_state);
-+ if (free != grp->bb_free) {
-+ printk("EXT3-fs: group %u: %u blocks in bitmap, %u in gd\n",
-+ group, free, grp->bb_free);
-+ grp->bb_free = free;
-+ }
-+
-+ period = get_cycles() - period;
-+ spin_lock(&EXT3_SB(sb)->s_bal_lock);
-+ EXT3_SB(sb)->s_mb_buddies_generated++;
-+ EXT3_SB(sb)->s_mb_generation_time += period;
-+ spin_unlock(&EXT3_SB(sb)->s_bal_lock);
-+}
-+
-+static int ext3_mb_init_cache(struct page *page)
-+{
-+ int blocksize, blocks_per_page, groups_per_page;
-+ int err = 0, i, first_group, first_block;
-+ struct super_block *sb;
-+ struct buffer_head *bhs;
-+ struct buffer_head **bh;
-+ struct inode *inode;
-+ char *data, *bitmap;
-+
-+ mb_debug("init page %lu\n", page->index);
-+
-+ inode = page->mapping->host;
-+ sb = inode->i_sb;
-+ blocksize = 1 << inode->i_blkbits;
-+ blocks_per_page = PAGE_CACHE_SIZE / blocksize;
-+
-+ groups_per_page = blocks_per_page >> 1;
-+ if (groups_per_page == 0)
-+ groups_per_page = 1;
-+
-+ /* allocate buffer_heads to read bitmaps */
-+ if (groups_per_page > 1) {
-+ err = -ENOMEM;
-+ i = sizeof(struct buffer_head *) * groups_per_page;
-+ bh = kmalloc(i, GFP_NOFS);
-+ if (bh == NULL)
-+ goto out;
-+ memset(bh, 0, i);
-+ } else
-+ bh = &bhs;
-+
-+ first_group = page->index * blocks_per_page / 2;
-+
-+ /* read all groups the page covers into the cache */
-+ for (i = 0; i < groups_per_page; i++) {
-+ struct ext3_group_desc * desc;
-+
-+ if (first_group + i >= EXT3_SB(sb)->s_groups_count)
-+ break;
-+
-+ err = -EIO;
-+ desc = ext3_get_group_desc(sb, first_group + i, NULL);
-+ if (desc == NULL)
-+ goto out;
-+
-+ err = -ENOMEM;
-+ bh[i] = sb_getblk(sb, le32_to_cpu(desc->bg_block_bitmap));
-+ if (bh[i] == NULL)
-+ goto out;
-+
-+ if (buffer_uptodate(bh[i]))
-+ continue;
-+
-+ lock_buffer(bh[i]);
-+ if (buffer_uptodate(bh[i])) {
-+ unlock_buffer(bh[i]);
-+ continue;
-+ }
-+
-+ get_bh(bh[i]);
-+ bh[i]->b_end_io = end_buffer_read_sync;
-+ submit_bh(READ, bh[i]);
-+ mb_debug("read bitmap for group %u\n", first_group + i);
-+ }
-+
-+ /* wait for I/O completion */
-+ for (i = 0; i < groups_per_page && bh[i]; i++)
-+ wait_on_buffer(bh[i]);
-+
-+ err = -EIO;
-+ for (i = 0; i < groups_per_page && bh[i]; i++)
-+ if (!buffer_uptodate(bh[i]))
-+ goto out;
-+
-+ first_block = page->index * blocks_per_page;
-+ for (i = 0; i < blocks_per_page; i++) {
-+ int group;
-+
-+ group = (first_block + i) >> 1;
-+ if (group >= EXT3_SB(sb)->s_groups_count)
-+ break;
-+
-+ data = page_address(page) + (i * blocksize);
-+ bitmap = bh[group - first_group]->b_data;
-+
-+ if ((first_block + i) & 1) {
-+ /* this is block of buddy */
-+ mb_debug("put buddy for group %u in page %lu/%x\n",
-+ group, page->index, i * blocksize);
-+ memset(data, 0xff, blocksize);
-+ EXT3_GROUP_INFO(sb, group)->bb_fragments = 0;
-+ memset(EXT3_GROUP_INFO(sb, group)->bb_counters, 0,
-+ sizeof(unsigned short)*(sb->s_blocksize_bits+2));
-+ ext3_mb_generate_buddy(sb, data, bitmap, group);
-+ } else {
-+ /* this is block of bitmap */
-+ mb_debug("put bitmap for group %u in page %lu/%x\n",
-+ group, page->index, i * blocksize);
-+ memcpy(data, bitmap, blocksize);
-+ }
-+ }
-+ SetPageUptodate(page);
-+
-+out:
-+ if (bh) {
-+ for (i = 0; i < groups_per_page && bh[i]; i++)
-+ brelse(bh[i]);
-+ if (bh != &bhs)
-+ kfree(bh);
-+ }
-+ return err;
-+}
-+
-+static int ext3_mb_load_buddy(struct super_block *sb, int group,
-+ struct ext3_buddy *e3b)
-+{
-+ struct ext3_sb_info *sbi = EXT3_SB(sb);
-+ struct inode *inode = sbi->s_buddy_cache;
-+ int blocks_per_page, block, pnum, poff;
-+ struct page *page;
-+
-+ mb_debug("load group %u\n", group);
-+
-+ blocks_per_page = PAGE_CACHE_SIZE / sb->s_blocksize;
-+
-+ e3b->bd_blkbits = sb->s_blocksize_bits;
-+ e3b->bd_info = EXT3_GROUP_INFO(sb, group);
-+ e3b->bd_sb = sb;
-+ e3b->bd_group = group;
-+ e3b->bd_buddy_page = NULL;
-+ e3b->bd_bitmap_page = NULL;
-+
-+ block = group * 2;
-+ pnum = block / blocks_per_page;
-+ poff = block % blocks_per_page;
-+
-+ /* we could use find_or_create_page(), but it locks the page,
-+ * which we'd like to avoid in the fast path ... */
-+ page = find_get_page(inode->i_mapping, pnum);
-+ if (page == NULL || !PageUptodate(page)) {
-+ if (page)
-+ page_cache_release(page);
-+ page = find_or_create_page(inode->i_mapping, pnum, GFP_NOFS);
-+ if (page) {
-+ BUG_ON(page->mapping != inode->i_mapping);
-+ if (!PageUptodate(page))
-+ ext3_mb_init_cache(page);
-+ unlock_page(page);
-+ }
-+ }
-+ if (page == NULL || !PageUptodate(page))
-+ goto err;
-+ e3b->bd_bitmap_page = page;
-+ e3b->bd_bitmap = page_address(page) + (poff * sb->s_blocksize);
-+ mark_page_accessed(page);
-+
-+ block++;
-+ pnum = block / blocks_per_page;
-+ poff = block % blocks_per_page;
-+
-+ page = find_get_page(inode->i_mapping, pnum);
-+ if (page == NULL || !PageUptodate(page)) {
-+ if (page)
-+ page_cache_release(page);
-+ page = find_or_create_page(inode->i_mapping, pnum, GFP_NOFS);
-+ if (page) {
-+ BUG_ON(page->mapping != inode->i_mapping);
-+ if (!PageUptodate(page))
-+ ext3_mb_init_cache(page);
-+ unlock_page(page);
-+ }
-+ }
-+ if (page == NULL || !PageUptodate(page))
-+ goto err;
-+ e3b->bd_buddy_page = page;
-+ e3b->bd_buddy = page_address(page) + (poff * sb->s_blocksize);
-+ mark_page_accessed(page);
-+
-+ J_ASSERT(e3b->bd_bitmap_page != NULL);
-+ J_ASSERT(e3b->bd_buddy_page != NULL);
-+
-+ return 0;
-+
-+err:
-+ if (e3b->bd_bitmap_page)
-+ page_cache_release(e3b->bd_bitmap_page);
-+ if (e3b->bd_buddy_page)
-+ page_cache_release(e3b->bd_buddy_page);
-+ e3b->bd_buddy = NULL;
-+ e3b->bd_bitmap = NULL;
-+ return -EIO;
-+}
-+
-+static void ext3_mb_release_desc(struct ext3_buddy *e3b)
-+{
-+ if (e3b->bd_bitmap_page)
-+ page_cache_release(e3b->bd_bitmap_page);
-+ if (e3b->bd_buddy_page)
-+ page_cache_release(e3b->bd_buddy_page);
-+}
-+
-+
-+static inline void
-+ext3_lock_group(struct super_block *sb, int group)
-+{
-+ bit_spin_lock(EXT3_GROUP_INFO_LOCKED_BIT,
-+ &EXT3_GROUP_INFO(sb, group)->bb_state);
-+}
-+
-+static inline void
-+ext3_unlock_group(struct super_block *sb, int group)
-+{
-+ bit_spin_unlock(EXT3_GROUP_INFO_LOCKED_BIT,
-+ &EXT3_GROUP_INFO(sb, group)->bb_state);
-+}
-+
-+static int mb_find_order_for_block(struct ext3_buddy *e3b, int block)
-+{
-+ int order = 1;
-+ void *bb;
-+
-+ J_ASSERT(EXT3_MB_BITMAP(e3b) != EXT3_MB_BUDDY(e3b));
-+ J_ASSERT(block < (1 << (e3b->bd_blkbits + 3)));
-+
-+ bb = EXT3_MB_BUDDY(e3b);
-+ while (order <= e3b->bd_blkbits + 1) {
-+ block = block >> 1;
-+ if (!mb_test_bit(block, bb)) {
-+ /* this block is part of buddy of order 'order' */
-+ return order;
-+ }
-+ bb += 1 << (e3b->bd_blkbits - order);
-+ order++;
-+ }
-+ return 0;
-+}
-+
-+static inline void mb_clear_bits(void *bm, int cur, int len)
-+{
-+ __u32 *addr;
-+
-+ len = cur + len;
-+ while (cur < len) {
-+ if ((cur & 31) == 0 && (len - cur) >= 32) {
-+ /* fast path: clear whole word at once */
-+ addr = bm + (cur >> 3);
-+ *addr = 0;
-+ cur += 32;
-+ continue;
-+ }
-+ mb_clear_bit_atomic(cur, bm);
-+ cur++;
-+ }
-+}
-+
-+static inline void mb_set_bits(void *bm, int cur, int len)
-+{
-+ __u32 *addr;
-+
-+ len = cur + len;
-+ while (cur < len) {
-+ if ((cur & 31) == 0 && (len - cur) >= 32) {
-+ /* fast path: set whole word at once */
-+ addr = bm + (cur >> 3);
-+ *addr = 0xffffffff;
-+ cur += 32;
-+ continue;
-+ }
-+ mb_set_bit_atomic(cur, bm);
-+ cur++;
-+ }
-+}
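mb_set_bits()/mb_clear_bits() above walk a bit range and switch to writing a whole 32-bit word whenever the current position is word aligned and at least 32 bits remain. Below is a little-endian userspace sketch of the set variant (the clear variant is symmetric); the byte-addressed layout matches ext2-style bitmaps only on little-endian hosts:

#include <stdint.h>
#include <stdio.h>

/* Set bits [cur, cur + len) in 'bm'; 'bm' must be 32-bit aligned and large enough. */
static void set_bits(void *bm, int cur, int len)
{
	uint32_t *addr;

	len = cur + len;
	while (cur < len) {
		if ((cur & 31) == 0 && (len - cur) >= 32) {
			addr = (uint32_t *)bm + (cur >> 5);
			*addr = 0xffffffffu;            /* fast path: whole word at once */
			cur += 32;
			continue;
		}
		((uint8_t *)bm)[cur >> 3] |= (uint8_t)(1u << (cur & 7));
		cur++;
	}
}

int main(void)
{
	uint32_t map[3] = { 0, 0, 0 };

	set_bits(map, 5, 60);                           /* bits 5..64 */
	printf("%08x %08x %08x\n", map[0], map[1], map[2]);
	/* prints: ffffffe0 ffffffff 00000001 */
	return 0;
}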
-+
-+static int mb_free_blocks(struct ext3_buddy *e3b, int first, int count)
-+{
-+ int block = 0, max = 0, order;
-+ void *buddy, *buddy2;
-+
-+ mb_check_buddy(e3b);
-+
-+ e3b->bd_info->bb_free += count;
-+ if (first < e3b->bd_info->bb_first_free)
-+ e3b->bd_info->bb_first_free = first;
-+
-+ /* let's maintain fragments counter */
-+ if (first != 0)
-+ block = !mb_test_bit(first - 1, EXT3_MB_BITMAP(e3b));
-+ if (first + count < EXT3_SB(e3b->bd_sb)->s_mb_maxs[0])
-+ max = !mb_test_bit(first + count, EXT3_MB_BITMAP(e3b));
-+ if (block && max)
-+ e3b->bd_info->bb_fragments--;
-+ else if (!block && !max)
-+ e3b->bd_info->bb_fragments++;
-+
-+ /* let's maintain buddy itself */
-+ while (count-- > 0) {
-+ block = first++;
-+ order = 0;
-+
-+ J_ASSERT(mb_test_bit(block, EXT3_MB_BITMAP(e3b)));
-+ mb_clear_bit(block, EXT3_MB_BITMAP(e3b));
-+ e3b->bd_info->bb_counters[order]++;
-+
-+ /* start of the buddy */
-+ buddy = mb_find_buddy(e3b, order, &max);
-+
-+ do {
-+ block &= ~1UL;
-+ if (mb_test_bit(block, buddy) ||
-+ mb_test_bit(block + 1, buddy))
-+ break;
-+
-+ /* both the buddies are free, try to coalesce them */
-+ buddy2 = mb_find_buddy(e3b, order + 1, &max);
-+
-+ if (!buddy2)
-+ break;
-+
-+ if (order > 0) {
-+ /* for special purposes, we don't set
-+ * free bits in bitmap */
-+ mb_set_bit(block, buddy);
-+ mb_set_bit(block + 1, buddy);
-+ }
-+ e3b->bd_info->bb_counters[order]--;
-+ e3b->bd_info->bb_counters[order]--;
-+
-+ block = block >> 1;
-+ order++;
-+ e3b->bd_info->bb_counters[order]++;
-+
-+ mb_clear_bit(block, buddy2);
-+ buddy = buddy2;
-+ } while (1);
-+ }
-+ mb_check_buddy(e3b);
-+
-+ return 0;
-+}
-+
-+static int mb_find_extent(struct ext3_buddy *e3b, int order, int block,
-+ int needed, struct ext3_free_extent *ex)
-+{
-+ int next = block, max, ord;
-+ void *buddy;
-+
-+ J_ASSERT(ex != NULL);
-+
-+ buddy = mb_find_buddy(e3b, order, &max);
-+ J_ASSERT(buddy);
-+ J_ASSERT(block < max);
-+ if (mb_test_bit(block, buddy)) {
-+ ex->fe_len = 0;
-+ ex->fe_start = 0;
-+ ex->fe_group = 0;
-+ return 0;
-+ }
-+
-+ if (likely(order == 0)) {
-+ /* find actual order */
-+ order = mb_find_order_for_block(e3b, block);
-+ block = block >> order;
-+ }
-+
-+ ex->fe_len = 1 << order;
-+ ex->fe_start = block << order;
-+ ex->fe_group = e3b->bd_group;
-+
-+ /* calc difference from given start */
-+ next = next - ex->fe_start;
-+ ex->fe_len -= next;
-+ ex->fe_start += next;
-+
-+ while (needed > ex->fe_len && (buddy = mb_find_buddy(e3b, order, &max))) {
-+
-+ if (block + 1 >= max)
-+ break;
-+
-+ next = (block + 1) * (1 << order);
-+ if (mb_test_bit(next, EXT3_MB_BITMAP(e3b)))
-+ break;
-+
-+ ord = mb_find_order_for_block(e3b, next);
-+
-+ order = ord;
-+ block = next >> order;
-+ ex->fe_len += 1 << order;
-+ }
-+
-+ J_ASSERT(ex->fe_start + ex->fe_len <= (1 << (e3b->bd_blkbits + 3)));
-+ return ex->fe_len;
-+}
-+
-+static int mb_mark_used(struct ext3_buddy *e3b, struct ext3_free_extent *ex)
-+{
-+ int ord, mlen = 0, max = 0, cur;
-+ int start = ex->fe_start;
-+ int len = ex->fe_len;
-+ unsigned ret = 0;
-+ int len0 = len;
-+ void *buddy;
-+
-+ mb_check_buddy(e3b);
-+
-+ e3b->bd_info->bb_free -= len;
-+ if (e3b->bd_info->bb_first_free == start)
-+ e3b->bd_info->bb_first_free += len;
-+
-+ /* let's maintain fragments counter */
-+ if (start != 0)
-+ mlen = !mb_test_bit(start - 1, EXT3_MB_BITMAP(e3b));
-+ if (start + len < EXT3_SB(e3b->bd_sb)->s_mb_maxs[0])
-+ max = !mb_test_bit(start + len, EXT3_MB_BITMAP(e3b));
-+ if (mlen && max)
-+ e3b->bd_info->bb_fragments++;
-+ else if (!mlen && !max)
-+ e3b->bd_info->bb_fragments--;
-+
-+ /* let's maintain buddy itself */
-+ while (len) {
-+ ord = mb_find_order_for_block(e3b, start);
-+
-+ if (((start >> ord) << ord) == start && len >= (1 << ord)) {
-+ /* the whole chunk may be allocated at once! */
-+ mlen = 1 << ord;
-+ buddy = mb_find_buddy(e3b, ord, &max);
-+ J_ASSERT((start >> ord) < max);
-+ mb_set_bit(start >> ord, buddy);
-+ e3b->bd_info->bb_counters[ord]--;
-+ start += mlen;
-+ len -= mlen;
-+ J_ASSERT(len >= 0);
-+ continue;
-+ }
-+
-+ /* store for history */
-+ if (ret == 0)
-+ ret = len | (ord << 16);
-+
-+ /* we have to split large buddy */
-+ J_ASSERT(ord > 0);
-+ buddy = mb_find_buddy(e3b, ord, &max);
-+ mb_set_bit(start >> ord, buddy);
-+ e3b->bd_info->bb_counters[ord]--;
-+
-+ ord--;
-+ cur = (start >> ord) & ~1U;
-+ buddy = mb_find_buddy(e3b, ord, &max);
-+ mb_clear_bit(cur, buddy);
-+ mb_clear_bit(cur + 1, buddy);
-+ e3b->bd_info->bb_counters[ord]++;
-+ e3b->bd_info->bb_counters[ord]++;
-+ }
-+
-+ /* now mark all the blocks of the extent used in the bitmap */
-+ mb_set_bits(EXT3_MB_BITMAP(e3b), ex->fe_start, len0);
-+
-+ mb_check_buddy(e3b);
-+
-+ return ret;
-+}
-+
-+/*
-+ * Must be called under group lock!
-+ */
-+static void ext3_mb_use_best_found(struct ext3_allocation_context *ac,
-+ struct ext3_buddy *e3b)
-+{
-+ unsigned long ret;
-+
-+ ac->ac_b_ex.fe_len = min(ac->ac_b_ex.fe_len, ac->ac_g_ex.fe_len);
-+ ret = mb_mark_used(e3b, &ac->ac_b_ex);
-+
-+ ac->ac_status = AC_STATUS_FOUND;
-+ ac->ac_tail = ret & 0xffff;
-+ ac->ac_buddy = ret >> 16;
-+
-+ /* hold in-core structures until allocated
-+ * blocks are marked non-free in on-disk bitmap */
-+ ac->ac_buddy_page = e3b->bd_buddy_page;
-+ page_cache_get(e3b->bd_buddy_page);
-+ ac->ac_bitmap_page = e3b->bd_bitmap_page;
-+ page_cache_get(e3b->bd_bitmap_page);
-+}
-+
-+/*
-+ * The routine checks whether the found extent is good enough. If it is,
-+ * the extent is marked used and a flag is set in the context to stop
-+ * scanning. Otherwise, the extent is compared with the previously found
-+ * extent and, if the new one is better, it is stored in the context.
-+ * Later, the best extent found will be used if mballoc cannot find a
-+ * good enough one.
-+ *
-+ * FIXME: real allocation policy is to be designed yet!
-+ */
-+static void ext3_mb_measure_extent(struct ext3_allocation_context *ac,
-+ struct ext3_free_extent *ex,
-+ struct ext3_buddy *e3b)
-+{
-+ struct ext3_free_extent *bex = &ac->ac_b_ex;
-+ struct ext3_free_extent *gex = &ac->ac_g_ex;
-+
-+ J_ASSERT(ex->fe_len > 0);
-+ J_ASSERT(ex->fe_len < (1 << ac->ac_sb->s_blocksize_bits) * 8);
-+ J_ASSERT(ex->fe_start < (1 << ac->ac_sb->s_blocksize_bits) * 8);
-+
-+ ac->ac_found++;
-+
-+ /*
-+ * The special case - take what you catch first
-+ */
-+ if (unlikely(ac->ac_flags & EXT3_MB_HINT_FIRST)) {
-+ *bex = *ex;
-+ ext3_mb_use_best_found(ac, e3b);
-+ return;
-+ }
-+
-+ /*
-+ * Let's check whether the chunk is good enough
-+ */
-+ if (ex->fe_len == gex->fe_len) {
-+ *bex = *ex;
-+ ext3_mb_use_best_found(ac, e3b);
-+ return;
-+ }
-+
-+ /*
-+ * If this is first found extent, just store it in the context
-+ */
-+ if (bex->fe_len == 0) {
-+ *bex = *ex;
-+ return;
-+ }
-+
-+ /*
-+ * If new found extent is better, store it in the context
-+ */
-+ if (bex->fe_len < gex->fe_len) {
-+ /* if the request isn't satisfied, any found extent
-+ * larger than previous best one is better */
-+ if (ex->fe_len > bex->fe_len)
-+ *bex = *ex;
-+ } else if (ex->fe_len > gex->fe_len) {
-+ /* if the request is satisfied, then we try to find
-+ * an extent that still satisfies the request but is
-+ * smaller than the previous one */
-+ *bex = *ex;
-+ }
-+
-+ /*
-+ * Let's scan at least a few extents and not pick the first one we find
-+ */
-+ if (bex->fe_len > gex->fe_len && ac->ac_found > ext3_mb_min_to_scan)
-+ ac->ac_status = AC_STATUS_BREAK;
-+
-+ /*
-+ * We don't want to scan for a whole year
-+ */
-+ if (ac->ac_found > ext3_mb_max_to_scan)
-+ ac->ac_status = AC_STATUS_BREAK;
-+}
-+
-+static int ext3_mb_try_best_found(struct ext3_allocation_context *ac,
-+ struct ext3_buddy *e3b)
-+{
-+ struct ext3_free_extent ex = ac->ac_b_ex;
-+ int group = ex.fe_group, max, err;
-+
-+ J_ASSERT(ex.fe_len > 0);
-+ err = ext3_mb_load_buddy(ac->ac_sb, group, e3b);
-+ if (err)
-+ return err;
-+
-+ ext3_lock_group(ac->ac_sb, group);
-+ max = mb_find_extent(e3b, 0, ex.fe_start, ex.fe_len, &ex);
-+
-+ if (max > 0) {
-+ ac->ac_b_ex = ex;
-+ ext3_mb_use_best_found(ac, e3b);
-+ }
-+
-+ ext3_unlock_group(ac->ac_sb, group);
-+
-+ ext3_mb_release_desc(e3b);
-+
-+ return 0;
-+}
-+
-+static int ext3_mb_find_by_goal(struct ext3_allocation_context *ac,
-+ struct ext3_buddy *e3b)
-+{
-+ int group = ac->ac_g_ex.fe_group, max, err;
-+ struct ext3_sb_info *sbi = EXT3_SB(ac->ac_sb);
-+ struct ext3_super_block *es = sbi->s_es;
-+ struct ext3_free_extent ex;
-+
-+ err = ext3_mb_load_buddy(ac->ac_sb, group, e3b);
-+ if (err)
-+ return err;
-+
-+ ext3_lock_group(ac->ac_sb, group);
-+ max = mb_find_extent(e3b, 0, ac->ac_g_ex.fe_start,
-+ ac->ac_g_ex.fe_len, &ex);
-+
-+ if (max >= ac->ac_g_ex.fe_len && ac->ac_g_ex.fe_len == sbi->s_stripe) {
-+ unsigned long start;
-+ start = (e3b->bd_group * EXT3_BLOCKS_PER_GROUP(ac->ac_sb) +
-+ ex.fe_start + le32_to_cpu(es->s_first_data_block));
-+ if (start % sbi->s_stripe == 0) {
-+ ac->ac_found++;
-+ ac->ac_b_ex = ex;
-+ ext3_mb_use_best_found(ac, e3b);
-+ }
-+ } else if (max >= ac->ac_g_ex.fe_len) {
-+ J_ASSERT(ex.fe_len > 0);
-+ J_ASSERT(ex.fe_group == ac->ac_g_ex.fe_group);
-+ J_ASSERT(ex.fe_start == ac->ac_g_ex.fe_start);
-+ ac->ac_found++;
-+ ac->ac_b_ex = ex;
-+ ext3_mb_use_best_found(ac, e3b);
-+ } else if (max > 0 && (ac->ac_flags & EXT3_MB_HINT_MERGE)) {
-+ /* Sometimes the caller may want to merge even a small
-+ * number of blocks into an existing extent */
-+ J_ASSERT(ex.fe_len > 0);
-+ J_ASSERT(ex.fe_group == ac->ac_g_ex.fe_group);
-+ J_ASSERT(ex.fe_start == ac->ac_g_ex.fe_start);
-+ ac->ac_found++;
-+ ac->ac_b_ex = ex;
-+ ext3_mb_use_best_found(ac, e3b);
-+ }
-+ ext3_unlock_group(ac->ac_sb, group);
-+
-+ ext3_mb_release_desc(e3b);
-+
-+ return 0;
-+}
-+
-+/*
-+ * The routine scans the buddy structures (not the bitmap!) from the given
-+ * order up to the max order, trying to find a chunk big enough for the req
-+ */
-+static void ext3_mb_simple_scan_group(struct ext3_allocation_context *ac,
-+ struct ext3_buddy *e3b)
-+{
-+ struct super_block *sb = ac->ac_sb;
-+ struct ext3_group_info *grp = e3b->bd_info;
-+ void *buddy;
-+ int i, k, max;
-+
-+ J_ASSERT(ac->ac_2order > 0);
-+ for (i = ac->ac_2order; i <= sb->s_blocksize_bits + 1; i++) {
-+ if (grp->bb_counters[i] == 0)
-+ continue;
-+
-+ buddy = mb_find_buddy(e3b, i, &max);
-+ if (buddy == NULL) {
-+ printk(KERN_ALERT "looking for wrong order?\n");
-+ break;
-+ }
-+
-+ k = mb_find_next_zero_bit(buddy, max, 0);
-+ J_ASSERT(k < max);
-+
-+ ac->ac_found++;
-+
-+ ac->ac_b_ex.fe_len = 1 << i;
-+ ac->ac_b_ex.fe_start = k << i;
-+ ac->ac_b_ex.fe_group = e3b->bd_group;
-+
-+ ext3_mb_use_best_found(ac, e3b);
-+ J_ASSERT(ac->ac_b_ex.fe_len == ac->ac_g_ex.fe_len);
-+
-+ if (unlikely(ext3_mb_stats))
-+ atomic_inc(&EXT3_SB(sb)->s_bal_2orders);
-+
-+ break;
-+ }
-+}
-+
-+/*
-+ * The routine scans the group and measures all found extents.
-+ * To optimize scanning, the caller must pass the number of free
-+ * blocks in the group, so the routine knows the upper limit.
-+ */
-+static void ext3_mb_complex_scan_group(struct ext3_allocation_context *ac,
-+ struct ext3_buddy *e3b)
-+{
-+ struct super_block *sb = ac->ac_sb;
-+ void *bitmap = EXT3_MB_BITMAP(e3b);
-+ struct ext3_free_extent ex;
-+ int i, free;
-+
-+ free = e3b->bd_info->bb_free;
-+ J_ASSERT(free > 0);
-+
-+ i = e3b->bd_info->bb_first_free;
-+
-+ while (free && ac->ac_status == AC_STATUS_CONTINUE) {
-+ i = mb_find_next_zero_bit(bitmap, sb->s_blocksize * 8, i);
-+ if (i >= sb->s_blocksize * 8) {
-+ J_ASSERT(free == 0);
-+ break;
-+ }
-+
-+ mb_find_extent(e3b, 0, i, ac->ac_g_ex.fe_len, &ex);
-+ J_ASSERT(ex.fe_len > 0);
-+ J_ASSERT(free >= ex.fe_len);
-+
-+ ext3_mb_measure_extent(ac, &ex, e3b);
-+
-+ i += ex.fe_len;
-+ free -= ex.fe_len;
-+ }
-+}
-+
-+/*
-+ * This is a special case for storage like RAID5:
-+ * we try to find stripe-aligned chunks for stripe-sized requests
-+ */
-+static void ext3_mb_scan_aligned(struct ext3_allocation_context *ac,
-+ struct ext3_buddy *e3b)
-+{
-+ struct super_block *sb = ac->ac_sb;
-+ struct ext3_sb_info *sbi = EXT3_SB(sb);
-+ void *bitmap = EXT3_MB_BITMAP(e3b);
-+ struct ext3_free_extent ex;
-+ unsigned long i, max;
-+
-+ J_ASSERT(sbi->s_stripe != 0);
-+
-+ /* find first stripe-aligned block */
-+ i = e3b->bd_group * EXT3_BLOCKS_PER_GROUP(sb) +
-+ le32_to_cpu(sbi->s_es->s_first_data_block);
-+ i = ((i + sbi->s_stripe - 1) / sbi->s_stripe) * sbi->s_stripe;
-+ i = (i - le32_to_cpu(sbi->s_es->s_first_data_block)) %
-+ EXT3_BLOCKS_PER_GROUP(sb);
-+
-+ while (i < sb->s_blocksize * 8) {
-+ if (!mb_test_bit(i, bitmap)) {
-+ max = mb_find_extent(e3b, 0, i, sbi->s_stripe, &ex);
-+ if (max >= sbi->s_stripe) {
-+ ac->ac_found++;
-+ ac->ac_b_ex = ex;
-+ ext3_mb_use_best_found(ac, e3b);
-+ break;
-+ }
-+ }
-+ i += sbi->s_stripe;
-+ }
-+}
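-+
-+/*
-+ * Example of the alignment math above (numbers are illustrative): with
-+ * s_first_data_block == 1, EXT3_BLOCKS_PER_GROUP() == 32768, bd_group == 2
-+ * and s_stripe == 16, the group starts at absolute block 65537; rounding
-+ * up gives 65552, and converting back to a group-relative offset yields
-+ * bit 15, so the scan probes bits 15, 31, 47, ...
-+ */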
-+
-+static int ext3_mb_good_group(struct ext3_allocation_context *ac,
-+ int group, int cr)
-+{
-+ struct ext3_group_info *grp = EXT3_GROUP_INFO(ac->ac_sb, group);
-+ unsigned free, fragments, i, bits;
-+
-+ J_ASSERT(cr >= 0 && cr < 4);
-+ J_ASSERT(!EXT3_MB_GRP_NEED_INIT(grp));
-+
-+ free = grp->bb_free;
-+ fragments = grp->bb_fragments;
-+ if (free == 0)
-+ return 0;
-+ if (fragments == 0)
-+ return 0;
-+
-+ switch (cr) {
-+ case 0:
-+ J_ASSERT(ac->ac_2order != 0);
-+ bits = ac->ac_sb->s_blocksize_bits + 1;
-+ for (i = ac->ac_2order; i <= bits; i++)
-+ if (grp->bb_counters[i] > 0)
-+ return 1;
-+ break;
-+ case 1:
-+ if ((free / fragments) >= ac->ac_g_ex.fe_len)
-+ return 1;
-+ break;
-+ case 2:
-+ if (free >= ac->ac_g_ex.fe_len)
-+ return 1;
-+ break;
-+ case 3:
-+ return 1;
-+ default:
-+ BUG();
-+ }
-+
-+ return 0;
-+}
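-+
-+/*
-+ * Illustration of the criteria above for a 16-block (2^4) request:
-+ * cr == 0 accepts a group only if some bb_counters[i] with i >= 4 is
-+ * non-zero (a free buddy of 2^4 blocks or more exists); cr == 1 accepts
-+ * a group whose average free extent (bb_free / bb_fragments) is >= 16,
-+ * e.g. 64 free blocks in 2 fragments; cr == 2 only needs bb_free >= 16,
-+ * even if spread over 16 single-block fragments; cr == 3 takes any group
-+ * that has free blocks at all.
-+ */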
-+
-+int ext3_mb_new_blocks(handle_t *handle, struct inode *inode,
-+ unsigned long goal, int *len, int flags, int *errp)
-+{
-+ struct buffer_head *bitmap_bh = NULL;
-+ struct ext3_allocation_context ac;
-+ int i, group, block, cr, err = 0;
-+ struct ext3_group_desc *gdp;
-+ struct ext3_super_block *es;
-+ struct buffer_head *gdp_bh;
-+ struct ext3_sb_info *sbi;
-+ struct super_block *sb;
-+ struct ext3_buddy e3b;
-+
-+ J_ASSERT(len != NULL);
-+ J_ASSERT(*len > 0);
-+
-+ sb = inode->i_sb;
-+ if (!sb) {
-+ printk("ext3_mb_new_nblocks: nonexistent device");
-+ return 0;
-+ }
-+
-+ if (!test_opt(sb, MBALLOC)) {
-+ static int ext3_mballoc_warning = 0;
-+ if (ext3_mballoc_warning == 0) {
-+ printk(KERN_ERR "EXT3-fs: multiblock request with "
-+ "mballoc disabled!\n");
-+ ext3_mballoc_warning++;
-+ }
-+ *len = 1;
-+ err = ext3_new_block_old(handle, inode, goal, errp);
-+ return err;
-+ }
-+
-+ ext3_mb_poll_new_transaction(sb, handle);
-+
-+ sbi = EXT3_SB(sb);
-+ es = EXT3_SB(sb)->s_es;
-+
-+ /*
-+ * We can't allocate > group size
-+ */
-+ if (*len >= EXT3_BLOCKS_PER_GROUP(sb) - 10)
-+ *len = EXT3_BLOCKS_PER_GROUP(sb) - 10;
-+
-+ if (!(flags & EXT3_MB_HINT_RESERVED)) {
-+ /* someone asks for non-reserved blocks */
-+ BUG_ON(*len > 1);
-+ err = ext3_mb_reserve_blocks(sb, 1);
-+ if (err) {
-+ *errp = err;
-+ return 0;
-+ }
-+ }
-+
-+ ac.ac_buddy_page = NULL;
-+ ac.ac_bitmap_page = NULL;
-+
-+ /*
-+ * Check quota for allocation of these blocks.
-+ */
-+ while (*len && DQUOT_ALLOC_BLOCK(inode, *len))
-+ *len -= 1;
-+ if (*len == 0) {
-+ *errp = -EDQUOT;
-+ block = 0;
-+ goto out;
-+ }
-+
-+ /* start searching from the goal */
-+ if (goal < le32_to_cpu(es->s_first_data_block) ||
-+ goal >= le32_to_cpu(es->s_blocks_count))
-+ goal = le32_to_cpu(es->s_first_data_block);
-+ group = (goal - le32_to_cpu(es->s_first_data_block)) /
-+ EXT3_BLOCKS_PER_GROUP(sb);
-+ block = ((goal - le32_to_cpu(es->s_first_data_block)) %
-+ EXT3_BLOCKS_PER_GROUP(sb));
-+
-+ /* set up allocation goals */
-+ ac.ac_b_ex.fe_group = 0;
-+ ac.ac_b_ex.fe_start = 0;
-+ ac.ac_b_ex.fe_len = 0;
-+ ac.ac_status = AC_STATUS_CONTINUE;
-+ ac.ac_groups_scanned = 0;
-+ ac.ac_ex_scanned = 0;
-+ ac.ac_found = 0;
-+ ac.ac_sb = inode->i_sb;
-+ ac.ac_g_ex.fe_group = group;
-+ ac.ac_g_ex.fe_start = block;
-+ ac.ac_g_ex.fe_len = *len;
-+ ac.ac_flags = flags;
-+ ac.ac_2order = 0;
-+ ac.ac_criteria = 0;
-+
-+ if (*len == 1 && sbi->s_stripe) {
-+ /* looks like metadata; as a dirty hack for RAID5, move all
-+ * metadata to the first groups in the hope of hitting cached
-+ * sectors and thus avoiding read-modify-write cycles in RAID5 */
-+ ac.ac_g_ex.fe_group = group = 0;
-+ }
-+
-+ /* an exact power-of-two request? remember its order for the cr == 0 scan */
-+ i = ffs(*len);
-+ if (i >= ext3_mb_order2_reqs) {
-+ i--;
-+ if ((*len & (~(1 << i))) == 0)
-+ ac.ac_2order = i;
-+ }
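-+ /* e.g. *len == 256: ffs() == 9, i becomes 8 and 256 & ~(1 << 8) == 0,
-+ * so ac_2order = 8 (provided ext3_mb_order2_reqs <= 9); for *len == 24,
-+ * ffs() == 4 and 24 & ~(1 << 3) == 16 != 0, so ac_2order stays 0 */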
-+
-+ /* first, try the goal */
-+ err = ext3_mb_find_by_goal(&ac, &e3b);
-+ if (err)
-+ goto out_err;
-+ if (ac.ac_status == AC_STATUS_FOUND)
-+ goto found;
-+
-+ /* Let's just scan groups to find more or less suitable blocks */
-+ cr = ac.ac_2order ? 0 : 1;
-+repeat:
-+ for (; cr < 4 && ac.ac_status == AC_STATUS_CONTINUE; cr++) {
-+ ac.ac_criteria = cr;
-+ for (i = 0; i < EXT3_SB(sb)->s_groups_count; group++, i++) {
-+ if (group == EXT3_SB(sb)->s_groups_count)
-+ group = 0;
-+
-+ if (EXT3_MB_GRP_NEED_INIT(EXT3_GROUP_INFO(sb, group))) {
-+ /* we need full data about the group
-+ * to make a good selection */
-+ err = ext3_mb_load_buddy(ac.ac_sb, group, &e3b);
-+ if (err)
-+ goto out_err;
-+ ext3_mb_release_desc(&e3b);
-+ }
-+
-+ /* check whether the group is good for our criteria */
-+ if (!ext3_mb_good_group(&ac, group, cr))
-+ continue;
-+
-+ err = ext3_mb_load_buddy(ac.ac_sb, group, &e3b);
-+ if (err)
-+ goto out_err;
-+
-+ ext3_lock_group(sb, group);
-+ if (!ext3_mb_good_group(&ac, group, cr)) {
-+ /* someone did allocation from this group */
-+ ext3_unlock_group(sb, group);
-+ ext3_mb_release_desc(&e3b);
-+ continue;
-+ }
-+
-+ ac.ac_groups_scanned++;
-+ if (cr == 0)
-+ ext3_mb_simple_scan_group(&ac, &e3b);
-+ else if (cr == 1 && *len == sbi->s_stripe)
-+ ext3_mb_scan_aligned(&ac, &e3b);
-+ else
-+ ext3_mb_complex_scan_group(&ac, &e3b);
-+
-+ ext3_unlock_group(sb, group);
-+
-+ ext3_mb_release_desc(&e3b);
-+
-+ if (ac.ac_status != AC_STATUS_CONTINUE)
-+ break;
-+ }
-+ }
-+
-+ if (ac.ac_b_ex.fe_len > 0 && ac.ac_status != AC_STATUS_FOUND &&
-+ !(ac.ac_flags & EXT3_MB_HINT_FIRST)) {
-+ /*
-+ * We've been searching too long. Let's try to allocate
-+ * the best chunk we've found so far
-+ */
-+
-+ /*if (ac.ac_found > ext3_mb_max_to_scan)
-+ printk(KERN_DEBUG "EXT3-fs: too long searching at "
-+ "%u (%d/%d)\n", cr, ac.ac_b_ex.fe_len,
-+ ac.ac_g_ex.fe_len);*/
-+ ext3_mb_try_best_found(&ac, &e3b);
-+ if (ac.ac_status != AC_STATUS_FOUND) {
-+ /*
-+ * Someone luckier has already allocated it.
-+ * The only thing we can do is just take the
-+ * first block(s) we find
-+ printk(KERN_DEBUG "EXT3-fs: someone won our chunk\n");
-+ */
-+ ac.ac_b_ex.fe_group = 0;
-+ ac.ac_b_ex.fe_start = 0;
-+ ac.ac_b_ex.fe_len = 0;
-+ ac.ac_status = AC_STATUS_CONTINUE;
-+ ac.ac_flags |= EXT3_MB_HINT_FIRST;
-+ cr = 3;
-+ goto repeat;
-+ }
-+ }
-+
-+ if (ac.ac_status != AC_STATUS_FOUND) {
-+ /*
-+ * We definitely aren't lucky
-+ */
-+ DQUOT_FREE_BLOCK(inode, *len);
-+ *errp = -ENOSPC;
-+ block = 0;
-+#if 1
-+ printk(KERN_ERR "EXT3-fs: can't allocate: status %d flags %d\n",
-+ ac.ac_status, ac.ac_flags);
-+ printk(KERN_ERR "EXT3-fs: goal %d, best found %d/%d/%d cr %d\n",
-+ ac.ac_g_ex.fe_len, ac.ac_b_ex.fe_group,
-+ ac.ac_b_ex.fe_start, ac.ac_b_ex.fe_len, cr);
-+ printk(KERN_ERR "EXT3-fs: %lu block reserved, %d found\n",
-+ sbi->s_blocks_reserved, ac.ac_found);
-+ printk("EXT3-fs: groups: ");
-+ for (i = 0; i < EXT3_SB(sb)->s_groups_count; i++)
-+ printk("%d: %d ", i, EXT3_GROUP_INFO(sb, i)->bb_free);
-+ printk("\n");
-+#endif
-+ goto out;
-+ }
-+
-+found:
-+ J_ASSERT(ac.ac_b_ex.fe_len > 0);
-+
-+ /* good news - free block(s) have been found. now it's time
-+ * to mark block(s) in good old journaled bitmap */
-+ block = ac.ac_b_ex.fe_group * EXT3_BLOCKS_PER_GROUP(sb)
-+ + ac.ac_b_ex.fe_start
-+ + le32_to_cpu(es->s_first_data_block);
-+
-+ /* we made a decision, now mark the found blocks in the good
-+ * old bitmap to be journaled */
-+
-+ ext3_debug("using block group %d (%d blocks)\n",
-+ ac.ac_b_ex.fe_group, ac.ac_b_ex.fe_len);
-+
-+ bitmap_bh = read_block_bitmap(sb, ac.ac_b_ex.fe_group);
-+ if (!bitmap_bh) {
-+ *errp = -EIO;
-+ goto out_err;
-+ }
-+
-+ err = ext3_journal_get_write_access(handle, bitmap_bh);
-+ if (err) {
-+ *errp = err;
-+ goto out_err;
-+ }
-+
-+ gdp = ext3_get_group_desc(sb, ac.ac_b_ex.fe_group, &gdp_bh);
-+ if (!gdp) {
-+ *errp = -EIO;
-+ goto out_err;
-+ }
-+
-+ err = ext3_journal_get_write_access(handle, gdp_bh);
-+ if (err)
-+ goto out_err;
-+
-+ block = ac.ac_b_ex.fe_group * EXT3_BLOCKS_PER_GROUP(sb)
-+ + ac.ac_b_ex.fe_start
-+ + le32_to_cpu(es->s_first_data_block);
-+
-+ if (block == le32_to_cpu(gdp->bg_block_bitmap) ||
-+ block == le32_to_cpu(gdp->bg_inode_bitmap) ||
-+ in_range(block, le32_to_cpu(gdp->bg_inode_table),
-+ EXT3_SB(sb)->s_itb_per_group))
-+ ext3_error(sb, "ext3_new_block",
-+ "Allocating block in system zone - "
-+ "block = %u", block);
-+#ifdef AGGRESSIVE_CHECK
-+ for (i = 0; i < ac.ac_b_ex.fe_len; i++)
-+ J_ASSERT(!mb_test_bit(ac.ac_b_ex.fe_start + i, bitmap_bh->b_data));
-+#endif
-+ mb_set_bits(bitmap_bh->b_data, ac.ac_b_ex.fe_start, ac.ac_b_ex.fe_len);
-+
-+ spin_lock(sb_bgl_lock(sbi, ac.ac_b_ex.fe_group));
-+ gdp->bg_free_blocks_count =
-+ cpu_to_le16(le16_to_cpu(gdp->bg_free_blocks_count)
-+ - ac.ac_b_ex.fe_len);
-+ spin_unlock(sb_bgl_lock(sbi, ac.ac_b_ex.fe_group));
-+ percpu_counter_mod(&sbi->s_freeblocks_counter, - ac.ac_b_ex.fe_len);
-+
-+ err = ext3_journal_dirty_metadata(handle, bitmap_bh);
-+ if (err)
-+ goto out_err;
-+ err = ext3_journal_dirty_metadata(handle, gdp_bh);
-+ if (err)
-+ goto out_err;
-+
-+ sb->s_dirt = 1;
-+ *errp = 0;
-+ brelse(bitmap_bh);
-+
-+ /* drop non-allocated, but DQUOT-charged blocks */
-+ J_ASSERT(*len >= ac.ac_b_ex.fe_len);
-+ DQUOT_FREE_BLOCK(inode, *len - ac.ac_b_ex.fe_len);
-+
-+ *len = ac.ac_b_ex.fe_len;
-+ J_ASSERT(*len > 0);
-+ J_ASSERT(block != 0);
-+ goto out;
-+
-+out_err:
-+ /* if we've already allocated something, roll it back */
-+ if (ac.ac_status == AC_STATUS_FOUND) {
-+ /* FIXME: free blocks here */
-+ }
-+
-+ DQUOT_FREE_BLOCK(inode, *len);
-+ brelse(bitmap_bh);
-+ *errp = err;
-+ block = 0;
-+out:
-+ if (ac.ac_buddy_page)
-+ page_cache_release(ac.ac_buddy_page);
-+ if (ac.ac_bitmap_page)
-+ page_cache_release(ac.ac_bitmap_page);
-+
-+ if (!(flags & EXT3_MB_HINT_RESERVED)) {
-+ /* the block wasn't reserved before and we reserved it
-+ * at the beginning of the allocation. whether we
-+ * allocated anything or failed, it's time to release
-+ * the reservation. NOTE: since multiblock requests are
-+ * expected only from the delayed-allocation path, this
-+ * is always a single block */
-+ ext3_mb_release_blocks(sb, 1);
-+ }
-+
-+ if (unlikely(ext3_mb_stats) && ac.ac_g_ex.fe_len > 1) {
-+ atomic_inc(&sbi->s_bal_reqs);
-+ atomic_add(*len, &sbi->s_bal_allocated);
-+ if (*len >= ac.ac_g_ex.fe_len)
-+ atomic_inc(&sbi->s_bal_success);
-+ atomic_add(ac.ac_found, &sbi->s_bal_ex_scanned);
-+ if (ac.ac_g_ex.fe_start == ac.ac_b_ex.fe_start &&
-+ ac.ac_g_ex.fe_group == ac.ac_b_ex.fe_group)
-+ atomic_inc(&sbi->s_bal_goals);
-+ if (ac.ac_found > ext3_mb_max_to_scan)
-+ atomic_inc(&sbi->s_bal_breaks);
-+ }
-+
-+ ext3_mb_store_history(sb, inode->i_ino, &ac);
-+
-+ return block;
-+}
-+EXPORT_SYMBOL(ext3_mb_new_blocks);
-+
-+#ifdef EXT3_MB_HISTORY
-+struct ext3_mb_proc_session {
-+ struct ext3_mb_history *history;
-+ struct super_block *sb;
-+ int start;
-+ int max;
-+};
-+
-+static void *ext3_mb_history_skip_empty(struct ext3_mb_proc_session *s,
-+ struct ext3_mb_history *hs,
-+ int first)
-+{
-+ if (hs == s->history + s->max)
-+ hs = s->history;
-+ if (!first && hs == s->history + s->start)
-+ return NULL;
-+ while (hs->goal.fe_len == 0) {
-+ hs++;
-+ if (hs == s->history + s->max)
-+ hs = s->history;
-+ if (hs == s->history + s->start)
-+ return NULL;
-+ }
-+ return hs;
-+}
-+
-+static void *ext3_mb_seq_history_start(struct seq_file *seq, loff_t *pos)
-+{
-+ struct ext3_mb_proc_session *s = seq->private;
-+ struct ext3_mb_history *hs;
-+ int l = *pos;
-+
-+ if (l == 0)
-+ return SEQ_START_TOKEN;
-+ hs = ext3_mb_history_skip_empty(s, s->history + s->start, 1);
-+ if (!hs)
-+ return NULL;
-+ while (--l && (hs = ext3_mb_history_skip_empty(s, ++hs, 0)) != NULL);
-+ return hs;
-+}
-+
-+static void *ext3_mb_seq_history_next(struct seq_file *seq, void *v, loff_t *pos)
-+{
-+ struct ext3_mb_proc_session *s = seq->private;
-+ struct ext3_mb_history *hs = v;
-+
-+ ++*pos;
-+ if (v == SEQ_START_TOKEN)
-+ return ext3_mb_history_skip_empty(s, s->history + s->start, 1);
-+ else
-+ return ext3_mb_history_skip_empty(s, ++hs, 0);
-+}
-+
-+static int ext3_mb_seq_history_show(struct seq_file *seq, void *v)
-+{
-+ struct ext3_mb_history *hs = v;
-+ char buf[20], buf2[20];
-+
-+ if (v == SEQ_START_TOKEN) {
-+ seq_printf(seq, "%-5s %-8s %-17s %-17s %-5s %-5s %-2s %-5s %-5s %-6s\n",
-+ "pid", "inode", "goal", "result", "found", "grps", "cr",
-+ "merge", "tail", "broken");
-+ return 0;
-+ }
-+
-+ sprintf(buf, "%u/%u/%u", hs->goal.fe_group,
-+ hs->goal.fe_start, hs->goal.fe_len);
-+ sprintf(buf2, "%u/%u/%u", hs->result.fe_group,
-+ hs->result.fe_start, hs->result.fe_len);
-+ seq_printf(seq, "%-5u %-8u %-17s %-17s %-5u %-5u %-2u %-5s %-5u %-6u\n",
-+ hs->pid, hs->ino, buf, buf2, hs->found, hs->groups,
-+ hs->cr, hs->merged ? "M" : "", hs->tail,
-+ hs->buddy ? 1 << hs->buddy : 0);
-+ return 0;
-+}
-+
-+static void ext3_mb_seq_history_stop(struct seq_file *seq, void *v)
-+{
-+}
-+
-+static struct seq_operations ext3_mb_seq_history_ops = {
-+ .start = ext3_mb_seq_history_start,
-+ .next = ext3_mb_seq_history_next,
-+ .stop = ext3_mb_seq_history_stop,
-+ .show = ext3_mb_seq_history_show,
-+};
-+
-+static int ext3_mb_seq_history_open(struct inode *inode, struct file *file)
-+{
-+ struct super_block *sb = PDE(inode)->data;
-+ struct ext3_sb_info *sbi = EXT3_SB(sb);
-+ struct ext3_mb_proc_session *s;
-+ int rc, size;
-+
-+ s = kmalloc(sizeof(*s), GFP_KERNEL);
-+ if (s == NULL)
-+ return -EIO;
-+ size = sizeof(struct ext3_mb_history) * sbi->s_mb_history_max;
-+ s->history = kmalloc(size, GFP_KERNEL);
-+ if (s->history == NULL) {
-+ kfree(s);
-+ return -EIO;
-+ }
-+
-+ spin_lock(&sbi->s_mb_history_lock);
-+ memcpy(s->history, sbi->s_mb_history, size);
-+ s->max = sbi->s_mb_history_max;
-+ s->start = sbi->s_mb_history_cur % s->max;
-+ spin_unlock(&sbi->s_mb_history_lock);
-+
-+ rc = seq_open(file, &ext3_mb_seq_history_ops);
-+ if (rc == 0) {
-+ struct seq_file *m = (struct seq_file *)file->private_data;
-+ m->private = s;
-+ } else {
-+ kfree(s->history);
-+ kfree(s);
-+ }
-+ return rc;
-+
-+}
-+
-+static int ext3_mb_seq_history_release(struct inode *inode, struct file *file)
-+{
-+ struct seq_file *seq = (struct seq_file *)file->private_data;
-+ struct ext3_mb_proc_session *s = seq->private;
-+ kfree(s->history);
-+ kfree(s);
-+ return seq_release(inode, file);
-+}
-+
-+static struct file_operations ext3_mb_seq_history_fops = {
-+ .owner = THIS_MODULE,
-+ .open = ext3_mb_seq_history_open,
-+ .read = seq_read,
-+ .llseek = seq_lseek,
-+ .release = ext3_mb_seq_history_release,
-+};
-+
-+static void *ext3_mb_seq_groups_start(struct seq_file *seq, loff_t *pos)
-+{
-+ struct super_block *sb = seq->private;
-+ struct ext3_sb_info *sbi = EXT3_SB(sb);
-+ long group;
-+
-+ if (*pos < 0 || *pos >= sbi->s_groups_count)
-+ return NULL;
-+
-+ group = *pos + 1;
-+ return (void *) group;
-+}
-+
-+static void *ext3_mb_seq_groups_next(struct seq_file *seq, void *v, loff_t *pos)
-+{
-+ struct super_block *sb = seq->private;
-+ struct ext3_sb_info *sbi = EXT3_SB(sb);
-+ long group;
-+
-+ ++*pos;
-+ if (*pos < 0 || *pos >= sbi->s_groups_count)
-+ return NULL;
-+ group = *pos + 1;
-+ return (void *) group;
-+}
-+
-+static int ext3_mb_seq_groups_show(struct seq_file *seq, void *v)
-+{
-+ struct super_block *sb = seq->private;
-+ long group = (long) v, i;
-+ struct sg {
-+ struct ext3_group_info info;
-+ unsigned short counters[16];
-+ } sg;
-+
-+ group--;
-+ if (group == 0)
-+ seq_printf(seq, "#%-5s: %-5s %-5s %-5s [ %-5s %-5s %-5s %-5s %-5s %-5s %-5s %-5s %-5s %-5s %-5s %-5s %-5s %-5s ]\n",
-+ "group", "free", "frags", "first", "2^0", "2^1", "2^2",
-+ "2^3", "2^4", "2^5", "2^6", "2^7", "2^8", "2^9", "2^10",
-+ "2^11", "2^12", "2^13");
-+
-+ i = (sb->s_blocksize_bits + 2) * sizeof(sg.info.bb_counters[0]) +
-+ sizeof(struct ext3_group_info);
-+ ext3_lock_group(sb, group);
-+ memcpy(&sg, EXT3_GROUP_INFO(sb, group), i);
-+ ext3_unlock_group(sb, group);
-+
-+ if (EXT3_MB_GRP_NEED_INIT(&sg.info))
-+ return 0;
-+
-+ seq_printf(seq, "#%-5lu: %-5u %-5u %-5u [", group, sg.info.bb_free,
-+ sg.info.bb_fragments, sg.info.bb_first_free);
-+ for (i = 0; i <= 13; i++)
-+ seq_printf(seq, " %-5u", i <= sb->s_blocksize_bits + 1 ?
-+ sg.info.bb_counters[i] : 0);
-+ seq_printf(seq, " ]\n");
-+
-+ return 0;
-+}
-+
-+static void ext3_mb_seq_groups_stop(struct seq_file *seq, void *v)
-+{
-+}
-+
-+static struct seq_operations ext3_mb_seq_groups_ops = {
-+ .start = ext3_mb_seq_groups_start,
-+ .next = ext3_mb_seq_groups_next,
-+ .stop = ext3_mb_seq_groups_stop,
-+ .show = ext3_mb_seq_groups_show,
-+};
-+
-+static int ext3_mb_seq_groups_open(struct inode *inode, struct file *file)
-+{
-+ struct super_block *sb = PDE(inode)->data;
-+ int rc;
-+
-+ rc = seq_open(file, &ext3_mb_seq_groups_ops);
-+ if (rc == 0) {
-+ struct seq_file *m = (struct seq_file *)file->private_data;
-+ m->private = sb;
-+ }
-+ return rc;
-+
-+}
-+
-+static struct file_operations ext3_mb_seq_groups_fops = {
-+ .owner = THIS_MODULE,
-+ .open = ext3_mb_seq_groups_open,
-+ .read = seq_read,
-+ .llseek = seq_lseek,
-+ .release = seq_release,
-+};
-+
-+static void ext3_mb_history_release(struct super_block *sb)
-+{
-+ struct ext3_sb_info *sbi = EXT3_SB(sb);
-+ char name[64];
-+
-+ snprintf(name, sizeof(name) - 1, "%s", bdevname(sb->s_bdev, name));
-+ remove_proc_entry("mb_groups", sbi->s_mb_proc);
-+ remove_proc_entry("mb_history", sbi->s_mb_proc);
-+ remove_proc_entry(name, proc_root_ext3);
-+
-+ if (sbi->s_mb_history)
-+ kfree(sbi->s_mb_history);
-+}
-+
-+static void ext3_mb_history_init(struct super_block *sb)
-+{
-+ struct ext3_sb_info *sbi = EXT3_SB(sb);
-+ char name[64];
-+ int i;
-+
-+ snprintf(name, sizeof(name) - 1, "%s", bdevname(sb->s_bdev, name));
-+ sbi->s_mb_proc = proc_mkdir(name, proc_root_ext3);
-+ if (sbi->s_mb_proc != NULL) {
-+ struct proc_dir_entry *p;
-+ p = create_proc_entry("mb_history", S_IRUGO, sbi->s_mb_proc);
-+ if (p) {
-+ p->proc_fops = &ext3_mb_seq_history_fops;
-+ p->data = sb;
-+ }
-+ p = create_proc_entry("mb_groups", S_IRUGO, sbi->s_mb_proc);
-+ if (p) {
-+ p->proc_fops = &ext3_mb_seq_groups_fops;
-+ p->data = sb;
-+ }
-+ }
-+
-+ sbi->s_mb_history_max = 1000;
-+ sbi->s_mb_history_cur = 0;
-+ spin_lock_init(&sbi->s_mb_history_lock);
-+ i = sbi->s_mb_history_max * sizeof(struct ext3_mb_history);
-+ sbi->s_mb_history = kmalloc(i, GFP_KERNEL);
-+ if (sbi->s_mb_history != NULL)
-+ memset(sbi->s_mb_history, 0, i);
-+ /* if we can't allocate the history, we simply won't use it */
-+}
-+
-+static void
-+ext3_mb_store_history(struct super_block *sb, unsigned ino,
-+ struct ext3_allocation_context *ac)
-+{
-+ struct ext3_sb_info *sbi = EXT3_SB(sb);
-+ struct ext3_mb_history h;
-+
-+ if (unlikely(sbi->s_mb_history == NULL))
-+ return;
-+
-+ h.pid = current->pid;
-+ h.ino = ino;
-+ h.goal = ac->ac_g_ex;
-+ h.result = ac->ac_b_ex;
-+ h.found = ac->ac_found;
-+ h.cr = ac->ac_criteria;
-+ h.groups = ac->ac_groups_scanned;
-+ h.tail = ac->ac_tail;
-+ h.buddy = ac->ac_buddy;
-+ h.merged = 0;
-+ if (ac->ac_g_ex.fe_start == ac->ac_b_ex.fe_start &&
-+ ac->ac_g_ex.fe_group == ac->ac_b_ex.fe_group)
-+ h.merged = 1;
-+
-+ spin_lock(&sbi->s_mb_history_lock);
-+ memcpy(sbi->s_mb_history + sbi->s_mb_history_cur, &h, sizeof(h));
-+ if (++sbi->s_mb_history_cur >= sbi->s_mb_history_max)
-+ sbi->s_mb_history_cur = 0;
-+ spin_unlock(&sbi->s_mb_history_lock);
-+}
-+
-+#else
-+#define ext3_mb_history_release(sb)
-+#define ext3_mb_history_init(sb)
-+#endif
-+
-+int ext3_mb_init_backend(struct super_block *sb)
-+{
-+ struct ext3_sb_info *sbi = EXT3_SB(sb);
-+ int i, j, len, metalen;
-+ int num_meta_group_infos =
-+ (sbi->s_groups_count + EXT3_DESC_PER_BLOCK(sb) - 1) >>
-+ EXT3_DESC_PER_BLOCK_BITS(sb);
-+ struct ext3_group_info **meta_group_info;
-+
-+ /* An 8TB filesystem with 64-bit pointers requires a 4096 byte
-+ * kmalloc. A 128kb malloc should suffice for a 256TB filesystem.
-+ * So a two level scheme suffices for now. */
-+ sbi->s_group_info = kmalloc(sizeof(*sbi->s_group_info) *
-+ num_meta_group_infos, GFP_KERNEL);
-+ if (sbi->s_group_info == NULL) {
-+ printk(KERN_ERR "EXT3-fs: can't allocate buddy meta group\n");
-+ return -ENOMEM;
-+ }
-+ sbi->s_buddy_cache = new_inode(sb);
-+ if (sbi->s_buddy_cache == NULL) {
-+ printk(KERN_ERR "EXT3-fs: can't get new inode\n");
-+ goto err_freesgi;
-+ }
-+
-+ metalen = sizeof(*meta_group_info) << EXT3_DESC_PER_BLOCK_BITS(sb);
-+ for (i = 0; i < num_meta_group_infos; i++) {
-+ if ((i + 1) == num_meta_group_infos)
-+ metalen = sizeof(*meta_group_info) *
-+ (sbi->s_groups_count -
-+ (i << EXT3_DESC_PER_BLOCK_BITS(sb)));
-+ meta_group_info = kmalloc(metalen, GFP_KERNEL);
-+ if (meta_group_info == NULL) {
-+ printk(KERN_ERR "EXT3-fs: can't allocate mem for a "
-+ "buddy group\n");
-+ goto err_freemeta;
-+ }
-+ sbi->s_group_info[i] = meta_group_info;
-+ }
-+
-+ /*
-+ * calculate the needed size. if the size of bb_counters changes,
-+ * don't forget to update ext3_mb_generate_buddy()
-+ */
-+ len = sizeof(struct ext3_group_info);
-+ len += sizeof(unsigned short) * (sb->s_blocksize_bits + 2);
-+ for (i = 0; i < sbi->s_groups_count; i++) {
-+ struct ext3_group_desc * desc;
-+
-+ meta_group_info =
-+ sbi->s_group_info[i >> EXT3_DESC_PER_BLOCK_BITS(sb)];
-+ j = i & (EXT3_DESC_PER_BLOCK(sb) - 1);
-+
-+ meta_group_info[j] = kmalloc(len, GFP_KERNEL);
-+ if (meta_group_info[j] == NULL) {
-+ printk(KERN_ERR "EXT3-fs: can't allocate buddy mem\n");
-+ i--;
-+ goto err_freebuddy;
-+ }
-+ desc = ext3_get_group_desc(sb, i, NULL);
-+ if (desc == NULL) {
-+ printk(KERN_ERR"EXT3-fs: can't read descriptor %u\n",i);
-+ goto err_freebuddy;
-+ }
-+ memset(meta_group_info[j], 0, len);
-+ set_bit(EXT3_GROUP_INFO_NEED_INIT_BIT,
-+ &meta_group_info[j]->bb_state);
-+ meta_group_info[j]->bb_free =
-+ le16_to_cpu(desc->bg_free_blocks_count);
-+ }
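-+
-+ /* lookup example for the two-level table filled above: with 4KB blocks
-+ * EXT3_DESC_PER_BLOCK() == 128, so group 1000 lives at
-+ * s_group_info[1000 >> 7][1000 & 127], i.e. s_group_info[7][104],
-+ * which is exactly what the EXT3_GROUP_INFO() macro computes */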
-+
-+ return 0;
-+
-+err_freebuddy:
-+ while (i >= 0) {
-+ kfree(EXT3_GROUP_INFO(sb, i));
-+ i--;
-+ }
-+ i = num_meta_group_infos;
-+err_freemeta:
-+ while (--i >= 0)
-+ kfree(sbi->s_group_info[i]);
-+ iput(sbi->s_buddy_cache);
-+err_freesgi:
-+ kfree(sbi->s_group_info);
-+ return -ENOMEM;
-+}
-+
-+int ext3_mb_init(struct super_block *sb, int needs_recovery)
-+{
-+ struct ext3_sb_info *sbi = EXT3_SB(sb);
-+ struct inode *root = sb->s_root->d_inode;
-+ unsigned i, offset, max;
-+ struct dentry *dentry;
-+
-+ if (!test_opt(sb, MBALLOC))
-+ return 0;
-+
-+ i = (sb->s_blocksize_bits + 2) * sizeof(unsigned short);
-+
-+ sbi->s_mb_offsets = kmalloc(i, GFP_KERNEL);
-+ if (sbi->s_mb_offsets == NULL) {
-+ clear_opt(sbi->s_mount_opt, MBALLOC);
-+ return -ENOMEM;
-+ }
-+ sbi->s_mb_maxs = kmalloc(i, GFP_KERNEL);
-+ if (sbi->s_mb_maxs == NULL) {
-+ clear_opt(sbi->s_mount_opt, MBALLOC);
-+ kfree(sbi->s_mb_offsets);
-+ return -ENOMEM;
-+ }
-+
-+ /* order 0 is regular bitmap */
-+ sbi->s_mb_maxs[0] = sb->s_blocksize << 3;
-+ sbi->s_mb_offsets[0] = 0;
-+
-+ i = 1;
-+ offset = 0;
-+ max = sb->s_blocksize << 2;
-+ do {
-+ sbi->s_mb_offsets[i] = offset;
-+ sbi->s_mb_maxs[i] = max;
-+ offset += 1 << (sb->s_blocksize_bits - i);
-+ max = max >> 1;
-+ i++;
-+ } while (i <= sb->s_blocksize_bits + 1);
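-+ /* for a 4KB block size (s_blocksize_bits == 12) the loop above yields:
-+ * s_mb_maxs[0] = 32768 (bits in the on-disk block bitmap),
-+ * s_mb_maxs[1] = 16384 bits at byte offset 0 of the buddy block,
-+ * s_mb_maxs[2] = 8192 at offset 2048, s_mb_maxs[3] = 4096 at offset 3072,
-+ * ... down to s_mb_maxs[13] = 4, so the buddies for all orders above 0
-+ * fit in one 4KB block */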
-+
-+ /* init the in-core backend for buddy data */
-+ if ((i = ext3_mb_init_backend(sb))) {
-+ clear_opt(sbi->s_mount_opt, MBALLOC);
-+ kfree(sbi->s_mb_offsets);
-+ kfree(sbi->s_mb_maxs);
-+ return i;
-+ }
-+
-+ spin_lock_init(&sbi->s_reserve_lock);
-+ spin_lock_init(&sbi->s_md_lock);
-+ INIT_LIST_HEAD(&sbi->s_active_transaction);
-+ INIT_LIST_HEAD(&sbi->s_closed_transaction);
-+ INIT_LIST_HEAD(&sbi->s_committed_transaction);
-+ spin_lock_init(&sbi->s_bal_lock);
-+
-+ /* remove old on-disk buddy file */
-+ down(&root->i_sem);
-+ dentry = lookup_one_len(".buddy", sb->s_root, strlen(".buddy"));
-+ if (dentry->d_inode != NULL) {
-+ i = vfs_unlink(root, dentry);
-+ if (i != 0)
-+ printk("EXT3-fs: can't remove .buddy file: %d\n", i);
-+ }
-+ dput(dentry);
-+ up(&root->i_sem);
-+
-+ ext3_mb_history_init(sb);
-+
-+ printk("EXT3-fs: mballoc enabled\n");
-+ return 0;
-+}
-+
-+int ext3_mb_release(struct super_block *sb)
-+{
-+ struct ext3_sb_info *sbi = EXT3_SB(sb);
-+ int i, num_meta_group_infos;
-+
-+ if (!test_opt(sb, MBALLOC))
-+ return 0;
-+
-+ /* release freed, non-committed blocks */
-+ spin_lock(&sbi->s_md_lock);
-+ list_splice_init(&sbi->s_closed_transaction,
-+ &sbi->s_committed_transaction);
-+ list_splice_init(&sbi->s_active_transaction,
-+ &sbi->s_committed_transaction);
-+ spin_unlock(&sbi->s_md_lock);
-+ ext3_mb_free_committed_blocks(sb);
-+
-+ if (sbi->s_group_info) {
-+ for (i = 0; i < sbi->s_groups_count; i++)
-+ kfree(EXT3_GROUP_INFO(sb, i));
-+ num_meta_group_infos = (sbi->s_groups_count +
-+ EXT3_DESC_PER_BLOCK(sb) - 1) >>
-+ EXT3_DESC_PER_BLOCK_BITS(sb);
-+ for (i = 0; i < num_meta_group_infos; i++)
-+ kfree(sbi->s_group_info[i]);
-+ kfree(sbi->s_group_info);
-+ }
-+ if (sbi->s_mb_offsets)
-+ kfree(sbi->s_mb_offsets);
-+ if (sbi->s_mb_maxs)
-+ kfree(sbi->s_mb_maxs);
-+ if (sbi->s_buddy_cache)
-+ iput(sbi->s_buddy_cache);
-+ if (sbi->s_blocks_reserved)
-+ printk("ext3-fs: %ld blocks being reserved at umount!\n",
-+ sbi->s_blocks_reserved);
-+ if (ext3_mb_stats) {
-+ printk("EXT3-fs: mballoc: %u blocks %u reqs (%u success)\n",
-+ atomic_read(&sbi->s_bal_allocated),
-+ atomic_read(&sbi->s_bal_reqs),
-+ atomic_read(&sbi->s_bal_success));
-+ printk("EXT3-fs: mballoc: %u extents scanned, %u goal hits, "
-+ "%u 2^N hits, %u breaks\n",
-+ atomic_read(&sbi->s_bal_ex_scanned),
-+ atomic_read(&sbi->s_bal_goals),
-+ atomic_read(&sbi->s_bal_2orders),
-+ atomic_read(&sbi->s_bal_breaks));
-+ printk("EXT3-fs: mballoc: %lu generated and it took %Lu\n",
-+ sbi->s_mb_buddies_generated++,
-+ sbi->s_mb_generation_time);
-+ }
-+
-+ ext3_mb_history_release(sb);
-+
-+ return 0;
-+}
-+
-+void ext3_mb_free_committed_blocks(struct super_block *sb)
-+{
-+ struct ext3_sb_info *sbi = EXT3_SB(sb);
-+ int err, i, count = 0, count2 = 0;
-+ struct ext3_free_metadata *md;
-+ struct ext3_buddy e3b;
-+
-+ if (list_empty(&sbi->s_committed_transaction))
-+ return;
-+
-+ /* there are committed blocks still to be freed */
-+ do {
-+ /* get next array of blocks */
-+ md = NULL;
-+ spin_lock(&sbi->s_md_lock);
-+ if (!list_empty(&sbi->s_committed_transaction)) {
-+ md = list_entry(sbi->s_committed_transaction.next,
-+ struct ext3_free_metadata, list);
-+ list_del(&md->list);
-+ }
-+ spin_unlock(&sbi->s_md_lock);
-+
-+ if (md == NULL)
-+ break;
-+
-+ mb_debug("gonna free %u blocks in group %u (0x%p):",
-+ md->num, md->group, md);
-+
-+ err = ext3_mb_load_buddy(sb, md->group, &e3b);
-+ /* we expect to find existing buddy because it's pinned */
-+ BUG_ON(err != 0);
-+
-+ /* there are blocks to put in buddy to make them really free */
-+ count += md->num;
-+ count2++;
-+ ext3_lock_group(sb, md->group);
-+ for (i = 0; i < md->num; i++) {
-+ mb_debug(" %u", md->blocks[i]);
-+ mb_free_blocks(&e3b, md->blocks[i], 1);
-+ }
-+ mb_debug("\n");
-+ ext3_unlock_group(sb, md->group);
-+
-+ /* balance refcounts from ext3_mb_free_metadata() */
-+ page_cache_release(e3b.bd_buddy_page);
-+ page_cache_release(e3b.bd_bitmap_page);
-+
-+ kfree(md);
-+ ext3_mb_release_desc(&e3b);
-+
-+ } while (md);
-+ mb_debug("freed %u blocks in %u structures\n", count, count2);
-+}
-+
-+void ext3_mb_poll_new_transaction(struct super_block *sb, handle_t *handle)
-+{
-+ struct ext3_sb_info *sbi = EXT3_SB(sb);
-+
-+ if (sbi->s_last_transaction == handle->h_transaction->t_tid)
-+ return;
-+
-+ /* a new transaction! time to close the last one and free blocks
-+ * for the committed transaction. we know that only one transaction
-+ * can be active, so the previous transaction may still be
-+ * committing, and the one before that is known to be already
-+ * committed. this means that we may now free the blocks freed in
-+ * all transactions before the previous one. */
-+
-+ spin_lock(&sbi->s_md_lock);
-+ if (sbi->s_last_transaction != handle->h_transaction->t_tid) {
-+ mb_debug("new transaction %lu, old %lu\n",
-+ (unsigned long) handle->h_transaction->t_tid,
-+ (unsigned long) sbi->s_last_transaction);
-+ list_splice_init(&sbi->s_closed_transaction,
-+ &sbi->s_committed_transaction);
-+ list_splice_init(&sbi->s_active_transaction,
-+ &sbi->s_closed_transaction);
-+ sbi->s_last_transaction = handle->h_transaction->t_tid;
-+ }
-+ spin_unlock(&sbi->s_md_lock);
-+
-+ ext3_mb_free_committed_blocks(sb);
-+}
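-+
-+/*
-+ * Example of the list rotation above (transaction ids are illustrative):
-+ * when transaction T12 starts, blocks freed under T11 move from
-+ * s_active_transaction to s_closed_transaction, blocks freed under T10
-+ * move from the closed to the committed list, and
-+ * ext3_mb_free_committed_blocks() then returns the T10 blocks to the
-+ * buddy cache.
-+ */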
-+
-+int ext3_mb_free_metadata(handle_t *handle, struct ext3_buddy *e3b,
-+ int group, int block, int count)
-+{
-+ struct ext3_group_info *db = e3b->bd_info;
-+ struct super_block *sb = e3b->bd_sb;
-+ struct ext3_sb_info *sbi = EXT3_SB(sb);
-+ struct ext3_free_metadata *md;
-+ int i;
-+
-+ J_ASSERT(e3b->bd_bitmap_page != NULL);
-+ J_ASSERT(e3b->bd_buddy_page != NULL);
-+
-+ ext3_lock_group(sb, group);
-+ for (i = 0; i < count; i++) {
-+ md = db->bb_md_cur;
-+ if (md && db->bb_tid != handle->h_transaction->t_tid) {
-+ db->bb_md_cur = NULL;
-+ md = NULL;
-+ }
-+
-+ if (md == NULL) {
-+ ext3_unlock_group(sb, group);
-+ md = kmalloc(sizeof(*md), GFP_KERNEL);
-+ if (md == NULL)
-+ return -ENOMEM;
-+ md->num = 0;
-+ md->group = group;
-+
-+ ext3_lock_group(sb, group);
-+ if (db->bb_md_cur == NULL) {
-+ spin_lock(&sbi->s_md_lock);
-+ list_add(&md->list, &sbi->s_active_transaction);
-+ spin_unlock(&sbi->s_md_lock);
-+ /* protect buddy cache from being freed,
-+ * otherwise we'll refresh it from
-+ * on-disk bitmap and lose not-yet-available
-+ * blocks */
-+ page_cache_get(e3b->bd_buddy_page);
-+ page_cache_get(e3b->bd_bitmap_page);
-+ db->bb_md_cur = md;
-+ db->bb_tid = handle->h_transaction->t_tid;
-+ mb_debug("new md 0x%p for group %u\n",
-+ md, md->group);
-+ } else {
-+ kfree(md);
-+ md = db->bb_md_cur;
-+ }
-+ }
-+
-+ BUG_ON(md->num >= EXT3_BB_MAX_BLOCKS);
-+ md->blocks[md->num] = block + i;
-+ md->num++;
-+ if (md->num == EXT3_BB_MAX_BLOCKS) {
-+ /* no more space, put the full container on the sb's list */
-+ db->bb_md_cur = NULL;
-+ }
-+ }
-+ ext3_unlock_group(sb, group);
-+ return 0;
-+}
-+
-+void ext3_mb_free_blocks(handle_t *handle, struct inode *inode,
-+ unsigned long block, unsigned long count,
-+ int metadata, int *freed)
-+{
-+ struct buffer_head *bitmap_bh = NULL;
-+ struct ext3_group_desc *gdp;
-+ struct ext3_super_block *es;
-+ unsigned long bit, overflow;
-+ struct buffer_head *gd_bh;
-+ unsigned long block_group;
-+ struct ext3_sb_info *sbi;
-+ struct super_block *sb;
-+ struct ext3_buddy e3b;
-+ int err = 0, ret;
-+
-+ *freed = 0;
-+ sb = inode->i_sb;
-+ if (!sb) {
-+ printk ("ext3_free_blocks: nonexistent device");
-+ return;
-+ }
-+
-+ ext3_mb_poll_new_transaction(sb, handle);
-+
-+ sbi = EXT3_SB(sb);
-+ es = EXT3_SB(sb)->s_es;
-+ if (block < le32_to_cpu(es->s_first_data_block) ||
-+ block + count < block ||
-+ block + count > le32_to_cpu(es->s_blocks_count)) {
-+ ext3_error (sb, "ext3_free_blocks",
-+ "Freeing blocks not in datazone - "
-+ "block = %lu, count = %lu", block, count);
-+ goto error_return;
-+ }
-+
-+ ext3_debug("freeing block %lu\n", block);
-+
-+do_more:
-+ overflow = 0;
-+ block_group = (block - le32_to_cpu(es->s_first_data_block)) /
-+ EXT3_BLOCKS_PER_GROUP(sb);
-+ bit = (block - le32_to_cpu(es->s_first_data_block)) %
-+ EXT3_BLOCKS_PER_GROUP(sb);
-+ /*
-+ * Check to see if we are freeing blocks across a group
-+ * boundary.
-+ */
-+ if (bit + count > EXT3_BLOCKS_PER_GROUP(sb)) {
-+ overflow = bit + count - EXT3_BLOCKS_PER_GROUP(sb);
-+ count -= overflow;
-+ }
-+ brelse(bitmap_bh);
-+ bitmap_bh = read_block_bitmap(sb, block_group);
-+ if (!bitmap_bh)
-+ goto error_return;
-+ gdp = ext3_get_group_desc (sb, block_group, &gd_bh);
-+ if (!gdp)
-+ goto error_return;
-+
-+ if (in_range (le32_to_cpu(gdp->bg_block_bitmap), block, count) ||
-+ in_range (le32_to_cpu(gdp->bg_inode_bitmap), block, count) ||
-+ in_range (block, le32_to_cpu(gdp->bg_inode_table),
-+ EXT3_SB(sb)->s_itb_per_group) ||
-+ in_range (block + count - 1, le32_to_cpu(gdp->bg_inode_table),
-+ EXT3_SB(sb)->s_itb_per_group))
-+ ext3_error (sb, "ext3_free_blocks",
-+ "Freeing blocks in system zones - "
-+ "Block = %lu, count = %lu",
-+ block, count);
-+
-+ BUFFER_TRACE(bitmap_bh, "getting write access");
-+ err = ext3_journal_get_write_access(handle, bitmap_bh);
-+ if (err)
-+ goto error_return;
-+
-+ /*
-+ * We are about to modify some metadata. Call the journal APIs
-+ * to unshare ->b_data if a currently-committing transaction is
-+ * using it
-+ */
-+ BUFFER_TRACE(gd_bh, "get_write_access");
-+ err = ext3_journal_get_write_access(handle, gd_bh);
-+ if (err)
-+ goto error_return;
-+
-+ err = ext3_mb_load_buddy(sb, block_group, &e3b);
-+ if (err)
-+ goto error_return;
-+
-+#ifdef AGGRESSIVE_CHECK
-+ {
-+ int i;
-+ for (i = 0; i < count; i++)
-+ J_ASSERT(mb_test_bit(bit + i, bitmap_bh->b_data));
-+ }
-+#endif
-+ mb_clear_bits(bitmap_bh->b_data, bit, count);
-+
-+ /* We dirtied the bitmap block */
-+ BUFFER_TRACE(bitmap_bh, "dirtied bitmap block");
-+ err = ext3_journal_dirty_metadata(handle, bitmap_bh);
-+
-+ if (metadata) {
-+ /* blocks being freed are metadata. these blocks shouldn't
-+ * be used until this transaction is committed */
-+ ext3_mb_free_metadata(handle, &e3b, block_group, bit, count);
-+ } else {
-+ ext3_lock_group(sb, block_group);
-+ mb_free_blocks(&e3b, bit, count);
-+ ext3_unlock_group(sb, block_group);
-+ }
-+
-+ spin_lock(sb_bgl_lock(sbi, block_group));
-+ gdp->bg_free_blocks_count =
-+ cpu_to_le16(le16_to_cpu(gdp->bg_free_blocks_count) + count);
-+ spin_unlock(sb_bgl_lock(sbi, block_group));
-+ percpu_counter_mod(&sbi->s_freeblocks_counter, count);
-+
-+ ext3_mb_release_desc(&e3b);
-+
-+ *freed = count;
-+
-+ /* And the group descriptor block */
-+ BUFFER_TRACE(gd_bh, "dirtied group descriptor block");
-+ ret = ext3_journal_dirty_metadata(handle, gd_bh);
-+ if (!err) err = ret;
-+
-+ if (overflow && !err) {
-+ block += count;
-+ count = overflow;
-+ goto do_more;
-+ }
-+ sb->s_dirt = 1;
-+error_return:
-+ brelse(bitmap_bh);
-+ ext3_std_error(sb, err);
-+ return;
-+}
-+
-+int ext3_mb_reserve_blocks(struct super_block *sb, int blocks)
-+{
-+ struct ext3_sb_info *sbi = EXT3_SB(sb);
-+ int free, ret = -ENOSPC;
-+
-+ BUG_ON(blocks < 0);
-+ spin_lock(&sbi->s_reserve_lock);
-+ free = percpu_counter_read_positive(&sbi->s_freeblocks_counter);
-+ if (blocks <= free - sbi->s_blocks_reserved) {
-+ sbi->s_blocks_reserved += blocks;
-+ ret = 0;
-+ }
-+ spin_unlock(&sbi->s_reserve_lock);
-+ return ret;
-+}
-+
-+void ext3_mb_release_blocks(struct super_block *sb, int blocks)
-+{
-+ struct ext3_sb_info *sbi = EXT3_SB(sb);
-+
-+ BUG_ON(blocks < 0);
-+ spin_lock(&sbi->s_reserve_lock);
-+ sbi->s_blocks_reserved -= blocks;
-+ WARN_ON(sbi->s_blocks_reserved < 0);
-+ if (sbi->s_blocks_reserved < 0)
-+ sbi->s_blocks_reserved = 0;
-+ spin_unlock(&sbi->s_reserve_lock);
-+}
-+
-+int ext3_new_block(handle_t *handle, struct inode *inode,
-+ unsigned long goal, int *errp)
-+{
-+ int ret, len;
-+
-+ if (!test_opt(inode->i_sb, MBALLOC)) {
-+ ret = ext3_new_block_old(handle, inode, goal, errp);
-+ goto out;
-+ }
-+ len = 1;
-+ ret = ext3_mb_new_blocks(handle, inode, goal, &len, 0, errp);
-+out:
-+ return ret;
-+}
-+
-+
-+void ext3_free_blocks(handle_t *handle, struct inode * inode,
-+ unsigned long block, unsigned long count, int metadata)
-+{
-+ struct super_block *sb;
-+ int freed;
-+
-+ sb = inode->i_sb;
-+ if (!test_opt(sb, MBALLOC) || !EXT3_SB(sb)->s_group_info)
-+ ext3_free_blocks_sb(handle, sb, block, count, &freed);
-+ else
-+ ext3_mb_free_blocks(handle, inode, block, count, metadata, &freed);
-+ if (freed)
-+ DQUOT_FREE_BLOCK(inode, freed);
-+ return;
-+}
-+
-+#define EXT3_ROOT "ext3"
-+#define EXT3_MB_STATS_NAME "mb_stats"
-+#define EXT3_MB_MAX_TO_SCAN_NAME "mb_max_to_scan"
-+#define EXT3_MB_MIN_TO_SCAN_NAME "mb_min_to_scan"
-+#define EXT3_MB_ORDER2_REQ "mb_order2_req"
-+
-+static int ext3_mb_stats_read(char *page, char **start, off_t off,
-+ int count, int *eof, void *data)
-+{
-+ int len;
-+
-+ *eof = 1;
-+ if (off != 0)
-+ return 0;
-+
-+ len = sprintf(page, "%ld\n", ext3_mb_stats);
-+ *start = page;
-+ return len;
-+}
-+
-+static int ext3_mb_stats_write(struct file *file, const char *buffer,
-+ unsigned long count, void *data)
-+{
-+ char str[32];
-+
-+ if (count >= sizeof(str)) {
-+ printk(KERN_ERR "EXT3-fs: %s string too long, max %u bytes\n",
-+ EXT3_MB_STATS_NAME, (int)sizeof(str));
-+ return -EOVERFLOW;
-+ }
-+
-+ if (copy_from_user(str, buffer, count))
-+ return -EFAULT;
-+
-+ /* Store only 0 or 1: zero -> 0, non-zero -> 1 */
-+ ext3_mb_stats = (simple_strtol(str, NULL, 0) != 0);
-+ return count;
-+}
-+
-+static int ext3_mb_max_to_scan_read(char *page, char **start, off_t off,
-+ int count, int *eof, void *data)
-+{
-+ int len;
-+
-+ *eof = 1;
-+ if (off != 0)
-+ return 0;
-+
-+ len = sprintf(page, "%ld\n", ext3_mb_max_to_scan);
-+ *start = page;
-+ return len;
-+}
-+
-+static int ext3_mb_max_to_scan_write(struct file *file, const char *buffer,
-+ unsigned long count, void *data)
-+{
-+ char str[32];
-+ long value;
-+
-+ if (count >= sizeof(str)) {
-+ printk(KERN_ERR "EXT3-fs: %s string too long, max %u bytes\n",
-+ EXT3_MB_MAX_TO_SCAN_NAME, (int)sizeof(str));
-+ return -EOVERFLOW;
-+ }
-+
-+ if (copy_from_user(str, buffer, count))
-+ return -EFAULT;
-+
-+ /* The value must be positive */
-+ value = simple_strtol(str, NULL, 0);
-+ if (value <= 0)
-+ return -ERANGE;
-+
-+ ext3_mb_max_to_scan = value;
-+
-+ return count;
-+}
-+
-+static int ext3_mb_min_to_scan_read(char *page, char **start, off_t off,
-+ int count, int *eof, void *data)
-+{
-+ int len;
-+
-+ *eof = 1;
-+ if (off != 0)
-+ return 0;
-+
-+ len = sprintf(page, "%ld\n", ext3_mb_min_to_scan);
-+ *start = page;
-+ return len;
-+}
-+
-+static int ext3_mb_min_to_scan_write(struct file *file, const char *buffer,
-+ unsigned long count, void *data)
-+{
-+ char str[32];
-+ long value;
-+
-+ if (count >= sizeof(str)) {
-+ printk(KERN_ERR "EXT3-fs: %s string too long, max %u bytes\n",
-+ EXT3_MB_MIN_TO_SCAN_NAME, (int)sizeof(str));
-+ return -EOVERFLOW;
-+ }
-+
-+ if (copy_from_user(str, buffer, count))
-+ return -EFAULT;
-+
-+ /* The value must be positive */
-+ value = simple_strtol(str, NULL, 0);
-+ if (value <= 0)
-+ return -ERANGE;
-+
-+ ext3_mb_min_to_scan = value;
-+
-+ return count;
-+}
-+
-+static int ext3_mb_order2_req_read(char *page, char **start, off_t off,
-+ int count, int *eof, void *data)
-+{
-+ int len;
-+
-+ *eof = 1;
-+ if (off != 0)
-+ return 0;
-+
-+ len = sprintf(page, "%ld\n", ext3_mb_order2_reqs);
-+ *start = page;
-+ return len;
-+}
-+
-+static int ext3_mb_order2_req_write(struct file *file, const char *buffer,
-+ unsigned long count, void *data)
-+{
-+ char str[32];
-+ long value;
-+
-+ if (count >= sizeof(str)) {
-+ printk(KERN_ERR "EXT3-fs: %s string too long, max %u bytes\n",
-+ EXT3_MB_ORDER2_REQ, (int)sizeof(str));
-+ return -EOVERFLOW;
-+ }
-+
-+ if (copy_from_user(str, buffer, count))
-+ return -EFAULT;
-+
-+ /* The value must be positive */
-+ value = simple_strtol(str, NULL, 0);
-+ if (value <= 0)
-+ return -ERANGE;
-+
-+ ext3_mb_order2_reqs = value;
-+
-+ return count;
-+}
-+
-+int __init init_ext3_proc(void)
-+{
-+ struct proc_dir_entry *proc_ext3_mb_stats;
-+ struct proc_dir_entry *proc_ext3_mb_max_to_scan;
-+ struct proc_dir_entry *proc_ext3_mb_min_to_scan;
-+ struct proc_dir_entry *proc_ext3_mb_order2_req;
-+
-+ proc_root_ext3 = proc_mkdir(EXT3_ROOT, proc_root_fs);
-+ if (proc_root_ext3 == NULL) {
-+ printk(KERN_ERR "EXT3-fs: Unable to create %s\n", EXT3_ROOT);
-+ return -EIO;
-+ }
-+
-+ /* Initialize EXT3_MB_STATS_NAME */
-+ proc_ext3_mb_stats = create_proc_entry(EXT3_MB_STATS_NAME,
-+ S_IFREG | S_IRUGO | S_IWUSR, proc_root_ext3);
-+ if (proc_ext3_mb_stats == NULL) {
-+ printk(KERN_ERR "EXT3-fs: Unable to create %s\n",
-+ EXT3_MB_STATS_NAME);
-+ remove_proc_entry(EXT3_ROOT, proc_root_fs);
-+ return -EIO;
-+ }
-+
-+ proc_ext3_mb_stats->data = NULL;
-+ proc_ext3_mb_stats->read_proc = ext3_mb_stats_read;
-+ proc_ext3_mb_stats->write_proc = ext3_mb_stats_write;
-+
-+ /* Initialize EXT3_MB_MAX_TO_SCAN_NAME */
-+ proc_ext3_mb_max_to_scan = create_proc_entry(
-+ EXT3_MB_MAX_TO_SCAN_NAME,
-+ S_IFREG | S_IRUGO | S_IWUSR, proc_root_ext3);
-+ if (proc_ext3_mb_max_to_scan == NULL) {
-+ printk(KERN_ERR "EXT3-fs: Unable to create %s\n",
-+ EXT3_MB_MAX_TO_SCAN_NAME);
-+ remove_proc_entry(EXT3_MB_STATS_NAME, proc_root_ext3);
-+ remove_proc_entry(EXT3_ROOT, proc_root_fs);
-+ return -EIO;
-+ }
-+
-+ proc_ext3_mb_max_to_scan->data = NULL;
-+ proc_ext3_mb_max_to_scan->read_proc = ext3_mb_max_to_scan_read;
-+ proc_ext3_mb_max_to_scan->write_proc = ext3_mb_max_to_scan_write;
-+
-+ /* Initialize EXT3_MB_MIN_TO_SCAN_NAME */
-+ proc_ext3_mb_min_to_scan = create_proc_entry(
-+ EXT3_MB_MIN_TO_SCAN_NAME,
-+ S_IFREG | S_IRUGO | S_IWUSR, proc_root_ext3);
-+ if (proc_ext3_mb_min_to_scan == NULL) {
-+ printk(KERN_ERR "EXT3-fs: Unable to create %s\n",
-+ EXT3_MB_MIN_TO_SCAN_NAME);
-+ remove_proc_entry(EXT3_MB_MAX_TO_SCAN_NAME, proc_root_ext3);
-+ remove_proc_entry(EXT3_MB_STATS_NAME, proc_root_ext3);
-+ remove_proc_entry(EXT3_ROOT, proc_root_fs);
-+ return -EIO;
-+ }
-+
-+ proc_ext3_mb_min_to_scan->data = NULL;
-+ proc_ext3_mb_min_to_scan->read_proc = ext3_mb_min_to_scan_read;
-+ proc_ext3_mb_min_to_scan->write_proc = ext3_mb_min_to_scan_write;
-+
-+ /* Initialize EXT3_MB_ORDER2_REQ */
-+ proc_ext3_mb_order2_req = create_proc_entry(
-+ EXT3_MB_ORDER2_REQ,
-+ S_IFREG | S_IRUGO | S_IWUSR, proc_root_ext3);
-+ if (proc_ext3_mb_order2_req == NULL) {
-+ printk(KERN_ERR "EXT3-fs: Unable to create %s\n",
-+ EXT3_MB_ORDER2_REQ);
-+ remove_proc_entry(EXT3_MB_MIN_TO_SCAN_NAME, proc_root_ext3);
-+ remove_proc_entry(EXT3_MB_MAX_TO_SCAN_NAME, proc_root_ext3);
-+ remove_proc_entry(EXT3_MB_STATS_NAME, proc_root_ext3);
-+ remove_proc_entry(EXT3_ROOT, proc_root_fs);
-+ return -EIO;
-+ }
-+
-+ proc_ext3_mb_order2_req->data = NULL;
-+ proc_ext3_mb_order2_req->read_proc = ext3_mb_order2_req_read;
-+ proc_ext3_mb_order2_req->write_proc = ext3_mb_order2_req_write;
-+
-+ return 0;
-+}
-+
-+void exit_ext3_proc(void)
-+{
-+ remove_proc_entry(EXT3_MB_STATS_NAME, proc_root_ext3);
-+ remove_proc_entry(EXT3_MB_MAX_TO_SCAN_NAME, proc_root_ext3);
-+ remove_proc_entry(EXT3_MB_MIN_TO_SCAN_NAME, proc_root_ext3);
-+ remove_proc_entry(EXT3_MB_ORDER2_REQ, proc_root_ext3);
-+ remove_proc_entry(EXT3_ROOT, proc_root_fs);
-+}
-Index: linux-2.6.12.6-bull/fs/ext3/Makefile
-===================================================================
---- linux-2.6.12.6-bull.orig/fs/ext3/Makefile 2006-04-29 20:39:09.000000000 +0400
-+++ linux-2.6.12.6-bull/fs/ext3/Makefile 2006-04-29 20:39:10.000000000 +0400
-@@ -6,7 +6,7 @@ obj-$(CONFIG_EXT3_FS) += ext3.o
-
- ext3-y := balloc.o bitmap.o dir.o file.o fsync.o ialloc.o inode.o iopen.o \
- ioctl.o namei.o super.o symlink.o hash.o resize.o \
-- extents.o
-+ extents.o mballoc.o
-
- ext3-$(CONFIG_EXT3_FS_XATTR) += xattr.o xattr_user.o xattr_trusted.o
- ext3-$(CONFIG_EXT3_FS_POSIX_ACL) += acl.o
+++ /dev/null
-Index: linux-stage/include/linux/ext3_fs.h
-===================================================================
---- linux-stage.orig/include/linux/ext3_fs.h 2006-07-16 02:29:43.000000000 +0800
-+++ linux-stage/include/linux/ext3_fs.h 2006-07-16 02:29:49.000000000 +0800
-@@ -53,6 +53,14 @@
- #define ext3_debug(f, a...) do {} while (0)
- #endif
-
-+#define EXT3_MULTIBLOCK_ALLOCATOR 1
-+
-+#define EXT3_MB_HINT_MERGE 1
-+#define EXT3_MB_HINT_RESERVED 2
-+#define EXT3_MB_HINT_METADATA 4
-+#define EXT3_MB_HINT_FIRST 8
-+#define EXT3_MB_HINT_BEST 16
-+
- /*
- * Special inodes numbers
- */
-@@ -379,6 +387,7 @@ struct ext3_inode {
- #define EXT3_MOUNT_IOPEN_NOPRIV 0x800000/* Make iopen world-readable */
- #define EXT3_MOUNT_EXTENTS 0x1000000/* Extents support */
- #define EXT3_MOUNT_EXTDEBUG 0x2000000/* Extents debug */
-+#define EXT3_MOUNT_MBALLOC 0x4000000/* Buddy allocation support */
-
- /* Compatibility, for having both ext2_fs.h and ext3_fs.h included at once */
- #ifndef clear_opt
-@@ -405,6 +413,14 @@
- #define ext3_find_first_zero_bit ext2_find_first_zero_bit
- #define ext3_find_next_zero_bit ext2_find_next_zero_bit
-
-+#ifndef ext2_find_next_le_bit
-+#ifdef __LITTLE_ENDIAN
-+#define ext2_find_next_le_bit(addr, size, off) find_next_bit((addr), (size), (off))
-+#else
-+#error "mballoc needs a patch for big-endian systems - CFS bug 10634"
-+#endif /* __LITTLE_ENDIAN */
-+#endif /* !ext2_find_next_le_bit */
-+
- /*
- * Maximal mount counts between two filesystem checks
- */
-@@ -749,12 +758,12 @@ ext3_group_first_block_no(struct super_b
- /* balloc.c */
- extern int ext3_bg_has_super(struct super_block *sb, int group);
- extern unsigned long ext3_bg_num_gdb(struct super_block *sb, int group);
--extern ext3_fsblk_t ext3_new_block (handle_t *handle, struct inode *inode,
-+extern ext3_fsblk_t ext3_new_block_old(handle_t *handle, struct inode *inode,
- ext3_fsblk_t goal, int *errp);
- extern ext3_fsblk_t ext3_new_blocks (handle_t *handle, struct inode *inode,
- ext3_fsblk_t goal, unsigned long *count, int *errp);
- extern void ext3_free_blocks (handle_t *handle, struct inode *inode,
-- ext3_fsblk_t block, unsigned long count);
-+ ext3_fsblk_t block, unsigned long count, int metadata);
- extern void ext3_free_blocks_sb (handle_t *handle, struct super_block *sb,
- ext3_fsblk_t block, unsigned long count,
- unsigned long *pdquot_freed_blocks);
-@@ -881,6 +890,21 @@ extern void ext3_extents_initialize_bloc
- extern int ext3_ext_ioctl(struct inode *inode, struct file *filp,
- unsigned int cmd, unsigned long arg);
-
-+/* mballoc.c */
-+extern long ext3_mb_stats;
-+extern long ext3_mb_max_to_scan;
-+extern int ext3_mb_init(struct super_block *sb, int needs_recovery);
-+extern int ext3_mb_release(struct super_block *sb);
-+extern ext3_fsblk_t ext3_new_block(handle_t *handle, struct inode *inode,
-+ ext3_fsblk_t goal, int *errp);
-+extern ext3_fsblk_t ext3_mb_new_blocks(handle_t *handle, struct inode *inode,
-+ ext3_fsblk_t goal, int *len, int flags,
-+ int *errp);
-+extern int ext3_mb_reserve_blocks(struct super_block *sb, int);
-+extern void ext3_mb_release_blocks(struct super_block *sb, int);
-+int __init init_ext3_proc(void);
-+void exit_ext3_proc(void);
-+
- #endif /* __KERNEL__ */
-
- /* EXT3_IOC_CREATE_INUM at bottom of file (visible to kernel and user). */
-Index: linux-stage/include/linux/ext3_fs_sb.h
-===================================================================
---- linux-stage.orig/include/linux/ext3_fs_sb.h 2006-07-16 02:29:43.000000000 +0800
-+++ linux-stage/include/linux/ext3_fs_sb.h 2006-07-16 02:29:49.000000000 +0800
-@@ -21,8 +21,14 @@
- #include <linux/wait.h>
- #include <linux/blockgroup_lock.h>
- #include <linux/percpu_counter.h>
-+#include <linux/list.h>
- #endif
- #include <linux/rbtree.h>
-+#include <linux/proc_fs.h>
-+
-+struct ext3_buddy_group_blocks;
-+struct ext3_mb_history;
-+#define EXT3_BB_MAX_BLOCKS
-
- /*
- * third extended-fs super-block data in memory
-@@ -78,6 +84,43 @@ struct ext3_sb_info {
- char *s_qf_names[MAXQUOTAS]; /* Names of quota files with journalled quota */
- int s_jquota_fmt; /* Format of quota to use */
- #endif
-+
-+ /* for buddy allocator */
-+ struct ext3_group_info ***s_group_info;
-+ struct inode *s_buddy_cache;
-+ long s_blocks_reserved;
-+ spinlock_t s_reserve_lock;
-+ struct list_head s_active_transaction;
-+ struct list_head s_closed_transaction;
-+ struct list_head s_committed_transaction;
-+ spinlock_t s_md_lock;
-+ tid_t s_last_transaction;
-+ int s_mb_factor;
-+ unsigned short *s_mb_offsets, *s_mb_maxs;
-+ unsigned long s_stripe;
-+
-+ /* history to debug policy */
-+ struct ext3_mb_history *s_mb_history;
-+ int s_mb_history_cur;
-+ int s_mb_history_max;
-+ struct proc_dir_entry *s_mb_proc;
-+ spinlock_t s_mb_history_lock;
-+
-+ /* stats for buddy allocator */
-+ atomic_t s_bal_reqs; /* number of reqs with len > 1 */
-+ atomic_t s_bal_success; /* we found long enough chunks */
-+ atomic_t s_bal_allocated; /* in blocks */
-+ atomic_t s_bal_ex_scanned; /* total extents scanned */
-+ atomic_t s_bal_goals; /* goal hits */
-+ atomic_t s_bal_breaks; /* too long searches */
-+ atomic_t s_bal_2orders; /* 2^order hits */
-+ spinlock_t s_bal_lock;
-+ unsigned long s_mb_buddies_generated;
-+ unsigned long long s_mb_generation_time;
- };
-+
-+#define EXT3_GROUP_INFO(sb, group) \
-+ EXT3_SB(sb)->s_group_info[(group) >> EXT3_DESC_PER_BLOCK_BITS(sb)] \
-+ [(group) & (EXT3_DESC_PER_BLOCK(sb) - 1)]
-
- #endif /* _LINUX_EXT3_FS_SB */
-Index: linux-stage/fs/ext3/super.c
-===================================================================
---- linux-stage.orig/fs/ext3/super.c 2006-07-16 02:29:43.000000000 +0800
-+++ linux-stage/fs/ext3/super.c 2006-07-16 02:29:49.000000000 +0800
-@@ -391,6 +391,7 @@ static void ext3_put_super (struct super
- struct ext3_super_block *es = sbi->s_es;
- int i;
-
-+ ext3_mb_release(sb);
- ext3_ext_release(sb);
- ext3_xattr_put_super(sb);
- journal_destroy(sbi->s_journal);
-@@ -642,6 +643,7 @@ enum {
- Opt_ignore, Opt_barrier, Opt_err, Opt_resize, Opt_usrquota,
- Opt_iopen, Opt_noiopen, Opt_iopen_nopriv,
- Opt_extents, Opt_noextents, Opt_extdebug,
-+ Opt_mballoc, Opt_nomballoc, Opt_stripe,
- Opt_grpquota
- };
-
-@@ -696,6 +697,9 @@ static match_table_t tokens = {
- {Opt_extents, "extents"},
- {Opt_noextents, "noextents"},
- {Opt_extdebug, "extdebug"},
-+ {Opt_mballoc, "mballoc"},
-+ {Opt_nomballoc, "nomballoc"},
-+ {Opt_stripe, "stripe=%u"},
- {Opt_barrier, "barrier=%u"},
- {Opt_err, NULL},
- {Opt_resize, "resize"},
-@@ -1047,6 +1049,19 @@ clear_qf_name:
- case Opt_extdebug:
- set_opt (sbi->s_mount_opt, EXTDEBUG);
- break;
-+ case Opt_mballoc:
-+ set_opt(sbi->s_mount_opt, MBALLOC);
-+ break;
-+ case Opt_nomballoc:
-+ clear_opt(sbi->s_mount_opt, MBALLOC);
-+ break;
-+ case Opt_stripe:
-+ if (match_int(&args[0], &option))
-+ return 0;
-+ if (option < 0)
-+ return 0;
-+ sbi->s_stripe = option;
-+ break;
- default:
- printk (KERN_ERR
- "EXT3-fs: Unrecognized mount option \"%s\" "
-@@ -1773,6 +1778,7 @@ static int ext3_fill_super (struct super
- "writeback");
-
- ext3_ext_init(sb);
-+ ext3_mb_init(sb, needs_recovery);
- lock_kernel();
- return 0;
-
-@@ -2712,7 +2718,13 @@ static struct file_system_type ext3_fs_t
-
- static int __init init_ext3_fs(void)
- {
-- int err = init_ext3_xattr();
-+ int err;
-+
-+ err = init_ext3_proc();
-+ if (err)
-+ return err;
-+
-+ err = init_ext3_xattr();
- if (err)
- return err;
- err = init_inodecache();
-@@ -2734,6 +2746,7 @@ static void __exit exit_ext3_fs(void)
- unregister_filesystem(&ext3_fs_type);
- destroy_inodecache();
- exit_ext3_xattr();
-+ exit_ext3_proc();
- }
-
- int ext3_prep_san_write(struct inode *inode, long *blocks,
-Index: linux-stage/fs/ext3/extents.c
-===================================================================
---- linux-stage.orig/fs/ext3/extents.c 2006-07-16 02:29:43.000000000 +0800
-+++ linux-stage/fs/ext3/extents.c 2006-07-16 02:29:49.000000000 +0800
-@@ -771,7 +771,7 @@ cleanup:
- for (i = 0; i < depth; i++) {
- if (!ablocks[i])
- continue;
-- ext3_free_blocks(handle, tree->inode, ablocks[i], 1);
-+ ext3_free_blocks(handle, tree->inode, ablocks[i], 1, 1);
- }
- }
- kfree(ablocks);
-@@ -1428,7 +1428,7 @@ int ext3_ext_rm_idx(handle_t *handle, st
- path->p_idx->ei_leaf);
- bh = sb_find_get_block(tree->inode->i_sb, path->p_idx->ei_leaf);
- ext3_forget(handle, 1, tree->inode, bh, path->p_idx->ei_leaf);
-- ext3_free_blocks(handle, tree->inode, path->p_idx->ei_leaf, 1);
-+ ext3_free_blocks(handle, tree->inode, path->p_idx->ei_leaf, 1, 1);
- return err;
- }
-
-@@ -1913,10 +1913,12 @@ ext3_remove_blocks(struct ext3_extents_t
- int needed = ext3_remove_blocks_credits(tree, ex, from, to);
- handle_t *handle = ext3_journal_start(tree->inode, needed);
- struct buffer_head *bh;
-- int i;
-+ int i, metadata = 0;
-
- if (IS_ERR(handle))
- return PTR_ERR(handle);
-+ if (S_ISDIR(tree->inode->i_mode) || S_ISLNK(tree->inode->i_mode))
-+ metadata = 1;
- if (from >= ex->ee_block && to == ex->ee_block + ex->ee_len - 1) {
- /* tail removal */
- unsigned long num, start;
-@@ -1928,7 +1930,7 @@ ext3_remove_blocks(struct ext3_extents_t
- bh = sb_find_get_block(tree->inode->i_sb, start + i);
- ext3_forget(handle, 0, tree->inode, bh, start + i);
- }
-- ext3_free_blocks(handle, tree->inode, start, num);
-+ ext3_free_blocks(handle, tree->inode, start, num, metadata);
- } else if (from == ex->ee_block && to <= ex->ee_block + ex->ee_len - 1) {
- printk("strange request: removal %lu-%lu from %u:%u\n",
- from, to, ex->ee_block, ex->ee_len);
-Index: linux-stage/fs/ext3/inode.c
-===================================================================
---- linux-stage.orig/fs/ext3/inode.c 2006-07-16 02:29:43.000000000 +0800
-+++ linux-stage/fs/ext3/inode.c 2006-07-16 02:29:49.000000000 +0800
-@@ -562,7 +562,7 @@ static int ext3_alloc_blocks(handle_t *h
- return ret;
- failed_out:
- for (i = 0; i <index; i++)
-- ext3_free_blocks(handle, inode, new_blocks[i], 1);
-+ ext3_free_blocks(handle, inode, new_blocks[i], 1, 1);
- return ret;
- }
-
-@@ -661,9 +661,9 @@ failed:
- ext3_journal_forget(handle, branch[i].bh);
- }
- for (i = 0; i <indirect_blks; i++)
-- ext3_free_blocks(handle, inode, new_blocks[i], 1);
-+ ext3_free_blocks(handle, inode, new_blocks[i], 1, 1);
-
-- ext3_free_blocks(handle, inode, new_blocks[i], num);
-+ ext3_free_blocks(handle, inode, new_blocks[i], num, 1);
-
- return err;
- }
-@@ -760,9 +760,9 @@ err_out:
- for (i = 1; i <= num; i++) {
- BUFFER_TRACE(where[i].bh, "call journal_forget");
- ext3_journal_forget(handle, where[i].bh);
-- ext3_free_blocks(handle,inode,le32_to_cpu(where[i-1].key),1);
-+ ext3_free_blocks(handle,inode,le32_to_cpu(where[i-1].key),1,1);
- }
-- ext3_free_blocks(handle, inode, le32_to_cpu(where[num].key), blks);
-+ ext3_free_blocks(handle, inode, le32_to_cpu(where[num].key), blks, 1);
-
- return err;
- }
-@@ -2007,7 +2007,7 @@ static void ext3_clear_blocks(handle_t *
- }
- }
-
-- ext3_free_blocks(handle, inode, block_to_free, count);
-+ ext3_free_blocks(handle, inode, block_to_free, count, 1);
- }
-
- /**
-@@ -2180,7 +2180,7 @@ static void ext3_free_branches(handle_t
- ext3_journal_test_restart(handle, inode);
- }
-
-- ext3_free_blocks(handle, inode, nr, 1);
-+ ext3_free_blocks(handle, inode, nr, 1, 1);
-
- if (parent_bh) {
- /*
-Index: linux-stage/fs/ext3/balloc.c
-===================================================================
---- linux-stage.orig/fs/ext3/balloc.c 2006-07-16 02:29:43.000000000 +0800
-+++ linux-stage/fs/ext3/balloc.c 2006-07-16 02:33:13.000000000 +0800
-@@ -79,7 +79,7 @@ struct ext3_group_desc * ext3_get_group_
- *
- * Return buffer_head on success or NULL in case of failure.
- */
--static struct buffer_head *
-+struct buffer_head *
- read_block_bitmap(struct super_block *sb, unsigned int block_group)
- {
- struct ext3_group_desc * desc;
-@@ -490,24 +490,6 @@ error_return:
- return;
- }
-
--/* Free given blocks, update quota and i_blocks field */
--void ext3_free_blocks(handle_t *handle, struct inode *inode,
-- ext3_fsblk_t block, unsigned long count)
--{
-- struct super_block * sb;
-- unsigned long dquot_freed_blocks;
--
-- sb = inode->i_sb;
-- if (!sb) {
-- printk ("ext3_free_blocks: nonexistent device");
-- return;
-- }
-- ext3_free_blocks_sb(handle, sb, block, count, &dquot_freed_blocks);
-- if (dquot_freed_blocks)
-- DQUOT_FREE_BLOCK(inode, dquot_freed_blocks);
-- return;
--}
--
- /*
- * For ext3 allocations, we must not reuse any blocks which are
- * allocated in the bitmap buffer's "last committed data" copy. This
-@@ -1463,7 +1445,7 @@ out:
- return 0;
- }
-
--ext3_fsblk_t ext3_new_block(handle_t *handle, struct inode *inode,
-+ext3_fsblk_t ext3_new_block_old(handle_t *handle, struct inode *inode,
- ext3_fsblk_t goal, int *errp)
- {
- unsigned long count = 1;
-Index: linux-stage/fs/ext3/xattr.c
-===================================================================
---- linux-stage.orig/fs/ext3/xattr.c 2006-07-16 02:29:43.000000000 +0800
-+++ linux-stage/fs/ext3/xattr.c 2006-07-16 02:29:49.000000000 +0800
-@@ -484,7 +484,7 @@ ext3_xattr_release_block(handle_t *handl
- ea_bdebug(bh, "refcount now=0; freeing");
- if (ce)
- mb_cache_entry_free(ce);
-- ext3_free_blocks(handle, inode, bh->b_blocknr, 1);
-+ ext3_free_blocks(handle, inode, bh->b_blocknr, 1, 1);
- get_bh(bh);
- ext3_forget(handle, 1, inode, bh, bh->b_blocknr);
- } else {
-@@ -805,7 +805,7 @@ inserted:
- new_bh = sb_getblk(sb, block);
- if (!new_bh) {
- getblk_failed:
-- ext3_free_blocks(handle, inode, block, 1);
-+ ext3_free_blocks(handle, inode, block, 1, 1);
- error = -EIO;
- goto cleanup;
- }
-Index: linux-stage/fs/ext3/mballoc.c
-===================================================================
---- /dev/null 1970-01-01 00:00:00.000000000 +0000
-+++ linux-stage/fs/ext3/mballoc.c 2006-07-16 02:29:49.000000000 +0800
-@@ -0,0 +1,2730 @@
-+/*
-+ * Copyright (c) 2003-2005, Cluster File Systems, Inc, info@clusterfs.com
-+ * Written by Alex Tomas <alex@clusterfs.com>
-+ *
-+ * This program is free software; you can redistribute it and/or modify
-+ * it under the terms of the GNU General Public License version 2 as
-+ * published by the Free Software Foundation.
-+ *
-+ * This program is distributed in the hope that it will be useful,
-+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
-+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-+ * GNU General Public License for more details.
-+ *
-+ * You should have received a copy of the GNU General Public License
-+ * along with this program; if not, write to the Free Software
-+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
-+ */
-+
-+
-+/*
-+ * mballoc.c contains the multiblock allocation routines
-+ */
-+
-+#include <linux/config.h>
-+#include <linux/time.h>
-+#include <linux/fs.h>
-+#include <linux/namei.h>
-+#include <linux/jbd.h>
-+#include <linux/ext3_fs.h>
-+#include <linux/ext3_jbd.h>
-+#include <linux/quotaops.h>
-+#include <linux/buffer_head.h>
-+#include <linux/module.h>
-+#include <linux/swap.h>
-+#include <linux/proc_fs.h>
-+#include <linux/pagemap.h>
-+#include <linux/seq_file.h>
-+
-+/*
-+ * TODO:
-+ * - bitmap read-ahead (proposed by Oleg Drokin aka green)
-+ * - track min/max extents in each group for better group selection
-+ * - mb_mark_used() may allocate chunk right after splitting buddy
-+ *   - special flag to advise the allocator to look for requested + N blocks
-+ * this may improve interaction between extents and mballoc
-+ * - tree of groups sorted by number of free blocks
-+ * - percpu reservation code (hotpath)
-+ * - error handling
-+ */
-+
-+/*
-+ * with AGGRESSIVE_CHECK the allocator runs consistency checks over
-+ * its structures. these checks slow things down a lot
-+ */
-+#define AGGRESSIVE_CHECK__
-+
-+/*
-+ * with MB_DEBUG the allocator prints verbose debugging messages
-+ */
-+#define MB_DEBUG__
-+#ifdef MB_DEBUG
-+#define mb_debug(fmt,a...) printk(fmt, ##a)
-+#else
-+#define mb_debug(fmt,a...)
-+#endif
-+
-+/*
-+ * with EXT3_MB_HISTORY mballoc stores last N allocations in memory
-+ * and you can monitor it in /proc/fs/ext3/<dev>/mb_history
-+ */
-+#define EXT3_MB_HISTORY
-+
-+/*
-+ * How long mballoc can look for the best extent (in found extents)
-+ */
-+long ext3_mb_max_to_scan = 500;
-+
-+/*
-+ * How long mballoc must keep looking for the best extent
-+ */
-+long ext3_mb_min_to_scan = 30;
-+
-+/*
-+ * with 'ext3_mb_stats' the allocator will collect stats that will be
-+ * shown at umount. The collecting has a cost, though!
-+ */
-+
-+long ext3_mb_stats = 1;
-+
-+/*
-+ * requests of at least this order use the 2^N buddy search
-+ */
-+long ext3_mb_order2_reqs = 8;
-+
-+#ifdef EXT3_BB_MAX_BLOCKS
-+#undef EXT3_BB_MAX_BLOCKS
-+#endif
-+#define EXT3_BB_MAX_BLOCKS 30
-+
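-+/*
-+ * Blocks freed within a transaction are queued on these per-group
-+ * records and only returned to the buddy cache once the transaction
-+ * has committed (see ext3_mb_free_committed_blocks()), so freed
-+ * blocks are not reused before they are safe on disk.
-+ */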
-+struct ext3_free_metadata {
-+ unsigned short group;
-+ unsigned short num;
-+ unsigned short blocks[EXT3_BB_MAX_BLOCKS];
-+ struct list_head list;
-+};
-+
-+struct ext3_group_info {
-+ unsigned long bb_state;
-+ unsigned long bb_tid;
-+ struct ext3_free_metadata *bb_md_cur;
-+ unsigned short bb_first_free;
-+ unsigned short bb_free;
-+ unsigned short bb_fragments;
-+ unsigned short bb_counters[];
-+};
-+
-+
-+#define EXT3_GROUP_INFO_NEED_INIT_BIT 0
-+#define EXT3_GROUP_INFO_LOCKED_BIT 1
-+
-+#define EXT3_MB_GRP_NEED_INIT(grp) \
-+ (test_bit(EXT3_GROUP_INFO_NEED_INIT_BIT, &(grp)->bb_state))
-+
-+struct ext3_free_extent {
-+ __u16 fe_start;
-+ __u16 fe_len;
-+ __u16 fe_group;
-+};
-+
-+struct ext3_allocation_context {
-+ struct super_block *ac_sb;
-+
-+ /* search goals */
-+ struct ext3_free_extent ac_g_ex;
-+
-+ /* the best found extent */
-+ struct ext3_free_extent ac_b_ex;
-+
-+ /* number of iterations done. we have to track to limit searching */
-+ unsigned long ac_ex_scanned;
-+ __u16 ac_groups_scanned;
-+ __u16 ac_found;
-+ __u16 ac_tail;
-+ __u16 ac_buddy;
-+ __u8 ac_status;
-+ __u8 ac_flags; /* allocation hints */
-+ __u8 ac_criteria;
-+ __u8 ac_repeats;
-+ __u8 ac_2order; /* if request is to allocate 2^N blocks and
-+ * N > 0, the field stores N, otherwise 0 */
-+
-+ struct page *ac_buddy_page;
-+ struct page *ac_bitmap_page;
-+};
-+
-+#define AC_STATUS_CONTINUE 1
-+#define AC_STATUS_FOUND 2
-+#define AC_STATUS_BREAK 3
-+
-+struct ext3_mb_history {
-+ struct ext3_free_extent goal; /* goal allocation */
-+ struct ext3_free_extent result; /* result allocation */
-+ unsigned pid;
-+ unsigned ino;
-+ __u16 found; /* how many extents have been found */
-+ __u16 groups; /* how many groups have been scanned */
-+ __u16 tail; /* what tail broke some buddy */
-+ __u16 buddy; /* buddy the tail ^^^ broke */
-+ __u8 cr; /* which phase the result extent was found at */
-+ __u8 merged;
-+};
-+
-+struct ext3_buddy {
-+ struct page *bd_buddy_page;
-+ void *bd_buddy;
-+ struct page *bd_bitmap_page;
-+ void *bd_bitmap;
-+ struct ext3_group_info *bd_info;
-+ struct super_block *bd_sb;
-+ __u16 bd_blkbits;
-+ __u16 bd_group;
-+};
-+#define EXT3_MB_BITMAP(e3b) ((e3b)->bd_bitmap)
-+#define EXT3_MB_BUDDY(e3b) ((e3b)->bd_buddy)
-+
-+#ifndef EXT3_MB_HISTORY
-+#define ext3_mb_store_history(sb,ino,ac)
-+#else
-+static void ext3_mb_store_history(struct super_block *, unsigned ino,
-+ struct ext3_allocation_context *ac);
-+#endif
-+
-+#define in_range(b, first, len) ((b) >= (first) && (b) <= (first) + (len) - 1)
-+
-+static struct proc_dir_entry *proc_root_ext3;
-+
-+int ext3_create (struct inode *, struct dentry *, int, struct nameidata *);
-+struct buffer_head * read_block_bitmap(struct super_block *, unsigned int);
-+int ext3_mb_reserve_blocks(struct super_block *, int);
-+void ext3_mb_release_blocks(struct super_block *, int);
-+void ext3_mb_poll_new_transaction(struct super_block *, handle_t *);
-+void ext3_mb_free_committed_blocks(struct super_block *);
-+
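-+/*
-+ * The ext2_*_bit() helpers expect a long-aligned base address, so
-+ * fold any misalignment of 'addr' into the bit offset before use.
-+ */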
-+#if BITS_PER_LONG == 64
-+#define mb_correct_addr_and_bit(bit,addr) \
-+{ \
-+ bit += ((unsigned long) addr & 7UL) << 3; \
-+ addr = (void *) ((unsigned long) addr & ~7UL); \
-+}
-+#elif BITS_PER_LONG == 32
-+#define mb_correct_addr_and_bit(bit,addr) \
-+{ \
-+ bit += ((unsigned long) addr & 3UL) << 3; \
-+ addr = (void *) ((unsigned long) addr & ~3UL); \
-+}
-+#else
-+#error "unsupported BITS_PER_LONG"
-+#endif
-+
-+static inline int mb_test_bit(int bit, void *addr)
-+{
-+ mb_correct_addr_and_bit(bit,addr);
-+ return ext2_test_bit(bit, addr);
-+}
-+
-+static inline void mb_set_bit(int bit, void *addr)
-+{
-+ mb_correct_addr_and_bit(bit,addr);
-+ ext2_set_bit(bit, addr);
-+}
-+
-+static inline void mb_set_bit_atomic(int bit, void *addr)
-+{
-+ mb_correct_addr_and_bit(bit,addr);
-+ ext2_set_bit_atomic(NULL, bit, addr);
-+}
-+
-+static inline void mb_clear_bit(int bit, void *addr)
-+{
-+ mb_correct_addr_and_bit(bit,addr);
-+ ext2_clear_bit(bit, addr);
-+}
-+
-+static inline void mb_clear_bit_atomic(int bit, void *addr)
-+{
-+ mb_correct_addr_and_bit(bit,addr);
-+ ext2_clear_bit_atomic(NULL, bit, addr);
-+}
-+
-+static inline int mb_find_next_zero_bit(void *addr, int max, int start)
-+{
-+ int fix;
-+#if BITS_PER_LONG == 64
-+ fix = ((unsigned long) addr & 7UL) << 3;
-+ addr = (void *) ((unsigned long) addr & ~7UL);
-+#elif BITS_PER_LONG == 32
-+ fix = ((unsigned long) addr & 3UL) << 3;
-+ addr = (void *) ((unsigned long) addr & ~3UL);
-+#else
-+#error "unsupported BITS_PER_LONG"
-+#endif
-+ max += fix;
-+ start += fix;
-+ return ext2_find_next_zero_bit(addr, max, start) - fix;
-+}
-+
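-+/*
-+ * Return the buddy bitmap for the given order within a group.  Order 0
-+ * is the block bitmap itself; higher orders live at the precomputed
-+ * s_mb_offsets[] offsets inside the buddy block, with s_mb_maxs[] bits.
-+ */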
-+static inline void *mb_find_buddy(struct ext3_buddy *e3b, int order, int *max)
-+{
-+ char *bb;
-+
-+ J_ASSERT(EXT3_MB_BITMAP(e3b) != EXT3_MB_BUDDY(e3b));
-+ J_ASSERT(max != NULL);
-+
-+ if (order > e3b->bd_blkbits + 1) {
-+ *max = 0;
-+ return NULL;
-+ }
-+
-+ /* at order 0 we see each particular block */
-+ *max = 1 << (e3b->bd_blkbits + 3);
-+ if (order == 0)
-+ return EXT3_MB_BITMAP(e3b);
-+
-+ bb = EXT3_MB_BUDDY(e3b) + EXT3_SB(e3b->bd_sb)->s_mb_offsets[order];
-+ *max = EXT3_SB(e3b->bd_sb)->s_mb_maxs[order];
-+
-+ return bb;
-+}
-+
-+#ifdef AGGRESSIVE_CHECK
-+
-+static void mb_check_buddy(struct ext3_buddy *e3b)
-+{
-+ int order = e3b->bd_blkbits + 1;
-+ int max, max2, i, j, k, count;
-+ int fragments = 0, fstart;
-+ void *buddy, *buddy2;
-+
-+ if (!test_opt(e3b->bd_sb, MBALLOC))
-+ return;
-+
-+ {
-+ static int mb_check_counter = 0;
-+ if (mb_check_counter++ % 300 != 0)
-+ return;
-+ }
-+
-+ while (order > 1) {
-+ buddy = mb_find_buddy(e3b, order, &max);
-+ J_ASSERT(buddy);
-+ buddy2 = mb_find_buddy(e3b, order - 1, &max2);
-+ J_ASSERT(buddy2);
-+ J_ASSERT(buddy != buddy2);
-+ J_ASSERT(max * 2 == max2);
-+
-+ count = 0;
-+ for (i = 0; i < max; i++) {
-+
-+ if (mb_test_bit(i, buddy)) {
-+ /* only single bit in buddy2 may be 1 */
-+ if (!mb_test_bit(i << 1, buddy2))
-+ J_ASSERT(mb_test_bit((i<<1)+1, buddy2));
-+ else if (!mb_test_bit((i << 1) + 1, buddy2))
-+ J_ASSERT(mb_test_bit(i << 1, buddy2));
-+ continue;
-+ }
-+
-+ /* both bits in buddy2 must be 0 */
-+ J_ASSERT(mb_test_bit(i << 1, buddy2));
-+ J_ASSERT(mb_test_bit((i << 1) + 1, buddy2));
-+
-+ for (j = 0; j < (1 << order); j++) {
-+ k = (i * (1 << order)) + j;
-+ J_ASSERT(!mb_test_bit(k, EXT3_MB_BITMAP(e3b)));
-+ }
-+ count++;
-+ }
-+ J_ASSERT(e3b->bd_info->bb_counters[order] == count);
-+ order--;
-+ }
-+
-+ fstart = -1;
-+ buddy = mb_find_buddy(e3b, 0, &max);
-+ for (i = 0; i < max; i++) {
-+ if (!mb_test_bit(i, buddy)) {
-+ J_ASSERT(i >= e3b->bd_info->bb_first_free);
-+ if (fstart == -1) {
-+ fragments++;
-+ fstart = i;
-+ }
-+ continue;
-+ }
-+ fstart = -1;
-+ /* check used bits only */
-+ for (j = 0; j < e3b->bd_blkbits + 1; j++) {
-+ buddy2 = mb_find_buddy(e3b, j, &max2);
-+ k = i >> j;
-+ J_ASSERT(k < max2);
-+ J_ASSERT(mb_test_bit(k, buddy2));
-+ }
-+ }
-+ J_ASSERT(!EXT3_MB_GRP_NEED_INIT(e3b->bd_info));
-+ J_ASSERT(e3b->bd_info->bb_fragments == fragments);
-+}
-+
-+#else
-+#define mb_check_buddy(e3b)
-+#endif
-+
-+/* find most significant bit */
-+static inline int fmsb(unsigned short word)
-+{
-+ int order;
-+
-+ if (word > 255) {
-+ order = 7;
-+ word >>= 8;
-+ } else {
-+ order = -1;
-+ }
-+
-+ do {
-+ order++;
-+ word >>= 1;
-+ } while (word != 0);
-+
-+ return order;
-+}
-+
-+static inline void
-+ext3_mb_mark_free_simple(struct super_block *sb, void *buddy, unsigned first,
-+ int len, struct ext3_group_info *grp)
-+{
-+ struct ext3_sb_info *sbi = EXT3_SB(sb);
-+ unsigned short min, max, chunk, border;
-+
-+ mb_debug("mark %u/%u free\n", first, len);
-+ J_ASSERT(len < EXT3_BLOCKS_PER_GROUP(sb));
-+
-+ border = 2 << sb->s_blocksize_bits;
-+
-+ while (len > 0) {
-+		/* find how many blocks can be covered from this position */
-+ max = ffs(first | border) - 1;
-+
-+		/* find the largest power-of-2 chunk of blocks we can mark */
-+ min = fmsb(len);
-+
-+ mb_debug(" %u/%u -> max %u, min %u\n",
-+ first & ((2 << sb->s_blocksize_bits) - 1),
-+ len, max, min);
-+
-+ if (max < min)
-+ min = max;
-+ chunk = 1 << min;
-+
-+ /* mark multiblock chunks only */
-+ grp->bb_counters[min]++;
-+ if (min > 0) {
-+ mb_debug(" set %u at %u \n", first >> min,
-+ sbi->s_mb_offsets[min]);
-+ mb_clear_bit(first >> min, buddy + sbi->s_mb_offsets[min]);
-+ }
-+
-+ len -= chunk;
-+ first += chunk;
-+ }
-+}
-+
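-+/*
-+ * Build the buddy data for a group from its block bitmap: walk the
-+ * runs of free blocks, mark them in the per-order buddy bitmaps and
-+ * count free blocks and fragments along the way.
-+ */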
-+static void
-+ext3_mb_generate_buddy(struct super_block *sb, void *buddy, void *bitmap,
-+ int group)
-+{
-+ struct ext3_group_info *grp = EXT3_GROUP_INFO(sb, group);
-+ unsigned short max = EXT3_BLOCKS_PER_GROUP(sb);
-+ unsigned short i = 0, first, len;
-+ unsigned free = 0, fragments = 0;
-+ unsigned long long period = get_cycles();
-+
-+ i = mb_find_next_zero_bit(bitmap, max, 0);
-+ grp->bb_first_free = i;
-+ while (i < max) {
-+ fragments++;
-+ first = i;
-+ i = ext2_find_next_le_bit(bitmap, max, i);
-+ len = i - first;
-+ free += len;
-+ if (len > 1)
-+ ext3_mb_mark_free_simple(sb, buddy, first, len, grp);
-+ else
-+ grp->bb_counters[0]++;
-+ if (i < max)
-+ i = mb_find_next_zero_bit(bitmap, max, i);
-+ }
-+ grp->bb_fragments = fragments;
-+
-+	/* bb_state shouldn't be modified because all
-+	 * others wait for init completion on the page lock */
-+ clear_bit(EXT3_GROUP_INFO_NEED_INIT_BIT, &grp->bb_state);
-+ if (free != grp->bb_free) {
-+ printk("EXT3-fs: group %u: %u blocks in bitmap, %u in gd\n",
-+ group, free, grp->bb_free);
-+ grp->bb_free = free;
-+ }
-+
-+ period = get_cycles() - period;
-+ spin_lock(&EXT3_SB(sb)->s_bal_lock);
-+ EXT3_SB(sb)->s_mb_buddies_generated++;
-+ EXT3_SB(sb)->s_mb_generation_time += period;
-+ spin_unlock(&EXT3_SB(sb)->s_bal_lock);
-+}
-+
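-+/*
-+ * Fill one page of the buddy cache.  Each group uses two consecutive
-+ * blocks in the buddy-cache inode: an even block holding a copy of the
-+ * on-disk block bitmap and an odd block holding the generated buddy data.
-+ */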
-+static int ext3_mb_init_cache(struct page *page)
-+{
-+ int blocksize, blocks_per_page, groups_per_page;
-+ int err = 0, i, first_group, first_block;
-+ struct super_block *sb;
-+ struct buffer_head *bhs;
-+ struct buffer_head **bh;
-+ struct inode *inode;
-+ char *data, *bitmap;
-+
-+ mb_debug("init page %lu\n", page->index);
-+
-+ inode = page->mapping->host;
-+ sb = inode->i_sb;
-+ blocksize = 1 << inode->i_blkbits;
-+ blocks_per_page = PAGE_CACHE_SIZE / blocksize;
-+
-+ groups_per_page = blocks_per_page >> 1;
-+ if (groups_per_page == 0)
-+ groups_per_page = 1;
-+
-+ /* allocate buffer_heads to read bitmaps */
-+ if (groups_per_page > 1) {
-+ err = -ENOMEM;
-+ i = sizeof(struct buffer_head *) * groups_per_page;
-+ bh = kmalloc(i, GFP_NOFS);
-+ if (bh == NULL)
-+ goto out;
-+ memset(bh, 0, i);
-+ } else
-+ bh = &bhs;
-+
-+ first_group = page->index * blocks_per_page / 2;
-+
-+ /* read all groups the page covers into the cache */
-+ for (i = 0; i < groups_per_page; i++) {
-+ struct ext3_group_desc * desc;
-+
-+ if (first_group + i >= EXT3_SB(sb)->s_groups_count)
-+ break;
-+
-+ err = -EIO;
-+ desc = ext3_get_group_desc(sb, first_group + i, NULL);
-+ if (desc == NULL)
-+ goto out;
-+
-+ err = -ENOMEM;
-+ bh[i] = sb_getblk(sb, le32_to_cpu(desc->bg_block_bitmap));
-+ if (bh[i] == NULL)
-+ goto out;
-+
-+ if (buffer_uptodate(bh[i]))
-+ continue;
-+
-+ lock_buffer(bh[i]);
-+ if (buffer_uptodate(bh[i])) {
-+ unlock_buffer(bh[i]);
-+ continue;
-+ }
-+
-+ get_bh(bh[i]);
-+ bh[i]->b_end_io = end_buffer_read_sync;
-+ submit_bh(READ, bh[i]);
-+ mb_debug("read bitmap for group %u\n", first_group + i);
-+ }
-+
-+ /* wait for I/O completion */
-+ for (i = 0; i < groups_per_page && bh[i]; i++)
-+ wait_on_buffer(bh[i]);
-+
-+ err = -EIO;
-+ for (i = 0; i < groups_per_page && bh[i]; i++)
-+ if (!buffer_uptodate(bh[i]))
-+ goto out;
-+
-+ first_block = page->index * blocks_per_page;
-+ for (i = 0; i < blocks_per_page; i++) {
-+ int group;
-+
-+ group = (first_block + i) >> 1;
-+ if (group >= EXT3_SB(sb)->s_groups_count)
-+ break;
-+
-+ data = page_address(page) + (i * blocksize);
-+ bitmap = bh[group - first_group]->b_data;
-+
-+ if ((first_block + i) & 1) {
-+ /* this is block of buddy */
-+ mb_debug("put buddy for group %u in page %lu/%x\n",
-+ group, page->index, i * blocksize);
-+ memset(data, 0xff, blocksize);
-+ EXT3_GROUP_INFO(sb, group)->bb_fragments = 0;
-+ memset(EXT3_GROUP_INFO(sb, group)->bb_counters, 0,
-+ sizeof(unsigned short)*(sb->s_blocksize_bits+2));
-+ ext3_mb_generate_buddy(sb, data, bitmap, group);
-+ } else {
-+ /* this is block of bitmap */
-+ mb_debug("put bitmap for group %u in page %lu/%x\n",
-+ group, page->index, i * blocksize);
-+ memcpy(data, bitmap, blocksize);
-+ }
-+ }
-+ SetPageUptodate(page);
-+
-+out:
-+ if (bh) {
-+ for (i = 0; i < groups_per_page && bh[i]; i++)
-+ brelse(bh[i]);
-+ if (bh != &bhs)
-+ kfree(bh);
-+ }
-+ return err;
-+}
-+
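-+/*
-+ * Look up (initializing on demand) the cached bitmap and buddy pages
-+ * for a group and fill in the ext3_buddy descriptor.  References to
-+ * both pages are held until ext3_mb_release_desc() is called.
-+ */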
-+static int ext3_mb_load_buddy(struct super_block *sb, int group,
-+ struct ext3_buddy *e3b)
-+{
-+ struct ext3_sb_info *sbi = EXT3_SB(sb);
-+ struct inode *inode = sbi->s_buddy_cache;
-+ int blocks_per_page, block, pnum, poff;
-+ struct page *page;
-+
-+ mb_debug("load group %u\n", group);
-+
-+ blocks_per_page = PAGE_CACHE_SIZE / sb->s_blocksize;
-+
-+ e3b->bd_blkbits = sb->s_blocksize_bits;
-+ e3b->bd_info = EXT3_GROUP_INFO(sb, group);
-+ e3b->bd_sb = sb;
-+ e3b->bd_group = group;
-+ e3b->bd_buddy_page = NULL;
-+ e3b->bd_bitmap_page = NULL;
-+
-+ block = group * 2;
-+ pnum = block / blocks_per_page;
-+ poff = block % blocks_per_page;
-+
-+	/* we could use find_or_create_page(), but it locks the page,
-+	 * which we'd like to avoid in the fast path ... */
-+ page = find_get_page(inode->i_mapping, pnum);
-+ if (page == NULL || !PageUptodate(page)) {
-+ if (page)
-+ page_cache_release(page);
-+ page = find_or_create_page(inode->i_mapping, pnum, GFP_NOFS);
-+ if (page) {
-+ BUG_ON(page->mapping != inode->i_mapping);
-+ if (!PageUptodate(page))
-+ ext3_mb_init_cache(page);
-+ unlock_page(page);
-+ }
-+ }
-+ if (page == NULL || !PageUptodate(page))
-+ goto err;
-+ e3b->bd_bitmap_page = page;
-+ e3b->bd_bitmap = page_address(page) + (poff * sb->s_blocksize);
-+ mark_page_accessed(page);
-+
-+ block++;
-+ pnum = block / blocks_per_page;
-+ poff = block % blocks_per_page;
-+
-+ page = find_get_page(inode->i_mapping, pnum);
-+ if (page == NULL || !PageUptodate(page)) {
-+ if (page)
-+ page_cache_release(page);
-+ page = find_or_create_page(inode->i_mapping, pnum, GFP_NOFS);
-+ if (page) {
-+ BUG_ON(page->mapping != inode->i_mapping);
-+ if (!PageUptodate(page))
-+ ext3_mb_init_cache(page);
-+ unlock_page(page);
-+ }
-+ }
-+ if (page == NULL || !PageUptodate(page))
-+ goto err;
-+ e3b->bd_buddy_page = page;
-+ e3b->bd_buddy = page_address(page) + (poff * sb->s_blocksize);
-+ mark_page_accessed(page);
-+
-+ J_ASSERT(e3b->bd_bitmap_page != NULL);
-+ J_ASSERT(e3b->bd_buddy_page != NULL);
-+
-+ return 0;
-+
-+err:
-+ if (e3b->bd_bitmap_page)
-+ page_cache_release(e3b->bd_bitmap_page);
-+ if (e3b->bd_buddy_page)
-+ page_cache_release(e3b->bd_buddy_page);
-+ e3b->bd_buddy = NULL;
-+ e3b->bd_bitmap = NULL;
-+ return -EIO;
-+}
-+
-+static void ext3_mb_release_desc(struct ext3_buddy *e3b)
-+{
-+ if (e3b->bd_bitmap_page)
-+ page_cache_release(e3b->bd_bitmap_page);
-+ if (e3b->bd_buddy_page)
-+ page_cache_release(e3b->bd_buddy_page);
-+}
-+
-+
-+static inline void
-+ext3_lock_group(struct super_block *sb, int group)
-+{
-+ bit_spin_lock(EXT3_GROUP_INFO_LOCKED_BIT,
-+ &EXT3_GROUP_INFO(sb, group)->bb_state);
-+}
-+
-+static inline void
-+ext3_unlock_group(struct super_block *sb, int group)
-+{
-+ bit_spin_unlock(EXT3_GROUP_INFO_LOCKED_BIT,
-+ &EXT3_GROUP_INFO(sb, group)->bb_state);
-+}
-+
-+static int mb_find_order_for_block(struct ext3_buddy *e3b, int block)
-+{
-+ int order = 1;
-+ void *bb;
-+
-+ J_ASSERT(EXT3_MB_BITMAP(e3b) != EXT3_MB_BUDDY(e3b));
-+ J_ASSERT(block < (1 << (e3b->bd_blkbits + 3)));
-+
-+ bb = EXT3_MB_BUDDY(e3b);
-+ while (order <= e3b->bd_blkbits + 1) {
-+ block = block >> 1;
-+ if (!mb_test_bit(block, bb)) {
-+ /* this block is part of buddy of order 'order' */
-+ return order;
-+ }
-+ bb += 1 << (e3b->bd_blkbits - order);
-+ order++;
-+ }
-+ return 0;
-+}
-+
-+static inline void mb_clear_bits(void *bm, int cur, int len)
-+{
-+ __u32 *addr;
-+
-+ len = cur + len;
-+ while (cur < len) {
-+ if ((cur & 31) == 0 && (len - cur) >= 32) {
-+ /* fast path: clear whole word at once */
-+ addr = bm + (cur >> 3);
-+ *addr = 0;
-+ cur += 32;
-+ continue;
-+ }
-+ mb_clear_bit_atomic(cur, bm);
-+ cur++;
-+ }
-+}
-+
-+static inline void mb_set_bits(void *bm, int cur, int len)
-+{
-+ __u32 *addr;
-+
-+ len = cur + len;
-+ while (cur < len) {
-+ if ((cur & 31) == 0 && (len - cur) >= 32) {
-+			/* fast path: set whole word at once */
-+ addr = bm + (cur >> 3);
-+ *addr = 0xffffffff;
-+ cur += 32;
-+ continue;
-+ }
-+ mb_set_bit_atomic(cur, bm);
-+ cur++;
-+ }
-+}
-+
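-+/*
-+ * Mark 'count' blocks starting at 'first' free: clear them in the
-+ * order-0 bitmap and coalesce each freed block with its buddy into
-+ * progressively higher orders while both halves are free.
-+ */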
-+static int mb_free_blocks(struct ext3_buddy *e3b, int first, int count)
-+{
-+ int block = 0, max = 0, order;
-+ void *buddy, *buddy2;
-+
-+ mb_check_buddy(e3b);
-+
-+ e3b->bd_info->bb_free += count;
-+ if (first < e3b->bd_info->bb_first_free)
-+ e3b->bd_info->bb_first_free = first;
-+
-+ /* let's maintain fragments counter */
-+ if (first != 0)
-+ block = !mb_test_bit(first - 1, EXT3_MB_BITMAP(e3b));
-+ if (first + count < EXT3_SB(e3b->bd_sb)->s_mb_maxs[0])
-+ max = !mb_test_bit(first + count, EXT3_MB_BITMAP(e3b));
-+ if (block && max)
-+ e3b->bd_info->bb_fragments--;
-+ else if (!block && !max)
-+ e3b->bd_info->bb_fragments++;
-+
-+ /* let's maintain buddy itself */
-+ while (count-- > 0) {
-+ block = first++;
-+ order = 0;
-+
-+ J_ASSERT(mb_test_bit(block, EXT3_MB_BITMAP(e3b)));
-+ mb_clear_bit(block, EXT3_MB_BITMAP(e3b));
-+ e3b->bd_info->bb_counters[order]++;
-+
-+ /* start of the buddy */
-+ buddy = mb_find_buddy(e3b, order, &max);
-+
-+ do {
-+ block &= ~1UL;
-+ if (mb_test_bit(block, buddy) ||
-+ mb_test_bit(block + 1, buddy))
-+ break;
-+
-+ /* both the buddies are free, try to coalesce them */
-+ buddy2 = mb_find_buddy(e3b, order + 1, &max);
-+
-+ if (!buddy2)
-+ break;
-+
-+ if (order > 0) {
-+				/* mark the pair used at this order; the
-+				 * order-0 bitmap tracks real free blocks,
-+				 * so its bits are left clear */
-+ mb_set_bit(block, buddy);
-+ mb_set_bit(block + 1, buddy);
-+ }
-+ e3b->bd_info->bb_counters[order]--;
-+ e3b->bd_info->bb_counters[order]--;
-+
-+ block = block >> 1;
-+ order++;
-+ e3b->bd_info->bb_counters[order]++;
-+
-+ mb_clear_bit(block, buddy2);
-+ buddy = buddy2;
-+ } while (1);
-+ }
-+ mb_check_buddy(e3b);
-+
-+ return 0;
-+}
-+
-+static int mb_find_extent(struct ext3_buddy *e3b, int order, int block,
-+ int needed, struct ext3_free_extent *ex)
-+{
-+ int next = block, max, ord;
-+ void *buddy;
-+
-+ J_ASSERT(ex != NULL);
-+
-+ buddy = mb_find_buddy(e3b, order, &max);
-+ J_ASSERT(buddy);
-+ J_ASSERT(block < max);
-+ if (mb_test_bit(block, buddy)) {
-+ ex->fe_len = 0;
-+ ex->fe_start = 0;
-+ ex->fe_group = 0;
-+ return 0;
-+ }
-+
-+ if (likely(order == 0)) {
-+ /* find actual order */
-+ order = mb_find_order_for_block(e3b, block);
-+ block = block >> order;
-+ }
-+
-+ ex->fe_len = 1 << order;
-+ ex->fe_start = block << order;
-+ ex->fe_group = e3b->bd_group;
-+
-+ /* calc difference from given start */
-+ next = next - ex->fe_start;
-+ ex->fe_len -= next;
-+ ex->fe_start += next;
-+
-+ while (needed > ex->fe_len && (buddy = mb_find_buddy(e3b, order, &max))) {
-+
-+ if (block + 1 >= max)
-+ break;
-+
-+ next = (block + 1) * (1 << order);
-+ if (mb_test_bit(next, EXT3_MB_BITMAP(e3b)))
-+ break;
-+
-+ ord = mb_find_order_for_block(e3b, next);
-+
-+ order = ord;
-+ block = next >> order;
-+ ex->fe_len += 1 << order;
-+ }
-+
-+ J_ASSERT(ex->fe_start + ex->fe_len <= (1 << (e3b->bd_blkbits + 3)));
-+ return ex->fe_len;
-+}
-+
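-+/*
-+ * Mark the extent in-use in the buddy data: split larger buddies as
-+ * needed, set the corresponding bits in the order-0 bitmap and return
-+ * the tail length/order of the first buddy that had to be broken.
-+ */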
-+static int mb_mark_used(struct ext3_buddy *e3b, struct ext3_free_extent *ex)
-+{
-+ int ord, mlen = 0, max = 0, cur;
-+ int start = ex->fe_start;
-+ int len = ex->fe_len;
-+ unsigned ret = 0;
-+ int len0 = len;
-+ void *buddy;
-+
-+ mb_check_buddy(e3b);
-+
-+ e3b->bd_info->bb_free -= len;
-+ if (e3b->bd_info->bb_first_free == start)
-+ e3b->bd_info->bb_first_free += len;
-+
-+ /* let's maintain fragments counter */
-+ if (start != 0)
-+ mlen = !mb_test_bit(start - 1, EXT3_MB_BITMAP(e3b));
-+ if (start + len < EXT3_SB(e3b->bd_sb)->s_mb_maxs[0])
-+ max = !mb_test_bit(start + len, EXT3_MB_BITMAP(e3b));
-+ if (mlen && max)
-+ e3b->bd_info->bb_fragments++;
-+ else if (!mlen && !max)
-+ e3b->bd_info->bb_fragments--;
-+
-+ /* let's maintain buddy itself */
-+ while (len) {
-+ ord = mb_find_order_for_block(e3b, start);
-+
-+ if (((start >> ord) << ord) == start && len >= (1 << ord)) {
-+ /* the whole chunk may be allocated at once! */
-+ mlen = 1 << ord;
-+ buddy = mb_find_buddy(e3b, ord, &max);
-+ J_ASSERT((start >> ord) < max);
-+ mb_set_bit(start >> ord, buddy);
-+ e3b->bd_info->bb_counters[ord]--;
-+ start += mlen;
-+ len -= mlen;
-+ J_ASSERT(len >= 0);
-+ continue;
-+ }
-+
-+ /* store for history */
-+ if (ret == 0)
-+ ret = len | (ord << 16);
-+
-+ /* we have to split large buddy */
-+ J_ASSERT(ord > 0);
-+ buddy = mb_find_buddy(e3b, ord, &max);
-+ mb_set_bit(start >> ord, buddy);
-+ e3b->bd_info->bb_counters[ord]--;
-+
-+ ord--;
-+ cur = (start >> ord) & ~1U;
-+ buddy = mb_find_buddy(e3b, ord, &max);
-+ mb_clear_bit(cur, buddy);
-+ mb_clear_bit(cur + 1, buddy);
-+ e3b->bd_info->bb_counters[ord]++;
-+ e3b->bd_info->bb_counters[ord]++;
-+ }
-+
-+ /* now drop all the bits in bitmap */
-+ mb_set_bits(EXT3_MB_BITMAP(e3b), ex->fe_start, len0);
-+
-+ mb_check_buddy(e3b);
-+
-+ return ret;
-+}
-+
-+/*
-+ * Must be called under group lock!
-+ */
-+static void ext3_mb_use_best_found(struct ext3_allocation_context *ac,
-+ struct ext3_buddy *e3b)
-+{
-+ unsigned long ret;
-+
-+ ac->ac_b_ex.fe_len = min(ac->ac_b_ex.fe_len, ac->ac_g_ex.fe_len);
-+ ret = mb_mark_used(e3b, &ac->ac_b_ex);
-+
-+ ac->ac_status = AC_STATUS_FOUND;
-+ ac->ac_tail = ret & 0xffff;
-+ ac->ac_buddy = ret >> 16;
-+
-+ /* hold in-core structures until allocated
-+ * blocks are marked non-free in on-disk bitmap */
-+ ac->ac_buddy_page = e3b->bd_buddy_page;
-+ page_cache_get(e3b->bd_buddy_page);
-+ ac->ac_bitmap_page = e3b->bd_bitmap_page;
-+ page_cache_get(e3b->bd_bitmap_page);
-+}
-+
-+/*
-+ * The routine checks whether the found extent is good enough. If it is,
-+ * the extent gets marked used and a flag is set in the context
-+ * to stop scanning. Otherwise, the extent is compared with the
-+ * previously found extent and, if the new one is better, it is stored
-+ * in the context. Later, the best found extent will be used if
-+ * mballoc can't find a good enough extent.
-+ *
-+ * FIXME: the real allocation policy is yet to be designed!
-+ */
-+static void ext3_mb_measure_extent(struct ext3_allocation_context *ac,
-+ struct ext3_free_extent *ex,
-+ struct ext3_buddy *e3b)
-+{
-+ struct ext3_free_extent *bex = &ac->ac_b_ex;
-+ struct ext3_free_extent *gex = &ac->ac_g_ex;
-+
-+ J_ASSERT(ex->fe_len > 0);
-+ J_ASSERT(ex->fe_len < (1 << ac->ac_sb->s_blocksize_bits) * 8);
-+ J_ASSERT(ex->fe_start < (1 << ac->ac_sb->s_blocksize_bits) * 8);
-+
-+ ac->ac_found++;
-+
-+ /*
-+ * The special case - take what you catch first
-+ */
-+ if (unlikely(ac->ac_flags & EXT3_MB_HINT_FIRST)) {
-+ *bex = *ex;
-+ ext3_mb_use_best_found(ac, e3b);
-+ return;
-+ }
-+
-+ /*
-+ * Let's check whether the chunk is good enough
-+ */
-+ if (ex->fe_len == gex->fe_len) {
-+ *bex = *ex;
-+ ext3_mb_use_best_found(ac, e3b);
-+ return;
-+ }
-+
-+ /*
-+ * If this is first found extent, just store it in the context
-+ */
-+ if (bex->fe_len == 0) {
-+ *bex = *ex;
-+ return;
-+ }
-+
-+ /*
-+ * If new found extent is better, store it in the context
-+ */
-+ if (bex->fe_len < gex->fe_len) {
-+		/* if the request isn't satisfied, any found extent
-+		 * larger than the previous best one is better */
-+ if (ex->fe_len > bex->fe_len)
-+ *bex = *ex;
-+ } else if (ex->fe_len > gex->fe_len) {
-+		/* if the request is satisfied, then we try to find
-+		 * an extent that still satisfies the request, but is
-+		 * smaller than the previous one */
-+ *bex = *ex;
-+ }
-+
-+ /*
-+	 * Let's scan at least a few extents and not pick up the first one
-+ */
-+ if (bex->fe_len > gex->fe_len && ac->ac_found > ext3_mb_min_to_scan)
-+ ac->ac_status = AC_STATUS_BREAK;
-+
-+ /*
-+ * We don't want to scan for a whole year
-+ */
-+ if (ac->ac_found > ext3_mb_max_to_scan)
-+ ac->ac_status = AC_STATUS_BREAK;
-+}
-+
-+static int ext3_mb_try_best_found(struct ext3_allocation_context *ac,
-+ struct ext3_buddy *e3b)
-+{
-+ struct ext3_free_extent ex = ac->ac_b_ex;
-+ int group = ex.fe_group, max, err;
-+
-+ J_ASSERT(ex.fe_len > 0);
-+ err = ext3_mb_load_buddy(ac->ac_sb, group, e3b);
-+ if (err)
-+ return err;
-+
-+ ext3_lock_group(ac->ac_sb, group);
-+ max = mb_find_extent(e3b, 0, ex.fe_start, ex.fe_len, &ex);
-+
-+ if (max > 0) {
-+ ac->ac_b_ex = ex;
-+ ext3_mb_use_best_found(ac, e3b);
-+ }
-+
-+ ext3_unlock_group(ac->ac_sb, group);
-+
-+ ext3_mb_release_desc(e3b);
-+
-+ return 0;
-+}
-+
-+static int ext3_mb_find_by_goal(struct ext3_allocation_context *ac,
-+ struct ext3_buddy *e3b)
-+{
-+ int group = ac->ac_g_ex.fe_group, max, err;
-+ struct ext3_sb_info *sbi = EXT3_SB(ac->ac_sb);
-+ struct ext3_super_block *es = sbi->s_es;
-+ struct ext3_free_extent ex;
-+
-+ err = ext3_mb_load_buddy(ac->ac_sb, group, e3b);
-+ if (err)
-+ return err;
-+
-+ ext3_lock_group(ac->ac_sb, group);
-+ max = mb_find_extent(e3b, 0, ac->ac_g_ex.fe_start,
-+ ac->ac_g_ex.fe_len, &ex);
-+
-+ if (max >= ac->ac_g_ex.fe_len && ac->ac_g_ex.fe_len == sbi->s_stripe) {
-+ ext3_fsblk_t start;
-+ start = (e3b->bd_group * EXT3_BLOCKS_PER_GROUP(ac->ac_sb) +
-+ ex.fe_start + le32_to_cpu(es->s_first_data_block));
-+ if (start % sbi->s_stripe == 0) {
-+ ac->ac_found++;
-+ ac->ac_b_ex = ex;
-+ ext3_mb_use_best_found(ac, e3b);
-+ }
-+ } else if (max >= ac->ac_g_ex.fe_len) {
-+ J_ASSERT(ex.fe_len > 0);
-+ J_ASSERT(ex.fe_group == ac->ac_g_ex.fe_group);
-+ J_ASSERT(ex.fe_start == ac->ac_g_ex.fe_start);
-+ ac->ac_found++;
-+ ac->ac_b_ex = ex;
-+ ext3_mb_use_best_found(ac, e3b);
-+ } else if (max > 0 && (ac->ac_flags & EXT3_MB_HINT_MERGE)) {
-+		/* Sometimes, the caller may want to merge even a small
-+		 * number of blocks into an existing extent */
-+ J_ASSERT(ex.fe_len > 0);
-+ J_ASSERT(ex.fe_group == ac->ac_g_ex.fe_group);
-+ J_ASSERT(ex.fe_start == ac->ac_g_ex.fe_start);
-+ ac->ac_found++;
-+ ac->ac_b_ex = ex;
-+ ext3_mb_use_best_found(ac, e3b);
-+ }
-+ ext3_unlock_group(ac->ac_sb, group);
-+
-+ ext3_mb_release_desc(e3b);
-+
-+ return 0;
-+}
-+
-+/*
-+ * The routine scans buddy structures (not the bitmap!) from the given
-+ * order up to the max order and tries to find a big enough chunk to
-+ * satisfy the request
-+ */
-+static void ext3_mb_simple_scan_group(struct ext3_allocation_context *ac,
-+ struct ext3_buddy *e3b)
-+{
-+ struct super_block *sb = ac->ac_sb;
-+ struct ext3_group_info *grp = e3b->bd_info;
-+ void *buddy;
-+ int i, k, max;
-+
-+ J_ASSERT(ac->ac_2order > 0);
-+ for (i = ac->ac_2order; i <= sb->s_blocksize_bits + 1; i++) {
-+ if (grp->bb_counters[i] == 0)
-+ continue;
-+
-+ buddy = mb_find_buddy(e3b, i, &max);
-+ if (buddy == NULL) {
-+ printk(KERN_ALERT "looking for wrong order?\n");
-+ break;
-+ }
-+
-+ k = mb_find_next_zero_bit(buddy, max, 0);
-+ J_ASSERT(k < max);
-+
-+ ac->ac_found++;
-+
-+ ac->ac_b_ex.fe_len = 1 << i;
-+ ac->ac_b_ex.fe_start = k << i;
-+ ac->ac_b_ex.fe_group = e3b->bd_group;
-+
-+ ext3_mb_use_best_found(ac, e3b);
-+ J_ASSERT(ac->ac_b_ex.fe_len == ac->ac_g_ex.fe_len);
-+
-+ if (unlikely(ext3_mb_stats))
-+ atomic_inc(&EXT3_SB(sb)->s_bal_2orders);
-+
-+ break;
-+ }
-+}
-+
-+/*
-+ * The routine scans the group and measures all found extents.
-+ * In order to optimize scanning, the caller must pass the number of
-+ * free blocks in the group, so the routine knows the upper limit.
-+ */
-+static void ext3_mb_complex_scan_group(struct ext3_allocation_context *ac,
-+ struct ext3_buddy *e3b)
-+{
-+ struct super_block *sb = ac->ac_sb;
-+ void *bitmap = EXT3_MB_BITMAP(e3b);
-+ struct ext3_free_extent ex;
-+ int i, free;
-+
-+ free = e3b->bd_info->bb_free;
-+ J_ASSERT(free > 0);
-+
-+ i = e3b->bd_info->bb_first_free;
-+
-+ while (free && ac->ac_status == AC_STATUS_CONTINUE) {
-+ i = mb_find_next_zero_bit(bitmap, sb->s_blocksize * 8, i);
-+ if (i >= sb->s_blocksize * 8) {
-+ J_ASSERT(free == 0);
-+ break;
-+ }
-+
-+ mb_find_extent(e3b, 0, i, ac->ac_g_ex.fe_len, &ex);
-+ J_ASSERT(ex.fe_len > 0);
-+ J_ASSERT(free >= ex.fe_len);
-+
-+ ext3_mb_measure_extent(ac, &ex, e3b);
-+
-+ i += ex.fe_len;
-+ free -= ex.fe_len;
-+ }
-+}
-+
-+/*
-+ * This is a special case for storage like raid5:
-+ * we try to find stripe-aligned chunks for stripe-sized requests
-+ */
-+static void ext3_mb_scan_aligned(struct ext3_allocation_context *ac,
-+ struct ext3_buddy *e3b)
-+{
-+ struct super_block *sb = ac->ac_sb;
-+ struct ext3_sb_info *sbi = EXT3_SB(sb);
-+ void *bitmap = EXT3_MB_BITMAP(e3b);
-+ struct ext3_free_extent ex;
-+ ext3_fsblk_t i, max;
-+
-+ J_ASSERT(sbi->s_stripe != 0);
-+
-+ /* find first stripe-aligned block */
-+ i = e3b->bd_group * EXT3_BLOCKS_PER_GROUP(sb) +
-+ le32_to_cpu(sbi->s_es->s_first_data_block);
-+ i = ((i + sbi->s_stripe - 1) / sbi->s_stripe) * sbi->s_stripe;
-+ i = (i - le32_to_cpu(sbi->s_es->s_first_data_block)) %
-+ EXT3_BLOCKS_PER_GROUP(sb);
-+
-+ while (i < sb->s_blocksize * 8) {
-+ if (!mb_test_bit(i, bitmap)) {
-+ max = mb_find_extent(e3b, 0, i, sbi->s_stripe, &ex);
-+ if (max >= sbi->s_stripe) {
-+ ac->ac_found++;
-+ ac->ac_b_ex = ex;
-+ ext3_mb_use_best_found(ac, e3b);
-+ break;
-+ }
-+ }
-+ i += sbi->s_stripe;
-+ }
-+}
-+
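-+/*
-+ * Decide whether a group is worth scanning under criterion 'cr':
-+ *   cr 0 - the group has a free chunk of at least 2^ac_2order blocks
-+ *   cr 1 - the average free extent is at least the requested length
-+ *   cr 2 - the group simply has enough free blocks
-+ *   cr 3 - any group with free space will do
-+ */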
-+static int ext3_mb_good_group(struct ext3_allocation_context *ac,
-+ int group, int cr)
-+{
-+ struct ext3_group_info *grp = EXT3_GROUP_INFO(ac->ac_sb, group);
-+ unsigned free, fragments, i, bits;
-+
-+ J_ASSERT(cr >= 0 && cr < 4);
-+ J_ASSERT(!EXT3_MB_GRP_NEED_INIT(grp));
-+
-+ free = grp->bb_free;
-+ fragments = grp->bb_fragments;
-+ if (free == 0)
-+ return 0;
-+ if (fragments == 0)
-+ return 0;
-+
-+ switch (cr) {
-+ case 0:
-+ J_ASSERT(ac->ac_2order != 0);
-+ bits = ac->ac_sb->s_blocksize_bits + 1;
-+ for (i = ac->ac_2order; i <= bits; i++)
-+ if (grp->bb_counters[i] > 0)
-+ return 1;
-+ break;
-+ case 1:
-+ if ((free / fragments) >= ac->ac_g_ex.fe_len)
-+ return 1;
-+ break;
-+ case 2:
-+ if (free >= ac->ac_g_ex.fe_len)
-+ return 1;
-+ break;
-+ case 3:
-+ return 1;
-+ default:
-+ BUG();
-+ }
-+
-+ return 0;
-+}
-+
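-+/*
-+ * Main allocation entry point: reserve quota and (for non-reserved
-+ * requests) a block reservation, try the goal extent first, then scan
-+ * the groups under progressively weaker criteria (cr 0..3) and finally
-+ * mark the chosen extent in the journaled on-disk bitmap and group
-+ * descriptor.
-+ */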
-+ext3_fsblk_t ext3_mb_new_blocks(handle_t *handle, struct inode *inode,
-+ ext3_fsblk_t goal, int *len,int flags,int *errp)
-+{
-+ struct buffer_head *bitmap_bh = NULL;
-+ struct ext3_allocation_context ac;
-+ int i, group, cr, err = 0;
-+ struct ext3_group_desc *gdp;
-+ struct ext3_super_block *es;
-+ struct buffer_head *gdp_bh;
-+ struct ext3_sb_info *sbi;
-+ struct super_block *sb;
-+ struct ext3_buddy e3b;
-+ ext3_fsblk_t block;
-+
-+ J_ASSERT(len != NULL);
-+ J_ASSERT(*len > 0);
-+
-+ sb = inode->i_sb;
-+ if (!sb) {
-+		printk("ext3_mb_new_blocks: nonexistent device");
-+ return 0;
-+ }
-+
-+ if (!test_opt(sb, MBALLOC)) {
-+ static int ext3_mballoc_warning = 0;
-+ if (ext3_mballoc_warning == 0) {
-+ printk(KERN_ERR "EXT3-fs: multiblock request with "
-+ "mballoc disabled!\n");
-+ ext3_mballoc_warning++;
-+ }
-+ *len = 1;
-+ err = ext3_new_block_old(handle, inode, goal, errp);
-+ return err;
-+ }
-+
-+ ext3_mb_poll_new_transaction(sb, handle);
-+
-+ sbi = EXT3_SB(sb);
-+ es = EXT3_SB(sb)->s_es;
-+
-+ /*
-+ * We can't allocate > group size
-+ */
-+ if (*len >= EXT3_BLOCKS_PER_GROUP(sb) - 10)
-+ *len = EXT3_BLOCKS_PER_GROUP(sb) - 10;
-+
-+ if (!(flags & EXT3_MB_HINT_RESERVED)) {
-+ /* someone asks for non-reserved blocks */
-+ BUG_ON(*len > 1);
-+ err = ext3_mb_reserve_blocks(sb, 1);
-+ if (err) {
-+ *errp = err;
-+ return 0;
-+ }
-+ }
-+
-+ ac.ac_buddy_page = NULL;
-+ ac.ac_bitmap_page = NULL;
-+
-+ /*
-+	 * Check quota for allocation of these blocks.
-+ */
-+ while (*len && DQUOT_ALLOC_BLOCK(inode, *len))
-+ *len -= 1;
-+ if (*len == 0) {
-+ *errp = -EDQUOT;
-+ block = 0;
-+ goto out;
-+ }
-+
-+ /* start searching from the goal */
-+ if (goal < le32_to_cpu(es->s_first_data_block) ||
-+ goal >= le32_to_cpu(es->s_blocks_count))
-+ goal = le32_to_cpu(es->s_first_data_block);
-+ group = (goal - le32_to_cpu(es->s_first_data_block)) /
-+ EXT3_BLOCKS_PER_GROUP(sb);
-+ block = ((goal - le32_to_cpu(es->s_first_data_block)) %
-+ EXT3_BLOCKS_PER_GROUP(sb));
-+
-+ /* set up allocation goals */
-+ ac.ac_b_ex.fe_group = 0;
-+ ac.ac_b_ex.fe_start = 0;
-+ ac.ac_b_ex.fe_len = 0;
-+ ac.ac_status = AC_STATUS_CONTINUE;
-+ ac.ac_groups_scanned = 0;
-+ ac.ac_ex_scanned = 0;
-+ ac.ac_found = 0;
-+ ac.ac_sb = inode->i_sb;
-+ ac.ac_g_ex.fe_group = group;
-+ ac.ac_g_ex.fe_start = block;
-+ ac.ac_g_ex.fe_len = *len;
-+ ac.ac_flags = flags;
-+ ac.ac_2order = 0;
-+ ac.ac_criteria = 0;
-+
-+ if (*len == 1 && sbi->s_stripe) {
-+		/* looks like metadata; use a dirty hack for raid5:
-+		 * put all metadata in the first groups in the hope of hitting
-+		 * cached sectors and thus avoiding read-modify cycles in raid5 */
-+ ac.ac_g_ex.fe_group = group = 0;
-+ }
-+
-+ /* probably, the request is for 2^8+ blocks (1/2/3/... MB) */
-+ i = ffs(*len);
-+ if (i >= ext3_mb_order2_reqs) {
-+ i--;
-+ if ((*len & (~(1 << i))) == 0)
-+ ac.ac_2order = i;
-+ }
-+
-+ /* first, try the goal */
-+ err = ext3_mb_find_by_goal(&ac, &e3b);
-+ if (err)
-+ goto out_err;
-+ if (ac.ac_status == AC_STATUS_FOUND)
-+ goto found;
-+
-+	/* Let's just scan groups to find more or less suitable blocks */
-+ cr = ac.ac_2order ? 0 : 1;
-+repeat:
-+ for (; cr < 4 && ac.ac_status == AC_STATUS_CONTINUE; cr++) {
-+ ac.ac_criteria = cr;
-+ for (i = 0; i < EXT3_SB(sb)->s_groups_count; group++, i++) {
-+ if (group == EXT3_SB(sb)->s_groups_count)
-+ group = 0;
-+
-+ if (EXT3_MB_GRP_NEED_INIT(EXT3_GROUP_INFO(sb, group))) {
-+ /* we need full data about the group
-+ * to make a good selection */
-+ err = ext3_mb_load_buddy(ac.ac_sb, group, &e3b);
-+ if (err)
-+ goto out_err;
-+ ext3_mb_release_desc(&e3b);
-+ }
-+
-+			/* check whether the group is good for our criteria */
-+ if (!ext3_mb_good_group(&ac, group, cr))
-+ continue;
-+
-+ err = ext3_mb_load_buddy(ac.ac_sb, group, &e3b);
-+ if (err)
-+ goto out_err;
-+
-+ ext3_lock_group(sb, group);
-+ if (!ext3_mb_good_group(&ac, group, cr)) {
-+				/* someone has allocated from this group meanwhile */
-+ ext3_unlock_group(sb, group);
-+ ext3_mb_release_desc(&e3b);
-+ continue;
-+ }
-+
-+ ac.ac_groups_scanned++;
-+ if (cr == 0)
-+ ext3_mb_simple_scan_group(&ac, &e3b);
-+ else if (cr == 1 && *len == sbi->s_stripe)
-+ ext3_mb_scan_aligned(&ac, &e3b);
-+ else
-+ ext3_mb_complex_scan_group(&ac, &e3b);
-+
-+ ext3_unlock_group(sb, group);
-+
-+ ext3_mb_release_desc(&e3b);
-+
-+ if (ac.ac_status != AC_STATUS_CONTINUE)
-+ break;
-+ }
-+ }
-+
-+ if (ac.ac_b_ex.fe_len > 0 && ac.ac_status != AC_STATUS_FOUND &&
-+ !(ac.ac_flags & EXT3_MB_HINT_FIRST)) {
-+ /*
-+ * We've been searching too long. Let's try to allocate
-+ * the best chunk we've found so far
-+ */
-+
-+ /*if (ac.ac_found > ext3_mb_max_to_scan)
-+ printk(KERN_DEBUG "EXT3-fs: too long searching at "
-+ "%u (%d/%d)\n", cr, ac.ac_b_ex.fe_len,
-+ ac.ac_g_ex.fe_len);*/
-+ ext3_mb_try_best_found(&ac, &e3b);
-+ if (ac.ac_status != AC_STATUS_FOUND) {
-+ /*
-+			 * Someone luckier has already allocated it.
-+			 * The only thing we can do is just take the first
-+			 * found block(s)
-+ printk(KERN_DEBUG "EXT3-fs: someone won our chunk\n");
-+ */
-+ ac.ac_b_ex.fe_group = 0;
-+ ac.ac_b_ex.fe_start = 0;
-+ ac.ac_b_ex.fe_len = 0;
-+ ac.ac_status = AC_STATUS_CONTINUE;
-+ ac.ac_flags |= EXT3_MB_HINT_FIRST;
-+ cr = 3;
-+ goto repeat;
-+ }
-+ }
-+
-+ if (ac.ac_status != AC_STATUS_FOUND) {
-+ /*
-+		 * We definitely aren't lucky
-+ */
-+ DQUOT_FREE_BLOCK(inode, *len);
-+ *errp = -ENOSPC;
-+ block = 0;
-+#if 1
-+ printk(KERN_ERR "EXT3-fs: can't allocate: status %d flags %d\n",
-+ ac.ac_status, ac.ac_flags);
-+ printk(KERN_ERR "EXT3-fs: goal %d, best found %d/%d/%d cr %d\n",
-+ ac.ac_g_ex.fe_len, ac.ac_b_ex.fe_group,
-+ ac.ac_b_ex.fe_start, ac.ac_b_ex.fe_len, cr);
-+ printk(KERN_ERR "EXT3-fs: %lu block reserved, %d found\n",
-+ sbi->s_blocks_reserved, ac.ac_found);
-+ printk("EXT3-fs: groups: ");
-+ for (i = 0; i < EXT3_SB(sb)->s_groups_count; i++)
-+ printk("%d: %d ", i, EXT3_GROUP_INFO(sb, i)->bb_free);
-+ printk("\n");
-+#endif
-+ goto out;
-+ }
-+
-+found:
-+ J_ASSERT(ac.ac_b_ex.fe_len > 0);
-+
-+ /* good news - free block(s) have been found. now it's time
-+ * to mark block(s) in good old journaled bitmap */
-+ block = ac.ac_b_ex.fe_group * EXT3_BLOCKS_PER_GROUP(sb)
-+ + ac.ac_b_ex.fe_start
-+ + le32_to_cpu(es->s_first_data_block);
-+
-+	/* we made a decision, now mark the found blocks in the good old
-+	 * bitmap to be journaled */
-+
-+ ext3_debug("using block group %d(%d)\n",
-+ ac.ac_b_group.group, gdp->bg_free_blocks_count);
-+
-+ bitmap_bh = read_block_bitmap(sb, ac.ac_b_ex.fe_group);
-+ if (!bitmap_bh) {
-+ *errp = -EIO;
-+ goto out_err;
-+ }
-+
-+ err = ext3_journal_get_write_access(handle, bitmap_bh);
-+ if (err) {
-+ *errp = err;
-+ goto out_err;
-+ }
-+
-+ gdp = ext3_get_group_desc(sb, ac.ac_b_ex.fe_group, &gdp_bh);
-+ if (!gdp) {
-+ *errp = -EIO;
-+ goto out_err;
-+ }
-+
-+ err = ext3_journal_get_write_access(handle, gdp_bh);
-+ if (err)
-+ goto out_err;
-+
-+ block = ac.ac_b_ex.fe_group * EXT3_BLOCKS_PER_GROUP(sb)
-+ + ac.ac_b_ex.fe_start
-+ + le32_to_cpu(es->s_first_data_block);
-+
-+ if (block == le32_to_cpu(gdp->bg_block_bitmap) ||
-+ block == le32_to_cpu(gdp->bg_inode_bitmap) ||
-+ in_range(block, le32_to_cpu(gdp->bg_inode_table),
-+ EXT3_SB(sb)->s_itb_per_group))
-+ ext3_error(sb, "ext3_new_block",
-+ "Allocating block in system zone - "
-+ "block = "E3FSBLK, block);
-+#ifdef AGGRESSIVE_CHECK
-+ for (i = 0; i < ac.ac_b_ex.fe_len; i++)
-+ J_ASSERT(!mb_test_bit(ac.ac_b_ex.fe_start + i, bitmap_bh->b_data));
-+#endif
-+ mb_set_bits(bitmap_bh->b_data, ac.ac_b_ex.fe_start, ac.ac_b_ex.fe_len);
-+
-+ spin_lock(sb_bgl_lock(sbi, ac.ac_b_ex.fe_group));
-+ gdp->bg_free_blocks_count =
-+ cpu_to_le16(le16_to_cpu(gdp->bg_free_blocks_count)
-+ - ac.ac_b_ex.fe_len);
-+ spin_unlock(sb_bgl_lock(sbi, ac.ac_b_ex.fe_group));
-+ percpu_counter_mod(&sbi->s_freeblocks_counter, - ac.ac_b_ex.fe_len);
-+
-+ err = ext3_journal_dirty_metadata(handle, bitmap_bh);
-+ if (err)
-+ goto out_err;
-+ err = ext3_journal_dirty_metadata(handle, gdp_bh);
-+ if (err)
-+ goto out_err;
-+
-+ sb->s_dirt = 1;
-+ *errp = 0;
-+ brelse(bitmap_bh);
-+
-+	/* drop non-allocated, but dquot-charged blocks */
-+ J_ASSERT(*len >= ac.ac_b_ex.fe_len);
-+ DQUOT_FREE_BLOCK(inode, *len - ac.ac_b_ex.fe_len);
-+
-+ *len = ac.ac_b_ex.fe_len;
-+ J_ASSERT(*len > 0);
-+ J_ASSERT(block != 0);
-+ goto out;
-+
-+out_err:
-+ /* if we've already allocated something, roll it back */
-+ if (ac.ac_status == AC_STATUS_FOUND) {
-+ /* FIXME: free blocks here */
-+ }
-+
-+ DQUOT_FREE_BLOCK(inode, *len);
-+ brelse(bitmap_bh);
-+ *errp = err;
-+ block = 0;
-+out:
-+ if (ac.ac_buddy_page)
-+ page_cache_release(ac.ac_buddy_page);
-+ if (ac.ac_bitmap_page)
-+ page_cache_release(ac.ac_bitmap_page);
-+
-+ if (!(flags & EXT3_MB_HINT_RESERVED)) {
-+		/* the block wasn't reserved before and we reserved it
-+		 * at the beginning of allocation. it doesn't matter
-+		 * whether we allocated anything or failed: time
-+		 * to release the reservation. NOTE: because I expect
-+		 * multiblock requests to come from the delayed allocation
-+		 * path only, this is always a single block */
-+ ext3_mb_release_blocks(sb, 1);
-+ }
-+
-+ if (unlikely(ext3_mb_stats) && ac.ac_g_ex.fe_len > 1) {
-+ atomic_inc(&sbi->s_bal_reqs);
-+ atomic_add(*len, &sbi->s_bal_allocated);
-+ if (*len >= ac.ac_g_ex.fe_len)
-+ atomic_inc(&sbi->s_bal_success);
-+ atomic_add(ac.ac_found, &sbi->s_bal_ex_scanned);
-+ if (ac.ac_g_ex.fe_start == ac.ac_b_ex.fe_start &&
-+ ac.ac_g_ex.fe_group == ac.ac_b_ex.fe_group)
-+ atomic_inc(&sbi->s_bal_goals);
-+ if (ac.ac_found > ext3_mb_max_to_scan)
-+ atomic_inc(&sbi->s_bal_breaks);
-+ }
-+
-+ ext3_mb_store_history(sb, inode->i_ino, &ac);
-+
-+ return block;
-+}
-+EXPORT_SYMBOL(ext3_mb_new_blocks);
-+
-+#ifdef EXT3_MB_HISTORY
-+struct ext3_mb_proc_session {
-+ struct ext3_mb_history *history;
-+ struct super_block *sb;
-+ int start;
-+ int max;
-+};
-+
-+static void *ext3_mb_history_skip_empty(struct ext3_mb_proc_session *s,
-+ struct ext3_mb_history *hs,
-+ int first)
-+{
-+ if (hs == s->history + s->max)
-+ hs = s->history;
-+ if (!first && hs == s->history + s->start)
-+ return NULL;
-+ while (hs->goal.fe_len == 0) {
-+ hs++;
-+ if (hs == s->history + s->max)
-+ hs = s->history;
-+ if (hs == s->history + s->start)
-+ return NULL;
-+ }
-+ return hs;
-+}
-+
-+static void *ext3_mb_seq_history_start(struct seq_file *seq, loff_t *pos)
-+{
-+ struct ext3_mb_proc_session *s = seq->private;
-+ struct ext3_mb_history *hs;
-+ int l = *pos;
-+
-+ if (l == 0)
-+ return SEQ_START_TOKEN;
-+ hs = ext3_mb_history_skip_empty(s, s->history + s->start, 1);
-+ if (!hs)
-+ return NULL;
-+ while (--l && (hs = ext3_mb_history_skip_empty(s, ++hs, 0)) != NULL);
-+ return hs;
-+}
-+
-+static void *ext3_mb_seq_history_next(struct seq_file *seq, void *v, loff_t *pos)
-+{
-+ struct ext3_mb_proc_session *s = seq->private;
-+ struct ext3_mb_history *hs = v;
-+
-+ ++*pos;
-+ if (v == SEQ_START_TOKEN)
-+ return ext3_mb_history_skip_empty(s, s->history + s->start, 1);
-+ else
-+ return ext3_mb_history_skip_empty(s, ++hs, 0);
-+}
-+
-+static int ext3_mb_seq_history_show(struct seq_file *seq, void *v)
-+{
-+ struct ext3_mb_history *hs = v;
-+ char buf[20], buf2[20];
-+
-+ if (v == SEQ_START_TOKEN) {
-+ seq_printf(seq, "%-5s %-8s %-17s %-17s %-5s %-5s %-2s %-5s %-5s %-6s\n",
-+ "pid", "inode", "goal", "result", "found", "grps", "cr",
-+ "merge", "tail", "broken");
-+ return 0;
-+ }
-+
-+ sprintf(buf, "%u/%u/%u", hs->goal.fe_group,
-+ hs->goal.fe_start, hs->goal.fe_len);
-+ sprintf(buf2, "%u/%u/%u", hs->result.fe_group,
-+ hs->result.fe_start, hs->result.fe_len);
-+ seq_printf(seq, "%-5u %-8u %-17s %-17s %-5u %-5u %-2u %-5s %-5u %-6u\n",
-+ hs->pid, hs->ino, buf, buf2, hs->found, hs->groups,
-+ hs->cr, hs->merged ? "M" : "", hs->tail,
-+ hs->buddy ? 1 << hs->buddy : 0);
-+ return 0;
-+}
-+
-+static void ext3_mb_seq_history_stop(struct seq_file *seq, void *v)
-+{
-+}
-+
-+static struct seq_operations ext3_mb_seq_history_ops = {
-+ .start = ext3_mb_seq_history_start,
-+ .next = ext3_mb_seq_history_next,
-+ .stop = ext3_mb_seq_history_stop,
-+ .show = ext3_mb_seq_history_show,
-+};
-+
-+static int ext3_mb_seq_history_open(struct inode *inode, struct file *file)
-+{
-+ struct super_block *sb = PDE(inode)->data;
-+ struct ext3_sb_info *sbi = EXT3_SB(sb);
-+ struct ext3_mb_proc_session *s;
-+ int rc, size;
-+
-+ s = kmalloc(sizeof(*s), GFP_KERNEL);
-+ if (s == NULL)
-+ return -EIO;
-+ size = sizeof(struct ext3_mb_history) * sbi->s_mb_history_max;
-+ s->history = kmalloc(size, GFP_KERNEL);
-+	if (s->history == NULL) {
-+ kfree(s);
-+ return -EIO;
-+ }
-+
-+ spin_lock(&sbi->s_mb_history_lock);
-+ memcpy(s->history, sbi->s_mb_history, size);
-+ s->max = sbi->s_mb_history_max;
-+ s->start = sbi->s_mb_history_cur % s->max;
-+ spin_unlock(&sbi->s_mb_history_lock);
-+
-+ rc = seq_open(file, &ext3_mb_seq_history_ops);
-+ if (rc == 0) {
-+ struct seq_file *m = (struct seq_file *)file->private_data;
-+ m->private = s;
-+ } else {
-+ kfree(s->history);
-+ kfree(s);
-+ }
-+ return rc;
-+
-+}
-+
-+static int ext3_mb_seq_history_release(struct inode *inode, struct file *file)
-+{
-+ struct seq_file *seq = (struct seq_file *)file->private_data;
-+ struct ext3_mb_proc_session *s = seq->private;
-+ kfree(s->history);
-+ kfree(s);
-+ return seq_release(inode, file);
-+}
-+
-+static struct file_operations ext3_mb_seq_history_fops = {
-+ .owner = THIS_MODULE,
-+ .open = ext3_mb_seq_history_open,
-+ .read = seq_read,
-+ .llseek = seq_lseek,
-+ .release = ext3_mb_seq_history_release,
-+};
-+
-+static void *ext3_mb_seq_groups_start(struct seq_file *seq, loff_t *pos)
-+{
-+ struct super_block *sb = seq->private;
-+ struct ext3_sb_info *sbi = EXT3_SB(sb);
-+ long group;
-+
-+ if (*pos < 0 || *pos >= sbi->s_groups_count)
-+ return NULL;
-+
-+ group = *pos + 1;
-+ return (void *) group;
-+}
-+
-+static void *ext3_mb_seq_groups_next(struct seq_file *seq, void *v, loff_t *pos)
-+{
-+ struct super_block *sb = seq->private;
-+ struct ext3_sb_info *sbi = EXT3_SB(sb);
-+ long group;
-+
-+ ++*pos;
-+ if (*pos < 0 || *pos >= sbi->s_groups_count)
-+ return NULL;
-+ group = *pos + 1;
-+	return (void *) group;
-+}
-+
-+static int ext3_mb_seq_groups_show(struct seq_file *seq, void *v)
-+{
-+ struct super_block *sb = seq->private;
-+ long group = (long) v, i;
-+ struct sg {
-+ struct ext3_group_info info;
-+ unsigned short counters[16];
-+ } sg;
-+
-+ group--;
-+ if (group == 0)
-+ seq_printf(seq, "#%-5s: %-5s %-5s %-5s [ %-5s %-5s %-5s %-5s %-5s %-5s %-5s %-5s %-5s %-5s %-5s %-5s %-5s %-5s ]\n",
-+ "group", "free", "frags", "first", "2^0", "2^1", "2^2",
-+ "2^3", "2^4", "2^5", "2^6", "2^7", "2^8", "2^9", "2^10",
-+ "2^11", "2^12", "2^13");
-+
-+ i = (sb->s_blocksize_bits + 2) * sizeof(sg.info.bb_counters[0]) +
-+ sizeof(struct ext3_group_info);
-+ ext3_lock_group(sb, group);
-+ memcpy(&sg, EXT3_GROUP_INFO(sb, group), i);
-+ ext3_unlock_group(sb, group);
-+
-+ if (EXT3_MB_GRP_NEED_INIT(&sg.info))
-+ return 0;
-+
-+ seq_printf(seq, "#%-5lu: %-5u %-5u %-5u [", group, sg.info.bb_free,
-+ sg.info.bb_fragments, sg.info.bb_first_free);
-+ for (i = 0; i <= 13; i++)
-+ seq_printf(seq, " %-5u", i <= sb->s_blocksize_bits + 1 ?
-+ sg.info.bb_counters[i] : 0);
-+ seq_printf(seq, " ]\n");
-+
-+ return 0;
-+}
-+
-+static void ext3_mb_seq_groups_stop(struct seq_file *seq, void *v)
-+{
-+}
-+
-+static struct seq_operations ext3_mb_seq_groups_ops = {
-+ .start = ext3_mb_seq_groups_start,
-+ .next = ext3_mb_seq_groups_next,
-+ .stop = ext3_mb_seq_groups_stop,
-+ .show = ext3_mb_seq_groups_show,
-+};
-+
-+static int ext3_mb_seq_groups_open(struct inode *inode, struct file *file)
-+{
-+ struct super_block *sb = PDE(inode)->data;
-+ int rc;
-+
-+ rc = seq_open(file, &ext3_mb_seq_groups_ops);
-+ if (rc == 0) {
-+ struct seq_file *m = (struct seq_file *)file->private_data;
-+ m->private = sb;
-+ }
-+ return rc;
-+}
-+
-+static struct file_operations ext3_mb_seq_groups_fops = {
-+ .owner = THIS_MODULE,
-+ .open = ext3_mb_seq_groups_open,
-+ .read = seq_read,
-+ .llseek = seq_lseek,
-+ .release = seq_release,
-+};
-+
-+static void ext3_mb_history_release(struct super_block *sb)
-+{
-+ struct ext3_sb_info *sbi = EXT3_SB(sb);
-+ char name[64];
-+
-+ snprintf(name, sizeof(name) - 1, "%s", bdevname(sb->s_bdev, name));
-+ remove_proc_entry("mb_groups", sbi->s_mb_proc);
-+ remove_proc_entry("mb_history", sbi->s_mb_proc);
-+ remove_proc_entry(name, proc_root_ext3);
-+
-+ if (sbi->s_mb_history)
-+ kfree(sbi->s_mb_history);
-+}
-+
-+static void ext3_mb_history_init(struct super_block *sb)
-+{
-+ struct ext3_sb_info *sbi = EXT3_SB(sb);
-+ char name[64];
-+ int i;
-+
-+ snprintf(name, sizeof(name) - 1, "%s", bdevname(sb->s_bdev, name));
-+ sbi->s_mb_proc = proc_mkdir(name, proc_root_ext3);
-+ if (sbi->s_mb_proc != NULL) {
-+ struct proc_dir_entry *p;
-+ p = create_proc_entry("mb_history", S_IRUGO, sbi->s_mb_proc);
-+ if (p) {
-+ p->proc_fops = &ext3_mb_seq_history_fops;
-+ p->data = sb;
-+ }
-+ p = create_proc_entry("mb_groups", S_IRUGO, sbi->s_mb_proc);
-+ if (p) {
-+ p->proc_fops = &ext3_mb_seq_groups_fops;
-+ p->data = sb;
-+ }
-+ }
-+
-+ sbi->s_mb_history_max = 1000;
-+ sbi->s_mb_history_cur = 0;
-+ spin_lock_init(&sbi->s_mb_history_lock);
-+ i = sbi->s_mb_history_max * sizeof(struct ext3_mb_history);
-+ sbi->s_mb_history = kmalloc(i, GFP_KERNEL);
-+ if (sbi->s_mb_history != NULL)
-+ memset(sbi->s_mb_history, 0, i);
-+ /* if we can't allocate history, then we simply won't use it */
-+}
-+
-+static void
-+ext3_mb_store_history(struct super_block *sb, unsigned ino,
-+ struct ext3_allocation_context *ac)
-+{
-+ struct ext3_sb_info *sbi = EXT3_SB(sb);
-+ struct ext3_mb_history h;
-+
-+ if (unlikely(sbi->s_mb_history == NULL))
-+ return;
-+
-+ h.pid = current->pid;
-+ h.ino = ino;
-+ h.goal = ac->ac_g_ex;
-+ h.result = ac->ac_b_ex;
-+ h.found = ac->ac_found;
-+ h.cr = ac->ac_criteria;
-+ h.groups = ac->ac_groups_scanned;
-+ h.tail = ac->ac_tail;
-+ h.buddy = ac->ac_buddy;
-+ h.merged = 0;
-+ if (ac->ac_g_ex.fe_start == ac->ac_b_ex.fe_start &&
-+ ac->ac_g_ex.fe_group == ac->ac_b_ex.fe_group)
-+ h.merged = 1;
-+
-+ spin_lock(&sbi->s_mb_history_lock);
-+ memcpy(sbi->s_mb_history + sbi->s_mb_history_cur, &h, sizeof(h));
-+ if (++sbi->s_mb_history_cur >= sbi->s_mb_history_max)
-+ sbi->s_mb_history_cur = 0;
-+ spin_unlock(&sbi->s_mb_history_lock);
-+}
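
The history code above is a plain ring buffer: ext3_mb_store_history() overwrites the oldest slot once s_mb_history_cur wraps, and the mb_history seq_file open snapshots the array and starts replaying at cur % max. A minimal userspace sketch of the same idea, with an illustrative slot count and payload rather than the kernel structures:

/* ring.c - toy model of the mb_history ring buffer */
#include <stdio.h>

#define SLOTS 4                         /* s_mb_history_max is 1000 above */

static int history[SLOTS];
static int cur;

static void store(int value)
{
        history[cur % SLOTS] = value;   /* overwrite the oldest entry */
        cur++;
}

int main(void)
{
        int i;

        for (i = 1; i <= 6; i++)        /* six events into four slots */
                store(i);
        /* replay oldest-to-newest, starting at cur % SLOTS */
        for (i = 0; i < SLOTS; i++)
                printf("%d ", history[(cur + i) % SLOTS]);
        printf("\n");                   /* prints: 3 4 5 6 */
        return 0;
}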
-+
-+#else
-+#define ext3_mb_history_release(sb)
-+#define ext3_mb_history_init(sb)
-+#endif
-+
-+int ext3_mb_init_backend(struct super_block *sb)
-+{
-+ struct ext3_sb_info *sbi = EXT3_SB(sb);
-+ int i, j, len, metalen;
-+ int num_meta_group_infos =
-+ (sbi->s_groups_count + EXT3_DESC_PER_BLOCK(sb) - 1) >>
-+ EXT3_DESC_PER_BLOCK_BITS(sb);
-+ struct ext3_group_info **meta_group_info;
-+
-+ /* An 8TB filesystem with 64-bit pointers requires a 4096 byte
-+ * kmalloc. A 128kB kmalloc should suffice for a 256TB filesystem.
-+ * So a two-level scheme suffices for now. */
-+ sbi->s_group_info = kmalloc(sizeof(*sbi->s_group_info) *
-+ num_meta_group_infos, GFP_KERNEL);
-+ if (sbi->s_group_info == NULL) {
-+ printk(KERN_ERR "EXT3-fs: can't allocate buddy meta group\n");
-+ return -ENOMEM;
-+ }
-+ sbi->s_buddy_cache = new_inode(sb);
-+ if (sbi->s_buddy_cache == NULL) {
-+ printk(KERN_ERR "EXT3-fs: can't get new inode\n");
-+ goto err_freesgi;
-+ }
-+
-+ metalen = sizeof(*meta_group_info) << EXT3_DESC_PER_BLOCK_BITS(sb);
-+ for (i = 0; i < num_meta_group_infos; i++) {
-+ if ((i + 1) == num_meta_group_infos)
-+ metalen = sizeof(*meta_group_info) *
-+ (sbi->s_groups_count -
-+ (i << EXT3_DESC_PER_BLOCK_BITS(sb)));
-+ meta_group_info = kmalloc(metalen, GFP_KERNEL);
-+ if (meta_group_info == NULL) {
-+ printk(KERN_ERR "EXT3-fs: can't allocate mem for a "
-+ "buddy group\n");
-+ goto err_freemeta;
-+ }
-+ sbi->s_group_info[i] = meta_group_info;
-+ }
-+
-+ /*
-+ * calculate needed size. if change bb_counters size,
-+ * don't forget about ext3_mb_generate_buddy()
-+ */
-+ len = sizeof(struct ext3_group_info);
-+ len += sizeof(unsigned short) * (sb->s_blocksize_bits + 2);
-+ for (i = 0; i < sbi->s_groups_count; i++) {
-+ struct ext3_group_desc * desc;
-+
-+ meta_group_info =
-+ sbi->s_group_info[i >> EXT3_DESC_PER_BLOCK_BITS(sb)];
-+ j = i & (EXT3_DESC_PER_BLOCK(sb) - 1);
-+
-+ meta_group_info[j] = kmalloc(len, GFP_KERNEL);
-+ if (meta_group_info[j] == NULL) {
-+ printk(KERN_ERR "EXT3-fs: can't allocate buddy mem\n");
-+ i--;
-+ goto err_freebuddy;
-+ }
-+ desc = ext3_get_group_desc(sb, i, NULL);
-+ if (desc == NULL) {
-+ printk(KERN_ERR"EXT3-fs: can't read descriptor %u\n",i);
-+ goto err_freebuddy;
-+ }
-+ memset(meta_group_info[j], 0, len);
-+ set_bit(EXT3_GROUP_INFO_NEED_INIT_BIT,
-+ &meta_group_info[j]->bb_state);
-+ meta_group_info[j]->bb_free =
-+ le16_to_cpu(desc->bg_free_blocks_count);
-+ }
-+
-+ return 0;
-+
-+err_freebuddy:
-+ while (i >= 0) {
-+ kfree(EXT3_GROUP_INFO(sb, i));
-+ i--;
-+ }
-+ i = num_meta_group_infos;
-+err_freemeta:
-+ while (--i >= 0)
-+ kfree(sbi->s_group_info[i]);
-+ iput(sbi->s_buddy_cache);
-+err_freesgi:
-+ kfree(sbi->s_group_info);
-+ return -ENOMEM;
-+}
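
ext3_mb_init_backend() builds the two-level table that EXT3_GROUP_INFO() (the macro added to ext3_fs_sb.h later in this patch) indexes: the top level holds one pointer per descriptor block, the second level one ext3_group_info per group. A standalone sketch of the index arithmetic, assuming a 4 KB block size so that EXT3_DESC_PER_BLOCK is 4096 / 32 = 128 and the shift is 7:

/* group_info_index.c - mirrors EXT3_GROUP_INFO(sb, group) */
#include <stdio.h>

int main(void)
{
        unsigned desc_per_block_bits = 7;       /* assumed: 4 KB blocks */
        unsigned desc_per_block = 1u << desc_per_block_bits;
        unsigned group = 1000;

        unsigned meta = group >> desc_per_block_bits;   /* s_group_info[meta] */
        unsigned slot = group & (desc_per_block - 1);   /* ...[meta][slot]    */

        printf("group %u -> s_group_info[%u][%u]\n", group, meta, slot);
        return 0;
}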
-+
-+int ext3_mb_init(struct super_block *sb, int needs_recovery)
-+{
-+ struct ext3_sb_info *sbi = EXT3_SB(sb);
-+ struct inode *root = sb->s_root->d_inode;
-+ unsigned i, offset, max;
-+ struct dentry *dentry;
-+
-+ if (!test_opt(sb, MBALLOC))
-+ return 0;
-+
-+ i = (sb->s_blocksize_bits + 2) * sizeof(unsigned short);
-+
-+ sbi->s_mb_offsets = kmalloc(i, GFP_KERNEL);
-+ if (sbi->s_mb_offsets == NULL) {
-+ clear_opt(sbi->s_mount_opt, MBALLOC);
-+ return -ENOMEM;
-+ }
-+ sbi->s_mb_maxs = kmalloc(i, GFP_KERNEL);
-+ if (sbi->s_mb_maxs == NULL) {
-+ clear_opt(sbi->s_mount_opt, MBALLOC);
-+ kfree(sbi->s_mb_offsets);
-+ return -ENOMEM;
-+ }
-+
-+ /* order 0 is regular bitmap */
-+ sbi->s_mb_maxs[0] = sb->s_blocksize << 3;
-+ sbi->s_mb_offsets[0] = 0;
-+
-+ i = 1;
-+ offset = 0;
-+ max = sb->s_blocksize << 2;
-+ do {
-+ sbi->s_mb_offsets[i] = offset;
-+ sbi->s_mb_maxs[i] = max;
-+ offset += 1 << (sb->s_blocksize_bits - i);
-+ max = max >> 1;
-+ i++;
-+ } while (i <= sb->s_blocksize_bits + 1);
-+
-+ /* init file for buddy data */
-+ if ((i = ext3_mb_init_backend(sb))) {
-+ clear_opt(sbi->s_mount_opt, MBALLOC);
-+ kfree(sbi->s_mb_offsets);
-+ kfree(sbi->s_mb_maxs);
-+ return i;
-+ }
-+
-+ spin_lock_init(&sbi->s_reserve_lock);
-+ spin_lock_init(&sbi->s_md_lock);
-+ INIT_LIST_HEAD(&sbi->s_active_transaction);
-+ INIT_LIST_HEAD(&sbi->s_closed_transaction);
-+ INIT_LIST_HEAD(&sbi->s_committed_transaction);
-+ spin_lock_init(&sbi->s_bal_lock);
-+
-+ /* remove old on-disk buddy file */
-+ mutex_lock(&root->i_mutex);
-+ dentry = lookup_one_len(".buddy", sb->s_root, strlen(".buddy"));
-+ if (dentry->d_inode != NULL) {
-+ i = vfs_unlink(root, dentry);
-+ if (i != 0)
-+ printk("EXT3-fs: can't remove .buddy file: %d\n", i);
-+ }
-+ dput(dentry);
-+ mutex_unlock(&root->i_mutex);
-+
-+ ext3_mb_history_init(sb);
-+
-+ printk("EXT3-fs: mballoc enabled\n");
-+ return 0;
-+}
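
The do/while loop above precomputes, for every buddy order, where that order's bitmap starts inside the buddy block (s_mb_offsets, in bytes) and how many bits it holds (s_mb_maxs); mb_find_buddy() later just adds the offset. The same arithmetic reproduced as a standalone program, assuming a 4 KB block size:

/* buddy_layout.c - prints the per-order offset/size table */
#include <stdio.h>

int main(void)
{
        unsigned blocksize_bits = 12;           /* assumed: 4 KB blocks */
        unsigned blocksize = 1u << blocksize_bits;
        unsigned offsets[16], maxs[16];
        unsigned i = 1, offset = 0, max = blocksize << 2;

        offsets[0] = 0;
        maxs[0] = blocksize << 3;               /* order 0 is the plain bitmap */
        do {
                offsets[i] = offset;
                maxs[i] = max;
                offset += 1u << (blocksize_bits - i);
                max >>= 1;
                i++;
        } while (i <= blocksize_bits + 1);

        for (i = 0; i <= blocksize_bits + 1; i++)
                printf("order %2u: offset %4u bytes, %6u bits\n",
                       i, offsets[i], maxs[i]);
        return 0;
}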
-+
-+int ext3_mb_release(struct super_block *sb)
-+{
-+ struct ext3_sb_info *sbi = EXT3_SB(sb);
-+ int i, num_meta_group_infos;
-+
-+ if (!test_opt(sb, MBALLOC))
-+ return 0;
-+
-+ /* release freed, non-committed blocks */
-+ spin_lock(&sbi->s_md_lock);
-+ list_splice_init(&sbi->s_closed_transaction,
-+ &sbi->s_committed_transaction);
-+ list_splice_init(&sbi->s_active_transaction,
-+ &sbi->s_committed_transaction);
-+ spin_unlock(&sbi->s_md_lock);
-+ ext3_mb_free_committed_blocks(sb);
-+
-+ if (sbi->s_group_info) {
-+ for (i = 0; i < sbi->s_groups_count; i++)
-+ kfree(EXT3_GROUP_INFO(sb, i));
-+ num_meta_group_infos = (sbi->s_groups_count +
-+ EXT3_DESC_PER_BLOCK(sb) - 1) >>
-+ EXT3_DESC_PER_BLOCK_BITS(sb);
-+ for (i = 0; i < num_meta_group_infos; i++)
-+ kfree(sbi->s_group_info[i]);
-+ kfree(sbi->s_group_info);
-+ }
-+ if (sbi->s_mb_offsets)
-+ kfree(sbi->s_mb_offsets);
-+ if (sbi->s_mb_maxs)
-+ kfree(sbi->s_mb_maxs);
-+ if (sbi->s_buddy_cache)
-+ iput(sbi->s_buddy_cache);
-+ if (sbi->s_blocks_reserved)
-+ printk("ext3-fs: %ld blocks being reserved at umount!\n",
-+ sbi->s_blocks_reserved);
-+ if (ext3_mb_stats) {
-+ printk("EXT3-fs: mballoc: %u blocks %u reqs (%u success)\n",
-+ atomic_read(&sbi->s_bal_allocated),
-+ atomic_read(&sbi->s_bal_reqs),
-+ atomic_read(&sbi->s_bal_success));
-+ printk("EXT3-fs: mballoc: %u extents scanned, %u goal hits, "
-+ "%u 2^N hits, %u breaks\n",
-+ atomic_read(&sbi->s_bal_ex_scanned),
-+ atomic_read(&sbi->s_bal_goals),
-+ atomic_read(&sbi->s_bal_2orders),
-+ atomic_read(&sbi->s_bal_breaks));
-+ printk("EXT3-fs: mballoc: %lu generated and it took %Lu\n",
-+ sbi->s_mb_buddies_generated++,
-+ sbi->s_mb_generation_time);
-+ }
-+
-+ ext3_mb_history_release(sb);
-+
-+ return 0;
-+}
-+
-+void ext3_mb_free_committed_blocks(struct super_block *sb)
-+{
-+ struct ext3_sb_info *sbi = EXT3_SB(sb);
-+ int err, i, count = 0, count2 = 0;
-+ struct ext3_free_metadata *md;
-+ struct ext3_buddy e3b;
-+
-+ if (list_empty(&sbi->s_committed_transaction))
-+ return;
-+
-+ /* there are committed blocks yet to be freed */
-+ do {
-+ /* get next array of blocks */
-+ md = NULL;
-+ spin_lock(&sbi->s_md_lock);
-+ if (!list_empty(&sbi->s_committed_transaction)) {
-+ md = list_entry(sbi->s_committed_transaction.next,
-+ struct ext3_free_metadata, list);
-+ list_del(&md->list);
-+ }
-+ spin_unlock(&sbi->s_md_lock);
-+
-+ if (md == NULL)
-+ break;
-+
-+ mb_debug("gonna free %u blocks in group %u (0x%p):",
-+ md->num, md->group, md);
-+
-+ err = ext3_mb_load_buddy(sb, md->group, &e3b);
-+ /* we expect to find existing buddy because it's pinned */
-+ BUG_ON(err != 0);
-+
-+ /* there are blocks to put in buddy to make them really free */
-+ count += md->num;
-+ count2++;
-+ ext3_lock_group(sb, md->group);
-+ for (i = 0; i < md->num; i++) {
-+ mb_debug(" %u", md->blocks[i]);
-+ mb_free_blocks(&e3b, md->blocks[i], 1);
-+ }
-+ mb_debug("\n");
-+ ext3_unlock_group(sb, md->group);
-+
-+ /* balance refcounts from ext3_mb_free_metadata() */
-+ page_cache_release(e3b.bd_buddy_page);
-+ page_cache_release(e3b.bd_bitmap_page);
-+
-+ kfree(md);
-+ ext3_mb_release_desc(&e3b);
-+
-+ } while (md);
-+ mb_debug("freed %u blocks in %u structures\n", count, count2);
-+}
-+
-+void ext3_mb_poll_new_transaction(struct super_block *sb, handle_t *handle)
-+{
-+ struct ext3_sb_info *sbi = EXT3_SB(sb);
-+
-+ if (sbi->s_last_transaction == handle->h_transaction->t_tid)
-+ return;
-+
-+ /* new transaction! time to close the last one and free blocks
-+ * belonging to the committed transaction. we know that only one
-+ * transaction can be active at a time, so the previous transaction
-+ * may still be in the process of being logged, while the one before
-+ * that is known to be fully logged already. this means we may now
-+ * free the blocks that were freed in all transactions before the
-+ * previous one. */
-+
-+ spin_lock(&sbi->s_md_lock);
-+ if (sbi->s_last_transaction != handle->h_transaction->t_tid) {
-+ mb_debug("new transaction %lu, old %lu\n",
-+ (unsigned long) handle->h_transaction->t_tid,
-+ (unsigned long) sbi->s_last_transaction);
-+ list_splice_init(&sbi->s_closed_transaction,
-+ &sbi->s_committed_transaction);
-+ list_splice_init(&sbi->s_active_transaction,
-+ &sbi->s_closed_transaction);
-+ sbi->s_last_transaction = handle->h_transaction->t_tid;
-+ }
-+ spin_unlock(&sbi->s_md_lock);
-+
-+ ext3_mb_free_committed_blocks(sb);
-+}
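
The three lists on the superblock age freed blocks in two steps: blocks freed under the active transaction become reusable only after two transaction switches, once the transaction that freed them is known to be fully logged. A toy sketch of that aging with plain counters instead of list_heads (the numbers are made up):

/* txn_aging.c - models active -> closed -> committed aging */
#include <stdio.h>

struct aging {
        unsigned active;        /* freed under the current transaction */
        unsigned closed;        /* freed under the previous transaction */
        unsigned committed;     /* safe to reuse */
};

static void on_new_transaction(struct aging *a)
{
        a->committed += a->closed;      /* previous-previous txn is logged */
        a->closed = a->active;          /* previous txn may still be logging */
        a->active = 0;
}

int main(void)
{
        struct aging a = { 0, 0, 0 };

        a.active = 10;                          /* 10 blocks freed in txn N */
        on_new_transaction(&a);                 /* txn N+1 starts */
        printf("reusable: %u\n", a.committed);  /* 0 */
        on_new_transaction(&a);                 /* txn N+2 starts */
        printf("reusable: %u\n", a.committed);  /* 10 */
        return 0;
}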
-+
-+int ext3_mb_free_metadata(handle_t *handle, struct ext3_buddy *e3b,
-+ int group, int block, int count)
-+{
-+ struct ext3_group_info *db = e3b->bd_info;
-+ struct super_block *sb = e3b->bd_sb;
-+ struct ext3_sb_info *sbi = EXT3_SB(sb);
-+ struct ext3_free_metadata *md;
-+ int i;
-+
-+ J_ASSERT(e3b->bd_bitmap_page != NULL);
-+ J_ASSERT(e3b->bd_buddy_page != NULL);
-+
-+ ext3_lock_group(sb, group);
-+ for (i = 0; i < count; i++) {
-+ md = db->bb_md_cur;
-+ if (md && db->bb_tid != handle->h_transaction->t_tid) {
-+ db->bb_md_cur = NULL;
-+ md = NULL;
-+ }
-+
-+ if (md == NULL) {
-+ ext3_unlock_group(sb, group);
-+ md = kmalloc(sizeof(*md), GFP_KERNEL);
-+ if (md == NULL)
-+ return -ENOMEM;
-+ md->num = 0;
-+ md->group = group;
-+
-+ ext3_lock_group(sb, group);
-+ if (db->bb_md_cur == NULL) {
-+ spin_lock(&sbi->s_md_lock);
-+ list_add(&md->list, &sbi->s_active_transaction);
-+ spin_unlock(&sbi->s_md_lock);
-+ /* protect buddy cache from being freed,
-+ * otherwise we'll refresh it from
-+ * on-disk bitmap and lose not-yet-available
-+ * blocks */
-+ page_cache_get(e3b->bd_buddy_page);
-+ page_cache_get(e3b->bd_bitmap_page);
-+ db->bb_md_cur = md;
-+ db->bb_tid = handle->h_transaction->t_tid;
-+ mb_debug("new md 0x%p for group %u\n",
-+ md, md->group);
-+ } else {
-+ kfree(md);
-+ md = db->bb_md_cur;
-+ }
-+ }
-+
-+ BUG_ON(md->num >= EXT3_BB_MAX_BLOCKS);
-+ md->blocks[md->num] = block + i;
-+ md->num++;
-+ if (md->num == EXT3_BB_MAX_BLOCKS) {
-+ /* no more space, put full container on a sb's list */
-+ db->bb_md_cur = NULL;
-+ }
-+ }
-+ ext3_unlock_group(sb, group);
-+ return 0;
-+}
-+
-+void ext3_mb_free_blocks(handle_t *handle, struct inode *inode,
-+ ext3_fsblk_t block, unsigned long count,
-+ int metadata, unsigned long *freed)
-+{
-+ struct buffer_head *bitmap_bh = NULL;
-+ struct ext3_group_desc *gdp;
-+ struct ext3_super_block *es;
-+ unsigned long bit, overflow;
-+ struct buffer_head *gd_bh;
-+ unsigned long block_group;
-+ struct ext3_sb_info *sbi;
-+ struct super_block *sb;
-+ struct ext3_buddy e3b;
-+ int err = 0, ret;
-+
-+ *freed = 0;
-+ sb = inode->i_sb;
-+ if (!sb) {
-+ printk ("ext3_free_blocks: nonexistent device");
-+ return;
-+ }
-+
-+ ext3_mb_poll_new_transaction(sb, handle);
-+
-+ sbi = EXT3_SB(sb);
-+ es = EXT3_SB(sb)->s_es;
-+ if (block < le32_to_cpu(es->s_first_data_block) ||
-+ block + count < block ||
-+ block + count > le32_to_cpu(es->s_blocks_count)) {
-+ ext3_error (sb, "ext3_free_blocks",
-+ "Freeing blocks not in datazone - "
-+ "block = %lu, count = %lu", block, count);
-+ goto error_return;
-+ }
-+
-+ ext3_debug("freeing block %lu\n", block);
-+
-+do_more:
-+ overflow = 0;
-+ block_group = (block - le32_to_cpu(es->s_first_data_block)) /
-+ EXT3_BLOCKS_PER_GROUP(sb);
-+ bit = (block - le32_to_cpu(es->s_first_data_block)) %
-+ EXT3_BLOCKS_PER_GROUP(sb);
-+ /*
-+ * Check to see if we are freeing blocks across a group
-+ * boundary.
-+ */
-+ if (bit + count > EXT3_BLOCKS_PER_GROUP(sb)) {
-+ overflow = bit + count - EXT3_BLOCKS_PER_GROUP(sb);
-+ count -= overflow;
-+ }
-+ brelse(bitmap_bh);
-+ bitmap_bh = read_block_bitmap(sb, block_group);
-+ if (!bitmap_bh)
-+ goto error_return;
-+ gdp = ext3_get_group_desc (sb, block_group, &gd_bh);
-+ if (!gdp)
-+ goto error_return;
-+
-+ if (in_range (le32_to_cpu(gdp->bg_block_bitmap), block, count) ||
-+ in_range (le32_to_cpu(gdp->bg_inode_bitmap), block, count) ||
-+ in_range (block, le32_to_cpu(gdp->bg_inode_table),
-+ EXT3_SB(sb)->s_itb_per_group) ||
-+ in_range (block + count - 1, le32_to_cpu(gdp->bg_inode_table),
-+ EXT3_SB(sb)->s_itb_per_group))
-+ ext3_error (sb, "ext3_free_blocks",
-+ "Freeing blocks in system zones - "
-+ "Block = %lu, count = %lu",
-+ block, count);
-+
-+ BUFFER_TRACE(bitmap_bh, "getting write access");
-+ err = ext3_journal_get_write_access(handle, bitmap_bh);
-+ if (err)
-+ goto error_return;
-+
-+ /*
-+ * We are about to modify some metadata. Call the journal APIs
-+ * to unshare ->b_data if a currently-committing transaction is
-+ * using it
-+ */
-+ BUFFER_TRACE(gd_bh, "get_write_access");
-+ err = ext3_journal_get_write_access(handle, gd_bh);
-+ if (err)
-+ goto error_return;
-+
-+ err = ext3_mb_load_buddy(sb, block_group, &e3b);
-+ if (err)
-+ goto error_return;
-+
-+#ifdef AGGRESSIVE_CHECK
-+ {
-+ int i;
-+ for (i = 0; i < count; i++)
-+ J_ASSERT(mb_test_bit(bit + i, bitmap_bh->b_data));
-+ }
-+#endif
-+ mb_clear_bits(bitmap_bh->b_data, bit, count);
-+
-+ /* We dirtied the bitmap block */
-+ BUFFER_TRACE(bitmap_bh, "dirtied bitmap block");
-+ err = ext3_journal_dirty_metadata(handle, bitmap_bh);
-+
-+ if (metadata) {
-+ /* blocks being freed are metadata. these blocks shouldn't
-+ * be used until this transaction is committed */
-+ ext3_mb_free_metadata(handle, &e3b, block_group, bit, count);
-+ } else {
-+ ext3_lock_group(sb, block_group);
-+ mb_free_blocks(&e3b, bit, count);
-+ ext3_unlock_group(sb, block_group);
-+ }
-+
-+ spin_lock(sb_bgl_lock(sbi, block_group));
-+ gdp->bg_free_blocks_count =
-+ cpu_to_le16(le16_to_cpu(gdp->bg_free_blocks_count) + count);
-+ spin_unlock(sb_bgl_lock(sbi, block_group));
-+ percpu_counter_mod(&sbi->s_freeblocks_counter, count);
-+
-+ ext3_mb_release_desc(&e3b);
-+
-+ *freed = count;
-+
-+ /* And the group descriptor block */
-+ BUFFER_TRACE(gd_bh, "dirtied group descriptor block");
-+ ret = ext3_journal_dirty_metadata(handle, gd_bh);
-+ if (!err) err = ret;
-+
-+ if (overflow && !err) {
-+ block += count;
-+ count = overflow;
-+ goto do_more;
-+ }
-+ sb->s_dirt = 1;
-+error_return:
-+ brelse(bitmap_bh);
-+ ext3_std_error(sb, err);
-+ return;
-+}
-+
-+int ext3_mb_reserve_blocks(struct super_block *sb, int blocks)
-+{
-+ struct ext3_sb_info *sbi = EXT3_SB(sb);
-+ int free, ret = -ENOSPC;
-+
-+ BUG_ON(blocks < 0);
-+ spin_lock(&sbi->s_reserve_lock);
-+ free = percpu_counter_read_positive(&sbi->s_freeblocks_counter);
-+ if (blocks <= free - sbi->s_blocks_reserved) {
-+ sbi->s_blocks_reserved += blocks;
-+ ret = 0;
-+ }
-+ spin_unlock(&sbi->s_reserve_lock);
-+ return ret;
-+}
-+
-+void ext3_mb_release_blocks(struct super_block *sb, int blocks)
-+{
-+ struct ext3_sb_info *sbi = EXT3_SB(sb);
-+
-+ BUG_ON(blocks < 0);
-+ spin_lock(&sbi->s_reserve_lock);
-+ sbi->s_blocks_reserved -= blocks;
-+ WARN_ON(sbi->s_blocks_reserved < 0);
-+ if (sbi->s_blocks_reserved < 0)
-+ sbi->s_blocks_reserved = 0;
-+ spin_unlock(&sbi->s_reserve_lock);
-+}
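
ext3_mb_reserve_blocks() and ext3_mb_release_blocks() implement a simple soft reservation: a request succeeds only if it fits into the free space not already promised to earlier callers. A userspace sketch of that check (the figures are illustrative):

/* reserve.c - models the s_blocks_reserved accounting */
#include <stdio.h>

static long reserved;

static int reserve(long free, long want)
{
        if (want <= free - reserved) {
                reserved += want;
                return 0;
        }
        return -1;                      /* -ENOSPC in the kernel code */
}

static void release(long blocks)
{
        reserved -= blocks;
        if (reserved < 0)
                reserved = 0;
}

int main(void)
{
        long free = 100;

        printf("%d\n", reserve(free, 60));      /* 0: 40 blocks left unreserved */
        printf("%d\n", reserve(free, 50));      /* -1: does not fit */
        release(60);
        printf("%d\n", reserve(free, 50));      /* 0 again */
        return 0;
}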
-+
-+ext3_fsblk_t ext3_new_block(handle_t *handle, struct inode *inode,
-+ ext3_fsblk_t goal, int *errp)
-+{
-+ ext3_fsblk_t ret;
-+ int len;
-+
-+ if (!test_opt(inode->i_sb, MBALLOC)) {
-+ ret = ext3_new_block_old(handle, inode, goal, errp);
-+ goto out;
-+ }
-+ len = 1;
-+ ret = ext3_mb_new_blocks(handle, inode, goal, &len, 0, errp);
-+out:
-+ return ret;
-+}
-+
-+void ext3_free_blocks(handle_t *handle, struct inode * inode,
-+ ext3_fsblk_t block, unsigned long count, int metadata)
-+{
-+ struct super_block *sb;
-+ unsigned long freed;
-+
-+ sb = inode->i_sb;
-+ if (!test_opt(sb, MBALLOC) || !EXT3_SB(sb)->s_group_info)
-+ ext3_free_blocks_sb(handle, sb, block, count, &freed);
-+ else
-+ ext3_mb_free_blocks(handle, inode, block, count, metadata,
-+ &freed);
-+ if (freed)
-+ DQUOT_FREE_BLOCK(inode, freed);
-+ return;
-+}
-+
-+#define EXT3_ROOT "ext3"
-+#define EXT3_MB_STATS_NAME "mb_stats"
-+#define EXT3_MB_MAX_TO_SCAN_NAME "mb_max_to_scan"
-+#define EXT3_MB_MIN_TO_SCAN_NAME "mb_min_to_scan"
-+#define EXT3_MB_ORDER2_REQ "mb_order2_req"
-+
-+static int ext3_mb_stats_read(char *page, char **start, off_t off,
-+ int count, int *eof, void *data)
-+{
-+ int len;
-+
-+ *eof = 1;
-+ if (off != 0)
-+ return 0;
-+
-+ len = sprintf(page, "%ld\n", ext3_mb_stats);
-+ *start = page;
-+ return len;
-+}
-+
-+static int ext3_mb_stats_write(struct file *file, const char *buffer,
-+ unsigned long count, void *data)
-+{
-+ char str[32];
-+
-+ if (count >= sizeof(str)) {
-+ printk(KERN_ERR "EXT3-fs: %s string too long, max %u bytes\n",
-+ EXT3_MB_STATS_NAME, (int)sizeof(str));
-+ return -EOVERFLOW;
-+ }
-+
-+ if (copy_from_user(str, buffer, count))
-+ return -EFAULT;
-+
-+ /* normalize to 0 or 1: any non-zero value enables stats */
-+ ext3_mb_stats = (simple_strtol(str, NULL, 0) != 0);
-+ return count;
-+}
-+
-+static int ext3_mb_max_to_scan_read(char *page, char **start, off_t off,
-+ int count, int *eof, void *data)
-+{
-+ int len;
-+
-+ *eof = 1;
-+ if (off != 0)
-+ return 0;
-+
-+ len = sprintf(page, "%ld\n", ext3_mb_max_to_scan);
-+ *start = page;
-+ return len;
-+}
-+
-+static int ext3_mb_max_to_scan_write(struct file *file, const char *buffer,
-+ unsigned long count, void *data)
-+{
-+ char str[32];
-+ long value;
-+
-+ if (count >= sizeof(str)) {
-+ printk(KERN_ERR "EXT3-fs: %s string too long, max %u bytes\n",
-+ EXT3_MB_MAX_TO_SCAN_NAME, (int)sizeof(str));
-+ return -EOVERFLOW;
-+ }
-+
-+ if (copy_from_user(str, buffer, count))
-+ return -EFAULT;
-+
-+ /* only positive values are accepted */
-+ value = simple_strtol(str, NULL, 0);
-+ if (value <= 0)
-+ return -ERANGE;
-+
-+ ext3_mb_max_to_scan = value;
-+
-+ return count;
-+}
-+
-+static int ext3_mb_min_to_scan_read(char *page, char **start, off_t off,
-+ int count, int *eof, void *data)
-+{
-+ int len;
-+
-+ *eof = 1;
-+ if (off != 0)
-+ return 0;
-+
-+ len = sprintf(page, "%ld\n", ext3_mb_min_to_scan);
-+ *start = page;
-+ return len;
-+}
-+
-+static int ext3_mb_min_to_scan_write(struct file *file, const char *buffer,
-+ unsigned long count, void *data)
-+{
-+ char str[32];
-+ long value;
-+
-+ if (count >= sizeof(str)) {
-+ printk(KERN_ERR "EXT3-fs: %s string too long, max %u bytes\n",
-+ EXT3_MB_MIN_TO_SCAN_NAME, (int)sizeof(str));
-+ return -EOVERFLOW;
-+ }
-+
-+ if (copy_from_user(str, buffer, count))
-+ return -EFAULT;
-+
-+ /* only positive values are accepted */
-+ value = simple_strtol(str, NULL, 0);
-+ if (value <= 0)
-+ return -ERANGE;
-+
-+ ext3_mb_min_to_scan = value;
-+
-+ return count;
-+}
-+
-+static int ext3_mb_order2_req_read(char *page, char **start, off_t off,
-+ int count, int *eof, void *data)
-+{
-+ int len;
-+
-+ *eof = 1;
-+ if (off != 0)
-+ return 0;
-+
-+ len = sprintf(page, "%ld\n", ext3_mb_order2_reqs);
-+ *start = page;
-+ return len;
-+}
-+
-+static int ext3_mb_order2_req_write(struct file *file, const char *buffer,
-+ unsigned long count, void *data)
-+{
-+ char str[32];
-+ long value;
-+
-+ if (count >= sizeof(str)) {
-+ printk(KERN_ERR "EXT3-fs: %s string too long, max %u bytes\n",
-+ EXT3_MB_ORDER2_REQ, (int)sizeof(str));
-+ return -EOVERFLOW;
-+ }
-+
-+ if (copy_from_user(str, buffer, count))
-+ return -EFAULT;
-+
-+ /* only positive values are accepted */
-+ value = simple_strtol(str, NULL, 0);
-+ if (value <= 0)
-+ return -ERANGE;
-+
-+ ext3_mb_order2_reqs = value;
-+
-+ return count;
-+}
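
The four read/write handlers above back plain files under /proc/fs/ext3 (EXT3_ROOT plus the *_NAME defines registered in init_ext3_proc() below). A small userspace program that reads and then updates one of them; it is only meaningful on a kernel carrying this patch, and the write needs root since the entries are created with S_IWUSR:

/* mb_tune.c - query and set mb_max_to_scan */
#include <stdio.h>

int main(void)
{
        const char *path = "/proc/fs/ext3/mb_max_to_scan";
        char buf[32];
        FILE *f = fopen(path, "r");

        if (f == NULL) {
                perror(path);           /* kernel without this patch */
                return 1;
        }
        if (fgets(buf, sizeof(buf), f))
                printf("current: %s", buf);
        fclose(f);

        f = fopen(path, "w");           /* handler rejects values <= 0 */
        if (f != NULL) {
                fprintf(f, "1000\n");
                fclose(f);
        }
        return 0;
}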
-+
-+int __init init_ext3_proc(void)
-+{
-+ struct proc_dir_entry *proc_ext3_mb_stats;
-+ struct proc_dir_entry *proc_ext3_mb_max_to_scan;
-+ struct proc_dir_entry *proc_ext3_mb_min_to_scan;
-+ struct proc_dir_entry *proc_ext3_mb_order2_req;
-+
-+ proc_root_ext3 = proc_mkdir(EXT3_ROOT, proc_root_fs);
-+ if (proc_root_ext3 == NULL) {
-+ printk(KERN_ERR "EXT3-fs: Unable to create %s\n", EXT3_ROOT);
-+ return -EIO;
-+ }
-+
-+ /* Initialize EXT3_MB_STATS_NAME */
-+ proc_ext3_mb_stats = create_proc_entry(EXT3_MB_STATS_NAME,
-+ S_IFREG | S_IRUGO | S_IWUSR, proc_root_ext3);
-+ if (proc_ext3_mb_stats == NULL) {
-+ printk(KERN_ERR "EXT3-fs: Unable to create %s\n",
-+ EXT3_MB_STATS_NAME);
-+ remove_proc_entry(EXT3_ROOT, proc_root_fs);
-+ return -EIO;
-+ }
-+
-+ proc_ext3_mb_stats->data = NULL;
-+ proc_ext3_mb_stats->read_proc = ext3_mb_stats_read;
-+ proc_ext3_mb_stats->write_proc = ext3_mb_stats_write;
-+
-+ /* Initialize EXT3_MB_MAX_TO_SCAN_NAME */
-+ proc_ext3_mb_max_to_scan = create_proc_entry(
-+ EXT3_MB_MAX_TO_SCAN_NAME,
-+ S_IFREG | S_IRUGO | S_IWUSR, proc_root_ext3);
-+ if (proc_ext3_mb_max_to_scan == NULL) {
-+ printk(KERN_ERR "EXT3-fs: Unable to create %s\n",
-+ EXT3_MB_MAX_TO_SCAN_NAME);
-+ remove_proc_entry(EXT3_MB_STATS_NAME, proc_root_ext3);
-+ remove_proc_entry(EXT3_ROOT, proc_root_fs);
-+ return -EIO;
-+ }
-+
-+ proc_ext3_mb_max_to_scan->data = NULL;
-+ proc_ext3_mb_max_to_scan->read_proc = ext3_mb_max_to_scan_read;
-+ proc_ext3_mb_max_to_scan->write_proc = ext3_mb_max_to_scan_write;
-+
-+ /* Initialize EXT3_MB_MIN_TO_SCAN_NAME */
-+ proc_ext3_mb_min_to_scan = create_proc_entry(
-+ EXT3_MB_MIN_TO_SCAN_NAME,
-+ S_IFREG | S_IRUGO | S_IWUSR, proc_root_ext3);
-+ if (proc_ext3_mb_min_to_scan == NULL) {
-+ printk(KERN_ERR "EXT3-fs: Unable to create %s\n",
-+ EXT3_MB_MIN_TO_SCAN_NAME);
-+ remove_proc_entry(EXT3_MB_MAX_TO_SCAN_NAME, proc_root_ext3);
-+ remove_proc_entry(EXT3_MB_STATS_NAME, proc_root_ext3);
-+ remove_proc_entry(EXT3_ROOT, proc_root_fs);
-+ return -EIO;
-+ }
-+
-+ proc_ext3_mb_min_to_scan->data = NULL;
-+ proc_ext3_mb_min_to_scan->read_proc = ext3_mb_min_to_scan_read;
-+ proc_ext3_mb_min_to_scan->write_proc = ext3_mb_min_to_scan_write;
-+
-+ /* Initialize EXT3_MB_ORDER2_REQ */
-+ proc_ext3_mb_order2_req = create_proc_entry(
-+ EXT3_MB_ORDER2_REQ,
-+ S_IFREG | S_IRUGO | S_IWUSR, proc_root_ext3);
-+ if (proc_ext3_mb_order2_req == NULL) {
-+ printk(KERN_ERR "EXT3-fs: Unable to create %s\n",
-+ EXT3_MB_ORDER2_REQ);
-+ remove_proc_entry(EXT3_MB_MIN_TO_SCAN_NAME, proc_root_ext3);
-+ remove_proc_entry(EXT3_MB_MAX_TO_SCAN_NAME, proc_root_ext3);
-+ remove_proc_entry(EXT3_MB_STATS_NAME, proc_root_ext3);
-+ remove_proc_entry(EXT3_ROOT, proc_root_fs);
-+ return -EIO;
-+ }
-+
-+ proc_ext3_mb_order2_req->data = NULL;
-+ proc_ext3_mb_order2_req->read_proc = ext3_mb_order2_req_read;
-+ proc_ext3_mb_order2_req->write_proc = ext3_mb_order2_req_write;
-+
-+ return 0;
-+}
-+
-+void exit_ext3_proc(void)
-+{
-+ remove_proc_entry(EXT3_MB_STATS_NAME, proc_root_ext3);
-+ remove_proc_entry(EXT3_MB_MAX_TO_SCAN_NAME, proc_root_ext3);
-+ remove_proc_entry(EXT3_MB_MIN_TO_SCAN_NAME, proc_root_ext3);
-+ remove_proc_entry(EXT3_MB_ORDER2_REQ, proc_root_ext3);
-+ remove_proc_entry(EXT3_ROOT, proc_root_fs);
-+}
-Index: linux-stage/fs/ext3/Makefile
-===================================================================
---- linux-stage.orig/fs/ext3/Makefile 2006-07-16 02:29:43.000000000 +0800
-+++ linux-stage/fs/ext3/Makefile 2006-07-16 02:29:49.000000000 +0800
-@@ -6,7 +6,7 @@ obj-$(CONFIG_EXT3_FS) += ext3.o
-
- ext3-y := balloc.o bitmap.o dir.o file.o fsync.o ialloc.o inode.o iopen.o \
- ioctl.o namei.o super.o symlink.o hash.o resize.o \
-- extents.o
-+ extents.o mballoc.o
-
- ext3-$(CONFIG_EXT3_FS_XATTR) += xattr.o xattr_user.o xattr_trusted.o
- ext3-$(CONFIG_EXT3_FS_POSIX_ACL) += acl.o
+++ /dev/null
-Index: linux-2.6.9-full/include/linux/ext3_fs.h
-===================================================================
---- linux-2.6.9-full.orig/include/linux/ext3_fs.h 2006-06-01 14:58:46.000000000 +0400
-+++ linux-2.6.9-full/include/linux/ext3_fs.h 2006-10-24 12:54:31.000000000 +0400
-@@ -57,6 +57,14 @@ struct statfs;
- #define ext3_debug(f, a...) do {} while (0)
- #endif
-
-+#define EXT3_MULTIBLOCK_ALLOCATOR 1
-+
-+#define EXT3_MB_HINT_MERGE 1
-+#define EXT3_MB_HINT_RESERVED 2
-+#define EXT3_MB_HINT_METADATA 4
-+#define EXT3_MB_HINT_FIRST 8
-+#define EXT3_MB_HINT_BEST 16
-+
- /*
- * Special inodes numbers
- */
-@@ -365,6 +373,7 @@ struct ext3_inode {
- #define EXT3_MOUNT_IOPEN_NOPRIV 0x100000/* Make iopen world-readable */
- #define EXT3_MOUNT_EXTENTS 0x200000/* Extents support */
- #define EXT3_MOUNT_EXTDEBUG 0x400000/* Extents debug */
-+#define EXT3_MOUNT_MBALLOC 0x800000/* Buddy allocation support */
-
- /* Compatibility, for having both ext2_fs.h and ext3_fs.h included at once */
- #ifndef clear_opt
-@@ -387,6 +396,14 @@ struct ext3_inode {
- #define ext3_find_first_zero_bit ext2_find_first_zero_bit
- #define ext3_find_next_zero_bit ext2_find_next_zero_bit
-
-+#ifndef ext2_find_next_le_bit
-+#ifdef __LITTLE_ENDIAN
-+#define ext2_find_next_le_bit(addr, size, off) find_next_bit((addr), (size), (off))
-+#else
-+#error "mballoc needs a patch for big-endian systems - CFS bug 10634"
-+#endif /* __LITTLE_ENDIAN */
-+#endif /* !ext2_find_next_le_bit */
-+
- /*
- * Maximal mount counts between two filesystem checks
- */
-@@ -726,7 +743,8 @@ extern int ext3_bg_has_super(struct supe
- extern unsigned long ext3_bg_num_gdb(struct super_block *sb, int group);
- extern int ext3_new_block (handle_t *, struct inode *, unsigned long, int *);
- extern void ext3_free_blocks (handle_t *, struct inode *, unsigned long,
-- unsigned long);
-+ unsigned long, int);
-+extern int ext3_new_block_old(handle_t *, struct inode *, unsigned long, int *);
- extern void ext3_free_blocks_sb (handle_t *, struct super_block *,
- unsigned long, unsigned long, int *);
- extern unsigned long ext3_count_free_blocks (struct super_block *);
-@@ -857,6 +874,17 @@ extern void ext3_extents_initialize_bloc
- extern int ext3_ext_ioctl(struct inode *inode, struct file *filp,
- unsigned int cmd, unsigned long arg);
-
-+/* mballoc.c */
-+extern long ext3_mb_stats;
-+extern long ext3_mb_max_to_scan;
-+extern int ext3_mb_init(struct super_block *, int);
-+extern int ext3_mb_release(struct super_block *);
-+extern int ext3_mb_new_blocks(handle_t *, struct inode *, unsigned long, int *, int, int *);
-+extern int ext3_mb_reserve_blocks(struct super_block *, int);
-+extern void ext3_mb_release_blocks(struct super_block *, int);
-+int __init init_ext3_proc(void);
-+void exit_ext3_proc(void);
-+
- #endif /* __KERNEL__ */
-
- /* EXT3_IOC_CREATE_INUM at bottom of file (visible to kernel and user). */
-Index: linux-2.6.9-full/include/linux/ext3_fs_sb.h
-===================================================================
---- linux-2.6.9-full.orig/include/linux/ext3_fs_sb.h 2006-05-18 23:57:04.000000000 +0400
-+++ linux-2.6.9-full/include/linux/ext3_fs_sb.h 2006-10-24 12:54:31.000000000 +0400
-@@ -23,9 +23,15 @@
- #define EXT_INCLUDE
- #include <linux/blockgroup_lock.h>
- #include <linux/percpu_counter.h>
-+#include <linux/list.h>
- #endif
- #endif
- #include <linux/rbtree.h>
-+#include <linux/proc_fs.h>
-+
-+struct ext3_buddy_group_blocks;
-+struct ext3_mb_history;
-+#define EXT3_BB_MAX_BLOCKS
-
- /*
- * third extended-fs super-block data in memory
-@@ -81,6 +87,43 @@ struct ext3_sb_info {
- char *s_qf_names[MAXQUOTAS]; /* Names of quota files with journalled quota */
- int s_jquota_fmt; /* Format of quota to use */
- #endif
-+
-+ /* for buddy allocator */
-+ struct ext3_group_info ***s_group_info;
-+ struct inode *s_buddy_cache;
-+ long s_blocks_reserved;
-+ spinlock_t s_reserve_lock;
-+ struct list_head s_active_transaction;
-+ struct list_head s_closed_transaction;
-+ struct list_head s_committed_transaction;
-+ spinlock_t s_md_lock;
-+ tid_t s_last_transaction;
-+ int s_mb_factor;
-+ unsigned short *s_mb_offsets, *s_mb_maxs;
-+ unsigned long s_stripe;
-+
-+ /* history to debug policy */
-+ struct ext3_mb_history *s_mb_history;
-+ int s_mb_history_cur;
-+ int s_mb_history_max;
-+ struct proc_dir_entry *s_mb_proc;
-+ spinlock_t s_mb_history_lock;
-+
-+ /* stats for buddy allocator */
-+ atomic_t s_bal_reqs; /* number of reqs with len > 1 */
-+ atomic_t s_bal_success; /* we found long enough chunks */
-+ atomic_t s_bal_allocated; /* in blocks */
-+ atomic_t s_bal_ex_scanned; /* total extents scanned */
-+ atomic_t s_bal_goals; /* goal hits */
-+ atomic_t s_bal_breaks; /* too long searches */
-+ atomic_t s_bal_2orders; /* 2^order hits */
-+ spinlock_t s_bal_lock;
-+ unsigned long s_mb_buddies_generated;
-+ unsigned long long s_mb_generation_time;
- };
-
-+#define EXT3_GROUP_INFO(sb, group) \
-+ EXT3_SB(sb)->s_group_info[(group) >> EXT3_DESC_PER_BLOCK_BITS(sb)] \
-+ [(group) & (EXT3_DESC_PER_BLOCK(sb) - 1)]
-+
- #endif /* _LINUX_EXT3_FS_SB */
-Index: linux-2.6.9-full/fs/ext3/super.c
-===================================================================
---- linux-2.6.9-full.orig/fs/ext3/super.c 2006-06-01 14:58:46.000000000 +0400
-+++ linux-2.6.9-full/fs/ext3/super.c 2006-10-24 12:54:31.000000000 +0400
-@@ -394,6 +394,7 @@ void ext3_put_super (struct super_block
- struct ext3_super_block *es = sbi->s_es;
- int i;
-
-+ ext3_mb_release(sb);
- ext3_ext_release(sb);
- ext3_xattr_put_super(sb);
- journal_destroy(sbi->s_journal);
-@@ -597,6 +598,7 @@ enum {
- Opt_ignore, Opt_barrier, Opt_err, Opt_resize,
- Opt_iopen, Opt_noiopen, Opt_iopen_nopriv,
- Opt_extents, Opt_noextents, Opt_extdebug,
-+ Opt_mballoc, Opt_nomballoc, Opt_stripe,
- };
-
- static match_table_t tokens = {
-@@ -649,6 +651,9 @@ static match_table_t tokens = {
- {Opt_extents, "extents"},
- {Opt_noextents, "noextents"},
- {Opt_extdebug, "extdebug"},
-+ {Opt_mballoc, "mballoc"},
-+ {Opt_nomballoc, "nomballoc"},
-+ {Opt_stripe, "stripe=%u"},
- {Opt_barrier, "barrier=%u"},
- {Opt_err, NULL},
- {Opt_resize, "resize"},
-@@ -962,6 +967,19 @@ clear_qf_name:
- case Opt_extdebug:
- set_opt (sbi->s_mount_opt, EXTDEBUG);
- break;
-+ case Opt_mballoc:
-+ set_opt(sbi->s_mount_opt, MBALLOC);
-+ break;
-+ case Opt_nomballoc:
-+ clear_opt(sbi->s_mount_opt, MBALLOC);
-+ break;
-+ case Opt_stripe:
-+ if (match_int(&args[0], &option))
-+ return 0;
-+ if (option < 0)
-+ return 0;
-+ sbi->s_stripe = option;
-+ break;
- default:
- printk (KERN_ERR
- "EXT3-fs: Unrecognized mount option \"%s\" "
-@@ -1651,6 +1669,7 @@ static int ext3_fill_super (struct super
- ext3_count_dirs(sb));
-
- ext3_ext_init(sb);
-+ ext3_mb_init(sb, needs_recovery);
-
- return 0;
-
-@@ -2433,7 +2452,13 @@ static struct file_system_type ext3_fs_t
-
- static int __init init_ext3_fs(void)
- {
-- int err = init_ext3_xattr();
-+ int err;
-+
-+ err = init_ext3_proc();
-+ if (err)
-+ return err;
-+
-+ err = init_ext3_xattr();
- if (err)
- return err;
- err = init_inodecache();
-@@ -2455,6 +2480,7 @@ static void __exit exit_ext3_fs(void)
- unregister_filesystem(&ext3_fs_type);
- destroy_inodecache();
- exit_ext3_xattr();
-+ exit_ext3_proc();
- }
-
- int ext3_prep_san_write(struct inode *inode, long *blocks,
-Index: linux-2.6.9-full/fs/ext3/extents.c
-===================================================================
---- linux-2.6.9-full.orig/fs/ext3/extents.c 2006-06-01 14:58:46.000000000 +0400
-+++ linux-2.6.9-full/fs/ext3/extents.c 2006-10-24 12:54:31.000000000 +0400
-@@ -777,7 +777,7 @@ cleanup:
- for (i = 0; i < depth; i++) {
- if (!ablocks[i])
- continue;
-- ext3_free_blocks(handle, tree->inode, ablocks[i], 1);
-+ ext3_free_blocks(handle, tree->inode, ablocks[i], 1, 1);
- }
- }
- kfree(ablocks);
-@@ -1434,7 +1434,7 @@ int ext3_ext_rm_idx(handle_t *handle, st
- path->p_idx->ei_leaf);
- bh = sb_find_get_block(tree->inode->i_sb, path->p_idx->ei_leaf);
- ext3_forget(handle, 1, tree->inode, bh, path->p_idx->ei_leaf);
-- ext3_free_blocks(handle, tree->inode, path->p_idx->ei_leaf, 1);
-+ ext3_free_blocks(handle, tree->inode, path->p_idx->ei_leaf, 1, 1);
- return err;
- }
-
-@@ -1919,10 +1919,12 @@ ext3_remove_blocks(struct ext3_extents_t
- int needed = ext3_remove_blocks_credits(tree, ex, from, to);
- handle_t *handle = ext3_journal_start(tree->inode, needed);
- struct buffer_head *bh;
-- int i;
-+ int i, metadata = 0;
-
- if (IS_ERR(handle))
- return PTR_ERR(handle);
-+ if (S_ISDIR(tree->inode->i_mode) || S_ISLNK(tree->inode->i_mode))
-+ metadata = 1;
- if (from >= ex->ee_block && to == ex->ee_block + ex->ee_len - 1) {
- /* tail removal */
- unsigned long num, start;
-@@ -1934,7 +1936,7 @@ ext3_remove_blocks(struct ext3_extents_t
- bh = sb_find_get_block(tree->inode->i_sb, start + i);
- ext3_forget(handle, 0, tree->inode, bh, start + i);
- }
-- ext3_free_blocks(handle, tree->inode, start, num);
-+ ext3_free_blocks(handle, tree->inode, start, num, metadata);
- } else if (from == ex->ee_block && to <= ex->ee_block + ex->ee_len - 1) {
- printk("strange request: removal %lu-%lu from %u:%u\n",
- from, to, ex->ee_block, ex->ee_len);
-Index: linux-2.6.9-full/fs/ext3/inode.c
-===================================================================
---- linux-2.6.9-full.orig/fs/ext3/inode.c 2006-06-01 14:58:46.000000000 +0400
-+++ linux-2.6.9-full/fs/ext3/inode.c 2006-10-24 12:54:31.000000000 +0400
-@@ -572,7 +572,7 @@ static int ext3_alloc_branch(handle_t *h
- ext3_journal_forget(handle, branch[i].bh);
- }
- for (i = 0; i < keys; i++)
-- ext3_free_blocks(handle, inode, le32_to_cpu(branch[i].key), 1);
-+ ext3_free_blocks(handle, inode, le32_to_cpu(branch[i].key), 1, 1);
- return err;
- }
-
-@@ -673,7 +673,7 @@ err_out:
- if (err == -EAGAIN)
- for (i = 0; i < num; i++)
- ext3_free_blocks(handle, inode,
-- le32_to_cpu(where[i].key), 1);
-+ le32_to_cpu(where[i].key), 1, 1);
- return err;
- }
-
-@@ -1831,7 +1831,7 @@ ext3_clear_blocks(handle_t *handle, stru
- }
- }
-
-- ext3_free_blocks(handle, inode, block_to_free, count);
-+ ext3_free_blocks(handle, inode, block_to_free, count, 1);
- }
-
- /**
-@@ -2004,7 +2004,7 @@ static void ext3_free_branches(handle_t
- ext3_journal_test_restart(handle, inode);
- }
-
-- ext3_free_blocks(handle, inode, nr, 1);
-+ ext3_free_blocks(handle, inode, nr, 1, 1);
-
- if (parent_bh) {
- /*
-Index: linux-2.6.9-full/fs/ext3/balloc.c
-===================================================================
---- linux-2.6.9-full.orig/fs/ext3/balloc.c 2006-03-10 18:20:03.000000000 +0300
-+++ linux-2.6.9-full/fs/ext3/balloc.c 2006-10-24 12:54:31.000000000 +0400
-@@ -79,7 +79,7 @@ struct ext3_group_desc * ext3_get_group_
- *
- * Return buffer_head on success or NULL in case of failure.
- */
--static struct buffer_head *
-+struct buffer_head *
- read_block_bitmap(struct super_block *sb, unsigned int block_group)
- {
- struct ext3_group_desc * desc;
-@@ -451,24 +451,6 @@ error_return:
- return;
- }
-
--/* Free given blocks, update quota and i_blocks field */
--void ext3_free_blocks(handle_t *handle, struct inode *inode,
-- unsigned long block, unsigned long count)
--{
-- struct super_block * sb;
-- int dquot_freed_blocks;
--
-- sb = inode->i_sb;
-- if (!sb) {
-- printk ("ext3_free_blocks: nonexistent device");
-- return;
-- }
-- ext3_free_blocks_sb(handle, sb, block, count, &dquot_freed_blocks);
-- if (dquot_freed_blocks)
-- DQUOT_FREE_BLOCK(inode, dquot_freed_blocks);
-- return;
--}
--
- /*
- * For ext3 allocations, we must not reuse any blocks which are
- * allocated in the bitmap buffer's "last committed data" copy. This
-@@ -1131,7 +1113,7 @@ int ext3_should_retry_alloc(struct super
- * bitmap, and then for any free bit if that fails.
- * This function also updates quota and i_blocks field.
- */
--int ext3_new_block(handle_t *handle, struct inode *inode,
-+int ext3_new_block_old(handle_t *handle, struct inode *inode,
- unsigned long goal, int *errp)
- {
- struct buffer_head *bitmap_bh = NULL;
-Index: linux-2.6.9-full/fs/ext3/xattr.c
-===================================================================
---- linux-2.6.9-full.orig/fs/ext3/xattr.c 2006-05-18 23:57:04.000000000 +0400
-+++ linux-2.6.9-full/fs/ext3/xattr.c 2006-10-24 12:54:31.000000000 +0400
-@@ -1281,7 +1281,7 @@ ext3_xattr_set_handle2(handle_t *handle,
- new_bh = sb_getblk(sb, block);
- if (!new_bh) {
- getblk_failed:
-- ext3_free_blocks(handle, inode, block, 1);
-+ ext3_free_blocks(handle, inode, block, 1, 1);
- error = -EIO;
- goto cleanup;
- }
-@@ -1328,7 +1328,7 @@ getblk_failed:
- if (ce)
- mb_cache_entry_free(ce);
- ea_bdebug(old_bh, "freeing");
-- ext3_free_blocks(handle, inode, old_bh->b_blocknr, 1);
-+ ext3_free_blocks(handle, inode, old_bh->b_blocknr, 1, 1);
-
- /* ext3_forget() calls bforget() for us, but we
- let our caller release old_bh, so we need to
-@@ -1427,7 +1427,7 @@ ext3_xattr_delete_inode(handle_t *handle
- if (HDR(bh)->h_refcount == cpu_to_le32(1)) {
- if (ce)
- mb_cache_entry_free(ce);
-- ext3_free_blocks(handle, inode, EXT3_I(inode)->i_file_acl, 1);
-+ ext3_free_blocks(handle, inode, EXT3_I(inode)->i_file_acl, 1, 1);
- get_bh(bh);
- ext3_forget(handle, 1, inode, bh, EXT3_I(inode)->i_file_acl);
- } else {
-Index: linux-2.6.9-full/fs/ext3/mballoc.c
-===================================================================
---- linux-2.6.9-full.orig/fs/ext3/mballoc.c 2006-10-23 18:07:54.821533176 +0400
-+++ linux-2.6.9-full/fs/ext3/mballoc.c 2006-10-24 13:00:56.000000000 +0400
-@@ -0,0 +1,2725 @@
-+/*
-+ * Copyright (c) 2003-2005, Cluster File Systems, Inc, info@clusterfs.com
-+ * Written by Alex Tomas <alex@clusterfs.com>
-+ *
-+ * This program is free software; you can redistribute it and/or modify
-+ * it under the terms of the GNU General Public License version 2 as
-+ * published by the Free Software Foundation.
-+ *
-+ * This program is distributed in the hope that it will be useful,
-+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
-+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-+ * GNU General Public License for more details.
-+ *
-+ * You should have received a copy of the GNU General Public License
-+ * along with this program; if not, write to the Free Software
-+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
-+ */
-+
-+
-+/*
-+ * mballoc.c contains the multiblocks allocation routines
-+ */
-+
-+#include <linux/config.h>
-+#include <linux/time.h>
-+#include <linux/fs.h>
-+#include <linux/namei.h>
-+#include <linux/jbd.h>
-+#include <linux/ext3_fs.h>
-+#include <linux/ext3_jbd.h>
-+#include <linux/quotaops.h>
-+#include <linux/buffer_head.h>
-+#include <linux/module.h>
-+#include <linux/swap.h>
-+#include <linux/proc_fs.h>
-+#include <linux/pagemap.h>
-+#include <linux/seq_file.h>
-+
-+/*
-+ * TODO:
-+ * - bitmap read-ahead (proposed by Oleg Drokin aka green)
-+ * - track min/max extents in each group for better group selection
-+ * - mb_mark_used() may allocate chunk right after splitting buddy
-+ * - special flag to advise the allocator to look for requested + N blocks
-+ * this may improve interaction between extents and mballoc
-+ * - tree of groups sorted by number of free blocks
-+ * - percpu reservation code (hotpath)
-+ * - error handling
-+ */
-+
-+/*
-+ * with AGGRESSIVE_CHECK the allocator runs consistency checks over its
-+ * structures. these checks slow things down a lot
-+ */
-+#define AGGRESSIVE_CHECK__
-+
-+/*
-+ * define MB_DEBUG (instead of MB_DEBUG__) to enable mb_debug() output
-+ */
-+#define MB_DEBUG__
-+#ifdef MB_DEBUG
-+#define mb_debug(fmt,a...) printk(fmt, ##a)
-+#else
-+#define mb_debug(fmt,a...)
-+#endif
-+
-+/*
-+ * with EXT3_MB_HISTORY mballoc stores last N allocations in memory
-+ * and you can monitor it in /proc/fs/ext3/<dev>/mb_history
-+ */
-+#define EXT3_MB_HISTORY
-+
-+/*
-+ * How long mballoc can look for a best extent (in found extents)
-+ */
-+long ext3_mb_max_to_scan = 500;
-+
-+/*
-+ * How long mballoc must look for a best extent
-+ */
-+long ext3_mb_min_to_scan = 30;
-+
-+/*
-+ * with 'ext3_mb_stats' the allocator will collect stats that will be
-+ * shown at umount. collecting them has a cost, though!
-+ */
-+
-+long ext3_mb_stats = 1;
-+
-+/*
-+ * controls for which request sizes the 2^N search over buddies is used
-+ */
-+long ext3_mb_order2_reqs = 8;
-+
-+#ifdef EXT3_BB_MAX_BLOCKS
-+#undef EXT3_BB_MAX_BLOCKS
-+#endif
-+#define EXT3_BB_MAX_BLOCKS 30
-+
-+struct ext3_free_metadata {
-+ unsigned short group;
-+ unsigned short num;
-+ unsigned short blocks[EXT3_BB_MAX_BLOCKS];
-+ struct list_head list;
-+};
-+
-+struct ext3_group_info {
-+ unsigned long bb_state;
-+ unsigned long bb_tid;
-+ struct ext3_free_metadata *bb_md_cur;
-+ unsigned short bb_first_free;
-+ unsigned short bb_free;
-+ unsigned short bb_fragments;
-+ unsigned short bb_counters[];
-+};
-+
-+
-+#define EXT3_GROUP_INFO_NEED_INIT_BIT 0
-+#define EXT3_GROUP_INFO_LOCKED_BIT 1
-+
-+#define EXT3_MB_GRP_NEED_INIT(grp) \
-+ (test_bit(EXT3_GROUP_INFO_NEED_INIT_BIT, &(grp)->bb_state))
-+
-+struct ext3_free_extent {
-+ __u16 fe_start;
-+ __u16 fe_len;
-+ __u16 fe_group;
-+};
-+
-+struct ext3_allocation_context {
-+ struct super_block *ac_sb;
-+
-+ /* search goals */
-+ struct ext3_free_extent ac_g_ex;
-+
-+ /* the best found extent */
-+ struct ext3_free_extent ac_b_ex;
-+
-+ /* number of iterations done. we have to track to limit searching */
-+ unsigned long ac_ex_scanned;
-+ __u16 ac_groups_scanned;
-+ __u16 ac_found;
-+ __u16 ac_tail;
-+ __u16 ac_buddy;
-+ __u8 ac_status;
-+ __u8 ac_flags; /* allocation hints */
-+ __u8 ac_criteria;
-+ __u8 ac_repeats;
-+ __u8 ac_2order; /* if request is to allocate 2^N blocks and
-+ * N > 0, the field stores N, otherwise 0 */
-+
-+ struct page *ac_buddy_page;
-+ struct page *ac_bitmap_page;
-+};
-+
-+#define AC_STATUS_CONTINUE 1
-+#define AC_STATUS_FOUND 2
-+#define AC_STATUS_BREAK 3
-+
-+struct ext3_mb_history {
-+ struct ext3_free_extent goal; /* goal allocation */
-+ struct ext3_free_extent result; /* result allocation */
-+ unsigned pid;
-+ unsigned ino;
-+ __u16 found; /* how many extents have been found */
-+ __u16 groups; /* how many groups have been scanned */
-+ __u16 tail; /* what tail broke some buddy */
-+ __u16 buddy; /* buddy the tail ^^^ broke */
-+ __u8 cr; /* which phase the result extent was found at */
-+ __u8 merged;
-+};
-+
-+struct ext3_buddy {
-+ struct page *bd_buddy_page;
-+ void *bd_buddy;
-+ struct page *bd_bitmap_page;
-+ void *bd_bitmap;
-+ struct ext3_group_info *bd_info;
-+ struct super_block *bd_sb;
-+ __u16 bd_blkbits;
-+ __u16 bd_group;
-+};
-+#define EXT3_MB_BITMAP(e3b) ((e3b)->bd_bitmap)
-+#define EXT3_MB_BUDDY(e3b) ((e3b)->bd_buddy)
-+
-+#ifndef EXT3_MB_HISTORY
-+#define ext3_mb_store_history(sb,ino,ac)
-+#else
-+static void ext3_mb_store_history(struct super_block *, unsigned ino,
-+ struct ext3_allocation_context *ac);
-+#endif
-+
-+#define in_range(b, first, len) ((b) >= (first) && (b) <= (first) + (len) - 1)
-+
-+static struct proc_dir_entry *proc_root_ext3;
-+
-+struct buffer_head * read_block_bitmap(struct super_block *, unsigned int);
-+void ext3_mb_poll_new_transaction(struct super_block *, handle_t *);
-+void ext3_mb_free_committed_blocks(struct super_block *);
-+
-+#if BITS_PER_LONG == 64
-+#define mb_correct_addr_and_bit(bit,addr) \
-+{ \
-+ bit += ((unsigned long) addr & 7UL) << 3; \
-+ addr = (void *) ((unsigned long) addr & ~7UL); \
-+}
-+#elif BITS_PER_LONG == 32
-+#define mb_correct_addr_and_bit(bit,addr) \
-+{ \
-+ bit += ((unsigned long) addr & 3UL) << 3; \
-+ addr = (void *) ((unsigned long) addr & ~3UL); \
-+}
-+#else
-+#error "how many bits you are?!"
-+#endif
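
mb_correct_addr_and_bit() rebases a (bit, address) pair so that the address handed to the ext2_*_bit helpers is always word aligned; the bytes stripped by rounding the address down are added back as extra bits. A worked example for the 32-bit case, using a made-up address:

/* addr_bit.c - the 32-bit variant of mb_correct_addr_and_bit() */
#include <stdio.h>

int main(void)
{
        unsigned long addr = 0x1003;    /* 3 bytes past a word boundary */
        int bit = 5;

        bit += (addr & 3UL) << 3;       /* 3 bytes * 8 = 24 -> bit 29 */
        addr &= ~3UL;                   /* back to 0x1000 */
        printf("addr 0x%lx, bit %d\n", addr, bit);
        return 0;
}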
-+
-+static inline int mb_test_bit(int bit, void *addr)
-+{
-+ mb_correct_addr_and_bit(bit,addr);
-+ return ext2_test_bit(bit, addr);
-+}
-+
-+static inline void mb_set_bit(int bit, void *addr)
-+{
-+ mb_correct_addr_and_bit(bit,addr);
-+ ext2_set_bit(bit, addr);
-+}
-+
-+static inline void mb_set_bit_atomic(int bit, void *addr)
-+{
-+ mb_correct_addr_and_bit(bit,addr);
-+ ext2_set_bit_atomic(NULL, bit, addr);
-+}
-+
-+static inline void mb_clear_bit(int bit, void *addr)
-+{
-+ mb_correct_addr_and_bit(bit,addr);
-+ ext2_clear_bit(bit, addr);
-+}
-+
-+static inline void mb_clear_bit_atomic(int bit, void *addr)
-+{
-+ mb_correct_addr_and_bit(bit,addr);
-+ ext2_clear_bit_atomic(NULL, bit, addr);
-+}
-+
-+static inline int mb_find_next_zero_bit(void *addr, int max, int start)
-+{
-+ int fix;
-+#if BITS_PER_LONG == 64
-+ fix = ((unsigned long) addr & 7UL) << 3;
-+ addr = (void *) ((unsigned long) addr & ~7UL);
-+#elif BITS_PER_LONG == 32
-+ fix = ((unsigned long) addr & 3UL) << 3;
-+ addr = (void *) ((unsigned long) addr & ~3UL);
-+#else
-+#error "how many bits you are?!"
-+#endif
-+ max += fix;
-+ start += fix;
-+ return ext2_find_next_zero_bit(addr, max, start) - fix;
-+}
-+
-+static inline void *mb_find_buddy(struct ext3_buddy *e3b, int order, int *max)
-+{
-+ char *bb;
-+
-+ J_ASSERT(EXT3_MB_BITMAP(e3b) != EXT3_MB_BUDDY(e3b));
-+ J_ASSERT(max != NULL);
-+
-+ if (order > e3b->bd_blkbits + 1) {
-+ *max = 0;
-+ return NULL;
-+ }
-+
-+ /* at order 0 we see each particular block */
-+ *max = 1 << (e3b->bd_blkbits + 3);
-+ if (order == 0)
-+ return EXT3_MB_BITMAP(e3b);
-+
-+ bb = EXT3_MB_BUDDY(e3b) + EXT3_SB(e3b->bd_sb)->s_mb_offsets[order];
-+ *max = EXT3_SB(e3b->bd_sb)->s_mb_maxs[order];
-+
-+ return bb;
-+}
-+
-+#ifdef AGGRESSIVE_CHECK
-+
-+static void mb_check_buddy(struct ext3_buddy *e3b)
-+{
-+ int order = e3b->bd_blkbits + 1;
-+ int max, max2, i, j, k, count;
-+ int fragments = 0, fstart;
-+ void *buddy, *buddy2;
-+
-+ if (!test_opt(e3b->bd_sb, MBALLOC))
-+ return;
-+
-+ {
-+ static int mb_check_counter = 0;
-+ if (mb_check_counter++ % 300 != 0)
-+ return;
-+ }
-+
-+ while (order > 1) {
-+ buddy = mb_find_buddy(e3b, order, &max);
-+ J_ASSERT(buddy);
-+ buddy2 = mb_find_buddy(e3b, order - 1, &max2);
-+ J_ASSERT(buddy2);
-+ J_ASSERT(buddy != buddy2);
-+ J_ASSERT(max * 2 == max2);
-+
-+ count = 0;
-+ for (i = 0; i < max; i++) {
-+
-+ if (mb_test_bit(i, buddy)) {
-+ /* only single bit in buddy2 may be 1 */
-+ if (!mb_test_bit(i << 1, buddy2))
-+ J_ASSERT(mb_test_bit((i<<1)+1, buddy2));
-+ else if (!mb_test_bit((i << 1) + 1, buddy2))
-+ J_ASSERT(mb_test_bit(i << 1, buddy2));
-+ continue;
-+ }
-+
-+ /* both bits in buddy2 must be 0 */
-+ J_ASSERT(mb_test_bit(i << 1, buddy2));
-+ J_ASSERT(mb_test_bit((i << 1) + 1, buddy2));
-+
-+ for (j = 0; j < (1 << order); j++) {
-+ k = (i * (1 << order)) + j;
-+ J_ASSERT(!mb_test_bit(k, EXT3_MB_BITMAP(e3b)));
-+ }
-+ count++;
-+ }
-+ J_ASSERT(e3b->bd_info->bb_counters[order] == count);
-+ order--;
-+ }
-+
-+ fstart = -1;
-+ buddy = mb_find_buddy(e3b, 0, &max);
-+ for (i = 0; i < max; i++) {
-+ if (!mb_test_bit(i, buddy)) {
-+ J_ASSERT(i >= e3b->bd_info->bb_first_free);
-+ if (fstart == -1) {
-+ fragments++;
-+ fstart = i;
-+ }
-+ continue;
-+ }
-+ fstart = -1;
-+ /* check used bits only */
-+ for (j = 0; j < e3b->bd_blkbits + 1; j++) {
-+ buddy2 = mb_find_buddy(e3b, j, &max2);
-+ k = i >> j;
-+ J_ASSERT(k < max2);
-+ J_ASSERT(mb_test_bit(k, buddy2));
-+ }
-+ }
-+ J_ASSERT(!EXT3_MB_GRP_NEED_INIT(e3b->bd_info));
-+ J_ASSERT(e3b->bd_info->bb_fragments == fragments);
-+}
-+
-+#else
-+#define mb_check_buddy(e3b)
-+#endif
-+
-+/* find most significant bit */
-+static int inline fmsb(unsigned short word)
-+{
-+ int order;
-+
-+ if (word > 255) {
-+ order = 7;
-+ word >>= 8;
-+ } else {
-+ order = -1;
-+ }
-+
-+ do {
-+ order++;
-+ word >>= 1;
-+ } while (word != 0);
-+
-+ return order;
-+}
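
fmsb() returns the index of the most significant set bit of a non-zero 16-bit value, i.e. floor(log2(word)); it is used below to find the largest power-of-two chunk that still fits in a free range. A standalone copy with a few spot checks:

/* fmsb_test.c */
#include <assert.h>
#include <stdio.h>

static int fmsb(unsigned short word)
{
        int order;

        if (word > 255) {
                order = 7;
                word >>= 8;
        } else {
                order = -1;
        }
        do {
                order++;
                word >>= 1;
        } while (word != 0);
        return order;
}

int main(void)
{
        assert(fmsb(1) == 0);
        assert(fmsb(12) == 3);          /* 0b1100 */
        assert(fmsb(4096) == 12);
        printf("fmsb ok\n");
        return 0;
}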
-+
-+static void inline
-+ext3_mb_mark_free_simple(struct super_block *sb, void *buddy, unsigned first,
-+ int len, struct ext3_group_info *grp)
-+{
-+ struct ext3_sb_info *sbi = EXT3_SB(sb);
-+ unsigned short min, max, chunk, border;
-+
-+ mb_debug("mark %u/%u free\n", first, len);
-+ J_ASSERT(len < EXT3_BLOCKS_PER_GROUP(sb));
-+
-+ border = 2 << sb->s_blocksize_bits;
-+
-+ while (len > 0) {
-+ /* find how many blocks can be covered since this position */
-+ max = ffs(first | border) - 1;
-+
-+ /* find how many blocks of power 2 we need to mark */
-+ min = fmsb(len);
-+
-+ mb_debug(" %u/%u -> max %u, min %u\n",
-+ first & ((2 << sb->s_blocksize_bits) - 1),
-+ len, max, min);
-+
-+ if (max < min)
-+ min = max;
-+ chunk = 1 << min;
-+
-+ /* mark multiblock chunks only */
-+ grp->bb_counters[min]++;
-+ if (min > 0) {
-+ mb_debug(" set %u at %u \n", first >> min,
-+ sbi->s_mb_offsets[min]);
-+ mb_clear_bit(first >> min, buddy + sbi->s_mb_offsets[min]);
-+ }
-+
-+ len -= chunk;
-+ first += chunk;
-+ }
-+}
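
ext3_mb_mark_free_simple() splits a free range into naturally aligned power-of-two chunks: the chunk order is the smaller of what the alignment of 'first' allows (ffs) and what 'len' still covers (fmsb), which is exactly what the per-order buddy bitmaps can represent. The same chunking reproduced in userspace for first = 5, len = 11, assuming a 4 KB block size:

/* chunking.c - prints the buddy chunks for one free range */
#include <stdio.h>
#include <strings.h>                    /* ffs() */

static int fmsb(unsigned short word)    /* same helper as in the patch */
{
        int order = -1;

        if (word > 255) {
                order = 7;
                word >>= 8;
        }
        do {
                order++;
                word >>= 1;
        } while (word != 0);
        return order;
}

int main(void)
{
        unsigned blocksize_bits = 12;   /* assumed: 4 KB blocks */
        unsigned border = 2u << blocksize_bits;
        unsigned first = 5, len = 11;

        while (len > 0) {
                unsigned max = ffs(first | border) - 1; /* alignment limit */
                unsigned min = fmsb(len);               /* size limit */
                unsigned order = max < min ? max : min;

                printf("chunk of %u block(s) at %u (order %u)\n",
                       1u << order, first, order);
                len -= 1u << order;
                first += 1u << order;
        }
        return 0;                       /* prints chunks 1@5, 2@6, 8@8 */
}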
-+
-+static void
-+ext3_mb_generate_buddy(struct super_block *sb, void *buddy, void *bitmap,
-+ int group)
-+{
-+ struct ext3_group_info *grp = EXT3_GROUP_INFO(sb, group);
-+ unsigned short max = EXT3_BLOCKS_PER_GROUP(sb);
-+ unsigned short i = 0, first, len;
-+ unsigned free = 0, fragments = 0;
-+ unsigned long long period = get_cycles();
-+
-+ i = mb_find_next_zero_bit(bitmap, max, 0);
-+ grp->bb_first_free = i;
-+ while (i < max) {
-+ fragments++;
-+ first = i;
-+ i = ext2_find_next_le_bit(bitmap, max, i);
-+ len = i - first;
-+ free += len;
-+ if (len > 1)
-+ ext3_mb_mark_free_simple(sb, buddy, first, len, grp);
-+ else
-+ grp->bb_counters[0]++;
-+ if (i < max)
-+ i = mb_find_next_zero_bit(bitmap, max, i);
-+ }
-+ grp->bb_fragments = fragments;
-+
-+ /* bb_state won't be modified concurrently here because everyone
-+ * else waits for init completion on the page lock */
-+ clear_bit(EXT3_GROUP_INFO_NEED_INIT_BIT, &grp->bb_state);
-+ if (free != grp->bb_free) {
-+ printk("EXT3-fs: group %u: %u blocks in bitmap, %u in gd\n",
-+ group, free, grp->bb_free);
-+ grp->bb_free = free;
-+ }
-+
-+ period = get_cycles() - period;
-+ spin_lock(&EXT3_SB(sb)->s_bal_lock);
-+ EXT3_SB(sb)->s_mb_buddies_generated++;
-+ EXT3_SB(sb)->s_mb_generation_time += period;
-+ spin_unlock(&EXT3_SB(sb)->s_bal_lock);
-+}
-+
-+static int ext3_mb_init_cache(struct page *page)
-+{
-+ int blocksize, blocks_per_page, groups_per_page;
-+ int err = 0, i, first_group, first_block;
-+ struct super_block *sb;
-+ struct buffer_head *bhs;
-+ struct buffer_head **bh;
-+ struct inode *inode;
-+ char *data, *bitmap;
-+
-+ mb_debug("init page %lu\n", page->index);
-+
-+ inode = page->mapping->host;
-+ sb = inode->i_sb;
-+ blocksize = 1 << inode->i_blkbits;
-+ blocks_per_page = PAGE_CACHE_SIZE / blocksize;
-+
-+ groups_per_page = blocks_per_page >> 1;
-+ if (groups_per_page == 0)
-+ groups_per_page = 1;
-+
-+ /* allocate buffer_heads to read bitmaps */
-+ if (groups_per_page > 1) {
-+ err = -ENOMEM;
-+ i = sizeof(struct buffer_head *) * groups_per_page;
-+ bh = kmalloc(i, GFP_NOFS);
-+ if (bh == NULL)
-+ goto out;
-+ memset(bh, 0, i);
-+ } else
-+ bh = &bhs;
-+
-+ first_group = page->index * blocks_per_page / 2;
-+
-+ /* read all groups the page covers into the cache */
-+ for (i = 0; i < groups_per_page; i++) {
-+ struct ext3_group_desc * desc;
-+
-+ if (first_group + i >= EXT3_SB(sb)->s_groups_count)
-+ break;
-+
-+ err = -EIO;
-+ desc = ext3_get_group_desc(sb, first_group + i, NULL);
-+ if (desc == NULL)
-+ goto out;
-+
-+ err = -ENOMEM;
-+ bh[i] = sb_getblk(sb, le32_to_cpu(desc->bg_block_bitmap));
-+ if (bh[i] == NULL)
-+ goto out;
-+
-+ if (buffer_uptodate(bh[i]))
-+ continue;
-+
-+ lock_buffer(bh[i]);
-+ if (buffer_uptodate(bh[i])) {
-+ unlock_buffer(bh[i]);
-+ continue;
-+ }
-+
-+ get_bh(bh[i]);
-+ bh[i]->b_end_io = end_buffer_read_sync;
-+ submit_bh(READ, bh[i]);
-+ mb_debug("read bitmap for group %u\n", first_group + i);
-+ }
-+
-+ /* wait for I/O completion */
-+ for (i = 0; i < groups_per_page && bh[i]; i++)
-+ wait_on_buffer(bh[i]);
-+
-+ err = -EIO;
-+ for (i = 0; i < groups_per_page && bh[i]; i++)
-+ if (!buffer_uptodate(bh[i]))
-+ goto out;
-+
-+ first_block = page->index * blocks_per_page;
-+ for (i = 0; i < blocks_per_page; i++) {
-+ int group;
-+
-+ group = (first_block + i) >> 1;
-+ if (group >= EXT3_SB(sb)->s_groups_count)
-+ break;
-+
-+ data = page_address(page) + (i * blocksize);
-+ bitmap = bh[group - first_group]->b_data;
-+
-+ if ((first_block + i) & 1) {
-+ /* this is block of buddy */
-+ mb_debug("put buddy for group %u in page %lu/%x\n",
-+ group, page->index, i * blocksize);
-+ memset(data, 0xff, blocksize);
-+ EXT3_GROUP_INFO(sb, group)->bb_fragments = 0;
-+ memset(EXT3_GROUP_INFO(sb, group)->bb_counters, 0,
-+ sizeof(unsigned short)*(sb->s_blocksize_bits+2));
-+ ext3_mb_generate_buddy(sb, data, bitmap, group);
-+ } else {
-+ /* this is block of bitmap */
-+ mb_debug("put bitmap for group %u in page %lu/%x\n",
-+ group, page->index, i * blocksize);
-+ memcpy(data, bitmap, blocksize);
-+ }
-+ }
-+ SetPageUptodate(page);
-+
-+out:
-+ if (bh) {
-+ for (i = 0; i < groups_per_page && bh[i]; i++)
-+ brelse(bh[i]);
-+ if (bh != &bhs)
-+ kfree(bh);
-+ }
-+ return err;
-+}
-+
-+static int ext3_mb_load_buddy(struct super_block *sb, int group,
-+ struct ext3_buddy *e3b)
-+{
-+ struct ext3_sb_info *sbi = EXT3_SB(sb);
-+ struct inode *inode = sbi->s_buddy_cache;
-+ int blocks_per_page, block, pnum, poff;
-+ struct page *page;
-+
-+ mb_debug("load group %u\n", group);
-+
-+ blocks_per_page = PAGE_CACHE_SIZE / sb->s_blocksize;
-+
-+ e3b->bd_blkbits = sb->s_blocksize_bits;
-+ e3b->bd_info = EXT3_GROUP_INFO(sb, group);
-+ e3b->bd_sb = sb;
-+ e3b->bd_group = group;
-+ e3b->bd_buddy_page = NULL;
-+ e3b->bd_bitmap_page = NULL;
-+
-+ block = group * 2;
-+ pnum = block / blocks_per_page;
-+ poff = block % blocks_per_page;
-+
-+ /* we could use find_or_create_page(), but it locks page
-+ * what we'd like to avoid in fast path ... */
-+ page = find_get_page(inode->i_mapping, pnum);
-+ if (page == NULL || !PageUptodate(page)) {
-+ if (page)
-+ page_cache_release(page);
-+ page = find_or_create_page(inode->i_mapping, pnum, GFP_NOFS);
-+ if (page) {
-+ BUG_ON(page->mapping != inode->i_mapping);
-+ if (!PageUptodate(page))
-+ ext3_mb_init_cache(page);
-+ unlock_page(page);
-+ }
-+ }
-+ if (page == NULL || !PageUptodate(page))
-+ goto err;
-+ e3b->bd_bitmap_page = page;
-+ e3b->bd_bitmap = page_address(page) + (poff * sb->s_blocksize);
-+ mark_page_accessed(page);
-+
-+ block++;
-+ pnum = block / blocks_per_page;
-+ poff = block % blocks_per_page;
-+
-+ page = find_get_page(inode->i_mapping, pnum);
-+ if (page == NULL || !PageUptodate(page)) {
-+ if (page)
-+ page_cache_release(page);
-+ page = find_or_create_page(inode->i_mapping, pnum, GFP_NOFS);
-+ if (page) {
-+ BUG_ON(page->mapping != inode->i_mapping);
-+ if (!PageUptodate(page))
-+ ext3_mb_init_cache(page);
-+ unlock_page(page);
-+ }
-+ }
-+ if (page == NULL || !PageUptodate(page))
-+ goto err;
-+ e3b->bd_buddy_page = page;
-+ e3b->bd_buddy = page_address(page) + (poff * sb->s_blocksize);
-+ mark_page_accessed(page);
-+
-+ J_ASSERT(e3b->bd_bitmap_page != NULL);
-+ J_ASSERT(e3b->bd_buddy_page != NULL);
-+
-+ return 0;
-+
-+err:
-+ if (e3b->bd_bitmap_page)
-+ page_cache_release(e3b->bd_bitmap_page);
-+ if (e3b->bd_buddy_page)
-+ page_cache_release(e3b->bd_buddy_page);
-+ e3b->bd_buddy = NULL;
-+ e3b->bd_bitmap = NULL;
-+ return -EIO;
-+}
-+
-+static void ext3_mb_release_desc(struct ext3_buddy *e3b)
-+{
-+ if (e3b->bd_bitmap_page)
-+ page_cache_release(e3b->bd_bitmap_page);
-+ if (e3b->bd_buddy_page)
-+ page_cache_release(e3b->bd_buddy_page);
-+}
-+
-+
-+static inline void
-+ext3_lock_group(struct super_block *sb, int group)
-+{
-+ bit_spin_lock(EXT3_GROUP_INFO_LOCKED_BIT,
-+ &EXT3_GROUP_INFO(sb, group)->bb_state);
-+}
-+
-+static inline void
-+ext3_unlock_group(struct super_block *sb, int group)
-+{
-+ bit_spin_unlock(EXT3_GROUP_INFO_LOCKED_BIT,
-+ &EXT3_GROUP_INFO(sb, group)->bb_state);
-+}
-+
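-+/*
-+ * Note on the layout implied by the code below: the buddy data for
-+ * successive orders is stored back to back, so the order-(N+1) data
-+ * starts 1 << (bd_blkbits - N) bytes after the order-N data; this is
-+ * what the "bb += 1 << (e3b->bd_blkbits - order)" step walks over.
-+ */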
-+static int mb_find_order_for_block(struct ext3_buddy *e3b, int block)
-+{
-+ int order = 1;
-+ void *bb;
-+
-+ J_ASSERT(EXT3_MB_BITMAP(e3b) != EXT3_MB_BUDDY(e3b));
-+ J_ASSERT(block < (1 << (e3b->bd_blkbits + 3)));
-+
-+ bb = EXT3_MB_BUDDY(e3b);
-+ while (order <= e3b->bd_blkbits + 1) {
-+ block = block >> 1;
-+ if (!mb_test_bit(block, bb)) {
-+ /* this block is part of buddy of order 'order' */
-+ return order;
-+ }
-+ bb += 1 << (e3b->bd_blkbits - order);
-+ order++;
-+ }
-+ return 0;
-+}
-+
-+static inline void mb_clear_bits(void *bm, int cur, int len)
-+{
-+ __u32 *addr;
-+
-+ len = cur + len;
-+ while (cur < len) {
-+ if ((cur & 31) == 0 && (len - cur) >= 32) {
-+ /* fast path: clear whole word at once */
-+ addr = bm + (cur >> 3);
-+ *addr = 0;
-+ cur += 32;
-+ continue;
-+ }
-+ mb_clear_bit_atomic(cur, bm);
-+ cur++;
-+ }
-+}
-+
-+static inline void mb_set_bits(void *bm, int cur, int len)
-+{
-+ __u32 *addr;
-+
-+ len = cur + len;
-+ while (cur < len) {
-+ if ((cur & 31) == 0 && (len - cur) >= 32) {
-+ /* fast path: set whole word at once */
-+ addr = bm + (cur >> 3);
-+ *addr = 0xffffffff;
-+ cur += 32;
-+ continue;
-+ }
-+ mb_set_bit_atomic(cur, bm);
-+ cur++;
-+ }
-+}
-+
-+static int mb_free_blocks(struct ext3_buddy *e3b, int first, int count)
-+{
-+ int block = 0, max = 0, order;
-+ void *buddy, *buddy2;
-+
-+ mb_check_buddy(e3b);
-+
-+ e3b->bd_info->bb_free += count;
-+ if (first < e3b->bd_info->bb_first_free)
-+ e3b->bd_info->bb_first_free = first;
-+
-+ /* let's maintain fragments counter */
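-+ /* (if both neighbours of the freed range are free, two existing
-+ * fragments merge into one; if both are in use, freeing creates
-+ * a new fragment) */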
-+ if (first != 0)
-+ block = !mb_test_bit(first - 1, EXT3_MB_BITMAP(e3b));
-+ if (first + count < EXT3_SB(e3b->bd_sb)->s_mb_maxs[0])
-+ max = !mb_test_bit(first + count, EXT3_MB_BITMAP(e3b));
-+ if (block && max)
-+ e3b->bd_info->bb_fragments--;
-+ else if (!block && !max)
-+ e3b->bd_info->bb_fragments++;
-+
-+ /* let's maintain buddy itself */
-+ while (count-- > 0) {
-+ block = first++;
-+ order = 0;
-+
-+ J_ASSERT(mb_test_bit(block, EXT3_MB_BITMAP(e3b)));
-+ mb_clear_bit(block, EXT3_MB_BITMAP(e3b));
-+ e3b->bd_info->bb_counters[order]++;
-+
-+ /* start of the buddy */
-+ buddy = mb_find_buddy(e3b, order, &max);
-+
-+ do {
-+ block &= ~1UL;
-+ if (mb_test_bit(block, buddy) ||
-+ mb_test_bit(block + 1, buddy))
-+ break;
-+
-+ /* both the buddies are free, try to coalesce them */
-+ buddy2 = mb_find_buddy(e3b, order + 1, &max);
-+
-+ if (!buddy2)
-+ break;
-+
-+ if (order > 0) {
-+ /* above order 0 the two merged buddies are marked
-+ * used here; the order-0 bitmap keeps the blocks
-+ * marked free */
-+ mb_set_bit(block, buddy);
-+ mb_set_bit(block + 1, buddy);
-+ }
-+ e3b->bd_info->bb_counters[order]--;
-+ e3b->bd_info->bb_counters[order]--;
-+
-+ block = block >> 1;
-+ order++;
-+ e3b->bd_info->bb_counters[order]++;
-+
-+ mb_clear_bit(block, buddy2);
-+ buddy = buddy2;
-+ } while (1);
-+ }
-+ mb_check_buddy(e3b);
-+
-+ return 0;
-+}
-+
-+static int mb_find_extent(struct ext3_buddy *e3b, int order, int block,
-+ int needed, struct ext3_free_extent *ex)
-+{
-+ int next = block, max, ord;
-+ void *buddy;
-+
-+ J_ASSERT(ex != NULL);
-+
-+ buddy = mb_find_buddy(e3b, order, &max);
-+ J_ASSERT(buddy);
-+ J_ASSERT(block < max);
-+ if (mb_test_bit(block, buddy)) {
-+ ex->fe_len = 0;
-+ ex->fe_start = 0;
-+ ex->fe_group = 0;
-+ return 0;
-+ }
-+
-+ if (likely(order == 0)) {
-+ /* find actual order */
-+ order = mb_find_order_for_block(e3b, block);
-+ block = block >> order;
-+ }
-+
-+ ex->fe_len = 1 << order;
-+ ex->fe_start = block << order;
-+ ex->fe_group = e3b->bd_group;
-+
-+ /* calc difference from given start */
-+ next = next - ex->fe_start;
-+ ex->fe_len -= next;
-+ ex->fe_start += next;
-+
-+ while (needed > ex->fe_len && (buddy = mb_find_buddy(e3b, order, &max))) {
-+
-+ if (block + 1 >= max)
-+ break;
-+
-+ next = (block + 1) * (1 << order);
-+ if (mb_test_bit(next, EXT3_MB_BITMAP(e3b)))
-+ break;
-+
-+ ord = mb_find_order_for_block(e3b, next);
-+
-+ order = ord;
-+ block = next >> order;
-+ ex->fe_len += 1 << order;
-+ }
-+
-+ J_ASSERT(ex->fe_start + ex->fe_len <= (1 << (e3b->bd_blkbits + 3)));
-+ return ex->fe_len;
-+}
-+
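-+/*
-+ * Marks the given extent as used in the buddy data. The return value
-+ * records the first split that was needed: the remaining length in the
-+ * low 16 bits and the buddy order in the high 16 bits (unpacked by
-+ * ext3_mb_use_best_found() into ac_tail and ac_buddy).
-+ */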
-+static int mb_mark_used(struct ext3_buddy *e3b, struct ext3_free_extent *ex)
-+{
-+ int ord, mlen = 0, max = 0, cur;
-+ int start = ex->fe_start;
-+ int len = ex->fe_len;
-+ unsigned ret = 0;
-+ int len0 = len;
-+ void *buddy;
-+
-+ mb_check_buddy(e3b);
-+
-+ e3b->bd_info->bb_free -= len;
-+ if (e3b->bd_info->bb_first_free == start)
-+ e3b->bd_info->bb_first_free += len;
-+
-+ /* let's maintain fragments counter */
-+ if (start != 0)
-+ mlen = !mb_test_bit(start - 1, EXT3_MB_BITMAP(e3b));
-+ if (start + len < EXT3_SB(e3b->bd_sb)->s_mb_maxs[0])
-+ max = !mb_test_bit(start + len, EXT3_MB_BITMAP(e3b));
-+ if (mlen && max)
-+ e3b->bd_info->bb_fragments++;
-+ else if (!mlen && !max)
-+ e3b->bd_info->bb_fragments--;
-+
-+ /* let's maintain buddy itself */
-+ while (len) {
-+ ord = mb_find_order_for_block(e3b, start);
-+
-+ if (((start >> ord) << ord) == start && len >= (1 << ord)) {
-+ /* the whole chunk may be allocated at once! */
-+ mlen = 1 << ord;
-+ buddy = mb_find_buddy(e3b, ord, &max);
-+ J_ASSERT((start >> ord) < max);
-+ mb_set_bit(start >> ord, buddy);
-+ e3b->bd_info->bb_counters[ord]--;
-+ start += mlen;
-+ len -= mlen;
-+ J_ASSERT(len >= 0);
-+ continue;
-+ }
-+
-+ /* store for history */
-+ if (ret == 0)
-+ ret = len | (ord << 16);
-+
-+ /* we have to split large buddy */
-+ J_ASSERT(ord > 0);
-+ buddy = mb_find_buddy(e3b, ord, &max);
-+ mb_set_bit(start >> ord, buddy);
-+ e3b->bd_info->bb_counters[ord]--;
-+
-+ ord--;
-+ cur = (start >> ord) & ~1U;
-+ buddy = mb_find_buddy(e3b, ord, &max);
-+ mb_clear_bit(cur, buddy);
-+ mb_clear_bit(cur + 1, buddy);
-+ e3b->bd_info->bb_counters[ord]++;
-+ e3b->bd_info->bb_counters[ord]++;
-+ }
-+
-+ /* now mark the whole extent as used in the bitmap */
-+ mb_set_bits(EXT3_MB_BITMAP(e3b), ex->fe_start, len0);
-+
-+ mb_check_buddy(e3b);
-+
-+ return ret;
-+}
-+
-+/*
-+ * Must be called under group lock!
-+ */
-+static void ext3_mb_use_best_found(struct ext3_allocation_context *ac,
-+ struct ext3_buddy *e3b)
-+{
-+ unsigned long ret;
-+
-+ ac->ac_b_ex.fe_len = min(ac->ac_b_ex.fe_len, ac->ac_g_ex.fe_len);
-+ ret = mb_mark_used(e3b, &ac->ac_b_ex);
-+
-+ ac->ac_status = AC_STATUS_FOUND;
-+ ac->ac_tail = ret & 0xffff;
-+ ac->ac_buddy = ret >> 16;
-+
-+ /* hold in-core structures until allocated
-+ * blocks are marked non-free in on-disk bitmap */
-+ ac->ac_buddy_page = e3b->bd_buddy_page;
-+ page_cache_get(e3b->bd_buddy_page);
-+ ac->ac_bitmap_page = e3b->bd_bitmap_page;
-+ page_cache_get(e3b->bd_bitmap_page);
-+}
-+
-+/*
-+ * The routine checks whether the found extent is good enough. If it is,
-+ * the extent gets marked used and a flag is set in the context to stop
-+ * scanning. Otherwise, the extent is compared with the previously found
-+ * extent and, if the new one is better, it is stored in the context.
-+ * Later, the best found extent will be used if mballoc can't find a
-+ * good enough extent.
-+ *
-+ * FIXME: real allocation policy is to be designed yet!
-+ */
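-+/*
-+ * Illustrative example: with a goal of 8 blocks, an 8-block extent is
-+ * taken immediately, while a 5-block extent is only remembered as the
-+ * current best and may be used later via ext3_mb_try_best_found().
-+ */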
-+static void ext3_mb_measure_extent(struct ext3_allocation_context *ac,
-+ struct ext3_free_extent *ex,
-+ struct ext3_buddy *e3b)
-+{
-+ struct ext3_free_extent *bex = &ac->ac_b_ex;
-+ struct ext3_free_extent *gex = &ac->ac_g_ex;
-+
-+ J_ASSERT(ex->fe_len > 0);
-+ J_ASSERT(ex->fe_len < (1 << ac->ac_sb->s_blocksize_bits) * 8);
-+ J_ASSERT(ex->fe_start < (1 << ac->ac_sb->s_blocksize_bits) * 8);
-+
-+ ac->ac_found++;
-+
-+ /*
-+ * The special case - take what you catch first
-+ */
-+ if (unlikely(ac->ac_flags & EXT3_MB_HINT_FIRST)) {
-+ *bex = *ex;
-+ ext3_mb_use_best_found(ac, e3b);
-+ return;
-+ }
-+
-+ /*
-+ * Let's check whether the chunk is good enough
-+ */
-+ if (ex->fe_len == gex->fe_len) {
-+ *bex = *ex;
-+ ext3_mb_use_best_found(ac, e3b);
-+ return;
-+ }
-+
-+ /*
-+ * If this is first found extent, just store it in the context
-+ */
-+ if (bex->fe_len == 0) {
-+ *bex = *ex;
-+ return;
-+ }
-+
-+ /*
-+ * If new found extent is better, store it in the context
-+ */
-+ if (bex->fe_len < gex->fe_len) {
-+ /* if the request isn't satisfied, any found extent
-+ * larger than previous best one is better */
-+ if (ex->fe_len > bex->fe_len)
-+ *bex = *ex;
-+ } else if (ex->fe_len > gex->fe_len) {
-+ /* if the request is satisfied, then we try to find
-+ * an extent that still satisfies the request, but is
-+ * smaller than the previous one */
-+ *bex = *ex;
-+ }
-+
-+ /*
-+ * Let's scan at least a few extents and not pick up the first one
-+ */
-+ if (bex->fe_len > gex->fe_len && ac->ac_found > ext3_mb_min_to_scan)
-+ ac->ac_status = AC_STATUS_BREAK;
-+
-+ /*
-+ * We don't want to scan for a whole year
-+ */
-+ if (ac->ac_found > ext3_mb_max_to_scan)
-+ ac->ac_status = AC_STATUS_BREAK;
-+}
-+
-+static int ext3_mb_try_best_found(struct ext3_allocation_context *ac,
-+ struct ext3_buddy *e3b)
-+{
-+ struct ext3_free_extent ex = ac->ac_b_ex;
-+ int group = ex.fe_group, max, err;
-+
-+ J_ASSERT(ex.fe_len > 0);
-+ err = ext3_mb_load_buddy(ac->ac_sb, group, e3b);
-+ if (err)
-+ return err;
-+
-+ ext3_lock_group(ac->ac_sb, group);
-+ max = mb_find_extent(e3b, 0, ex.fe_start, ex.fe_len, &ex);
-+
-+ if (max > 0) {
-+ ac->ac_b_ex = ex;
-+ ext3_mb_use_best_found(ac, e3b);
-+ }
-+
-+ ext3_unlock_group(ac->ac_sb, group);
-+
-+ ext3_mb_release_desc(e3b);
-+
-+ return 0;
-+}
-+
-+static int ext3_mb_find_by_goal(struct ext3_allocation_context *ac,
-+ struct ext3_buddy *e3b)
-+{
-+ int group = ac->ac_g_ex.fe_group, max, err;
-+ struct ext3_sb_info *sbi = EXT3_SB(ac->ac_sb);
-+ struct ext3_super_block *es = sbi->s_es;
-+ struct ext3_free_extent ex;
-+
-+ err = ext3_mb_load_buddy(ac->ac_sb, group, e3b);
-+ if (err)
-+ return err;
-+
-+ ext3_lock_group(ac->ac_sb, group);
-+ max = mb_find_extent(e3b, 0, ac->ac_g_ex.fe_start,
-+ ac->ac_g_ex.fe_len, &ex);
-+
-+ if (max >= ac->ac_g_ex.fe_len && ac->ac_g_ex.fe_len == sbi->s_stripe) {
-+ unsigned long start;
-+ start = (e3b->bd_group * EXT3_BLOCKS_PER_GROUP(ac->ac_sb) +
-+ ex.fe_start + le32_to_cpu(es->s_first_data_block));
-+ if (start % sbi->s_stripe == 0) {
-+ ac->ac_found++;
-+ ac->ac_b_ex = ex;
-+ ext3_mb_use_best_found(ac, e3b);
-+ }
-+ } else if (max >= ac->ac_g_ex.fe_len) {
-+ J_ASSERT(ex.fe_len > 0);
-+ J_ASSERT(ex.fe_group == ac->ac_g_ex.fe_group);
-+ J_ASSERT(ex.fe_start == ac->ac_g_ex.fe_start);
-+ ac->ac_found++;
-+ ac->ac_b_ex = ex;
-+ ext3_mb_use_best_found(ac, e3b);
-+ } else if (max > 0 && (ac->ac_flags & EXT3_MB_HINT_MERGE)) {
-+ /* Sometimes, the caller may want to merge even a small
-+ * number of blocks into an existing extent */
-+ J_ASSERT(ex.fe_len > 0);
-+ J_ASSERT(ex.fe_group == ac->ac_g_ex.fe_group);
-+ J_ASSERT(ex.fe_start == ac->ac_g_ex.fe_start);
-+ ac->ac_found++;
-+ ac->ac_b_ex = ex;
-+ ext3_mb_use_best_found(ac, e3b);
-+ }
-+ ext3_unlock_group(ac->ac_sb, group);
-+
-+ ext3_mb_release_desc(e3b);
-+
-+ return 0;
-+}
-+
-+/*
-+ * The routine scans the buddy structures (not the bitmap!) from the
-+ * given order up to the max order and tries to find a big enough
-+ * chunk to satisfy the request
-+ */
-+static void ext3_mb_simple_scan_group(struct ext3_allocation_context *ac,
-+ struct ext3_buddy *e3b)
-+{
-+ struct super_block *sb = ac->ac_sb;
-+ struct ext3_group_info *grp = e3b->bd_info;
-+ void *buddy;
-+ int i, k, max;
-+
-+ J_ASSERT(ac->ac_2order > 0);
-+ for (i = ac->ac_2order; i <= sb->s_blocksize_bits + 1; i++) {
-+ if (grp->bb_counters[i] == 0)
-+ continue;
-+
-+ buddy = mb_find_buddy(e3b, i, &max);
-+ if (buddy == NULL) {
-+ printk(KERN_ALERT "looking for wrong order?\n");
-+ break;
-+ }
-+
-+ k = mb_find_next_zero_bit(buddy, max, 0);
-+ J_ASSERT(k < max);
-+
-+ ac->ac_found++;
-+
-+ ac->ac_b_ex.fe_len = 1 << i;
-+ ac->ac_b_ex.fe_start = k << i;
-+ ac->ac_b_ex.fe_group = e3b->bd_group;
-+
-+ ext3_mb_use_best_found(ac, e3b);
-+ J_ASSERT(ac->ac_b_ex.fe_len == ac->ac_g_ex.fe_len);
-+
-+ if (unlikely(ext3_mb_stats))
-+ atomic_inc(&EXT3_SB(sb)->s_bal_2orders);
-+
-+ break;
-+ }
-+}
-+
-+/*
-+ * The routine scans the group and measures all found extents.
-+ * To optimize scanning, it uses the group's free block count
-+ * (bb_free) as an upper limit, so it knows when to stop.
-+ */
-+static void ext3_mb_complex_scan_group(struct ext3_allocation_context *ac,
-+ struct ext3_buddy *e3b)
-+{
-+ struct super_block *sb = ac->ac_sb;
-+ void *bitmap = EXT3_MB_BITMAP(e3b);
-+ struct ext3_free_extent ex;
-+ int i, free;
-+
-+ free = e3b->bd_info->bb_free;
-+ J_ASSERT(free > 0);
-+
-+ i = e3b->bd_info->bb_first_free;
-+
-+ while (free && ac->ac_status == AC_STATUS_CONTINUE) {
-+ i = mb_find_next_zero_bit(bitmap, sb->s_blocksize * 8, i);
-+ if (i >= sb->s_blocksize * 8) {
-+ J_ASSERT(free == 0);
-+ break;
-+ }
-+
-+ mb_find_extent(e3b, 0, i, ac->ac_g_ex.fe_len, &ex);
-+ J_ASSERT(ex.fe_len > 0);
-+ J_ASSERT(free >= ex.fe_len);
-+
-+ ext3_mb_measure_extent(ac, &ex, e3b);
-+
-+ i += ex.fe_len;
-+ free -= ex.fe_len;
-+ }
-+}
-+
-+/*
-+ * This is a special case for storage like raid5:
-+ * we try to find stripe-aligned chunks for stripe-size requests
-+ */
-+static void ext3_mb_scan_aligned(struct ext3_allocation_context *ac,
-+ struct ext3_buddy *e3b)
-+{
-+ struct super_block *sb = ac->ac_sb;
-+ struct ext3_sb_info *sbi = EXT3_SB(sb);
-+ void *bitmap = EXT3_MB_BITMAP(e3b);
-+ struct ext3_free_extent ex;
-+ unsigned long i, max;
-+
-+ J_ASSERT(sbi->s_stripe != 0);
-+
-+ /* find first stripe-aligned block */
-+ i = e3b->bd_group * EXT3_BLOCKS_PER_GROUP(sb) +
-+ le32_to_cpu(sbi->s_es->s_first_data_block);
-+ i = ((i + sbi->s_stripe - 1) / sbi->s_stripe) * sbi->s_stripe;
-+ i = (i - le32_to_cpu(sbi->s_es->s_first_data_block)) %
-+ EXT3_BLOCKS_PER_GROUP(sb);
-+
-+ while (i < sb->s_blocksize * 8) {
-+ if (!mb_test_bit(i, bitmap)) {
-+ max = mb_find_extent(e3b, 0, i, sbi->s_stripe, &ex);
-+ if (max >= sbi->s_stripe) {
-+ ac->ac_found++;
-+ ac->ac_b_ex = ex;
-+ ext3_mb_use_best_found(ac, e3b);
-+ break;
-+ }
-+ }
-+ i += sbi->s_stripe;
-+ }
-+}
-+
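-+/*
-+ * Rough meaning of the cr (criteria) levels checked below:
-+ *  0 - the group has a free power-of-two chunk of the requested order
-+ *  1 - the average free extent (free / fragments) is large enough
-+ *  2 - the total free space is large enough
-+ *  3 - accept any group with free space
-+ */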
-+static int ext3_mb_good_group(struct ext3_allocation_context *ac,
-+ int group, int cr)
-+{
-+ struct ext3_group_info *grp = EXT3_GROUP_INFO(ac->ac_sb, group);
-+ unsigned free, fragments, i, bits;
-+
-+ J_ASSERT(cr >= 0 && cr < 4);
-+ J_ASSERT(!EXT3_MB_GRP_NEED_INIT(grp));
-+
-+ free = grp->bb_free;
-+ fragments = grp->bb_fragments;
-+ if (free == 0)
-+ return 0;
-+ if (fragments == 0)
-+ return 0;
-+
-+ switch (cr) {
-+ case 0:
-+ J_ASSERT(ac->ac_2order != 0);
-+ bits = ac->ac_sb->s_blocksize_bits + 1;
-+ for (i = ac->ac_2order; i <= bits; i++)
-+ if (grp->bb_counters[i] > 0)
-+ return 1;
-+ break;
-+ case 1:
-+ if ((free / fragments) >= ac->ac_g_ex.fe_len)
-+ return 1;
-+ break;
-+ case 2:
-+ if (free >= ac->ac_g_ex.fe_len)
-+ return 1;
-+ break;
-+ case 3:
-+ return 1;
-+ default:
-+ BUG();
-+ }
-+
-+ return 0;
-+}
-+
-+int ext3_mb_new_blocks(handle_t *handle, struct inode *inode,
-+ unsigned long goal, int *len, int flags, int *errp)
-+{
-+ struct buffer_head *bitmap_bh = NULL;
-+ struct ext3_allocation_context ac;
-+ int i, group, block, cr, err = 0;
-+ struct ext3_group_desc *gdp;
-+ struct ext3_super_block *es;
-+ struct buffer_head *gdp_bh;
-+ struct ext3_sb_info *sbi;
-+ struct super_block *sb;
-+ struct ext3_buddy e3b;
-+
-+ J_ASSERT(len != NULL);
-+ J_ASSERT(*len > 0);
-+
-+ sb = inode->i_sb;
-+ if (!sb) {
-+ printk("ext3_mb_new_blocks: nonexistent device");
-+ return 0;
-+ }
-+
-+ if (!test_opt(sb, MBALLOC)) {
-+ static int ext3_mballoc_warning = 0;
-+ if (ext3_mballoc_warning == 0) {
-+ printk(KERN_ERR "EXT3-fs: multiblock request with "
-+ "mballoc disabled!\n");
-+ ext3_mballoc_warning++;
-+ }
-+ *len = 1;
-+ err = ext3_new_block_old(handle, inode, goal, errp);
-+ return err;
-+ }
-+
-+ ext3_mb_poll_new_transaction(sb, handle);
-+
-+ sbi = EXT3_SB(sb);
-+ es = EXT3_SB(sb)->s_es;
-+
-+ /*
-+ * We can't allocate > group size
-+ */
-+ if (*len >= EXT3_BLOCKS_PER_GROUP(sb) - 10)
-+ *len = EXT3_BLOCKS_PER_GROUP(sb) - 10;
-+
-+ if (!(flags & EXT3_MB_HINT_RESERVED)) {
-+ /* someone asks for non-reserved blocks */
-+ BUG_ON(*len > 1);
-+ err = ext3_mb_reserve_blocks(sb, 1);
-+ if (err) {
-+ *errp = err;
-+ return 0;
-+ }
-+ }
-+
-+ ac.ac_buddy_page = NULL;
-+ ac.ac_bitmap_page = NULL;
-+
-+ /*
-+ * Check quota for allocation of these blocks.
-+ */
-+ while (*len && DQUOT_ALLOC_BLOCK(inode, *len))
-+ *len -= 1;
-+ if (*len == 0) {
-+ *errp = -EDQUOT;
-+ block = 0;
-+ goto out;
-+ }
-+
-+ /* start searching from the goal */
-+ if (goal < le32_to_cpu(es->s_first_data_block) ||
-+ goal >= le32_to_cpu(es->s_blocks_count))
-+ goal = le32_to_cpu(es->s_first_data_block);
-+ group = (goal - le32_to_cpu(es->s_first_data_block)) /
-+ EXT3_BLOCKS_PER_GROUP(sb);
-+ block = ((goal - le32_to_cpu(es->s_first_data_block)) %
-+ EXT3_BLOCKS_PER_GROUP(sb));
-+
-+ /* set up allocation goals */
-+ ac.ac_b_ex.fe_group = 0;
-+ ac.ac_b_ex.fe_start = 0;
-+ ac.ac_b_ex.fe_len = 0;
-+ ac.ac_status = AC_STATUS_CONTINUE;
-+ ac.ac_groups_scanned = 0;
-+ ac.ac_ex_scanned = 0;
-+ ac.ac_found = 0;
-+ ac.ac_sb = inode->i_sb;
-+ ac.ac_g_ex.fe_group = group;
-+ ac.ac_g_ex.fe_start = block;
-+ ac.ac_g_ex.fe_len = *len;
-+ ac.ac_flags = flags;
-+ ac.ac_2order = 0;
-+ ac.ac_criteria = 0;
-+
-+ if (*len == 1 && sbi->s_stripe) {
-+ /* looks like metadata; let's use a dirty hack for raid5:
-+ * move all metadata to the first groups in the hope of hitting
-+ * cached sectors and thus avoiding read-modify-write cycles in raid5 */
-+ ac.ac_g_ex.fe_group = group = 0;
-+ }
-+
-+ /* if the request is a power of two and its order is at least
-+ * ext3_mb_order2_reqs, allow the fast power-of-two (cr == 0) scan */
-+ i = ffs(*len);
-+ if (i >= ext3_mb_order2_reqs) {
-+ i--;
-+ if ((*len & (~(1 << i))) == 0)
-+ ac.ac_2order = i;
-+ }
-+
-+ /* first, try the goal */
-+ err = ext3_mb_find_by_goal(&ac, &e3b);
-+ if (err)
-+ goto out_err;
-+ if (ac.ac_status == AC_STATUS_FOUND)
-+ goto found;
-+
-+ /* Let's just scan groups to find more or less suitable blocks */
-+ cr = ac.ac_2order ? 0 : 1;
-+repeat:
-+ for (; cr < 4 && ac.ac_status == AC_STATUS_CONTINUE; cr++) {
-+ ac.ac_criteria = cr;
-+ for (i = 0; i < EXT3_SB(sb)->s_groups_count; group++, i++) {
-+ if (group == EXT3_SB(sb)->s_groups_count)
-+ group = 0;
-+
-+ if (EXT3_MB_GRP_NEED_INIT(EXT3_GROUP_INFO(sb, group))) {
-+ /* we need full data about the group
-+ * to make a good selection */
-+ err = ext3_mb_load_buddy(ac.ac_sb, group, &e3b);
-+ if (err)
-+ goto out_err;
-+ ext3_mb_release_desc(&e3b);
-+ }
-+
-+ /* check whether the group is good for our criteria */
-+ if (!ext3_mb_good_group(&ac, group, cr))
-+ continue;
-+
-+ err = ext3_mb_load_buddy(ac.ac_sb, group, &e3b);
-+ if (err)
-+ goto out_err;
-+
-+ ext3_lock_group(sb, group);
-+ if (!ext3_mb_good_group(&ac, group, cr)) {
-+ /* someone did allocation from this group */
-+ ext3_unlock_group(sb, group);
-+ ext3_mb_release_desc(&e3b);
-+ continue;
-+ }
-+
-+ ac.ac_groups_scanned++;
-+ if (cr == 0)
-+ ext3_mb_simple_scan_group(&ac, &e3b);
-+ else if (cr == 1 && *len == sbi->s_stripe)
-+ ext3_mb_scan_aligned(&ac, &e3b);
-+ else
-+ ext3_mb_complex_scan_group(&ac, &e3b);
-+
-+ ext3_unlock_group(sb, group);
-+
-+ ext3_mb_release_desc(&e3b);
-+
-+ if (ac.ac_status != AC_STATUS_CONTINUE)
-+ break;
-+ }
-+ }
-+
-+ if (ac.ac_b_ex.fe_len > 0 && ac.ac_status != AC_STATUS_FOUND &&
-+ !(ac.ac_flags & EXT3_MB_HINT_FIRST)) {
-+ /*
-+ * We've been searching too long. Let's try to allocate
-+ * the best chunk we've found so far
-+ */
-+
-+ /*if (ac.ac_found > ext3_mb_max_to_scan)
-+ printk(KERN_DEBUG "EXT3-fs: too long searching at "
-+ "%u (%d/%d)\n", cr, ac.ac_b_ex.fe_len,
-+ ac.ac_g_ex.fe_len);*/
-+ ext3_mb_try_best_found(&ac, &e3b);
-+ if (ac.ac_status != AC_STATUS_FOUND) {
-+ /*
-+ * Someone luckier has already allocated it.
-+ * The only thing we can do is just take the
-+ * first block(s) we find
-+ printk(KERN_DEBUG "EXT3-fs: someone won our chunk\n");
-+ */
-+ ac.ac_b_ex.fe_group = 0;
-+ ac.ac_b_ex.fe_start = 0;
-+ ac.ac_b_ex.fe_len = 0;
-+ ac.ac_status = AC_STATUS_CONTINUE;
-+ ac.ac_flags |= EXT3_MB_HINT_FIRST;
-+ cr = 3;
-+ goto repeat;
-+ }
-+ }
-+
-+ if (ac.ac_status != AC_STATUS_FOUND) {
-+ /*
-+ * We are definitely out of luck
-+ */
-+ DQUOT_FREE_BLOCK(inode, *len);
-+ *errp = -ENOSPC;
-+ block = 0;
-+#if 1
-+ printk(KERN_ERR "EXT3-fs: can't allocate: status %d flags %d\n",
-+ ac.ac_status, ac.ac_flags);
-+ printk(KERN_ERR "EXT3-fs: goal %d, best found %d/%d/%d cr %d\n",
-+ ac.ac_g_ex.fe_len, ac.ac_b_ex.fe_group,
-+ ac.ac_b_ex.fe_start, ac.ac_b_ex.fe_len, cr);
-+ printk(KERN_ERR "EXT3-fs: %lu block reserved, %d found\n",
-+ sbi->s_blocks_reserved, ac.ac_found);
-+ printk("EXT3-fs: groups: ");
-+ for (i = 0; i < EXT3_SB(sb)->s_groups_count; i++)
-+ printk("%d: %d ", i, EXT3_GROUP_INFO(sb, i)->bb_free);
-+ printk("\n");
-+#endif
-+ goto out;
-+ }
-+
-+found:
-+ J_ASSERT(ac.ac_b_ex.fe_len > 0);
-+
-+ /* good news - free block(s) have been found. now it's time
-+ * to mark block(s) in good old journaled bitmap */
-+ block = ac.ac_b_ex.fe_group * EXT3_BLOCKS_PER_GROUP(sb)
-+ + ac.ac_b_ex.fe_start
-+ + le32_to_cpu(es->s_first_data_block);
-+
-+ /* we made a decision, now mark the found blocks in the good
-+ * old bitmap to be journaled */
-+
-+ ext3_debug("using block group %d(%d)\n",
-+ ac.ac_b_ex.fe_group, ac.ac_b_ex.fe_len);
-+
-+ bitmap_bh = read_block_bitmap(sb, ac.ac_b_ex.fe_group);
-+ if (!bitmap_bh) {
-+ *errp = -EIO;
-+ goto out_err;
-+ }
-+
-+ err = ext3_journal_get_write_access(handle, bitmap_bh);
-+ if (err) {
-+ *errp = err;
-+ goto out_err;
-+ }
-+
-+ gdp = ext3_get_group_desc(sb, ac.ac_b_ex.fe_group, &gdp_bh);
-+ if (!gdp) {
-+ *errp = -EIO;
-+ goto out_err;
-+ }
-+
-+ err = ext3_journal_get_write_access(handle, gdp_bh);
-+ if (err)
-+ goto out_err;
-+
-+ block = ac.ac_b_ex.fe_group * EXT3_BLOCKS_PER_GROUP(sb)
-+ + ac.ac_b_ex.fe_start
-+ + le32_to_cpu(es->s_first_data_block);
-+
-+ if (block == le32_to_cpu(gdp->bg_block_bitmap) ||
-+ block == le32_to_cpu(gdp->bg_inode_bitmap) ||
-+ in_range(block, le32_to_cpu(gdp->bg_inode_table),
-+ EXT3_SB(sb)->s_itb_per_group))
-+ ext3_error(sb, "ext3_new_block",
-+ "Allocating block in system zone - "
-+ "block = %u", block);
-+#ifdef AGGRESSIVE_CHECK
-+ for (i = 0; i < ac.ac_b_ex.fe_len; i++)
-+ J_ASSERT(!mb_test_bit(ac.ac_b_ex.fe_start + i, bitmap_bh->b_data));
-+#endif
-+ mb_set_bits(bitmap_bh->b_data, ac.ac_b_ex.fe_start, ac.ac_b_ex.fe_len);
-+
-+ spin_lock(sb_bgl_lock(sbi, ac.ac_b_ex.fe_group));
-+ gdp->bg_free_blocks_count =
-+ cpu_to_le16(le16_to_cpu(gdp->bg_free_blocks_count)
-+ - ac.ac_b_ex.fe_len);
-+ spin_unlock(sb_bgl_lock(sbi, ac.ac_b_ex.fe_group));
-+ percpu_counter_mod(&sbi->s_freeblocks_counter, - ac.ac_b_ex.fe_len);
-+
-+ err = ext3_journal_dirty_metadata(handle, bitmap_bh);
-+ if (err)
-+ goto out_err;
-+ err = ext3_journal_dirty_metadata(handle, gdp_bh);
-+ if (err)
-+ goto out_err;
-+
-+ sb->s_dirt = 1;
-+ *errp = 0;
-+ brelse(bitmap_bh);
-+
-+ /* drop non-allocated, but dquote'd blocks */
-+ J_ASSERT(*len >= ac.ac_b_ex.fe_len);
-+ DQUOT_FREE_BLOCK(inode, *len - ac.ac_b_ex.fe_len);
-+
-+ *len = ac.ac_b_ex.fe_len;
-+ J_ASSERT(*len > 0);
-+ J_ASSERT(block != 0);
-+ goto out;
-+
-+out_err:
-+ /* if we've already allocated something, roll it back */
-+ if (ac.ac_status == AC_STATUS_FOUND) {
-+ /* FIXME: free blocks here */
-+ }
-+
-+ DQUOT_FREE_BLOCK(inode, *len);
-+ brelse(bitmap_bh);
-+ *errp = err;
-+ block = 0;
-+out:
-+ if (ac.ac_buddy_page)
-+ page_cache_release(ac.ac_buddy_page);
-+ if (ac.ac_bitmap_page)
-+ page_cache_release(ac.ac_bitmap_page);
-+
-+ if (!(flags & EXT3_MB_HINT_RESERVED)) {
-+ /* the block wasn't reserved before and we reserved it
-+ * at the beginning of allocation. it doesn't matter
-+ * whether we allocated anything or failed: it's time
-+ * to release the reservation. NOTE: because multiblock
-+ * requests are expected from the delayed allocation
-+ * path only, this is always a single block */
-+ ext3_mb_release_blocks(sb, 1);
-+ }
-+
-+ if (unlikely(ext3_mb_stats) && ac.ac_g_ex.fe_len > 1) {
-+ atomic_inc(&sbi->s_bal_reqs);
-+ atomic_add(*len, &sbi->s_bal_allocated);
-+ if (*len >= ac.ac_g_ex.fe_len)
-+ atomic_inc(&sbi->s_bal_success);
-+ atomic_add(ac.ac_found, &sbi->s_bal_ex_scanned);
-+ if (ac.ac_g_ex.fe_start == ac.ac_b_ex.fe_start &&
-+ ac.ac_g_ex.fe_group == ac.ac_b_ex.fe_group)
-+ atomic_inc(&sbi->s_bal_goals);
-+ if (ac.ac_found > ext3_mb_max_to_scan)
-+ atomic_inc(&sbi->s_bal_breaks);
-+ }
-+
-+ ext3_mb_store_history(sb, inode->i_ino, &ac);
-+
-+ return block;
-+}
-+EXPORT_SYMBOL(ext3_mb_new_blocks);
-+
-+#ifdef EXT3_MB_HISTORY
-+struct ext3_mb_proc_session {
-+ struct ext3_mb_history *history;
-+ struct super_block *sb;
-+ int start;
-+ int max;
-+};
-+
-+static void *ext3_mb_history_skip_empty(struct ext3_mb_proc_session *s,
-+ struct ext3_mb_history *hs,
-+ int first)
-+{
-+ if (hs == s->history + s->max)
-+ hs = s->history;
-+ if (!first && hs == s->history + s->start)
-+ return NULL;
-+ while (hs->goal.fe_len == 0) {
-+ hs++;
-+ if (hs == s->history + s->max)
-+ hs = s->history;
-+ if (hs == s->history + s->start)
-+ return NULL;
-+ }
-+ return hs;
-+}
-+
-+static void *ext3_mb_seq_history_start(struct seq_file *seq, loff_t *pos)
-+{
-+ struct ext3_mb_proc_session *s = seq->private;
-+ struct ext3_mb_history *hs;
-+ int l = *pos;
-+
-+ if (l == 0)
-+ return SEQ_START_TOKEN;
-+ hs = ext3_mb_history_skip_empty(s, s->history + s->start, 1);
-+ if (!hs)
-+ return NULL;
-+ while (--l && (hs = ext3_mb_history_skip_empty(s, ++hs, 0)) != NULL);
-+ return hs;
-+}
-+
-+static void *ext3_mb_seq_history_next(struct seq_file *seq, void *v, loff_t *pos)
-+{
-+ struct ext3_mb_proc_session *s = seq->private;
-+ struct ext3_mb_history *hs = v;
-+
-+ ++*pos;
-+ if (v == SEQ_START_TOKEN)
-+ return ext3_mb_history_skip_empty(s, s->history + s->start, 1);
-+ else
-+ return ext3_mb_history_skip_empty(s, ++hs, 0);
-+}
-+
-+static int ext3_mb_seq_history_show(struct seq_file *seq, void *v)
-+{
-+ struct ext3_mb_history *hs = v;
-+ char buf[20], buf2[20];
-+
-+ if (v == SEQ_START_TOKEN) {
-+ seq_printf(seq, "%-5s %-8s %-17s %-17s %-5s %-5s %-2s %-5s %-5s %-6s\n",
-+ "pid", "inode", "goal", "result", "found", "grps", "cr",
-+ "merge", "tail", "broken");
-+ return 0;
-+ }
-+
-+ sprintf(buf, "%u/%u/%u", hs->goal.fe_group,
-+ hs->goal.fe_start, hs->goal.fe_len);
-+ sprintf(buf2, "%u/%u/%u", hs->result.fe_group,
-+ hs->result.fe_start, hs->result.fe_len);
-+ seq_printf(seq, "%-5u %-8u %-17s %-17s %-5u %-5u %-2u %-5s %-5u %-6u\n",
-+ hs->pid, hs->ino, buf, buf2, hs->found, hs->groups,
-+ hs->cr, hs->merged ? "M" : "", hs->tail,
-+ hs->buddy ? 1 << hs->buddy : 0);
-+ return 0;
-+}
-+
-+static void ext3_mb_seq_history_stop(struct seq_file *seq, void *v)
-+{
-+}
-+
-+static struct seq_operations ext3_mb_seq_history_ops = {
-+ .start = ext3_mb_seq_history_start,
-+ .next = ext3_mb_seq_history_next,
-+ .stop = ext3_mb_seq_history_stop,
-+ .show = ext3_mb_seq_history_show,
-+};
-+
-+static int ext3_mb_seq_history_open(struct inode *inode, struct file *file)
-+{
-+ struct super_block *sb = PDE(inode)->data;
-+ struct ext3_sb_info *sbi = EXT3_SB(sb);
-+ struct ext3_mb_proc_session *s;
-+ int rc, size;
-+
-+ s = kmalloc(sizeof(*s), GFP_KERNEL);
-+ if (s == NULL)
-+ return -EIO;
-+ size = sizeof(struct ext3_mb_history) * sbi->s_mb_history_max;
-+ s->history = kmalloc(size, GFP_KERNEL);
-+ if (s->history == NULL) {
-+ kfree(s);
-+ return -EIO;
-+ }
-+
-+ spin_lock(&sbi->s_mb_history_lock);
-+ memcpy(s->history, sbi->s_mb_history, size);
-+ s->max = sbi->s_mb_history_max;
-+ s->start = sbi->s_mb_history_cur % s->max;
-+ spin_unlock(&sbi->s_mb_history_lock);
-+
-+ rc = seq_open(file, &ext3_mb_seq_history_ops);
-+ if (rc == 0) {
-+ struct seq_file *m = (struct seq_file *)file->private_data;
-+ m->private = s;
-+ } else {
-+ kfree(s->history);
-+ kfree(s);
-+ }
-+ return rc;
-+
-+}
-+
-+static int ext3_mb_seq_history_release(struct inode *inode, struct file *file)
-+{
-+ struct seq_file *seq = (struct seq_file *)file->private_data;
-+ struct ext3_mb_proc_session *s = seq->private;
-+ kfree(s->history);
-+ kfree(s);
-+ return seq_release(inode, file);
-+}
-+
-+static struct file_operations ext3_mb_seq_history_fops = {
-+ .owner = THIS_MODULE,
-+ .open = ext3_mb_seq_history_open,
-+ .read = seq_read,
-+ .llseek = seq_lseek,
-+ .release = ext3_mb_seq_history_release,
-+};
-+
-+static void *ext3_mb_seq_groups_start(struct seq_file *seq, loff_t *pos)
-+{
-+ struct super_block *sb = seq->private;
-+ struct ext3_sb_info *sbi = EXT3_SB(sb);
-+ long group;
-+
-+ if (*pos < 0 || *pos >= sbi->s_groups_count)
-+ return NULL;
-+
-+ group = *pos + 1;
-+ return (void *) group;
-+}
-+
-+static void *ext3_mb_seq_groups_next(struct seq_file *seq, void *v, loff_t *pos)
-+{
-+ struct super_block *sb = seq->private;
-+ struct ext3_sb_info *sbi = EXT3_SB(sb);
-+ long group;
-+
-+ ++*pos;
-+ if (*pos < 0 || *pos >= sbi->s_groups_count)
-+ return NULL;
-+ group = *pos + 1;
-+ return (void *) group;
-+}
-+
-+static int ext3_mb_seq_groups_show(struct seq_file *seq, void *v)
-+{
-+ struct super_block *sb = seq->private;
-+ long group = (long) v, i;
-+ struct sg {
-+ struct ext3_group_info info;
-+ unsigned short counters[16];
-+ } sg;
-+
-+ group--;
-+ if (group == 0)
-+ seq_printf(seq, "#%-5s: %-5s %-5s %-5s [ %-5s %-5s %-5s %-5s %-5s %-5s %-5s %-5s %-5s %-5s %-5s %-5s %-5s %-5s ]\n",
-+ "group", "free", "frags", "first", "2^0", "2^1", "2^2",
-+ "2^3", "2^4", "2^5", "2^6", "2^7", "2^8", "2^9", "2^10",
-+ "2^11", "2^12", "2^13");
-+
-+ i = (sb->s_blocksize_bits + 2) * sizeof(sg.info.bb_counters[0]) +
-+ sizeof(struct ext3_group_info);
-+ ext3_lock_group(sb, group);
-+ memcpy(&sg, EXT3_GROUP_INFO(sb, group), i);
-+ ext3_unlock_group(sb, group);
-+
-+ if (EXT3_MB_GRP_NEED_INIT(&sg.info))
-+ return 0;
-+
-+ seq_printf(seq, "#%-5lu: %-5u %-5u %-5u [", group, sg.info.bb_free,
-+ sg.info.bb_fragments, sg.info.bb_first_free);
-+ for (i = 0; i <= 13; i++)
-+ seq_printf(seq, " %-5u", i <= sb->s_blocksize_bits + 1 ?
-+ sg.info.bb_counters[i] : 0);
-+ seq_printf(seq, " ]\n");
-+
-+ return 0;
-+}
-+
-+static void ext3_mb_seq_groups_stop(struct seq_file *seq, void *v)
-+{
-+}
-+
-+static struct seq_operations ext3_mb_seq_groups_ops = {
-+ .start = ext3_mb_seq_groups_start,
-+ .next = ext3_mb_seq_groups_next,
-+ .stop = ext3_mb_seq_groups_stop,
-+ .show = ext3_mb_seq_groups_show,
-+};
-+
-+static int ext3_mb_seq_groups_open(struct inode *inode, struct file *file)
-+{
-+ struct super_block *sb = PDE(inode)->data;
-+ int rc;
-+
-+ rc = seq_open(file, &ext3_mb_seq_groups_ops);
-+ if (rc == 0) {
-+ struct seq_file *m = (struct seq_file *)file->private_data;
-+ m->private = sb;
-+ }
-+ return rc;
-+
-+}
-+
-+static struct file_operations ext3_mb_seq_groups_fops = {
-+ .owner = THIS_MODULE,
-+ .open = ext3_mb_seq_groups_open,
-+ .read = seq_read,
-+ .llseek = seq_lseek,
-+ .release = seq_release,
-+};
-+
-+static void ext3_mb_history_release(struct super_block *sb)
-+{
-+ struct ext3_sb_info *sbi = EXT3_SB(sb);
-+ char name[64];
-+
-+ snprintf(name, sizeof(name) - 1, "%s", bdevname(sb->s_bdev, name));
-+ remove_proc_entry("mb_groups", sbi->s_mb_proc);
-+ remove_proc_entry("mb_history", sbi->s_mb_proc);
-+ remove_proc_entry(name, proc_root_ext3);
-+
-+ if (sbi->s_mb_history)
-+ kfree(sbi->s_mb_history);
-+}
-+
-+static void ext3_mb_history_init(struct super_block *sb)
-+{
-+ struct ext3_sb_info *sbi = EXT3_SB(sb);
-+ char name[64];
-+ int i;
-+
-+ snprintf(name, sizeof(name) - 1, "%s", bdevname(sb->s_bdev, name));
-+ sbi->s_mb_proc = proc_mkdir(name, proc_root_ext3);
-+ if (sbi->s_mb_proc != NULL) {
-+ struct proc_dir_entry *p;
-+ p = create_proc_entry("mb_history", S_IRUGO, sbi->s_mb_proc);
-+ if (p) {
-+ p->proc_fops = &ext3_mb_seq_history_fops;
-+ p->data = sb;
-+ }
-+ p = create_proc_entry("mb_groups", S_IRUGO, sbi->s_mb_proc);
-+ if (p) {
-+ p->proc_fops = &ext3_mb_seq_groups_fops;
-+ p->data = sb;
-+ }
-+ }
-+
-+ sbi->s_mb_history_max = 1000;
-+ sbi->s_mb_history_cur = 0;
-+ spin_lock_init(&sbi->s_mb_history_lock);
-+ i = sbi->s_mb_history_max * sizeof(struct ext3_mb_history);
-+ sbi->s_mb_history = kmalloc(i, GFP_KERNEL);
-+ if (sbi->s_mb_history != NULL)
-+ memset(sbi->s_mb_history, 0, i);
-+ /* if we can't allocate the history, then we simply won't use it */
-+}
-+
-+static void
-+ext3_mb_store_history(struct super_block *sb, unsigned ino,
-+ struct ext3_allocation_context *ac)
-+{
-+ struct ext3_sb_info *sbi = EXT3_SB(sb);
-+ struct ext3_mb_history h;
-+
-+ if (unlikely(sbi->s_mb_history == NULL))
-+ return;
-+
-+ h.pid = current->pid;
-+ h.ino = ino;
-+ h.goal = ac->ac_g_ex;
-+ h.result = ac->ac_b_ex;
-+ h.found = ac->ac_found;
-+ h.cr = ac->ac_criteria;
-+ h.groups = ac->ac_groups_scanned;
-+ h.tail = ac->ac_tail;
-+ h.buddy = ac->ac_buddy;
-+ h.merged = 0;
-+ if (ac->ac_g_ex.fe_start == ac->ac_b_ex.fe_start &&
-+ ac->ac_g_ex.fe_group == ac->ac_b_ex.fe_group)
-+ h.merged = 1;
-+
-+ spin_lock(&sbi->s_mb_history_lock);
-+ memcpy(sbi->s_mb_history + sbi->s_mb_history_cur, &h, sizeof(h));
-+ if (++sbi->s_mb_history_cur >= sbi->s_mb_history_max)
-+ sbi->s_mb_history_cur = 0;
-+ spin_unlock(&sbi->s_mb_history_lock);
-+}
-+
-+#else
-+#define ext3_mb_history_release(sb)
-+#define ext3_mb_history_init(sb)
-+#endif
-+
-+int ext3_mb_init_backend(struct super_block *sb)
-+{
-+ struct ext3_sb_info *sbi = EXT3_SB(sb);
-+ int i, j, len, metalen;
-+ int num_meta_group_infos =
-+ (sbi->s_groups_count + EXT3_DESC_PER_BLOCK(sb) - 1) >>
-+ EXT3_DESC_PER_BLOCK_BITS(sb);
-+ struct ext3_group_info **meta_group_info;
-+
-+ /* An 8TB filesystem with 64-bit pointers requires a 4096 byte
-+ * kmalloc. A 128kb malloc should suffice for a 256TB filesystem.
-+ * So a two level scheme suffices for now. */
-+ sbi->s_group_info = kmalloc(sizeof(*sbi->s_group_info) *
-+ num_meta_group_infos, GFP_KERNEL);
-+ if (sbi->s_group_info == NULL) {
-+ printk(KERN_ERR "EXT3-fs: can't allocate buddy meta group\n");
-+ return -ENOMEM;
-+ }
-+ sbi->s_buddy_cache = new_inode(sb);
-+ if (sbi->s_buddy_cache == NULL) {
-+ printk(KERN_ERR "EXT3-fs: can't get new inode\n");
-+ goto err_freesgi;
-+ }
-+
-+ metalen = sizeof(*meta_group_info) << EXT3_DESC_PER_BLOCK_BITS(sb);
-+ for (i = 0; i < num_meta_group_infos; i++) {
-+ if ((i + 1) == num_meta_group_infos)
-+ metalen = sizeof(*meta_group_info) *
-+ (sbi->s_groups_count -
-+ (i << EXT3_DESC_PER_BLOCK_BITS(sb)));
-+ meta_group_info = kmalloc(metalen, GFP_KERNEL);
-+ if (meta_group_info == NULL) {
-+ printk(KERN_ERR "EXT3-fs: can't allocate mem for a "
-+ "buddy group\n");
-+ goto err_freemeta;
-+ }
-+ sbi->s_group_info[i] = meta_group_info;
-+ }
-+
-+ /*
-+ * calculate needed size. if change bb_counters size,
-+ * don't forget about ext3_mb_generate_buddy()
-+ */
-+ len = sizeof(struct ext3_group_info);
-+ len += sizeof(unsigned short) * (sb->s_blocksize_bits + 2);
-+ for (i = 0; i < sbi->s_groups_count; i++) {
-+ struct ext3_group_desc * desc;
-+
-+ meta_group_info =
-+ sbi->s_group_info[i >> EXT3_DESC_PER_BLOCK_BITS(sb)];
-+ j = i & (EXT3_DESC_PER_BLOCK(sb) - 1);
-+
-+ meta_group_info[j] = kmalloc(len, GFP_KERNEL);
-+ if (meta_group_info[j] == NULL) {
-+ printk(KERN_ERR "EXT3-fs: can't allocate buddy mem\n");
-+ i--;
-+ goto err_freebuddy;
-+ }
-+ desc = ext3_get_group_desc(sb, i, NULL);
-+ if (desc == NULL) {
-+ printk(KERN_ERR"EXT3-fs: can't read descriptor %u\n",i);
-+ goto err_freebuddy;
-+ }
-+ memset(meta_group_info[j], 0, len);
-+ set_bit(EXT3_GROUP_INFO_NEED_INIT_BIT,
-+ &meta_group_info[j]->bb_state);
-+ meta_group_info[j]->bb_free =
-+ le16_to_cpu(desc->bg_free_blocks_count);
-+ }
-+
-+ return 0;
-+
-+err_freebuddy:
-+ while (i >= 0) {
-+ kfree(EXT3_GROUP_INFO(sb, i));
-+ i--;
-+ }
-+ i = num_meta_group_infos;
-+err_freemeta:
-+ while (--i >= 0)
-+ kfree(sbi->s_group_info[i]);
-+ iput(sbi->s_buddy_cache);
-+err_freesgi:
-+ kfree(sbi->s_group_info);
-+ return -ENOMEM;
-+}
-+
-+int ext3_mb_init(struct super_block *sb, int needs_recovery)
-+{
-+ struct ext3_sb_info *sbi = EXT3_SB(sb);
-+ struct inode *root = sb->s_root->d_inode;
-+ unsigned i, offset, max;
-+ struct dentry *dentry;
-+
-+ if (!test_opt(sb, MBALLOC))
-+ return 0;
-+
-+ i = (sb->s_blocksize_bits + 2) * sizeof(unsigned short);
-+
-+ sbi->s_mb_offsets = kmalloc(i, GFP_KERNEL);
-+ if (sbi->s_mb_offsets == NULL) {
-+ clear_opt(sbi->s_mount_opt, MBALLOC);
-+ return -ENOMEM;
-+ }
-+ sbi->s_mb_maxs = kmalloc(i, GFP_KERNEL);
-+ if (sbi->s_mb_maxs == NULL) {
-+ clear_opt(sbi->s_mount_opt, MBALLOC);
-+ kfree(sbi->s_mb_offsets);
-+ return -ENOMEM;
-+ }
-+
-+ /* order 0 is regular bitmap */
-+ sbi->s_mb_maxs[0] = sb->s_blocksize << 3;
-+ sbi->s_mb_offsets[0] = 0;
-+
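-+ /* each higher order's buddy data is half the size of the previous
-+ * one: for i >= 1, s_mb_offsets[i] is the byte offset of the
-+ * order-i data within the buddy block and s_mb_maxs[i] the number
-+ * of bits stored there */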
-+ i = 1;
-+ offset = 0;
-+ max = sb->s_blocksize << 2;
-+ do {
-+ sbi->s_mb_offsets[i] = offset;
-+ sbi->s_mb_maxs[i] = max;
-+ offset += 1 << (sb->s_blocksize_bits - i);
-+ max = max >> 1;
-+ i++;
-+ } while (i <= sb->s_blocksize_bits + 1);
-+
-+ /* init file for buddy data */
-+ if ((i = ext3_mb_init_backend(sb))) {
-+ clear_opt(sbi->s_mount_opt, MBALLOC);
-+ kfree(sbi->s_mb_offsets);
-+ kfree(sbi->s_mb_maxs);
-+ return i;
-+ }
-+
-+ spin_lock_init(&sbi->s_reserve_lock);
-+ spin_lock_init(&sbi->s_md_lock);
-+ INIT_LIST_HEAD(&sbi->s_active_transaction);
-+ INIT_LIST_HEAD(&sbi->s_closed_transaction);
-+ INIT_LIST_HEAD(&sbi->s_committed_transaction);
-+ spin_lock_init(&sbi->s_bal_lock);
-+
-+ /* remove old on-disk buddy file */
-+ down(&root->i_sem);
-+ dentry = lookup_one_len(".buddy", sb->s_root, strlen(".buddy"));
-+ if (dentry->d_inode != NULL) {
-+ i = vfs_unlink(root, dentry);
-+ if (i != 0)
-+ printk("EXT3-fs: can't remove .buddy file: %d\n", i);
-+ }
-+ dput(dentry);
-+ up(&root->i_sem);
-+
-+ ext3_mb_history_init(sb);
-+
-+ printk("EXT3-fs: mballoc enabled\n");
-+ return 0;
-+}
-+
-+int ext3_mb_release(struct super_block *sb)
-+{
-+ struct ext3_sb_info *sbi = EXT3_SB(sb);
-+ int i, num_meta_group_infos;
-+
-+ if (!test_opt(sb, MBALLOC))
-+ return 0;
-+
-+ /* release freed, non-committed blocks */
-+ spin_lock(&sbi->s_md_lock);
-+ list_splice_init(&sbi->s_closed_transaction,
-+ &sbi->s_committed_transaction);
-+ list_splice_init(&sbi->s_active_transaction,
-+ &sbi->s_committed_transaction);
-+ spin_unlock(&sbi->s_md_lock);
-+ ext3_mb_free_committed_blocks(sb);
-+
-+ if (sbi->s_group_info) {
-+ for (i = 0; i < sbi->s_groups_count; i++)
-+ kfree(EXT3_GROUP_INFO(sb, i));
-+ num_meta_group_infos = (sbi->s_groups_count +
-+ EXT3_DESC_PER_BLOCK(sb) - 1) >>
-+ EXT3_DESC_PER_BLOCK_BITS(sb);
-+ for (i = 0; i < num_meta_group_infos; i++)
-+ kfree(sbi->s_group_info[i]);
-+ kfree(sbi->s_group_info);
-+ }
-+ if (sbi->s_mb_offsets)
-+ kfree(sbi->s_mb_offsets);
-+ if (sbi->s_mb_maxs)
-+ kfree(sbi->s_mb_maxs);
-+ if (sbi->s_buddy_cache)
-+ iput(sbi->s_buddy_cache);
-+ if (sbi->s_blocks_reserved)
-+ printk("ext3-fs: %ld blocks being reserved at umount!\n",
-+ sbi->s_blocks_reserved);
-+ if (ext3_mb_stats) {
-+ printk("EXT3-fs: mballoc: %u blocks %u reqs (%u success)\n",
-+ atomic_read(&sbi->s_bal_allocated),
-+ atomic_read(&sbi->s_bal_reqs),
-+ atomic_read(&sbi->s_bal_success));
-+ printk("EXT3-fs: mballoc: %u extents scanned, %u goal hits, "
-+ "%u 2^N hits, %u breaks\n",
-+ atomic_read(&sbi->s_bal_ex_scanned),
-+ atomic_read(&sbi->s_bal_goals),
-+ atomic_read(&sbi->s_bal_2orders),
-+ atomic_read(&sbi->s_bal_breaks));
-+ printk("EXT3-fs: mballoc: %lu generated and it took %Lu\n",
-+ sbi->s_mb_buddies_generated++,
-+ sbi->s_mb_generation_time);
-+ }
-+
-+ ext3_mb_history_release(sb);
-+
-+ return 0;
-+}
-+
-+void ext3_mb_free_committed_blocks(struct super_block *sb)
-+{
-+ struct ext3_sb_info *sbi = EXT3_SB(sb);
-+ int err, i, count = 0, count2 = 0;
-+ struct ext3_free_metadata *md;
-+ struct ext3_buddy e3b;
-+
-+ if (list_empty(&sbi->s_committed_transaction))
-+ return;
-+
-+ /* there are committed blocks to be freed yet */
-+ do {
-+ /* get next array of blocks */
-+ md = NULL;
-+ spin_lock(&sbi->s_md_lock);
-+ if (!list_empty(&sbi->s_committed_transaction)) {
-+ md = list_entry(sbi->s_committed_transaction.next,
-+ struct ext3_free_metadata, list);
-+ list_del(&md->list);
-+ }
-+ spin_unlock(&sbi->s_md_lock);
-+
-+ if (md == NULL)
-+ break;
-+
-+ mb_debug("gonna free %u blocks in group %u (0x%p):",
-+ md->num, md->group, md);
-+
-+ err = ext3_mb_load_buddy(sb, md->group, &e3b);
-+ /* we expect to find existing buddy because it's pinned */
-+ BUG_ON(err != 0);
-+
-+ /* there are blocks to put in buddy to make them really free */
-+ count += md->num;
-+ count2++;
-+ ext3_lock_group(sb, md->group);
-+ for (i = 0; i < md->num; i++) {
-+ mb_debug(" %u", md->blocks[i]);
-+ mb_free_blocks(&e3b, md->blocks[i], 1);
-+ }
-+ mb_debug("\n");
-+ ext3_unlock_group(sb, md->group);
-+
-+ /* balance refcounts from ext3_mb_free_metadata() */
-+ page_cache_release(e3b.bd_buddy_page);
-+ page_cache_release(e3b.bd_bitmap_page);
-+
-+ kfree(md);
-+ ext3_mb_release_desc(&e3b);
-+
-+ } while (md);
-+ mb_debug("freed %u blocks in %u structures\n", count, count2);
-+}
-+
-+void ext3_mb_poll_new_transaction(struct super_block *sb, handle_t *handle)
-+{
-+ struct ext3_sb_info *sbi = EXT3_SB(sb);
-+
-+ if (sbi->s_last_transaction == handle->h_transaction->t_tid)
-+ return;
-+
-+ /* new transaction! time to close the last one and free blocks
-+ * for the committed transaction. we know that only one
-+ * transaction can be active, so the previous transaction may
-+ * still be being logged, and the transaction before the previous
-+ * one is known to be already logged. this means that now we may
-+ * free blocks freed in all transactions before the previous one.
-+ * hope I'm clear enough ... */
-+
-+ spin_lock(&sbi->s_md_lock);
-+ if (sbi->s_last_transaction != handle->h_transaction->t_tid) {
-+ mb_debug("new transaction %lu, old %lu\n",
-+ (unsigned long) handle->h_transaction->t_tid,
-+ (unsigned long) sbi->s_last_transaction);
-+ list_splice_init(&sbi->s_closed_transaction,
-+ &sbi->s_committed_transaction);
-+ list_splice_init(&sbi->s_active_transaction,
-+ &sbi->s_closed_transaction);
-+ sbi->s_last_transaction = handle->h_transaction->t_tid;
-+ }
-+ spin_unlock(&sbi->s_md_lock);
-+
-+ ext3_mb_free_committed_blocks(sb);
-+}
-+
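-+/*
-+ * Queue blocks freed in the current transaction on a per-group list so
-+ * that they are only returned to the buddy (and become allocatable
-+ * again) once it is safe, i.e. once the transaction has committed;
-+ * see ext3_mb_free_committed_blocks().
-+ */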
-+int ext3_mb_free_metadata(handle_t *handle, struct ext3_buddy *e3b,
-+ int group, int block, int count)
-+{
-+ struct ext3_group_info *db = e3b->bd_info;
-+ struct super_block *sb = e3b->bd_sb;
-+ struct ext3_sb_info *sbi = EXT3_SB(sb);
-+ struct ext3_free_metadata *md;
-+ int i;
-+
-+ J_ASSERT(e3b->bd_bitmap_page != NULL);
-+ J_ASSERT(e3b->bd_buddy_page != NULL);
-+
-+ ext3_lock_group(sb, group);
-+ for (i = 0; i < count; i++) {
-+ md = db->bb_md_cur;
-+ if (md && db->bb_tid != handle->h_transaction->t_tid) {
-+ db->bb_md_cur = NULL;
-+ md = NULL;
-+ }
-+
-+ if (md == NULL) {
-+ ext3_unlock_group(sb, group);
-+ md = kmalloc(sizeof(*md), GFP_KERNEL);
-+ if (md == NULL)
-+ return -ENOMEM;
-+ md->num = 0;
-+ md->group = group;
-+
-+ ext3_lock_group(sb, group);
-+ if (db->bb_md_cur == NULL) {
-+ spin_lock(&sbi->s_md_lock);
-+ list_add(&md->list, &sbi->s_active_transaction);
-+ spin_unlock(&sbi->s_md_lock);
-+ /* protect buddy cache from being freed,
-+ * otherwise we'll refresh it from
-+ * on-disk bitmap and lose not-yet-available
-+ * blocks */
-+ page_cache_get(e3b->bd_buddy_page);
-+ page_cache_get(e3b->bd_bitmap_page);
-+ db->bb_md_cur = md;
-+ db->bb_tid = handle->h_transaction->t_tid;
-+ mb_debug("new md 0x%p for group %u\n",
-+ md, md->group);
-+ } else {
-+ kfree(md);
-+ md = db->bb_md_cur;
-+ }
-+ }
-+
-+ BUG_ON(md->num >= EXT3_BB_MAX_BLOCKS);
-+ md->blocks[md->num] = block + i;
-+ md->num++;
-+ if (md->num == EXT3_BB_MAX_BLOCKS) {
-+ /* no more space, put full container on a sb's list */
-+ db->bb_md_cur = NULL;
-+ }
-+ }
-+ ext3_unlock_group(sb, group);
-+ return 0;
-+}
-+
-+void ext3_mb_free_blocks(handle_t *handle, struct inode *inode,
-+ unsigned long block, unsigned long count,
-+ int metadata, int *freed)
-+{
-+ struct buffer_head *bitmap_bh = NULL;
-+ struct ext3_group_desc *gdp;
-+ struct ext3_super_block *es;
-+ unsigned long bit, overflow;
-+ struct buffer_head *gd_bh;
-+ unsigned long block_group;
-+ struct ext3_sb_info *sbi;
-+ struct super_block *sb;
-+ struct ext3_buddy e3b;
-+ int err = 0, ret;
-+
-+ *freed = 0;
-+ sb = inode->i_sb;
-+ if (!sb) {
-+ printk ("ext3_free_blocks: nonexistent device");
-+ return;
-+ }
-+
-+ ext3_mb_poll_new_transaction(sb, handle);
-+
-+ sbi = EXT3_SB(sb);
-+ es = EXT3_SB(sb)->s_es;
-+ if (block < le32_to_cpu(es->s_first_data_block) ||
-+ block + count < block ||
-+ block + count > le32_to_cpu(es->s_blocks_count)) {
-+ ext3_error (sb, "ext3_free_blocks",
-+ "Freeing blocks not in datazone - "
-+ "block = %lu, count = %lu", block, count);
-+ goto error_return;
-+ }
-+
-+ ext3_debug("freeing block %lu\n", block);
-+
-+do_more:
-+ overflow = 0;
-+ block_group = (block - le32_to_cpu(es->s_first_data_block)) /
-+ EXT3_BLOCKS_PER_GROUP(sb);
-+ bit = (block - le32_to_cpu(es->s_first_data_block)) %
-+ EXT3_BLOCKS_PER_GROUP(sb);
-+ /*
-+ * Check to see if we are freeing blocks across a group
-+ * boundary.
-+ */
-+ if (bit + count > EXT3_BLOCKS_PER_GROUP(sb)) {
-+ overflow = bit + count - EXT3_BLOCKS_PER_GROUP(sb);
-+ count -= overflow;
-+ }
-+ brelse(bitmap_bh);
-+ bitmap_bh = read_block_bitmap(sb, block_group);
-+ if (!bitmap_bh)
-+ goto error_return;
-+ gdp = ext3_get_group_desc (sb, block_group, &gd_bh);
-+ if (!gdp)
-+ goto error_return;
-+
-+ if (in_range (le32_to_cpu(gdp->bg_block_bitmap), block, count) ||
-+ in_range (le32_to_cpu(gdp->bg_inode_bitmap), block, count) ||
-+ in_range (block, le32_to_cpu(gdp->bg_inode_table),
-+ EXT3_SB(sb)->s_itb_per_group) ||
-+ in_range (block + count - 1, le32_to_cpu(gdp->bg_inode_table),
-+ EXT3_SB(sb)->s_itb_per_group))
-+ ext3_error (sb, "ext3_free_blocks",
-+ "Freeing blocks in system zones - "
-+ "Block = %lu, count = %lu",
-+ block, count);
-+
-+ BUFFER_TRACE(bitmap_bh, "getting write access");
-+ err = ext3_journal_get_write_access(handle, bitmap_bh);
-+ if (err)
-+ goto error_return;
-+
-+ /*
-+ * We are about to modify some metadata. Call the journal APIs
-+ * to unshare ->b_data if a currently-committing transaction is
-+ * using it
-+ */
-+ BUFFER_TRACE(gd_bh, "get_write_access");
-+ err = ext3_journal_get_write_access(handle, gd_bh);
-+ if (err)
-+ goto error_return;
-+
-+ err = ext3_mb_load_buddy(sb, block_group, &e3b);
-+ if (err)
-+ goto error_return;
-+
-+#ifdef AGGRESSIVE_CHECK
-+ {
-+ int i;
-+ for (i = 0; i < count; i++)
-+ J_ASSERT(mb_test_bit(bit + i, bitmap_bh->b_data));
-+ }
-+#endif
-+ mb_clear_bits(bitmap_bh->b_data, bit, count);
-+
-+ /* We dirtied the bitmap block */
-+ BUFFER_TRACE(bitmap_bh, "dirtied bitmap block");
-+ err = ext3_journal_dirty_metadata(handle, bitmap_bh);
-+
-+ if (metadata) {
-+ /* blocks being freed are metadata. these blocks shouldn't
-+ * be used until this transaction is committed */
-+ ext3_mb_free_metadata(handle, &e3b, block_group, bit, count);
-+ } else {
-+ ext3_lock_group(sb, block_group);
-+ mb_free_blocks(&e3b, bit, count);
-+ ext3_unlock_group(sb, block_group);
-+ }
-+
-+ spin_lock(sb_bgl_lock(sbi, block_group));
-+ gdp->bg_free_blocks_count =
-+ cpu_to_le16(le16_to_cpu(gdp->bg_free_blocks_count) + count);
-+ spin_unlock(sb_bgl_lock(sbi, block_group));
-+ percpu_counter_mod(&sbi->s_freeblocks_counter, count);
-+
-+ ext3_mb_release_desc(&e3b);
-+
-+ *freed = count;
-+
-+ /* And the group descriptor block */
-+ BUFFER_TRACE(gd_bh, "dirtied group descriptor block");
-+ ret = ext3_journal_dirty_metadata(handle, gd_bh);
-+ if (!err) err = ret;
-+
-+ if (overflow && !err) {
-+ block += count;
-+ count = overflow;
-+ goto do_more;
-+ }
-+ sb->s_dirt = 1;
-+error_return:
-+ brelse(bitmap_bh);
-+ ext3_std_error(sb, err);
-+ return;
-+}
-+
-+int ext3_mb_reserve_blocks(struct super_block *sb, int blocks)
-+{
-+ struct ext3_sb_info *sbi = EXT3_SB(sb);
-+ int free, ret = -ENOSPC;
-+
-+ BUG_ON(blocks < 0);
-+ spin_lock(&sbi->s_reserve_lock);
-+ free = percpu_counter_read_positive(&sbi->s_freeblocks_counter);
-+ if (blocks <= free - sbi->s_blocks_reserved) {
-+ sbi->s_blocks_reserved += blocks;
-+ ret = 0;
-+ }
-+ spin_unlock(&sbi->s_reserve_lock);
-+ return ret;
-+}
-+
-+void ext3_mb_release_blocks(struct super_block *sb, int blocks)
-+{
-+ struct ext3_sb_info *sbi = EXT3_SB(sb);
-+
-+ BUG_ON(blocks < 0);
-+ spin_lock(&sbi->s_reserve_lock);
-+ sbi->s_blocks_reserved -= blocks;
-+ WARN_ON(sbi->s_blocks_reserved < 0);
-+ if (sbi->s_blocks_reserved < 0)
-+ sbi->s_blocks_reserved = 0;
-+ spin_unlock(&sbi->s_reserve_lock);
-+}
-+
-+int ext3_new_block(handle_t *handle, struct inode *inode,
-+ unsigned long goal, int *errp)
-+{
-+ int ret, len;
-+
-+ if (!test_opt(inode->i_sb, MBALLOC)) {
-+ ret = ext3_new_block_old(handle, inode, goal, errp);
-+ goto out;
-+ }
-+ len = 1;
-+ ret = ext3_mb_new_blocks(handle, inode, goal, &len, 0, errp);
-+out:
-+ return ret;
-+}
-+
-+
-+void ext3_free_blocks(handle_t *handle, struct inode * inode,
-+ unsigned long block, unsigned long count, int metadata)
-+{
-+ struct super_block *sb;
-+ int freed;
-+
-+ sb = inode->i_sb;
-+ if (!test_opt(sb, MBALLOC) || !EXT3_SB(sb)->s_group_info)
-+ ext3_free_blocks_sb(handle, sb, block, count, &freed);
-+ else
-+ ext3_mb_free_blocks(handle, inode, block, count, metadata, &freed);
-+ if (freed)
-+ DQUOT_FREE_BLOCK(inode, freed);
-+ return;
-+}
-+
-+#define EXT3_ROOT "ext3"
-+#define EXT3_MB_STATS_NAME "mb_stats"
-+#define EXT3_MB_MAX_TO_SCAN_NAME "mb_max_to_scan"
-+#define EXT3_MB_MIN_TO_SCAN_NAME "mb_min_to_scan"
-+#define EXT3_MB_ORDER2_REQ "mb_order2_req"
-+
-+static int ext3_mb_stats_read(char *page, char **start, off_t off,
-+ int count, int *eof, void *data)
-+{
-+ int len;
-+
-+ *eof = 1;
-+ if (off != 0)
-+ return 0;
-+
-+ len = sprintf(page, "%ld\n", ext3_mb_stats);
-+ *start = page;
-+ return len;
-+}
-+
-+static int ext3_mb_stats_write(struct file *file, const char *buffer,
-+ unsigned long count, void *data)
-+{
-+ char str[32];
-+
-+ if (count >= sizeof(str)) {
-+ printk(KERN_ERR "EXT3-fs: %s string too long, max %u bytes\n",
-+ EXT3_MB_STATS_NAME, (int)sizeof(str));
-+ return -EOVERFLOW;
-+ }
-+
-+ if (copy_from_user(str, buffer, count))
-+ return -EFAULT;
-+
-+ /* Only set to 0 or 1 respectively; zero->0; non-zero->1 */
-+ ext3_mb_stats = (simple_strtol(str, NULL, 0) != 0);
-+ return count;
-+}
-+
-+static int ext3_mb_max_to_scan_read(char *page, char **start, off_t off,
-+ int count, int *eof, void *data)
-+{
-+ int len;
-+
-+ *eof = 1;
-+ if (off != 0)
-+ return 0;
-+
-+ len = sprintf(page, "%ld\n", ext3_mb_max_to_scan);
-+ *start = page;
-+ return len;
-+}
-+
-+static int ext3_mb_max_to_scan_write(struct file *file, const char *buffer,
-+ unsigned long count, void *data)
-+{
-+ char str[32];
-+ long value;
-+
-+ if (count >= sizeof(str)) {
-+ printk(KERN_ERR "EXT3-fs: %s string too long, max %u bytes\n",
-+ EXT3_MB_MAX_TO_SCAN_NAME, (int)sizeof(str));
-+ return -EOVERFLOW;
-+ }
-+
-+ if (copy_from_user(str, buffer, count))
-+ return -EFAULT;
-+
-+ /* Any positive value is accepted */
-+ value = simple_strtol(str, NULL, 0);
-+ if (value <= 0)
-+ return -ERANGE;
-+
-+ ext3_mb_max_to_scan = value;
-+
-+ return count;
-+}
-+
-+static int ext3_mb_min_to_scan_read(char *page, char **start, off_t off,
-+ int count, int *eof, void *data)
-+{
-+ int len;
-+
-+ *eof = 1;
-+ if (off != 0)
-+ return 0;
-+
-+ len = sprintf(page, "%ld\n", ext3_mb_min_to_scan);
-+ *start = page;
-+ return len;
-+}
-+
-+static int ext3_mb_min_to_scan_write(struct file *file, const char *buffer,
-+ unsigned long count, void *data)
-+{
-+ char str[32];
-+ long value;
-+
-+ if (count >= sizeof(str)) {
-+ printk(KERN_ERR "EXT3-fs: %s string too long, max %u bytes\n",
-+ EXT3_MB_MIN_TO_SCAN_NAME, (int)sizeof(str));
-+ return -EOVERFLOW;
-+ }
-+
-+ if (copy_from_user(str, buffer, count))
-+ return -EFAULT;
-+
-+ /* Any positive value is accepted */
-+ value = simple_strtol(str, NULL, 0);
-+ if (value <= 0)
-+ return -ERANGE;
-+
-+ ext3_mb_min_to_scan = value;
-+
-+ return count;
-+}
-+
-+static int ext3_mb_order2_req_read(char *page, char **start, off_t off,
-+ int count, int *eof, void *data)
-+{
-+ int len;
-+
-+ *eof = 1;
-+ if (off != 0)
-+ return 0;
-+
-+ len = sprintf(page, "%ld\n", ext3_mb_order2_reqs);
-+ *start = page;
-+ return len;
-+}
-+
-+static int ext3_mb_order2_req_write(struct file *file, const char *buffer,
-+ unsigned long count, void *data)
-+{
-+ char str[32];
-+ long value;
-+
-+ if (count >= sizeof(str)) {
-+ printk(KERN_ERR "EXT3-fs: %s string too long, max %u bytes\n",
-+ EXT3_MB_ORDER2_REQ, (int)sizeof(str));
-+ return -EOVERFLOW;
-+ }
-+
-+ if (copy_from_user(str, buffer, count))
-+ return -EFAULT;
-+
-+ /* Any positive value is accepted */
-+ value = simple_strtol(str, NULL, 0);
-+ if (value <= 0)
-+ return -ERANGE;
-+
-+ ext3_mb_order2_reqs = value;
-+
-+ return count;
-+}
-+
-+int __init init_ext3_proc(void)
-+{
-+ struct proc_dir_entry *proc_ext3_mb_stats;
-+ struct proc_dir_entry *proc_ext3_mb_max_to_scan;
-+ struct proc_dir_entry *proc_ext3_mb_min_to_scan;
-+ struct proc_dir_entry *proc_ext3_mb_order2_req;
-+
-+ proc_root_ext3 = proc_mkdir(EXT3_ROOT, proc_root_fs);
-+ if (proc_root_ext3 == NULL) {
-+ printk(KERN_ERR "EXT3-fs: Unable to create %s\n", EXT3_ROOT);
-+ return -EIO;
-+ }
-+
-+ /* Initialize EXT3_MB_STATS_NAME */
-+ proc_ext3_mb_stats = create_proc_entry(EXT3_MB_STATS_NAME,
-+ S_IFREG | S_IRUGO | S_IWUSR, proc_root_ext3);
-+ if (proc_ext3_mb_stats == NULL) {
-+ printk(KERN_ERR "EXT3-fs: Unable to create %s\n",
-+ EXT3_MB_STATS_NAME);
-+ remove_proc_entry(EXT3_ROOT, proc_root_fs);
-+ return -EIO;
-+ }
-+
-+ proc_ext3_mb_stats->data = NULL;
-+ proc_ext3_mb_stats->read_proc = ext3_mb_stats_read;
-+ proc_ext3_mb_stats->write_proc = ext3_mb_stats_write;
-+
-+ /* Initialize EXT3_MB_MAX_TO_SCAN_NAME */
-+ proc_ext3_mb_max_to_scan = create_proc_entry(
-+ EXT3_MB_MAX_TO_SCAN_NAME,
-+ S_IFREG | S_IRUGO | S_IWUSR, proc_root_ext3);
-+ if (proc_ext3_mb_max_to_scan == NULL) {
-+ printk(KERN_ERR "EXT3-fs: Unable to create %s\n",
-+ EXT3_MB_MAX_TO_SCAN_NAME);
-+ remove_proc_entry(EXT3_MB_STATS_NAME, proc_root_ext3);
-+ remove_proc_entry(EXT3_ROOT, proc_root_fs);
-+ return -EIO;
-+ }
-+
-+ proc_ext3_mb_max_to_scan->data = NULL;
-+ proc_ext3_mb_max_to_scan->read_proc = ext3_mb_max_to_scan_read;
-+ proc_ext3_mb_max_to_scan->write_proc = ext3_mb_max_to_scan_write;
-+
-+ /* Initialize EXT3_MB_MIN_TO_SCAN_NAME */
-+ proc_ext3_mb_min_to_scan = create_proc_entry(
-+ EXT3_MB_MIN_TO_SCAN_NAME,
-+ S_IFREG | S_IRUGO | S_IWUSR, proc_root_ext3);
-+ if (proc_ext3_mb_min_to_scan == NULL) {
-+ printk(KERN_ERR "EXT3-fs: Unable to create %s\n",
-+ EXT3_MB_MIN_TO_SCAN_NAME);
-+ remove_proc_entry(EXT3_MB_MAX_TO_SCAN_NAME, proc_root_ext3);
-+ remove_proc_entry(EXT3_MB_STATS_NAME, proc_root_ext3);
-+ remove_proc_entry(EXT3_ROOT, proc_root_fs);
-+ return -EIO;
-+ }
-+
-+ proc_ext3_mb_min_to_scan->data = NULL;
-+ proc_ext3_mb_min_to_scan->read_proc = ext3_mb_min_to_scan_read;
-+ proc_ext3_mb_min_to_scan->write_proc = ext3_mb_min_to_scan_write;
-+
-+ /* Initialize EXT3_MB_ORDER2_REQ */
-+ proc_ext3_mb_order2_req = create_proc_entry(
-+ EXT3_MB_ORDER2_REQ,
-+ S_IFREG | S_IRUGO | S_IWUSR, proc_root_ext3);
-+ if (proc_ext3_mb_order2_req == NULL) {
-+ printk(KERN_ERR "EXT3-fs: Unable to create %s\n",
-+ EXT3_MB_ORDER2_REQ);
-+ remove_proc_entry(EXT3_MB_MIN_TO_SCAN_NAME, proc_root_ext3);
-+ remove_proc_entry(EXT3_MB_MAX_TO_SCAN_NAME, proc_root_ext3);
-+ remove_proc_entry(EXT3_MB_STATS_NAME, proc_root_ext3);
-+ remove_proc_entry(EXT3_ROOT, proc_root_fs);
-+ return -EIO;
-+ }
-+
-+ proc_ext3_mb_order2_req->data = NULL;
-+ proc_ext3_mb_order2_req->read_proc = ext3_mb_order2_req_read;
-+ proc_ext3_mb_order2_req->write_proc = ext3_mb_order2_req_write;
-+
-+ return 0;
-+}
-+
-+void exit_ext3_proc(void)
-+{
-+ remove_proc_entry(EXT3_MB_STATS_NAME, proc_root_ext3);
-+ remove_proc_entry(EXT3_MB_MAX_TO_SCAN_NAME, proc_root_ext3);
-+ remove_proc_entry(EXT3_MB_MIN_TO_SCAN_NAME, proc_root_ext3);
-+ remove_proc_entry(EXT3_MB_ORDER2_REQ, proc_root_ext3);
-+ remove_proc_entry(EXT3_ROOT, proc_root_fs);
-+}
-Index: linux-2.6.9-full/fs/ext3/Makefile
-===================================================================
---- linux-2.6.9-full.orig/fs/ext3/Makefile 2006-06-01 14:58:46.000000000 +0400
-+++ linux-2.6.9-full/fs/ext3/Makefile 2006-10-24 12:54:31.000000000 +0400
-@@ -6,7 +6,7 @@ obj-$(CONFIG_EXT3_FS) += ext3.o
-
- ext3-y := balloc.o bitmap.o dir.o file.o fsync.o ialloc.o inode.o iopen.o \
- ioctl.o namei.o super.o symlink.o hash.o resize.o \
-- extents.o
-+ extents.o mballoc.o
-
- ext3-$(CONFIG_EXT3_FS_XATTR) += xattr.o xattr_user.o xattr_trusted.o
- ext3-$(CONFIG_EXT3_FS_POSIX_ACL) += acl.o
+++ /dev/null
-Index: linux-stage/fs/ext3/ialloc.c
-===================================================================
---- linux-stage.orig/fs/ext3/ialloc.c
-+++ linux-stage/fs/ext3/ialloc.c
-@@ -726,7 +726,8 @@ got:
- /* This is the optimal IO size (for stat), not the fs block size */
- inode->i_blksize = PAGE_SIZE;
- inode->i_blocks = 0;
-- inode->i_mtime = inode->i_atime = inode->i_ctime = CURRENT_TIME_SEC;
-+ inode->i_mtime = inode->i_atime = inode->i_ctime = ei->i_crtime =
-+ ext3_current_time(inode);
-
- memset(ei->i_data, 0, sizeof(ei->i_data));
- ei->i_next_alloc_block = 0;
-@@ -764,9 +765,8 @@ got:
- spin_unlock(&sbi->s_next_gen_lock);
-
- ei->i_state = EXT3_STATE_NEW;
-- ei->i_extra_isize =
-- (EXT3_INODE_SIZE(inode->i_sb) > EXT3_GOOD_OLD_INODE_SIZE) ?
-- sizeof(struct ext3_inode) - EXT3_GOOD_OLD_INODE_SIZE : 0;
-+
-+ ei->i_extra_isize = EXT3_SB(sb)->s_want_extra_isize;
-
- ret = inode;
- if(DQUOT_ALLOC_INODE(inode)) {
-Index: linux-stage/fs/ext3/inode.c
-===================================================================
---- linux-stage.orig/fs/ext3/inode.c
-+++ linux-stage/fs/ext3/inode.c
-@@ -627,7 +627,7 @@ static int ext3_splice_branch(handle_t *
-
- /* We are done with atomic stuff, now do the rest of housekeeping */
-
-- inode->i_ctime = CURRENT_TIME_SEC;
-+ inode->i_ctime = ext3_current_time(inode);
- ext3_mark_inode_dirty(handle, inode);
-
- /* had we spliced it onto indirect block? */
-@@ -2230,7 +2230,7 @@ do_indirects:
- ;
- }
- up(&ei->truncate_sem);
-- inode->i_mtime = inode->i_ctime = CURRENT_TIME_SEC;
-+ inode->i_mtime = inode->i_ctime = ext3_current_time(inode);
- ext3_mark_inode_dirty(handle, inode);
-
- /* In a multi-transaction truncate, we only make the final
-@@ -2457,10 +2457,6 @@ void ext3_read_inode(struct inode * inod
- }
- inode->i_nlink = le16_to_cpu(raw_inode->i_links_count);
- inode->i_size = le32_to_cpu(raw_inode->i_size);
-- inode->i_atime.tv_sec = le32_to_cpu(raw_inode->i_atime);
-- inode->i_ctime.tv_sec = le32_to_cpu(raw_inode->i_ctime);
-- inode->i_mtime.tv_sec = le32_to_cpu(raw_inode->i_mtime);
-- inode->i_atime.tv_nsec = inode->i_ctime.tv_nsec = inode->i_mtime.tv_nsec = 0;
-
- ei->i_state = 0;
- ei->i_next_alloc_block = 0;
-@@ -2521,6 +2517,11 @@ void ext3_read_inode(struct inode * inod
- else
- ei->i_extra_isize = 0;
-
-+ EXT3_INODE_GET_XTIME(i_ctime, inode, raw_inode);
-+ EXT3_INODE_GET_XTIME(i_mtime, inode, raw_inode);
-+ EXT3_INODE_GET_XTIME(i_atime, inode, raw_inode);
-+ EXT3_EINODE_GET_XTIME(i_crtime, ei, raw_inode);
-+
- if (S_ISREG(inode->i_mode)) {
- inode->i_op = &ext3_file_inode_operations;
- inode->i_fop = &ext3_file_operations;
-@@ -2601,9 +2602,12 @@ static int ext3_do_update_inode(handle_t
- }
- raw_inode->i_links_count = cpu_to_le16(inode->i_nlink);
- raw_inode->i_size = cpu_to_le32(ei->i_disksize);
-- raw_inode->i_atime = cpu_to_le32(inode->i_atime.tv_sec);
-- raw_inode->i_ctime = cpu_to_le32(inode->i_ctime.tv_sec);
-- raw_inode->i_mtime = cpu_to_le32(inode->i_mtime.tv_sec);
-+
-+ EXT3_INODE_SET_XTIME(i_ctime, inode, raw_inode);
-+ EXT3_INODE_SET_XTIME(i_mtime, inode, raw_inode);
-+ EXT3_INODE_SET_XTIME(i_atime, inode, raw_inode);
-+ EXT3_EINODE_SET_XTIME(i_crtime, ei, raw_inode);
-+
- raw_inode->i_blocks = cpu_to_le32(inode->i_blocks);
- raw_inode->i_dtime = cpu_to_le32(ei->i_dtime);
- raw_inode->i_flags = cpu_to_le32(ei->i_flags);
-Index: linux-stage/fs/ext3/ioctl.c
-===================================================================
---- linux-stage.orig/fs/ext3/ioctl.c
-+++ linux-stage/fs/ext3/ioctl.c
-@@ -112,7 +112,7 @@ int ext3_ioctl (struct inode * inode, st
- ei->i_flags = flags;
-
- ext3_set_inode_flags(inode);
-- inode->i_ctime = CURRENT_TIME_SEC;
-+ inode->i_ctime = ext3_current_time(inode);
-
- err = ext3_mark_iloc_dirty(handle, inode, &iloc);
- flags_err:
-@@ -150,7 +150,7 @@ flags_err:
- return PTR_ERR(handle);
- err = ext3_reserve_inode_write(handle, inode, &iloc);
- if (err == 0) {
-- inode->i_ctime = CURRENT_TIME_SEC;
-+ inode->i_ctime = ext3_current_time(inode);
- inode->i_generation = generation;
- err = ext3_mark_iloc_dirty(handle, inode, &iloc);
- }
-Index: linux-stage/fs/ext3/namei.c
-===================================================================
---- linux-stage.orig/fs/ext3/namei.c
-+++ linux-stage/fs/ext3/namei.c
-@@ -1302,7 +1302,7 @@ static int add_dirent_to_buf(handle_t *h
- * happen is that the times are slightly out of date
- * and/or different from the directory change time.
- */
-- dir->i_mtime = dir->i_ctime = CURRENT_TIME_SEC;
-+ dir->i_mtime = dir->i_ctime = ext3_current_time(dir);
- ext3_update_dx_flag(dir);
- dir->i_version++;
- ext3_mark_inode_dirty(handle, dir);
-@@ -2098,7 +2098,7 @@ static int ext3_rmdir (struct inode * di
- inode->i_version++;
- inode->i_nlink = 0;
- ext3_orphan_add(handle, inode);
-- inode->i_ctime = dir->i_ctime = dir->i_mtime = CURRENT_TIME_SEC;
-+ inode->i_ctime = dir->i_ctime = dir->i_mtime = ext3_current_time(inode);
- ext3_mark_inode_dirty(handle, inode);
- ext3_dec_count(handle, dir);
- ext3_update_dx_flag(dir);
-@@ -2148,13 +2148,13 @@ static int ext3_unlink(struct inode * di
- retval = ext3_delete_entry(handle, dir, de, bh);
- if (retval)
- goto end_unlink;
-- dir->i_ctime = dir->i_mtime = CURRENT_TIME_SEC;
-+ dir->i_ctime = dir->i_mtime = ext3_current_time(dir);
- ext3_update_dx_flag(dir);
- ext3_mark_inode_dirty(handle, dir);
- ext3_dec_count(handle, inode);
- if (!inode->i_nlink)
- ext3_orphan_add(handle, inode);
-- inode->i_ctime = dir->i_ctime;
-+ inode->i_ctime = ext3_current_time(inode);
- ext3_mark_inode_dirty(handle, inode);
- retval = 0;
-
-@@ -2255,7 +2255,7 @@ retry:
- if (IS_DIRSYNC(dir))
- handle->h_sync = 1;
-
-- inode->i_ctime = CURRENT_TIME_SEC;
-+ inode->i_ctime = ext3_current_time(inode);
- ext3_inc_count(handle, inode);
- atomic_inc(&inode->i_count);
-
-@@ -2357,7 +2357,7 @@ static int ext3_rename (struct inode * o
- * Like most other Unix systems, set the ctime for inodes on a
- * rename.
- */
-- old_inode->i_ctime = CURRENT_TIME_SEC;
-+ old_inode->i_ctime = ext3_current_time(old_inode);
- ext3_mark_inode_dirty(handle, old_inode);
-
- /*
-@@ -2390,9 +2390,9 @@ static int ext3_rename (struct inode * o
-
- if (new_inode) {
- ext3_dec_count(handle, new_inode);
-- new_inode->i_ctime = CURRENT_TIME_SEC;
-+ new_inode->i_ctime = ext3_current_time(new_inode);
- }
-- old_dir->i_ctime = old_dir->i_mtime = CURRENT_TIME_SEC;
-+ old_dir->i_ctime = old_dir->i_mtime = ext3_current_time(old_dir);
- ext3_update_dx_flag(old_dir);
- if (dir_bh) {
- BUFFER_TRACE(dir_bh, "get_write_access");
-Index: linux-stage/fs/ext3/super.c
-===================================================================
---- linux-stage.orig/fs/ext3/super.c
-+++ linux-stage/fs/ext3/super.c
-@@ -1573,6 +1573,8 @@ static int ext3_fill_super (struct super
- sbi->s_inode_size);
- goto failed_mount;
- }
-+ if (sbi->s_inode_size > EXT3_GOOD_OLD_INODE_SIZE)
-+ sb->s_time_gran = 1 << (EXT3_EPOCH_BITS - 2);
- }
- sbi->s_frag_size = EXT3_MIN_FRAG_SIZE <<
- le32_to_cpu(es->s_log_frag_size);
-@@ -1759,6 +1761,32 @@ static int ext3_fill_super (struct super
- }
-
- ext3_setup_super (sb, es, sb->s_flags & MS_RDONLY);
-+
-+ /* determine the minimum size of new large inodes, if present */
-+ if (sbi->s_inode_size > EXT3_GOOD_OLD_INODE_SIZE) {
-+ sbi->s_want_extra_isize = sizeof(struct ext3_inode) -
-+ EXT3_GOOD_OLD_INODE_SIZE;
-+ if (EXT3_HAS_RO_COMPAT_FEATURE(sb,
-+ EXT4_FEATURE_RO_COMPAT_EXTRA_ISIZE)) {
-+ if (sbi->s_want_extra_isize <
-+ le16_to_cpu(es->s_want_extra_isize))
-+ sbi->s_want_extra_isize =
-+ le16_to_cpu(es->s_want_extra_isize);
-+ if (sbi->s_want_extra_isize <
-+ le16_to_cpu(es->s_min_extra_isize))
-+ sbi->s_want_extra_isize =
-+ le16_to_cpu(es->s_min_extra_isize);
-+ }
-+ }
-+ /* Check if enough inode space is available */
-+ if (EXT3_GOOD_OLD_INODE_SIZE + sbi->s_want_extra_isize >
-+ sbi->s_inode_size) {
-+ sbi->s_want_extra_isize = sizeof(struct ext3_inode) -
-+ EXT3_GOOD_OLD_INODE_SIZE;
-+		printk(KERN_INFO "EXT3-fs: required extra inode space not "
-+			"available.\n");
-+ }
-+
- /*
- * akpm: core read_super() calls in here with the superblock locked.
- * That deadlocks, because orphan cleanup needs to lock the superblock
-Index: linux-stage/fs/ext3/xattr.c
-===================================================================
---- linux-stage.orig/fs/ext3/xattr.c
-+++ linux-stage/fs/ext3/xattr.c
-@@ -1305,7 +1305,7 @@ getblk_failed:
-
- /* Update the inode. */
- EXT3_I(inode)->i_file_acl = new_bh ? new_bh->b_blocknr : 0;
-- inode->i_ctime = CURRENT_TIME_SEC;
-+ inode->i_ctime = ext3_current_time(inode);
- ext3_mark_inode_dirty(handle, inode);
- if (IS_SYNC(inode))
- handle->h_sync = 1;
-Index: linux-stage/include/linux/ext3_fs.h
-===================================================================
---- linux-stage.orig/include/linux/ext3_fs.h
-+++ linux-stage/include/linux/ext3_fs.h
-@@ -280,7 +280,7 @@ struct ext3_inode {
- __le16 i_uid; /* Low 16 bits of Owner Uid */
- __le32 i_size; /* Size in bytes */
- __le32 i_atime; /* Access time */
-- __le32 i_ctime; /* Creation time */
-+ __le32 i_ctime; /* Inode Change time */
- __le32 i_mtime; /* Modification time */
- __le32 i_dtime; /* Deletion Time */
- __le16 i_gid; /* Low 16 bits of Group Id */
-@@ -329,10 +329,73 @@ struct ext3_inode {
- } osd2; /* OS dependent 2 */
- __u16 i_extra_isize;
- __u16 i_pad1;
-+ __le32 i_ctime_extra; /* extra Change time (nsec << 2 | epoch) */
-+ __le32 i_mtime_extra; /* extra Modification time(nsec << 2 | epoch) */
-+ __le32 i_atime_extra; /* extra Access time (nsec << 2 | epoch) */
-+ __le32 i_crtime; /* File Creation time */
-+ __le32 i_crtime_extra; /* extra File Creation time (nsec << 2 | epoch) */
- };
-
- #define i_size_high i_dir_acl
-
-+#define EXT3_EPOCH_BITS 2
-+#define EXT3_EPOCH_MASK ((1 << EXT3_EPOCH_BITS) - 1)
-+#define EXT3_NSEC_MASK (~0UL << EXT3_EPOCH_BITS)
-+
-+#define EXT3_FITS_IN_INODE(ext3_inode, einode, field) \
-+ ((offsetof(typeof(*ext3_inode), field) + \
-+ sizeof((ext3_inode)->field)) \
-+ <= (EXT3_GOOD_OLD_INODE_SIZE + \
-+ (einode)->i_extra_isize)) \
-+
-+static inline __le32 ext3_encode_extra_time(struct timespec *time)
-+{
-+ return cpu_to_le32((sizeof(time->tv_sec) > 4 ?
-+ time->tv_sec >> 32 : 0) |
-+ ((time->tv_nsec << 2) & EXT3_NSEC_MASK));
-+}
-+
-+static inline void ext3_decode_extra_time(struct timespec *time, __le32 extra) {
-+ if (sizeof(time->tv_sec) > 4)
-+ time->tv_sec |= (__u64)(le32_to_cpu(extra) & EXT3_EPOCH_MASK)
-+ << 32;
-+ time->tv_nsec = (le32_to_cpu(extra) & EXT3_NSEC_MASK) >> 2;
-+}
-+
-+#define EXT3_INODE_SET_XTIME(xtime, inode, raw_inode) \
-+do { \
-+ (raw_inode)->xtime = cpu_to_le32((inode)->xtime.tv_sec); \
-+ if (EXT3_FITS_IN_INODE(raw_inode, EXT3_I(inode), xtime ## _extra))\
-+ (raw_inode)->xtime ## _extra = \
-+ ext3_encode_extra_time(&(inode)->xtime); \
-+} while (0)
-+
-+#define EXT3_EINODE_SET_XTIME(xtime, einode, raw_inode) \
-+do { \
-+ if (EXT3_FITS_IN_INODE(raw_inode, einode, xtime)) \
-+ (raw_inode)->xtime = cpu_to_le32((einode)->xtime.tv_sec); \
-+ if (EXT3_FITS_IN_INODE(raw_inode, einode, xtime ## _extra)) \
-+ (raw_inode)->xtime ## _extra = \
-+ ext3_encode_extra_time(&(einode)->xtime); \
-+} while (0)
-+
-+#define EXT3_INODE_GET_XTIME(xtime, inode, raw_inode) \
-+do { \
-+ (inode)->xtime.tv_sec = le32_to_cpu((raw_inode)->xtime); \
-+ if (EXT3_FITS_IN_INODE(raw_inode, EXT3_I(inode), xtime ## _extra))\
-+ ext3_decode_extra_time(&(inode)->xtime, \
-+ raw_inode->xtime ## _extra); \
-+} while (0)
-+
-+#define EXT3_EINODE_GET_XTIME(xtime, einode, raw_inode) \
-+do { \
-+ if (EXT3_FITS_IN_INODE(raw_inode, einode, xtime)) \
-+ (einode)->xtime.tv_sec = le32_to_cpu((raw_inode)->xtime); \
-+ if (EXT3_FITS_IN_INODE(raw_inode, einode, xtime ## _extra)) \
-+ ext3_decode_extra_time(&(einode)->xtime, \
-+ raw_inode->xtime ## _extra); \
-+} while (0)
-+
- #if defined(__KERNEL__) || defined(__linux__)
- #define i_reserved1 osd1.linux1.l_i_reserved1
- #define i_frag osd2.linux2.l_i_frag
-@@ -500,11 +563,19 @@ struct ext3_super_block {
- __le32 s_last_orphan; /* start of list of inodes to delete */
- __le32 s_hash_seed[4]; /* HTREE hash seed */
- __u8 s_def_hash_version; /* Default hash version to use */
-- __u8 s_reserved_char_pad;
-- __u16 s_reserved_word_pad;
-+ __u8 s_jnl_backup_type; /* Default type of journal backup */
-+ __le16 s_desc_size; /* Group desc. size: INCOMPAT_64BIT */
- __le32 s_default_mount_opts;
-- __le32 s_first_meta_bg; /* First metablock block group */
-- __u32 s_reserved[190]; /* Padding to the end of the block */
-+ __le32 s_first_meta_bg; /* First metablock block group */
-+ __le32 s_mkfs_time; /* When the filesystem was created */
-+ __le32 s_jnl_blocks[17]; /* Backup of the journal inode */
-+ __le32 s_blocks_count_hi; /* Blocks count high 32 bits */
-+ __le32 s_r_blocks_count_hi; /* Reserved blocks count high 32 bits*/
-+ __le32 s_free_blocks_hi; /* Free blocks count high 32 bits */
-+ __le16 s_min_extra_isize; /* All inodes have at least # bytes */
-+ __le16 s_want_extra_isize; /* New inodes should reserve # bytes */
-+ __le32 s_flags; /* Miscellaneous flags */
-+ __u32 s_reserved[167]; /* Padding to the end of the block */
- };
-
- #ifdef __KERNEL__
-@@ -580,6 +651,7 @@ static inline struct ext3_inode_info *EX
- #define EXT3_FEATURE_RO_COMPAT_BTREE_DIR 0x0004
- #define EXT4_FEATURE_RO_COMPAT_GDT_CSUM 0x0010
- #define EXT4_FEATURE_RO_COMPAT_DIR_NLINK 0x0020
-+#define EXT4_FEATURE_RO_COMPAT_EXTRA_ISIZE 0x0040
-
- #define EXT3_FEATURE_INCOMPAT_COMPRESSION 0x0001
- #define EXT3_FEATURE_INCOMPAT_FILETYPE 0x0002
-@@ -597,6 +669,7 @@ static inline struct ext3_inode_info *EX
- EXT3_FEATURE_RO_COMPAT_LARGE_FILE| \
- EXT4_FEATURE_RO_COMPAT_GDT_CSUM| \
- EXT4_FEATURE_RO_COMPAT_DIR_NLINK| \
-+ EXT4_FEATURE_RO_COMPAT_EXTRA_ISIZE| \
- EXT3_FEATURE_RO_COMPAT_BTREE_DIR)
-
- /*
-@@ -724,6 +797,12 @@ static inline struct ext3_inode *ext3_ra
- return (struct ext3_inode *) (iloc->bh->b_data + iloc->offset);
- }
-
-+static inline struct timespec ext3_current_time(struct inode *inode)
-+{
-+ return (inode->i_sb->s_time_gran < NSEC_PER_SEC) ?
-+ current_fs_time(inode->i_sb) : CURRENT_TIME_SEC;
-+}
-+
- /*
- * This structure is stuffed into the struct file's private_data field
- * for directories. It is where we put information so that we can do
-Index: linux-stage/include/linux/ext3_fs_i.h
-===================================================================
---- linux-stage.orig/include/linux/ext3_fs_i.h
-+++ linux-stage/include/linux/ext3_fs_i.h
-@@ -130,6 +130,7 @@ struct ext3_inode_info {
-
- /* on-disk additional length */
- __u16 i_extra_isize;
-+ struct timespec i_crtime;
-
- /*
- * truncate_sem is for serialising ext3_truncate() against
-Index: linux-stage/include/linux/ext3_fs_sb.h
-===================================================================
---- linux-stage.orig/include/linux/ext3_fs_sb.h
-+++ linux-stage/include/linux/ext3_fs_sb.h
-@@ -71,6 +71,8 @@ struct ext3_sb_info {
- /* Last group used to allocate inode */
- int s_last_alloc_group;
-
-+ unsigned int s_want_extra_isize; /* New inodes should reserve # bytes */
-+
- /* root of the per fs reservation window tree */
- spinlock_t s_rsv_window_lock;
- struct rb_root s_rsv_window_root;
+++ /dev/null
-Index: linux-2.6.16.27-0.9/fs/ext3/ialloc.c
-===================================================================
---- linux-2.6.16.27-0.9.orig/fs/ext3/ialloc.c
-+++ linux-2.6.16.27-0.9/fs/ext3/ialloc.c
-@@ -577,7 +577,8 @@ got:
- /* This is the optimal IO size (for stat), not the fs block size */
- inode->i_blksize = PAGE_SIZE;
- inode->i_blocks = 0;
-- inode->i_mtime = inode->i_atime = inode->i_ctime = CURRENT_TIME_SEC;
-+ inode->i_mtime = inode->i_atime = inode->i_ctime = ei->i_crtime =
-+ ext3_current_time(inode);
-
- memset(ei->i_data, 0, sizeof(ei->i_data));
- ei->i_dir_start_lookup = 0;
-@@ -609,9 +610,8 @@ got:
- spin_unlock(&sbi->s_next_gen_lock);
-
- ei->i_state = EXT3_STATE_NEW;
-- ei->i_extra_isize =
-- (EXT3_INODE_SIZE(inode->i_sb) > EXT3_GOOD_OLD_INODE_SIZE) ?
-- sizeof(struct ext3_inode) - EXT3_GOOD_OLD_INODE_SIZE : 0;
-+
-+ ei->i_extra_isize = EXT3_SB(sb)->s_want_extra_isize;
-
- ret = inode;
- if(DQUOT_ALLOC_INODE(inode)) {
-Index: linux-2.6.16.27-0.9/fs/ext3/inode.c
-===================================================================
---- linux-2.6.16.27-0.9.orig/fs/ext3/inode.c
-+++ linux-2.6.16.27-0.9/fs/ext3/inode.c
-@@ -620,7 +620,7 @@ static int ext3_splice_branch(handle_t *
-
- /* We are done with atomic stuff, now do the rest of housekeeping */
-
-- inode->i_ctime = CURRENT_TIME_SEC;
-+ inode->i_ctime = ext3_current_time(inode);
- ext3_mark_inode_dirty(handle, inode);
-
- /* had we spliced it onto indirect block? */
-@@ -2244,7 +2244,7 @@ do_indirects:
- ext3_discard_reservation(inode);
-
- up(&ei->truncate_sem);
-- inode->i_mtime = inode->i_ctime = CURRENT_TIME_SEC;
-+ inode->i_mtime = inode->i_ctime = ext3_current_time(inode);
- ext3_mark_inode_dirty(handle, inode);
-
- /* In a multi-transaction truncate, we only make the final
-@@ -2479,10 +2479,6 @@ void ext3_read_inode(struct inode * inod
- }
- inode->i_nlink = le16_to_cpu(raw_inode->i_links_count);
- inode->i_size = le32_to_cpu(raw_inode->i_size);
-- inode->i_atime.tv_sec = le32_to_cpu(raw_inode->i_atime);
-- inode->i_ctime.tv_sec = le32_to_cpu(raw_inode->i_ctime);
-- inode->i_mtime.tv_sec = le32_to_cpu(raw_inode->i_mtime);
-- inode->i_atime.tv_nsec = inode->i_ctime.tv_nsec = inode->i_mtime.tv_nsec = 0;
-
- ei->i_state = 0;
- ei->i_dir_start_lookup = 0;
-@@ -2557,6 +2553,11 @@ void ext3_read_inode(struct inode * inod
- } else
- ei->i_extra_isize = 0;
-
-+ EXT3_INODE_GET_XTIME(i_ctime, inode, raw_inode);
-+ EXT3_INODE_GET_XTIME(i_mtime, inode, raw_inode);
-+ EXT3_INODE_GET_XTIME(i_atime, inode, raw_inode);
-+ EXT3_EINODE_GET_XTIME(i_crtime, ei, raw_inode);
-+
- if (S_ISREG(inode->i_mode)) {
- inode->i_op = &ext3_file_inode_operations;
- inode->i_fop = &ext3_file_operations;
-@@ -2637,9 +2638,12 @@ static int ext3_do_update_inode(handle_t
- }
- raw_inode->i_links_count = cpu_to_le16(inode->i_nlink);
- raw_inode->i_size = cpu_to_le32(ei->i_disksize);
-- raw_inode->i_atime = cpu_to_le32(inode->i_atime.tv_sec);
-- raw_inode->i_ctime = cpu_to_le32(inode->i_ctime.tv_sec);
-- raw_inode->i_mtime = cpu_to_le32(inode->i_mtime.tv_sec);
-+
-+ EXT3_INODE_SET_XTIME(i_ctime, inode, raw_inode);
-+ EXT3_INODE_SET_XTIME(i_mtime, inode, raw_inode);
-+ EXT3_INODE_SET_XTIME(i_atime, inode, raw_inode);
-+ EXT3_EINODE_SET_XTIME(i_crtime, ei, raw_inode);
-+
- raw_inode->i_blocks = cpu_to_le32(inode->i_blocks);
- raw_inode->i_dtime = cpu_to_le32(ei->i_dtime);
- raw_inode->i_flags = cpu_to_le32(ei->i_flags);
-Index: linux-2.6.16.27-0.9/fs/ext3/ioctl.c
-===================================================================
---- linux-2.6.16.27-0.9.orig/fs/ext3/ioctl.c
-+++ linux-2.6.16.27-0.9/fs/ext3/ioctl.c
-@@ -88,7 +88,7 @@ int ext3_ioctl (struct inode * inode, st
- ei->i_flags = flags;
-
- ext3_set_inode_flags(inode);
-- inode->i_ctime = CURRENT_TIME_SEC;
-+ inode->i_ctime = ext3_current_time(inode);
-
- err = ext3_mark_iloc_dirty(handle, inode, &iloc);
- flags_err:
-@@ -126,7 +126,7 @@ flags_err:
- return PTR_ERR(handle);
- err = ext3_reserve_inode_write(handle, inode, &iloc);
- if (err == 0) {
-- inode->i_ctime = CURRENT_TIME_SEC;
-+ inode->i_ctime = ext3_current_time(inode);
- inode->i_generation = generation;
- err = ext3_mark_iloc_dirty(handle, inode, &iloc);
- }
-Index: linux-2.6.16.27-0.9/fs/ext3/namei.c
-===================================================================
---- linux-2.6.16.27-0.9.orig/fs/ext3/namei.c
-+++ linux-2.6.16.27-0.9/fs/ext3/namei.c
-@@ -1276,7 +1276,7 @@ static int add_dirent_to_buf(handle_t *h
- * happen is that the times are slightly out of date
- * and/or different from the directory change time.
- */
-- dir->i_mtime = dir->i_ctime = CURRENT_TIME_SEC;
-+ dir->i_mtime = dir->i_ctime = ext3_current_time(dir);
- ext3_update_dx_flag(dir);
- dir->i_version++;
- ext3_mark_inode_dirty(handle, dir);
-@@ -2056,7 +2056,7 @@ static int ext3_rmdir (struct inode * di
- inode->i_version++;
- inode->i_nlink = 0;
- ext3_orphan_add(handle, inode);
-- inode->i_ctime = dir->i_ctime = dir->i_mtime = CURRENT_TIME_SEC;
-+ inode->i_ctime = dir->i_ctime = dir->i_mtime = ext3_current_time(inode);
- ext3_mark_inode_dirty(handle, inode);
- ext3_dec_count(handle, dir);
- ext3_update_dx_flag(dir);
-@@ -2106,13 +2106,13 @@ static int ext3_unlink(struct inode * di
- retval = ext3_delete_entry(handle, dir, de, bh);
- if (retval)
- goto end_unlink;
-- dir->i_ctime = dir->i_mtime = CURRENT_TIME_SEC;
-+ dir->i_ctime = dir->i_mtime = ext3_current_time(dir);
- ext3_update_dx_flag(dir);
- ext3_mark_inode_dirty(handle, dir);
- ext3_dec_count(handle, inode);
- if (!inode->i_nlink)
- ext3_orphan_add(handle, inode);
-- inode->i_ctime = dir->i_ctime;
-+ inode->i_ctime = ext3_current_time(inode);
- ext3_mark_inode_dirty(handle, inode);
- retval = 0;
-
-@@ -2214,7 +2214,7 @@ retry:
- if (IS_DIRSYNC(dir))
- handle->h_sync = 1;
-
-- inode->i_ctime = CURRENT_TIME_SEC;
-+ inode->i_ctime = ext3_current_time(inode);
- ext3_inc_count(handle, inode);
- atomic_inc(&inode->i_count);
-
-@@ -2317,7 +2317,7 @@ static int ext3_rename (struct inode * o
- * Like most other Unix systems, set the ctime for inodes on a
- * rename.
- */
-- old_inode->i_ctime = CURRENT_TIME_SEC;
-+ old_inode->i_ctime = ext3_current_time(old_inode);
- ext3_mark_inode_dirty(handle, old_inode);
-
- /*
-@@ -2350,9 +2350,9 @@ static int ext3_rename (struct inode * o
-
- if (new_inode) {
- ext3_dec_count(handle, new_inode);
-- new_inode->i_ctime = CURRENT_TIME_SEC;
-+ new_inode->i_ctime = ext3_current_time(new_inode);
- }
-- old_dir->i_ctime = old_dir->i_mtime = CURRENT_TIME_SEC;
-+ old_dir->i_ctime = old_dir->i_mtime = ext3_current_time(old_dir);
- ext3_update_dx_flag(old_dir);
- if (dir_bh) {
- BUFFER_TRACE(dir_bh, "get_write_access");
-Index: linux-2.6.16.27-0.9/fs/ext3/super.c
-===================================================================
---- linux-2.6.16.27-0.9.orig/fs/ext3/super.c
-+++ linux-2.6.16.27-0.9/fs/ext3/super.c
-@@ -1614,6 +1614,8 @@ static int ext3_fill_super (struct super
- sbi->s_inode_size);
- goto failed_mount;
- }
-+ if (sbi->s_inode_size > EXT3_GOOD_OLD_INODE_SIZE)
-+ sb->s_time_gran = 1 << (EXT3_EPOCH_BITS - 2);
- }
- sbi->s_frag_size = EXT3_MIN_FRAG_SIZE <<
- le32_to_cpu(es->s_log_frag_size);
-@@ -1809,6 +1811,32 @@ static int ext3_fill_super (struct super
- }
-
- ext3_setup_super (sb, es, sb->s_flags & MS_RDONLY);
-+
-+ /* determine the minimum size of new large inodes, if present */
-+ if (sbi->s_inode_size > EXT3_GOOD_OLD_INODE_SIZE) {
-+ sbi->s_want_extra_isize = sizeof(struct ext3_inode) -
-+ EXT3_GOOD_OLD_INODE_SIZE;
-+ if (EXT3_HAS_RO_COMPAT_FEATURE(sb,
-+ EXT4_FEATURE_RO_COMPAT_EXTRA_ISIZE)) {
-+ if (sbi->s_want_extra_isize <
-+ le16_to_cpu(es->s_want_extra_isize))
-+ sbi->s_want_extra_isize =
-+ le16_to_cpu(es->s_want_extra_isize);
-+ if (sbi->s_want_extra_isize <
-+ le16_to_cpu(es->s_min_extra_isize))
-+ sbi->s_want_extra_isize =
-+ le16_to_cpu(es->s_min_extra_isize);
-+ }
-+ }
-+ /* Check if enough inode space is available */
-+ if (EXT3_GOOD_OLD_INODE_SIZE + sbi->s_want_extra_isize >
-+ sbi->s_inode_size) {
-+ sbi->s_want_extra_isize = sizeof(struct ext3_inode) -
-+ EXT3_GOOD_OLD_INODE_SIZE;
-+		printk(KERN_INFO "EXT3-fs: required extra inode space not "
-+			"available.\n");
-+ }
-+
- /*
- * akpm: core read_super() calls in here with the superblock locked.
- * That deadlocks, because orphan cleanup needs to lock the superblock
-Index: linux-2.6.16.27-0.9/fs/ext3/xattr.c
-===================================================================
---- linux-2.6.16.27-0.9.orig/fs/ext3/xattr.c
-+++ linux-2.6.16.27-0.9/fs/ext3/xattr.c
-@@ -1006,8 +1006,8 @@ ext3_xattr_set_handle(handle_t *handle,
- }
- if (!error) {
- ext3_xattr_update_super_block(handle, inode->i_sb);
-- inode->i_ctime = CURRENT_TIME_SEC;
-- error = ext3_mark_iloc_dirty(handle, inode, &is.iloc);
-+ inode->i_ctime = ext3_current_time(inode);
-+ ext3_mark_inode_dirty(handle, inode);
- /*
- * The bh is consumed by ext3_mark_iloc_dirty, even with
- * error != 0.
-Index: linux-2.6.16.27-0.9/include/linux/ext3_fs.h
-===================================================================
---- linux-2.6.16.27-0.9.orig/include/linux/ext3_fs.h
-+++ linux-2.6.16.27-0.9/include/linux/ext3_fs.h
-@@ -272,7 +272,7 @@ struct ext3_inode {
- __le16 i_uid; /* Low 16 bits of Owner Uid */
- __le32 i_size; /* Size in bytes */
- __le32 i_atime; /* Access time */
-- __le32 i_ctime; /* Creation time */
-+ __le32 i_ctime; /* Inode Change time */
- __le32 i_mtime; /* Modification time */
- __le32 i_dtime; /* Deletion Time */
- __le16 i_gid; /* Low 16 bits of Group Id */
-@@ -321,10 +321,73 @@ struct ext3_inode {
- } osd2; /* OS dependent 2 */
- __le16 i_extra_isize;
- __le16 i_pad1;
-+ __le32 i_ctime_extra; /* extra Change time (nsec << 2 | epoch) */
-+ __le32 i_mtime_extra; /* extra Modification time(nsec << 2 | epoch) */
-+ __le32 i_atime_extra; /* extra Access time (nsec << 2 | epoch) */
-+ __le32 i_crtime; /* File Creation time */
-+ __le32 i_crtime_extra; /* extra File Creation time (nsec << 2 | epoch) */
- };
-
- #define i_size_high i_dir_acl
-
-+#define EXT3_EPOCH_BITS 2
-+#define EXT3_EPOCH_MASK ((1 << EXT3_EPOCH_BITS) - 1)
-+#define EXT3_NSEC_MASK (~0UL << EXT3_EPOCH_BITS)
-+
-+#define EXT3_FITS_IN_INODE(ext3_inode, einode, field) \
-+ ((offsetof(typeof(*ext3_inode), field) + \
-+ sizeof((ext3_inode)->field)) \
-+ <= (EXT3_GOOD_OLD_INODE_SIZE + \
-+ (einode)->i_extra_isize)) \
-+
-+static inline __le32 ext3_encode_extra_time(struct timespec *time)
-+{
-+ return cpu_to_le32((sizeof(time->tv_sec) > 4 ?
-+ time->tv_sec >> 32 : 0) |
-+ ((time->tv_nsec << 2) & EXT3_NSEC_MASK));
-+}
-+
-+static inline void ext3_decode_extra_time(struct timespec *time, __le32 extra) {
-+ if (sizeof(time->tv_sec) > 4)
-+ time->tv_sec |= (__u64)(le32_to_cpu(extra) & EXT3_EPOCH_MASK)
-+ << 32;
-+ time->tv_nsec = (le32_to_cpu(extra) & EXT3_NSEC_MASK) >> 2;
-+}
-+
-+#define EXT3_INODE_SET_XTIME(xtime, inode, raw_inode) \
-+do { \
-+ (raw_inode)->xtime = cpu_to_le32((inode)->xtime.tv_sec); \
-+ if (EXT3_FITS_IN_INODE(raw_inode, EXT3_I(inode), xtime ## _extra)) \
-+ (raw_inode)->xtime ## _extra = \
-+ ext3_encode_extra_time(&(inode)->xtime); \
-+} while (0)
-+
-+#define EXT3_EINODE_SET_XTIME(xtime, einode, raw_inode) \
-+do { \
-+ if (EXT3_FITS_IN_INODE(raw_inode, einode, xtime)) \
-+ (raw_inode)->xtime = cpu_to_le32((einode)->xtime.tv_sec); \
-+ if (EXT3_FITS_IN_INODE(raw_inode, einode, xtime ## _extra)) \
-+ (raw_inode)->xtime ## _extra = \
-+ ext3_encode_extra_time(&(einode)->xtime); \
-+} while (0)
-+
-+#define EXT3_INODE_GET_XTIME(xtime, inode, raw_inode) \
-+do { \
-+ (inode)->xtime.tv_sec = le32_to_cpu((raw_inode)->xtime); \
-+ if (EXT3_FITS_IN_INODE(raw_inode, EXT3_I(inode), xtime ## _extra))\
-+ ext3_decode_extra_time(&(inode)->xtime, \
-+ raw_inode->xtime ## _extra); \
-+} while (0)
-+
-+#define EXT3_EINODE_GET_XTIME(xtime, einode, raw_inode) \
-+do { \
-+ if (EXT3_FITS_IN_INODE(raw_inode, einode, xtime)) \
-+ (einode)->xtime.tv_sec = le32_to_cpu((raw_inode)->xtime);\
-+ if (EXT3_FITS_IN_INODE(raw_inode, einode, xtime ## _extra)) \
-+ ext3_decode_extra_time(&(einode)->xtime, \
-+ raw_inode->xtime ## _extra); \
-+} while (0)
-+
- #if defined(__KERNEL__) || defined(__linux__)
- #define i_reserved1 osd1.linux1.l_i_reserved1
- #define i_frag osd2.linux2.l_i_frag
-@@ -504,11 +567,19 @@ struct ext3_super_block {
- __le32 s_last_orphan; /* start of list of inodes to delete */
- __le32 s_hash_seed[4]; /* HTREE hash seed */
- __u8 s_def_hash_version; /* Default hash version to use */
-- __u8 s_reserved_char_pad;
-- __u16 s_reserved_word_pad;
-+ __u8 s_jnl_backup_type; /* Default type of journal backup */
-+ __le16 s_desc_size; /* Group desc. size: INCOMPAT_64BIT */
- __le32 s_default_mount_opts;
-- __le32 s_first_meta_bg; /* First metablock block group */
-- __u32 s_reserved[190]; /* Padding to the end of the block */
-+ __le32 s_first_meta_bg; /* First metablock block group */
-+ __le32 s_mkfs_time; /* When the filesystem was created */
-+ __le32 s_jnl_blocks[17]; /* Backup of the journal inode */
-+ __le32 s_blocks_count_hi; /* Blocks count high 32 bits */
-+ __le32 s_r_blocks_count_hi; /* Reserved blocks count high 32 bits*/
-+ __le32 s_free_blocks_hi; /* Free blocks count high 32 bits */
-+ __le16 s_min_extra_isize; /* All inodes have at least # bytes */
-+ __le16 s_want_extra_isize; /* New inodes should reserve # bytes */
-+ __le32 s_flags; /* Miscellaneous flags */
-+ __u32 s_reserved[167]; /* Padding to the end of the block */
- };
-
- #ifdef __KERNEL__
-@@ -583,6 +648,8 @@ static inline struct ext3_inode_info *EX
- #define EXT3_FEATURE_RO_COMPAT_BTREE_DIR 0x0004
- #define EXT4_FEATURE_RO_COMPAT_GDT_CSUM 0x0010
- #define EXT4_FEATURE_RO_COMPAT_DIR_NLINK 0x0020
-+#define EXT4_FEATURE_RO_COMPAT_EXTRA_ISIZE 0x0040
-+
-
- #define EXT3_FEATURE_INCOMPAT_COMPRESSION 0x0001
- #define EXT3_FEATURE_INCOMPAT_FILETYPE 0x0002
-@@ -599,6 +666,7 @@ static inline struct ext3_inode_info *EX
- EXT3_FEATURE_RO_COMPAT_LARGE_FILE| \
- EXT4_FEATURE_RO_COMPAT_GDT_CSUM| \
- EXT4_FEATURE_RO_COMPAT_DIR_NLINK| \
-+ EXT4_FEATURE_RO_COMPAT_EXTRA_ISIZE| \
- EXT3_FEATURE_RO_COMPAT_BTREE_DIR)
-
- /*
-@@ -726,6 +794,12 @@ static inline struct ext3_inode *ext3_ra
- return (struct ext3_inode *) (iloc->bh->b_data + iloc->offset);
- }
-
-+static inline struct timespec ext3_current_time(struct inode *inode)
-+{
-+ return (inode->i_sb->s_time_gran < NSEC_PER_SEC) ?
-+ current_fs_time(inode->i_sb) : CURRENT_TIME_SEC;
-+}
-+
- /*
- * This structure is stuffed into the struct file's private_data field
- * for directories. It is where we put information so that we can do
-Index: linux-2.6.16.27-0.9/include/linux/ext3_fs_i.h
-===================================================================
---- linux-2.6.16.27-0.9.orig/include/linux/ext3_fs_i.h
-+++ linux-2.6.16.27-0.9/include/linux/ext3_fs_i.h
-@@ -130,6 +130,7 @@ struct ext3_inode_info {
-
- /* on-disk additional length */
- __u16 i_extra_isize;
-+ struct timespec i_crtime;
-
- /*
- * truncate_sem is for serialising ext3_truncate() against
-Index: linux-2.6.16.27-0.9/include/linux/ext3_fs_sb.h
-===================================================================
---- linux-2.6.16.27-0.9.orig/include/linux/ext3_fs_sb.h
-+++ linux-2.6.16.27-0.9/include/linux/ext3_fs_sb.h
-@@ -71,6 +71,8 @@ struct ext3_sb_info {
- /* Last group used to allocate inode */
- int s_last_alloc_group;
-
-+ unsigned int s_want_extra_isize; /* New inodes should reserve # bytes */
-+
- /* root of the per fs reservation window tree */
- spinlock_t s_rsv_window_lock;
- struct rb_root s_rsv_window_root;
+++ /dev/null
-Index: linux-2.6.5-7.283/fs/ext3/ialloc.c
-===================================================================
---- linux-2.6.5-7.283.orig/fs/ext3/ialloc.c
-+++ linux-2.6.5-7.283/fs/ext3/ialloc.c
-@@ -613,7 +613,8 @@ got:
- /* This is the optimal IO size (for stat), not the fs block size */
- inode->i_blksize = PAGE_SIZE;
- inode->i_blocks = 0;
-- inode->i_mtime = inode->i_atime = inode->i_ctime = CURRENT_TIME;
-+ inode->i_mtime = inode->i_atime = inode->i_ctime = ei->i_crtime =
-+ CURRENT_TIME;
-
- memset(ei->i_data, 0, sizeof(ei->i_data));
- ei->i_next_alloc_block = 0;
-@@ -651,9 +652,8 @@ got:
- spin_unlock(&sbi->s_next_gen_lock);
-
- ei->i_state = EXT3_STATE_NEW;
-- ei->i_extra_isize =
-- (EXT3_INODE_SIZE(inode->i_sb) > EXT3_GOOD_OLD_INODE_SIZE) ?
-- sizeof(struct ext3_inode) - EXT3_GOOD_OLD_INODE_SIZE : 0;
-+
-+ ei->i_extra_isize = EXT3_SB(sb)->s_want_extra_isize;
-
- ret = inode;
- if(DQUOT_ALLOC_INODE(inode)) {
-Index: linux-2.6.5-7.283/fs/ext3/inode.c
-===================================================================
---- linux-2.6.5-7.283.orig/fs/ext3/inode.c
-+++ linux-2.6.5-7.283/fs/ext3/inode.c
-@@ -2459,7 +2459,11 @@ void ext3_read_inode(struct inode * inod
- inode->i_atime.tv_sec = le32_to_cpu(raw_inode->i_atime);
- inode->i_ctime.tv_sec = le32_to_cpu(raw_inode->i_ctime);
- inode->i_mtime.tv_sec = le32_to_cpu(raw_inode->i_mtime);
-- inode->i_atime.tv_nsec = inode->i_ctime.tv_nsec = inode->i_mtime.tv_nsec = 0;
-+ if (EXT3_FITS_IN_INODE(raw_inode, ei, i_crtime)) {
-+ ei->i_crtime.tv_sec = le32_to_cpu(raw_inode->i_crtime);
-+ }
-+ inode->i_atime.tv_nsec = inode->i_ctime.tv_nsec = inode->i_mtime.tv_nsec =
-+ ei->i_crtime.tv_nsec = 0;
-
- ei->i_state = 0;
- ei->i_next_alloc_block = 0;
-@@ -2603,6 +2607,10 @@ static int ext3_do_update_inode(handle_t
- raw_inode->i_atime = cpu_to_le32(inode->i_atime.tv_sec);
- raw_inode->i_ctime = cpu_to_le32(inode->i_ctime.tv_sec);
- raw_inode->i_mtime = cpu_to_le32(inode->i_mtime.tv_sec);
-+ if (EXT3_FITS_IN_INODE(raw_inode, ei, i_crtime)) {
-+ raw_inode->i_crtime = cpu_to_le32(ei->i_crtime.tv_sec);
-+ }
-+
- raw_inode->i_blocks = cpu_to_le32(inode->i_blocks);
- raw_inode->i_dtime = cpu_to_le32(ei->i_dtime);
- raw_inode->i_flags = cpu_to_le32(ei->i_flags);
-Index: linux-2.6.5-7.283/fs/ext3/super.c
-===================================================================
---- linux-2.6.5-7.283.orig/fs/ext3/super.c
-+++ linux-2.6.5-7.283/fs/ext3/super.c
-@@ -1515,6 +1515,32 @@ static int ext3_fill_super (struct super
- }
-
- ext3_setup_super (sb, es, sb->s_flags & MS_RDONLY);
-+
-+ /* determine the minimum size of new large inodes, if present */
-+ if (sbi->s_inode_size > EXT3_GOOD_OLD_INODE_SIZE) {
-+ sbi->s_want_extra_isize = sizeof(struct ext3_inode) -
-+ EXT3_GOOD_OLD_INODE_SIZE;
-+ if (EXT3_HAS_RO_COMPAT_FEATURE(sb,
-+ EXT4_FEATURE_RO_COMPAT_EXTRA_ISIZE)) {
-+ if (sbi->s_want_extra_isize <
-+ le16_to_cpu(es->s_want_extra_isize))
-+ sbi->s_want_extra_isize =
-+ le16_to_cpu(es->s_want_extra_isize);
-+ if (sbi->s_want_extra_isize <
-+ le16_to_cpu(es->s_min_extra_isize))
-+ sbi->s_want_extra_isize =
-+ le16_to_cpu(es->s_min_extra_isize);
-+ }
-+ }
-+ /* Check if enough inode space is available */
-+ if (EXT3_GOOD_OLD_INODE_SIZE + sbi->s_want_extra_isize >
-+ sbi->s_inode_size) {
-+ sbi->s_want_extra_isize = sizeof(struct ext3_inode) -
-+ EXT3_GOOD_OLD_INODE_SIZE;
-+		printk(KERN_INFO "EXT3-fs: required extra inode space not "
-+			"available.\n");
-+ }
-+
- /*
- * akpm: core read_super() calls in here with the superblock locked.
- * That deadlocks, because orphan cleanup needs to lock the superblock
-Index: linux-2.6.5-7.283/include/linux/ext3_fs.h
-===================================================================
---- linux-2.6.5-7.283.orig/include/linux/ext3_fs.h
-+++ linux-2.6.5-7.283/include/linux/ext3_fs.h
-@@ -232,7 +232,7 @@ struct ext3_inode {
- __u16 i_uid; /* Low 16 bits of Owner Uid */
- __u32 i_size; /* Size in bytes */
- __u32 i_atime; /* Access time */
-- __u32 i_ctime; /* Creation time */
-+ __u32 i_ctime; /* Inode Change time */
- __u32 i_mtime; /* Modification time */
- __u32 i_dtime; /* Deletion Time */
- __u16 i_gid; /* Low 16 bits of Group Id */
-@@ -281,10 +281,25 @@ struct ext3_inode {
- } osd2; /* OS dependent 2 */
- __u16 i_extra_isize;
- __u16 i_pad1;
-+ __le32 i_ctime_extra; /* extra Change time (nsec << 2 | epoch) */
-+ __le32 i_mtime_extra; /* extra Modification time(nsec << 2 | epoch) */
-+ __le32 i_atime_extra; /* extra Access time (nsec << 2 | epoch) */
-+ __le32 i_crtime; /* File Creation time */
-+ __le32 i_crtime_extra; /* extra File Creation time (nsec << 2 | epoch) */
- };
-
- #define i_size_high i_dir_acl
-
-+#define EXT3_EPOCH_BITS 2
-+#define EXT3_EPOCH_MASK ((1 << EXT3_EPOCH_BITS) - 1)
-+#define EXT3_NSEC_MASK (~0UL << EXT3_EPOCH_BITS)
-+
-+#define EXT3_FITS_IN_INODE(ext3_inode, einode, field) \
-+ ((offsetof(typeof(*ext3_inode), field) + \
-+ sizeof((ext3_inode)->field)) \
-+ <= (EXT3_GOOD_OLD_INODE_SIZE + \
-+ (einode)->i_extra_isize)) \
-+
- #if defined(__KERNEL__) || defined(__linux__)
- #define i_reserved1 osd1.linux1.l_i_reserved1
- #define i_frag osd2.linux2.l_i_frag
-@@ -460,11 +475,19 @@ struct ext3_super_block {
- __u32 s_last_orphan; /* start of list of inodes to delete */
- __u32 s_hash_seed[4]; /* HTREE hash seed */
- __u8 s_def_hash_version; /* Default hash version to use */
-- __u8 s_reserved_char_pad;
-- __u16 s_reserved_word_pad;
-+ __u8 s_jnl_backup_type; /* Default type of journal backup */
-+ __u16 s_desc_size; /* Group desc. size: INCOMPAT_64BIT */
- __u32 s_default_mount_opts;
-- __u32 s_first_meta_bg; /* First metablock block group */
-- __u32 s_reserved[190]; /* Padding to the end of the block */
-+ __u32 s_first_meta_bg; /* First metablock block group */
-+ __u32 s_mkfs_time; /* When the filesystem was created */
-+ __u32 s_jnl_blocks[17]; /* Backup of the journal inode */
-+ __u32 s_blocks_count_hi; /* Blocks count high 32 bits */
-+ __u32 s_r_blocks_count_hi; /* Reserved blocks count high 32 bits*/
-+ __u32 s_free_blocks_hi; /* Free blocks count high 32 bits */
-+ __u16 s_min_extra_isize; /* All inodes have at least # bytes */
-+ __u16 s_want_extra_isize; /* New inodes should reserve # bytes */
-+ __u32 s_flags; /* Miscellaneous flags */
-+ __u32 s_reserved[167]; /* Padding to the end of the block */
- };
-
- #ifdef __KERNEL__
-@@ -539,6 +556,7 @@ static inline struct ext3_inode_info *EX
- #define EXT3_FEATURE_RO_COMPAT_BTREE_DIR 0x0004
- #define EXT4_FEATURE_RO_COMPAT_GDT_CSUM 0x0010
- #define EXT4_FEATURE_RO_COMPAT_DIR_NLINK 0x0020
-+#define EXT4_FEATURE_RO_COMPAT_EXTRA_ISIZE 0x0040
-
- #define EXT3_FEATURE_INCOMPAT_COMPRESSION 0x0001
- #define EXT3_FEATURE_INCOMPAT_FILETYPE 0x0002
-@@ -555,6 +573,7 @@ static inline struct ext3_inode_info *EX
- EXT3_FEATURE_RO_COMPAT_LARGE_FILE| \
- EXT4_FEATURE_RO_COMPAT_GDT_CSUM| \
- EXT4_FEATURE_RO_COMPAT_DIR_NLINK| \
-+ EXT4_FEATURE_RO_COMPAT_EXTRA_ISIZE| \
- EXT3_FEATURE_RO_COMPAT_BTREE_DIR)
-
- /*
-Index: linux-2.6.5-7.283/include/linux/ext3_fs_i.h
-===================================================================
---- linux-2.6.5-7.283.orig/include/linux/ext3_fs_i.h
-+++ linux-2.6.5-7.283/include/linux/ext3_fs_i.h
-@@ -130,6 +130,7 @@ struct ext3_inode_info {
-
- /* on-disk additional length */
- __u16 i_extra_isize;
-+ struct timespec i_crtime;
-
- /*
- * truncate_sem is for serialising ext3_truncate() against
-Index: linux-2.6.5-7.283/include/linux/ext3_fs_sb.h
-===================================================================
---- linux-2.6.5-7.283.orig/include/linux/ext3_fs_sb.h
-+++ linux-2.6.5-7.283/include/linux/ext3_fs_sb.h
-@@ -71,6 +71,8 @@ struct ext3_sb_info {
- /* Last group used to allocate inode */
- int s_last_alloc_group;
-
-+ unsigned int s_want_extra_isize; /* New inodes should reserve # bytes */
-+
- /* root of the per fs reservation window tree */
- spinlock_t s_rsv_window_lock;
- struct rb_root s_rsv_window_root;
+++ /dev/null
-Index: linux-2.6.18/fs/ext3/ialloc.c
-===================================================================
---- linux-2.6.18.orig/fs/ext3/ialloc.c
-+++ linux-2.6.18/fs/ext3/ialloc.c
-@@ -615,7 +615,8 @@ got:
- /* This is the optimal IO size (for stat), not the fs block size */
- inode->i_blksize = PAGE_SIZE;
- inode->i_blocks = 0;
-- inode->i_mtime = inode->i_atime = inode->i_ctime = CURRENT_TIME_SEC;
-+ inode->i_mtime = inode->i_atime = inode->i_ctime = ei->i_crtime =
-+ ext3_current_time(inode);
-
- memset(ei->i_data, 0, sizeof(ei->i_data));
- ei->i_dir_start_lookup = 0;
-@@ -647,9 +648,8 @@ got:
- spin_unlock(&sbi->s_next_gen_lock);
-
- ei->i_state = EXT3_STATE_NEW;
-- ei->i_extra_isize =
-- (EXT3_INODE_SIZE(inode->i_sb) > EXT3_GOOD_OLD_INODE_SIZE) ?
-- sizeof(struct ext3_inode) - EXT3_GOOD_OLD_INODE_SIZE : 0;
-+
-+ ei->i_extra_isize = EXT3_SB(sb)->s_want_extra_isize;
-
- ret = inode;
- if(DQUOT_ALLOC_INODE(inode)) {
-Index: linux-2.6.18/fs/ext3/inode.c
-===================================================================
---- linux-2.6.18.orig/fs/ext3/inode.c
-+++ linux-2.6.18/fs/ext3/inode.c
-@@ -729,7 +729,7 @@ static int ext3_splice_branch(handle_t *
-
- /* We are done with atomic stuff, now do the rest of housekeeping */
-
-- inode->i_ctime = CURRENT_TIME_SEC;
-+ inode->i_ctime = ext3_current_time(inode);
- ext3_mark_inode_dirty(handle, inode);
-
- /* had we spliced it onto indirect block? */
-@@ -2388,7 +2388,7 @@ do_indirects:
- ext3_discard_reservation(inode);
-
- mutex_unlock(&ei->truncate_mutex);
-- inode->i_mtime = inode->i_ctime = CURRENT_TIME_SEC;
-+ inode->i_mtime = inode->i_ctime = ext3_current_time(inode);
- ext3_mark_inode_dirty(handle, inode);
-
- /*
-@@ -2624,10 +2624,6 @@ void ext3_read_inode(struct inode * inod
- }
- inode->i_nlink = le16_to_cpu(raw_inode->i_links_count);
- inode->i_size = le32_to_cpu(raw_inode->i_size);
-- inode->i_atime.tv_sec = le32_to_cpu(raw_inode->i_atime);
-- inode->i_ctime.tv_sec = le32_to_cpu(raw_inode->i_ctime);
-- inode->i_mtime.tv_sec = le32_to_cpu(raw_inode->i_mtime);
-- inode->i_atime.tv_nsec = inode->i_ctime.tv_nsec = inode->i_mtime.tv_nsec = 0;
-
- ei->i_state = 0;
- ei->i_dir_start_lookup = 0;
-@@ -2702,6 +2698,11 @@ void ext3_read_inode(struct inode * inod
- } else
- ei->i_extra_isize = 0;
-
-+ EXT3_INODE_GET_XTIME(i_ctime, inode, raw_inode);
-+ EXT3_INODE_GET_XTIME(i_mtime, inode, raw_inode);
-+ EXT3_INODE_GET_XTIME(i_atime, inode, raw_inode);
-+ EXT3_EINODE_GET_XTIME(i_crtime, ei, raw_inode);
-+
- if (S_ISREG(inode->i_mode)) {
- inode->i_op = &ext3_file_inode_operations;
- inode->i_fop = &ext3_file_operations;
-@@ -2782,9 +2783,12 @@ static int ext3_do_update_inode(handle_t
- }
- raw_inode->i_links_count = cpu_to_le16(inode->i_nlink);
- raw_inode->i_size = cpu_to_le32(ei->i_disksize);
-- raw_inode->i_atime = cpu_to_le32(inode->i_atime.tv_sec);
-- raw_inode->i_ctime = cpu_to_le32(inode->i_ctime.tv_sec);
-- raw_inode->i_mtime = cpu_to_le32(inode->i_mtime.tv_sec);
-+
-+ EXT3_INODE_SET_XTIME(i_ctime, inode, raw_inode);
-+ EXT3_INODE_SET_XTIME(i_mtime, inode, raw_inode);
-+ EXT3_INODE_SET_XTIME(i_atime, inode, raw_inode);
-+ EXT3_EINODE_SET_XTIME(i_crtime, ei, raw_inode);
-+
- raw_inode->i_blocks = cpu_to_le32(inode->i_blocks);
- raw_inode->i_dtime = cpu_to_le32(ei->i_dtime);
- raw_inode->i_flags = cpu_to_le32(ei->i_flags);
-Index: linux-2.6.18/fs/ext3/ioctl.c
-===================================================================
---- linux-2.6.18.orig/fs/ext3/ioctl.c
-+++ linux-2.6.18/fs/ext3/ioctl.c
-@@ -120,7 +120,7 @@ int ext3_ioctl (struct inode * inode, st
- ei->i_flags = flags;
-
- ext3_set_inode_flags(inode);
-- inode->i_ctime = CURRENT_TIME_SEC;
-+ inode->i_ctime = ext3_current_time(inode);
-
- err = ext3_mark_iloc_dirty(handle, inode, &iloc);
- flags_err:
-@@ -161,7 +161,7 @@ flags_err:
- return PTR_ERR(handle);
- err = ext3_reserve_inode_write(handle, inode, &iloc);
- if (err == 0) {
-- inode->i_ctime = CURRENT_TIME_SEC;
-+ inode->i_ctime = ext3_current_time(inode);
- inode->i_generation = generation;
- err = ext3_mark_iloc_dirty(handle, inode, &iloc);
- }
-Index: linux-2.6.18/fs/ext3/namei.c
-===================================================================
---- linux-2.6.18.orig/fs/ext3/namei.c
-+++ linux-2.6.18/fs/ext3/namei.c
-@@ -1287,7 +1287,7 @@ static int add_dirent_to_buf(handle_t *h
- * happen is that the times are slightly out of date
- * and/or different from the directory change time.
- */
-- dir->i_mtime = dir->i_ctime = CURRENT_TIME_SEC;
-+ dir->i_mtime = dir->i_ctime = ext3_current_time(dir);
- ext3_update_dx_flag(dir);
- dir->i_version++;
- ext3_mark_inode_dirty(handle, dir);
-@@ -2079,7 +2079,7 @@ static int ext3_rmdir (struct inode * di
- inode->i_version++;
- inode->i_nlink = 0;
- ext3_orphan_add(handle, inode);
-- inode->i_ctime = dir->i_ctime = dir->i_mtime = CURRENT_TIME_SEC;
-+ inode->i_ctime = dir->i_ctime = dir->i_mtime = ext3_current_time(inode);
- ext3_mark_inode_dirty(handle, inode);
- ext3_dec_count(handle, dir);
- ext3_update_dx_flag(dir);
-@@ -2129,13 +2129,13 @@ static int ext3_unlink(struct inode * di
- retval = ext3_delete_entry(handle, dir, de, bh);
- if (retval)
- goto end_unlink;
-- dir->i_ctime = dir->i_mtime = CURRENT_TIME_SEC;
-+ dir->i_ctime = dir->i_mtime = ext3_current_time(dir);
- ext3_update_dx_flag(dir);
- ext3_mark_inode_dirty(handle, dir);
- ext3_dec_count(handle, inode);
- if (!inode->i_nlink)
- ext3_orphan_add(handle, inode);
-- inode->i_ctime = dir->i_ctime;
-+ inode->i_ctime = ext3_current_time(inode);
- ext3_mark_inode_dirty(handle, inode);
- retval = 0;
-
-@@ -2237,7 +2237,7 @@ retry:
- if (IS_DIRSYNC(dir))
- handle->h_sync = 1;
-
-- inode->i_ctime = CURRENT_TIME_SEC;
-+ inode->i_ctime = ext3_current_time(inode);
- ext3_inc_count(handle, inode);
- atomic_inc(&inode->i_count);
-
-@@ -2340,7 +2340,7 @@ static int ext3_rename (struct inode * o
- * Like most other Unix systems, set the ctime for inodes on a
- * rename.
- */
-- old_inode->i_ctime = CURRENT_TIME_SEC;
-+ old_inode->i_ctime = ext3_current_time(old_inode);
- ext3_mark_inode_dirty(handle, old_inode);
-
- /*
-@@ -2373,9 +2373,9 @@ static int ext3_rename (struct inode * o
-
- if (new_inode) {
- ext3_dec_count(handle, new_inode);
-- new_inode->i_ctime = CURRENT_TIME_SEC;
-+ new_inode->i_ctime = ext3_current_time(new_inode);
- }
-- old_dir->i_ctime = old_dir->i_mtime = CURRENT_TIME_SEC;
-+ old_dir->i_ctime = old_dir->i_mtime = ext3_current_time(old_dir);
- ext3_update_dx_flag(old_dir);
- if (dir_bh) {
- BUFFER_TRACE(dir_bh, "get_write_access");
-Index: linux-2.6.18/fs/ext3/super.c
-===================================================================
---- linux-2.6.18.orig/fs/ext3/super.c
-+++ linux-2.6.18/fs/ext3/super.c
-@@ -1615,6 +1615,8 @@ static int ext3_fill_super (struct super
- sbi->s_inode_size);
- goto failed_mount;
- }
-+ if (sbi->s_inode_size > EXT3_GOOD_OLD_INODE_SIZE)
-+ sb->s_time_gran = 1 << (EXT3_EPOCH_BITS - 2);
- }
- sbi->s_frag_size = EXT3_MIN_FRAG_SIZE <<
- le32_to_cpu(es->s_log_frag_size);
-@@ -1819,6 +1821,32 @@ static int ext3_fill_super (struct super
- }
-
- ext3_setup_super (sb, es, sb->s_flags & MS_RDONLY);
-+
-+ /* determine the minimum size of new large inodes, if present */
-+ if (sbi->s_inode_size > EXT3_GOOD_OLD_INODE_SIZE) {
-+ sbi->s_want_extra_isize = sizeof(struct ext3_inode) -
-+ EXT3_GOOD_OLD_INODE_SIZE;
-+ if (EXT3_HAS_RO_COMPAT_FEATURE(sb,
-+ EXT4_FEATURE_RO_COMPAT_EXTRA_ISIZE)) {
-+ if (sbi->s_want_extra_isize <
-+ le16_to_cpu(es->s_want_extra_isize))
-+ sbi->s_want_extra_isize =
-+ le16_to_cpu(es->s_want_extra_isize);
-+ if (sbi->s_want_extra_isize <
-+ le16_to_cpu(es->s_min_extra_isize))
-+ sbi->s_want_extra_isize =
-+ le16_to_cpu(es->s_min_extra_isize);
-+ }
-+ }
-+ /* Check if enough inode space is available */
-+ if (EXT3_GOOD_OLD_INODE_SIZE + sbi->s_want_extra_isize >
-+ sbi->s_inode_size) {
-+ sbi->s_want_extra_isize = sizeof(struct ext3_inode) -
-+ EXT3_GOOD_OLD_INODE_SIZE;
-+		printk(KERN_INFO "EXT3-fs: required extra inode space not "
-+			"available.\n");
-+ }
-+
- /*
- * akpm: core read_super() calls in here with the superblock locked.
- * That deadlocks, because orphan cleanup needs to lock the superblock
-Index: linux-2.6.18/fs/ext3/xattr.c
-===================================================================
---- linux-2.6.18.orig/fs/ext3/xattr.c
-+++ linux-2.6.18/fs/ext3/xattr.c
-@@ -1007,7 +1007,7 @@ ext3_xattr_set_handle(handle_t *handle,
- }
- if (!error) {
- ext3_xattr_update_super_block(handle, inode->i_sb);
-- inode->i_ctime = CURRENT_TIME_SEC;
-+ inode->i_ctime = ext3_current_time(inode);
- error = ext3_mark_iloc_dirty(handle, inode, &is.iloc);
- /*
- * The bh is consumed by ext3_mark_iloc_dirty, even with
-Index: linux-2.6.18/include/linux/ext3_fs.h
-===================================================================
---- linux-2.6.18.orig/include/linux/ext3_fs.h
-+++ linux-2.6.18/include/linux/ext3_fs.h
-@@ -268,7 +268,7 @@ struct ext3_inode {
- __le16 i_uid; /* Low 16 bits of Owner Uid */
- __le32 i_size; /* Size in bytes */
- __le32 i_atime; /* Access time */
-- __le32 i_ctime; /* Creation time */
-+ __le32 i_ctime; /* Inode Change time */
- __le32 i_mtime; /* Modification time */
- __le32 i_dtime; /* Deletion Time */
- __le16 i_gid; /* Low 16 bits of Group Id */
-@@ -317,10 +317,73 @@ struct ext3_inode {
- } osd2; /* OS dependent 2 */
- __le16 i_extra_isize;
- __le16 i_pad1;
-+ __le32 i_ctime_extra; /* extra Change time (nsec << 2 | epoch) */
-+ __le32 i_mtime_extra; /* extra Modification time(nsec << 2 | epoch) */
-+ __le32 i_atime_extra; /* extra Access time (nsec << 2 | epoch) */
-+ __le32 i_crtime; /* File Creation time */
-+ __le32 i_crtime_extra; /* extra File Creation time (nsec << 2 | epoch) */
- };
-
- #define i_size_high i_dir_acl
-
-+#define EXT3_EPOCH_BITS 2
-+#define EXT3_EPOCH_MASK ((1 << EXT3_EPOCH_BITS) - 1)
-+#define EXT3_NSEC_MASK (~0UL << EXT3_EPOCH_BITS)
-+
-+#define EXT3_FITS_IN_INODE(ext3_inode, einode, field) \
-+ ((offsetof(typeof(*ext3_inode), field) + \
-+ sizeof((ext3_inode)->field)) \
-+ <= (EXT3_GOOD_OLD_INODE_SIZE + \
-+ (einode)->i_extra_isize)) \
-+
-+static inline __le32 ext3_encode_extra_time(struct timespec *time)
-+{
-+ return cpu_to_le32((sizeof(time->tv_sec) > 4 ?
-+ time->tv_sec >> 32 : 0) |
-+ ((time->tv_nsec << 2) & EXT3_NSEC_MASK));
-+}
-+
-+static inline void ext3_decode_extra_time(struct timespec *time, __le32 extra) {
-+ if (sizeof(time->tv_sec) > 4)
-+ time->tv_sec |= (__u64)(le32_to_cpu(extra) & EXT3_EPOCH_MASK)
-+ << 32;
-+ time->tv_nsec = (le32_to_cpu(extra) & EXT3_NSEC_MASK) >> 2;
-+}
-+
-+#define EXT3_INODE_SET_XTIME(xtime, inode, raw_inode) \
-+do { \
-+ (raw_inode)->xtime = cpu_to_le32((inode)->xtime.tv_sec); \
-+ if (EXT3_FITS_IN_INODE(raw_inode, EXT3_I(inode), xtime ## _extra)) \
-+ (raw_inode)->xtime ## _extra = \
-+ ext3_encode_extra_time(&(inode)->xtime); \
-+} while (0)
-+
-+#define EXT3_EINODE_SET_XTIME(xtime, einode, raw_inode)\
-+do { \
-+ if (EXT3_FITS_IN_INODE(raw_inode, einode, xtime)) \
-+ (raw_inode)->xtime = cpu_to_le32((einode)->xtime.tv_sec); \
-+ if (EXT3_FITS_IN_INODE(raw_inode, einode, xtime ## _extra)) \
-+ (raw_inode)->xtime ## _extra = \
-+ ext3_encode_extra_time(&(einode)->xtime); \
-+} while (0)
-+
-+#define EXT3_INODE_GET_XTIME(xtime, inode, raw_inode) \
-+do { \
-+ (inode)->xtime.tv_sec = le32_to_cpu((raw_inode)->xtime); \
-+ if (EXT3_FITS_IN_INODE(raw_inode, EXT3_I(inode), xtime ## _extra)) \
-+ ext3_decode_extra_time(&(inode)->xtime, \
-+ raw_inode->xtime ## _extra); \
-+} while (0)
-+
-+#define EXT3_EINODE_GET_XTIME(xtime, einode, raw_inode) \
-+do { \
-+ if (EXT3_FITS_IN_INODE(raw_inode, einode, xtime)) \
-+ (einode)->xtime.tv_sec = le32_to_cpu((raw_inode)->xtime); \
-+ if (EXT3_FITS_IN_INODE(raw_inode, einode, xtime ## _extra)) \
-+ ext3_decode_extra_time(&(einode)->xtime, \
-+ raw_inode->xtime ## _extra); \
-+} while (0)
-+
- #if defined(__KERNEL__) || defined(__linux__)
- #define i_reserved1 osd1.linux1.l_i_reserved1
- #define i_frag osd2.linux2.l_i_frag
-@@ -498,11 +562,19 @@ struct ext3_super_block {
- __le32 s_last_orphan; /* start of list of inodes to delete */
- __le32 s_hash_seed[4]; /* HTREE hash seed */
- __u8 s_def_hash_version; /* Default hash version to use */
-- __u8 s_reserved_char_pad;
-- __u16 s_reserved_word_pad;
-+ __u8 s_jnl_backup_type; /* Default type of journal backup */
-+ __le16 s_desc_size; /* Group desc. size: INCOMPAT_64BIT */
- __le32 s_default_mount_opts;
-- __le32 s_first_meta_bg; /* First metablock block group */
-- __u32 s_reserved[190]; /* Padding to the end of the block */
-+ __le32 s_first_meta_bg; /* First metablock block group */
-+ __le32 s_mkfs_time; /* When the filesystem was created */
-+ __le32 s_jnl_blocks[17]; /* Backup of the journal inode */
-+ __le32 s_blocks_count_hi; /* Blocks count high 32 bits */
-+ __le32 s_r_blocks_count_hi; /* Reserved blocks count high 32 bits*/
-+ __le32 s_free_blocks_count_hi; /* Free blocks count high 32 bits */
-+ __le16 s_min_extra_isize; /* All inodes have at least # bytes */
-+ __le16 s_want_extra_isize; /* New inodes should reserve # bytes */
-+ __le32 s_flags; /* Miscellaneous flags */
-+ __u32 s_reserved[167]; /* Padding to the end of the block */
- };
-
- #ifdef __KERNEL__
-@@ -519,6 +584,13 @@ static inline struct ext3_inode_info *EX
- return container_of(inode, struct ext3_inode_info, vfs_inode);
- }
-
-+static inline struct timespec ext3_current_time(struct inode *inode)
-+{
-+ return (inode->i_sb->s_time_gran < NSEC_PER_SEC) ?
-+ current_fs_time(inode->i_sb) : CURRENT_TIME_SEC;
-+}
-+
-+
- static inline int ext3_valid_inum(struct super_block *sb, unsigned long ino)
- {
- return ino == EXT3_ROOT_INO ||
-@@ -590,6 +662,8 @@ static inline int ext3_valid_inum(struct
- #define EXT3_FEATURE_RO_COMPAT_LARGE_FILE 0x0002
- #define EXT3_FEATURE_RO_COMPAT_BTREE_DIR 0x0004
- #define EXT4_FEATURE_RO_COMPAT_DIR_NLINK 0x0020
-+#define EXT4_FEATURE_RO_COMPAT_EXTRA_ISIZE 0x0040
-+
-
- #define EXT3_FEATURE_INCOMPAT_COMPRESSION 0x0001
- #define EXT3_FEATURE_INCOMPAT_FILETYPE 0x0002
-@@ -606,6 +680,7 @@ static inline int ext3_valid_inum(struct
- #define EXT3_FEATURE_RO_COMPAT_SUPP (EXT3_FEATURE_RO_COMPAT_SPARSE_SUPER| \
- EXT3_FEATURE_RO_COMPAT_LARGE_FILE| \
- EXT4_FEATURE_RO_COMPAT_DIR_NLINK| \
-+ EXT4_FEATURE_RO_COMPAT_EXTRA_ISIZE| \
- EXT3_FEATURE_RO_COMPAT_BTREE_DIR)
-
- /*
-Index: linux-2.6.18/include/linux/ext3_fs_sb.h
-===================================================================
---- linux-2.6.18.orig/include/linux/ext3_fs_sb.h
-+++ linux-2.6.18/include/linux/ext3_fs_sb.h
-@@ -119,6 +119,8 @@ struct ext3_sb_info {
- spinlock_t s_bal_lock;
- unsigned long s_mb_buddies_generated;
- unsigned long long s_mb_generation_time;
-+
-+ unsigned int s_want_extra_isize; /* New inodes should reserve # bytes */
- };
-
- #define EXT3_GROUP_INFO(sb, group) \
-Index: linux-2.6.18/include/linux/ext3_fs_i.h
-===================================================================
---- linux-2.6.18.orig/include/linux/ext3_fs_i.h
-+++ linux-2.6.18/include/linux/ext3_fs_i.h
-@@ -144,6 +144,7 @@ struct ext3_inode_info {
- struct inode vfs_inode;
-
- __u32 i_cached_extent[4];
-+ struct timespec i_crtime;
-
- void *i_filterdata;
- };
+++ /dev/null
-Index: linux-2.6.5-7.283/fs/ext3/namei.c
-===================================================================
---- linux-2.6.5-7.283.orig/fs/ext3/namei.c
-+++ linux-2.6.5-7.283/fs/ext3/namei.c
-@@ -1613,11 +1613,17 @@ static int ext3_delete_entry (handle_t *
- static inline void ext3_inc_count(handle_t *handle, struct inode *inode)
- {
- inode->i_nlink++;
-+ if (is_dx(inode) && inode->i_nlink > 1) {
-+ /* limit is 16-bit i_links_count */
-+ if (inode->i_nlink >= EXT3_LINK_MAX || inode->i_nlink == 2)
-+ inode->i_nlink = 1;
-+ }
- }
-
- static inline void ext3_dec_count(handle_t *handle, struct inode *inode)
- {
-- inode->i_nlink--;
-+ if (!S_ISDIR(inode->i_mode) || inode->i_nlink > 2)
-+ inode->i_nlink--;
- }
-
- static int ext3_add_nondir(handle_t *handle,
-@@ -1730,7 +1736,7 @@ static int ext3_mkdir(struct inode * dir
- int retries = 0;
- int err;
-
-- if (dir->i_nlink >= EXT3_LINK_MAX)
-+ if (EXT3_DIR_LINK_MAX(dir))
- return -EMLINK;
-
- retry:
-@@ -1752,7 +1758,7 @@ retry:
- inode->i_size = EXT3_I(inode)->i_disksize = inode->i_sb->s_blocksize;
- dir_block = ext3_bread (handle, inode, 0, 1, &err);
- if (!dir_block) {
-- inode->i_nlink--; /* is this nlink == 0? */
-+ ext3_dec_count(handle, inode); /* is this nlink == 0? */
- ext3_mark_inode_dirty(handle, inode);
- iput (inode);
- goto out_stop;
-@@ -1784,7 +1790,7 @@ retry:
- iput (inode);
- goto out_stop;
- }
-- dir->i_nlink++;
-+ ext3_inc_count(handle, dir);
- ext3_update_dx_flag(dir);
- ext3_mark_inode_dirty(handle, dir);
- d_instantiate(dentry, inode);
-@@ -2042,16 +2048,16 @@ static int ext3_rmdir (struct inode * di
- retval = ext3_delete_entry(handle, dir, de, bh);
- if (retval)
- goto end_rmdir;
-- if (inode->i_nlink != 2)
-- ext3_warning (inode->i_sb, "ext3_rmdir",
-- "empty directory has nlink!=2 (%d)",
-- inode->i_nlink);
-+ if (!EXT3_DIR_LINK_EMPTY(inode))
-+ ext3_warning(inode->i_sb, "ext3_rmdir",
-+ "empty directory has too many links (%d)",
-+ inode->i_nlink);
- inode->i_version++;
- inode->i_nlink = 0;
- ext3_orphan_add(handle, inode);
- inode->i_ctime = dir->i_ctime = dir->i_mtime = CURRENT_TIME;
- ext3_mark_inode_dirty(handle, inode);
-- dir->i_nlink--;
-+ ext3_dec_count(handle, dir);
- ext3_update_dx_flag(dir);
- ext3_mark_inode_dirty(handle, dir);
-
-@@ -2100,7 +2106,7 @@ static int ext3_unlink(struct inode * di
- dir->i_ctime = dir->i_mtime = CURRENT_TIME;
- ext3_update_dx_flag(dir);
- ext3_mark_inode_dirty(handle, dir);
-- inode->i_nlink--;
-+ ext3_dec_count(handle, inode);
- if (!inode->i_nlink)
- ext3_orphan_add(handle, inode);
- inode->i_ctime = dir->i_ctime;
-@@ -2191,7 +2197,7 @@ static int ext3_link (struct dentry * ol
- struct inode *inode = old_dentry->d_inode;
- int err, retries = 0;
-
-- if (inode->i_nlink >= EXT3_LINK_MAX)
-+ if (EXT3_DIR_LINK_MAX(inode))
- return -EMLINK;
-
- retry:
-@@ -2277,8 +2283,8 @@ static int ext3_rename (struct inode * o
- if (le32_to_cpu(PARENT_INO(dir_bh->b_data)) != old_dir->i_ino)
- goto end_rename;
- retval = -EMLINK;
-- if (!new_inode && new_dir!=old_dir &&
-- new_dir->i_nlink >= EXT3_LINK_MAX)
-+ if (!new_inode && new_dir != old_dir &&
-+ EXT3_DIR_LINK_MAX(new_dir))
- goto end_rename;
- }
- if (!new_bh) {
-@@ -2335,7 +2341,7 @@ static int ext3_rename (struct inode * o
- }
-
- if (new_inode) {
-- new_inode->i_nlink--;
-+ ext3_dec_count(handle, new_inode);
- new_inode->i_ctime = CURRENT_TIME;
- }
- old_dir->i_ctime = old_dir->i_mtime = CURRENT_TIME;
-@@ -2346,11 +2352,13 @@ static int ext3_rename (struct inode * o
- PARENT_INO(dir_bh->b_data) = le32_to_cpu(new_dir->i_ino);
- BUFFER_TRACE(dir_bh, "call ext3_journal_dirty_metadata");
- ext3_journal_dirty_metadata(handle, dir_bh);
-- old_dir->i_nlink--;
-+ ext3_dec_count(handle, old_dir);
- if (new_inode) {
-- new_inode->i_nlink--;
-+ /* checked empty_dir above, can't have another parent,
-+ * ext3_dec_count() won't work for many-linked dirs */
-+ new_inode->i_nlink = 0;
- } else {
-- new_dir->i_nlink++;
-+ ext3_inc_count(handle, new_dir);
- ext3_update_dx_flag(new_dir);
- ext3_mark_inode_dirty(handle, new_dir);
- }
-Index: linux-2.6.5-7.283/include/linux/ext3_fs.h
-===================================================================
---- linux-2.6.5-7.283.orig/include/linux/ext3_fs.h
-+++ linux-2.6.5-7.283/include/linux/ext3_fs.h
-@@ -86,7 +86,7 @@ struct statfs;
- /*
- * Maximal count of links to a file
- */
--#define EXT3_LINK_MAX 32000
-+#define EXT3_LINK_MAX 65000
-
- /*
- * Macro-instructions used to manage several block sizes
-@@ -538,6 +538,7 @@ static inline struct ext3_inode_info *EX
- #define EXT3_FEATURE_RO_COMPAT_SPARSE_SUPER 0x0001
- #define EXT3_FEATURE_RO_COMPAT_LARGE_FILE 0x0002
- #define EXT3_FEATURE_RO_COMPAT_BTREE_DIR 0x0004
-+#define EXT4_FEATURE_RO_COMPAT_DIR_NLINK 0x0020
-
- #define EXT3_FEATURE_INCOMPAT_COMPRESSION 0x0001
- #define EXT3_FEATURE_INCOMPAT_FILETYPE 0x0002
-@@ -553,6 +554,7 @@ static inline struct ext3_inode_info *EX
- EXT3_FEATURE_INCOMPAT_EXTENTS)
- #define EXT3_FEATURE_RO_COMPAT_SUPP (EXT3_FEATURE_RO_COMPAT_SPARSE_SUPER| \
- EXT3_FEATURE_RO_COMPAT_LARGE_FILE| \
-+ EXT4_FEATURE_RO_COMPAT_DIR_NLINK| \
- EXT3_FEATURE_RO_COMPAT_BTREE_DIR)
-
- /*
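The effect of the ext3_inc_count()/ext3_dec_count() changes above is that an indexed directory whose link count would overflow the 16-bit on-disk counter is pinned at i_nlink == 1, with EXT4_FEATURE_RO_COMPAT_DIR_NLINK flagging that the count is no longer authoritative. User-space tools that rely on the traditional "st_nlink - 2 equals the number of subdirectories" shortcut therefore have to treat nlink == 1 as unknown. A small illustrative check (not from the patch):

#include <stdio.h>
#include <sys/stat.h>

/* Returns the subdirectory count implied by st_nlink, or -1 if the
 * directory uses the DIR_NLINK convention (st_nlink == 1 means the
 * kernel stopped tracking the count after the 65000-link limit). */
static long subdir_count_hint(const struct stat *st)
{
    if (!S_ISDIR(st->st_mode))
        return -1;
    if (st->st_nlink == 1)
        return -1;
    return (long)st->st_nlink - 2;   /* "." plus the entry in the parent */
}

int main(int argc, char **argv)
{
    struct stat st;

    if (argc < 2 || stat(argv[1], &st) != 0)
        return 1;
    printf("subdirectory hint for %s: %ld\n", argv[1], subdir_count_hint(&st));
    return 0;
}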
+++ /dev/null
-Index: linux-2.6.12/fs/ext3/namei.c
-===================================================================
---- linux-2.6.12.orig/fs/ext3/namei.c
-+++ linux-2.6.12/fs/ext3/namei.c
-@@ -1600,11 +1600,17 @@ static int ext3_delete_entry (handle_t *
- static inline void ext3_inc_count(handle_t *handle, struct inode *inode)
- {
- inode->i_nlink++;
-+ if (is_dx(inode) && inode->i_nlink > 1) {
-+ /* limit is 16-bit i_links_count */
-+ if (inode->i_nlink >= EXT3_LINK_MAX || inode->i_nlink == 2)
-+ inode->i_nlink = 1;
-+ }
- }
-
- static inline void ext3_dec_count(handle_t *handle, struct inode *inode)
- {
-- inode->i_nlink--;
-+ if (!S_ISDIR(inode->i_mode) || inode->i_nlink > 2)
-+ inode->i_nlink--;
- }
-
- static int ext3_add_nondir(handle_t *handle,
-@@ -1703,7 +1709,7 @@ static int ext3_mkdir(struct inode * dir
- struct ext3_dir_entry_2 * de;
- int err, retries = 0;
-
-- if (dir->i_nlink >= EXT3_LINK_MAX)
-+ if (EXT3_DIR_LINK_MAX(dir))
- return -EMLINK;
-
- retry:
-@@ -1726,7 +1732,7 @@ retry:
- inode->i_size = EXT3_I(inode)->i_disksize = inode->i_sb->s_blocksize;
- dir_block = ext3_bread (handle, inode, 0, 1, &err);
- if (!dir_block) {
-- inode->i_nlink--; /* is this nlink == 0? */
-+ ext3_dec_count(handle, inode); /* is this nlink == 0? */
- ext3_mark_inode_dirty(handle, inode);
- iput (inode);
- goto out_stop;
-@@ -1758,7 +1764,7 @@ retry:
- iput (inode);
- goto out_stop;
- }
-- dir->i_nlink++;
-+ ext3_inc_count(handle, dir);
- ext3_update_dx_flag(dir);
- ext3_mark_inode_dirty(handle, dir);
- d_instantiate(dentry, inode);
-@@ -2023,10 +2029,10 @@ static int ext3_rmdir (struct inode * di
- retval = ext3_delete_entry(handle, dir, de, bh);
- if (retval)
- goto end_rmdir;
-- if (inode->i_nlink != 2)
-- ext3_warning (inode->i_sb, "ext3_rmdir",
-- "empty directory has nlink!=2 (%d)",
-- inode->i_nlink);
-+ if (!EXT3_DIR_LINK_EMPTY(inode))
-+ ext3_warning(inode->i_sb, "ext3_rmdir",
-+ "empty directory has too many links (%d)",
-+ inode->i_nlink);
- inode->i_version++;
- inode->i_nlink = 0;
- /* There's no need to set i_disksize: the fact that i_nlink is
-@@ -2036,7 +2042,7 @@ static int ext3_rmdir (struct inode * di
- ext3_orphan_add(handle, inode);
- inode->i_ctime = dir->i_ctime = dir->i_mtime = CURRENT_TIME_SEC;
- ext3_mark_inode_dirty(handle, inode);
-- dir->i_nlink--;
-+ ext3_dec_count(handle, dir);
- ext3_update_dx_flag(dir);
- ext3_mark_inode_dirty(handle, dir);
-
-@@ -2087,7 +2093,7 @@ static int ext3_unlink(struct inode * di
- dir->i_ctime = dir->i_mtime = CURRENT_TIME_SEC;
- ext3_update_dx_flag(dir);
- ext3_mark_inode_dirty(handle, dir);
-- inode->i_nlink--;
-+ ext3_dec_count(handle, inode);
- if (!inode->i_nlink)
- ext3_orphan_add(handle, inode);
- inode->i_ctime = dir->i_ctime;
-@@ -2162,7 +2168,7 @@ static int ext3_link (struct dentry * ol
- struct inode *inode = old_dentry->d_inode;
- int err, retries = 0;
-
-- if (inode->i_nlink >= EXT3_LINK_MAX)
-+ if (EXT3_DIR_LINK_MAX(inode))
- return -EMLINK;
-
- retry:
-@@ -2249,8 +2255,8 @@ static int ext3_rename (struct inode * o
- if (le32_to_cpu(PARENT_INO(dir_bh->b_data)) != old_dir->i_ino)
- goto end_rename;
- retval = -EMLINK;
-- if (!new_inode && new_dir!=old_dir &&
-- new_dir->i_nlink >= EXT3_LINK_MAX)
-+ if (!new_inode && new_dir != old_dir &&
-+ EXT3_DIR_LINK_MAX(new_dir))
- goto end_rename;
- }
- if (!new_bh) {
-@@ -2307,7 +2313,7 @@ static int ext3_rename (struct inode * o
- }
-
- if (new_inode) {
-- new_inode->i_nlink--;
-+ ext3_dec_count(handle, new_inode);
- new_inode->i_ctime = CURRENT_TIME_SEC;
- }
- old_dir->i_ctime = old_dir->i_mtime = CURRENT_TIME_SEC;
-@@ -2318,11 +2324,13 @@ static int ext3_rename (struct inode * o
- PARENT_INO(dir_bh->b_data) = cpu_to_le32(new_dir->i_ino);
- BUFFER_TRACE(dir_bh, "call ext3_journal_dirty_metadata");
- ext3_journal_dirty_metadata(handle, dir_bh);
-- old_dir->i_nlink--;
-+ ext3_dec_count(handle, old_dir);
- if (new_inode) {
-- new_inode->i_nlink--;
-+ /* checked empty_dir above, can't have another parent,
-+ * ext3_dec_count() won't work for many-linked dirs */
-+ new_inode->i_nlink = 0;
- } else {
-- new_dir->i_nlink++;
-+ ext3_inc_count(handle, new_dir);
- ext3_update_dx_flag(new_dir);
- ext3_mark_inode_dirty(handle, new_dir);
- }
-Index: linux-2.6.12/include/linux/ext3_fs.h
-===================================================================
---- linux-2.6.12.orig/include/linux/ext3_fs.h
-+++ linux-2.6.12/include/linux/ext3_fs.h
-@@ -78,7 +78,7 @@ struct statfs;
- /*
- * Maximal count of links to a file
- */
--#define EXT3_LINK_MAX 32000
-+#define EXT3_LINK_MAX 65000
-
- /*
- * Macro-instructions used to manage several block sizes
-@@ -539,6 +539,7 @@ static inline struct ext3_inode_info *EX
- #define EXT3_FEATURE_RO_COMPAT_SPARSE_SUPER 0x0001
- #define EXT3_FEATURE_RO_COMPAT_LARGE_FILE 0x0002
- #define EXT3_FEATURE_RO_COMPAT_BTREE_DIR 0x0004
-+#define EXT4_FEATURE_RO_COMPAT_DIR_NLINK 0x0020
-
- #define EXT3_FEATURE_INCOMPAT_COMPRESSION 0x0001
- #define EXT3_FEATURE_INCOMPAT_FILETYPE 0x0002
-@@ -552,6 +553,7 @@ static inline struct ext3_inode_info *EX
- EXT3_FEATURE_INCOMPAT_META_BG)
- #define EXT3_FEATURE_RO_COMPAT_SUPP (EXT3_FEATURE_RO_COMPAT_SPARSE_SUPER| \
- EXT3_FEATURE_RO_COMPAT_LARGE_FILE| \
-+ EXT4_FEATURE_RO_COMPAT_DIR_NLINK| \
- EXT3_FEATURE_RO_COMPAT_BTREE_DIR)
-
- /*
+++ /dev/null
-Index: linux-stage/fs/ext3/ialloc.c
-===================================================================
---- linux-stage.orig/fs/ext3/ialloc.c 2005-06-26 10:59:43.048185981 +0200
-+++ linux-stage/fs/ext3/ialloc.c 2005-06-26 11:01:21.317716027 +0200
-@@ -775,7 +775,6 @@
- if (!gdp)
- continue;
- desc_count += le16_to_cpu(gdp->bg_free_inodes_count);
-- cond_resched();
- }
- return desc_count;
- #endif
-Index: linux-stage/fs/ext3/super.c
-===================================================================
---- linux-stage.orig/fs/ext3/super.c 2005-06-26 10:59:43.205412542 +0200
-+++ linux-stage/fs/ext3/super.c 2005-06-26 11:02:29.599941754 +0200
-@@ -2236,11 +2232,9 @@
- * block group descriptors. If the sparse superblocks
- * feature is turned on, then not all groups have this.
- */
-- for (i = 0; i < ngroups; i++) {
-+ for (i = 0; i < ngroups; i++)
- overhead += ext3_bg_has_super(sb, i) +
- ext3_bg_num_gdb(sb, i);
-- cond_resched();
-- }
-
- /*
- * Every block group has an inode bitmap, a block
+++ /dev/null
-Index: linux-2.6.5-sles9/include/linux/ext3_fs.h
-===================================================================
---- linux-2.6.5-sles9.orig/include/linux/ext3_fs.h 2004-11-09 02:29:14.878513832 +0300
-+++ linux-2.6.5-sles9/include/linux/ext3_fs.h 2004-11-09 02:32:14.151260232 +0300
-@@ -709,7 +709,7 @@
- unsigned int block_group,
- struct buffer_head ** bh);
- extern int ext3_should_retry_alloc(struct super_block *sb, int *retries);
--extern void rsv_window_add(struct super_block *sb, struct reserve_window_node *rsv);
-+extern void rsv_window_add(struct super_block *sb, struct ext3_reserve_window_node *rsv);
-
- /* dir.c */
- extern int ext3_check_dir_entry(const char *, struct inode *,
-Index: linux-2.6.5-sles9/include/linux/ext3_fs_sb.h
-===================================================================
---- linux-2.6.5-sles9.orig/include/linux/ext3_fs_sb.h 2004-11-09 02:28:18.753046200 +0300
-+++ linux-2.6.5-sles9/include/linux/ext3_fs_sb.h 2004-11-09 02:32:27.996155488 +0300
-@@ -86,7 +86,7 @@
- /* root of the per fs reservation window tree */
- spinlock_t s_rsv_window_lock;
- struct rb_root s_rsv_window_root;
-- struct reserve_window_node s_rsv_window_head;
-+ struct ext3_reserve_window_node s_rsv_window_head;
-
- /* Journaling */
- struct inode * s_journal_inode;
-Index: linux-2.6.5-sles9/include/linux/ext3_fs_i.h
-===================================================================
---- linux-2.6.5-sles9.orig/include/linux/ext3_fs_i.h 2004-11-09 02:23:21.606219384 +0300
-+++ linux-2.6.5-sles9/include/linux/ext3_fs_i.h 2004-11-09 02:32:08.752081032 +0300
-@@ -20,17 +20,17 @@
- #include <linux/rbtree.h>
- #include <linux/seqlock.h>
-
--struct reserve_window {
-+struct ext3_reserve_window {
- __u32 _rsv_start; /* First byte reserved */
- __u32 _rsv_end; /* Last byte reserved or 0 */
- };
-
--struct reserve_window_node {
-+struct ext3_reserve_window_node {
- struct rb_node rsv_node;
- atomic_t rsv_goal_size;
- atomic_t rsv_alloc_hit;
- seqlock_t rsv_seqlock;
-- struct reserve_window rsv_window;
-+ struct ext3_reserve_window rsv_window;
- };
-
- #define rsv_start rsv_window._rsv_start
-@@ -76,7 +76,7 @@
- */
- __u32 i_next_alloc_goal;
- /* block reservation window */
-- struct reserve_window_node i_rsv_window;
-+ struct ext3_reserve_window_node i_rsv_window;
-
- __u32 i_dir_start_lookup;
- #ifdef CONFIG_EXT3_FS_XATTR
-Index: linux-2.6.5-sles9/fs/ext3/balloc.c
-===================================================================
---- linux-2.6.5-sles9.orig/fs/ext3/balloc.c 2004-11-09 02:26:53.078070776 +0300
-+++ linux-2.6.5-sles9/fs/ext3/balloc.c 2004-11-09 02:32:43.108858008 +0300
-@@ -115,7 +115,7 @@
- const char *fn)
- {
- struct rb_node *n;
-- struct reserve_window_node *rsv, *prev;
-+ struct ext3_reserve_window_node *rsv, *prev;
- int bad;
-
- restart:
-@@ -125,7 +125,7 @@
-
- printk("Block Allocation Reservation Windows Map (%s):\n", fn);
- while (n) {
-- rsv = list_entry(n, struct reserve_window_node, rsv_node);
-+ rsv = list_entry(n, struct ext3_reserve_window_node, rsv_node);
- if (verbose)
- printk("reservation window 0x%p "
- "start: %d, end: %d\n",
-@@ -161,7 +161,7 @@
- #endif
-
- static int
--goal_in_my_reservation(struct reserve_window *rsv, int goal,
-+goal_in_my_reservation(struct ext3_reserve_window *rsv, int goal,
- unsigned int group, struct super_block * sb)
- {
- unsigned long group_first_block, group_last_block;
-@@ -184,18 +184,18 @@
- * if the goal is not in any window.
- * Returns NULL if there are no windows or if all windows start after the goal.
- */
--static struct reserve_window_node *search_reserve_window(struct rb_root *root,
-+static struct ext3_reserve_window_node *search_ext3_reserve_window(struct rb_root *root,
- unsigned long goal)
- {
- struct rb_node *n = root->rb_node;
-- struct reserve_window_node *rsv;
-+ struct ext3_reserve_window_node *rsv;
-
- if (!n)
- return NULL;
-
- while (n)
- {
-- rsv = rb_entry(n, struct reserve_window_node, rsv_node);
-+ rsv = rb_entry(n, struct ext3_reserve_window_node, rsv_node);
-
- if (goal < rsv->rsv_start)
- n = n->rb_left;
-@@ -212,13 +212,13 @@
- */
- if (rsv->rsv_start > goal) {
- n = rb_prev(&rsv->rsv_node);
-- rsv = rb_entry(n, struct reserve_window_node, rsv_node);
-+ rsv = rb_entry(n, struct ext3_reserve_window_node, rsv_node);
- }
- return rsv;
- }
-
- void rsv_window_add(struct super_block *sb,
-- struct reserve_window_node *rsv)
-+ struct ext3_reserve_window_node *rsv)
- {
- struct rb_root *root = &EXT3_SB(sb)->s_rsv_window_root;
- struct rb_node *node = &rsv->rsv_node;
-@@ -226,12 +226,12 @@
-
- struct rb_node ** p = &root->rb_node;
- struct rb_node * parent = NULL;
-- struct reserve_window_node *this;
-+ struct ext3_reserve_window_node *this;
-
- while (*p)
- {
- parent = *p;
-- this = rb_entry(parent, struct reserve_window_node, rsv_node);
-+ this = rb_entry(parent, struct ext3_reserve_window_node, rsv_node);
-
- if (start < this->rsv_start)
- p = &(*p)->rb_left;
-@@ -246,7 +246,7 @@
- }
-
- static void rsv_window_remove(struct super_block *sb,
-- struct reserve_window_node *rsv)
-+ struct ext3_reserve_window_node *rsv)
- {
- rsv->rsv_start = EXT3_RESERVE_WINDOW_NOT_ALLOCATED;
- rsv->rsv_end = EXT3_RESERVE_WINDOW_NOT_ALLOCATED;
-@@ -254,7 +254,7 @@
- rb_erase(&rsv->rsv_node, &EXT3_SB(sb)->s_rsv_window_root);
- }
-
--static inline int rsv_is_empty(struct reserve_window *rsv)
-+static inline int rsv_is_empty(struct ext3_reserve_window *rsv)
- {
- /* a valid reservation end block could not be 0 */
- return (rsv->_rsv_end == EXT3_RESERVE_WINDOW_NOT_ALLOCATED);
-@@ -263,7 +263,7 @@
- void ext3_discard_reservation(struct inode *inode)
- {
- struct ext3_inode_info *ei = EXT3_I(inode);
-- struct reserve_window_node *rsv = &ei->i_rsv_window;
-+ struct ext3_reserve_window_node *rsv = &ei->i_rsv_window;
- spinlock_t *rsv_lock = &EXT3_SB(inode->i_sb)->s_rsv_window_lock;
-
- if (!rsv_is_empty(&rsv->rsv_window)) {
-@@ -600,7 +600,7 @@
- */
- static int
- ext3_try_to_allocate(struct super_block *sb, handle_t *handle, int group,
-- struct buffer_head *bitmap_bh, int goal, struct reserve_window *my_rsv)
-+ struct buffer_head *bitmap_bh, int goal, struct ext3_reserve_window *my_rsv)
- {
- int group_first_block, start, end;
-
-@@ -700,13 +700,13 @@
- * on succeed, it returns the reservation window to be appended to.
- * failed, return NULL.
- */
--static struct reserve_window_node *find_next_reservable_window(
-- struct reserve_window_node *search_head,
-+static struct ext3_reserve_window_node *find_next_reservable_window(
-+ struct ext3_reserve_window_node *search_head,
- unsigned long size, int *start_block,
- int last_block)
- {
- struct rb_node *next;
-- struct reserve_window_node *rsv, *prev;
-+ struct ext3_reserve_window_node *rsv, *prev;
- int cur;
-
- /* TODO: make the start of the reservation window byte-aligned */
-@@ -734,7 +734,7 @@
-
- prev = rsv;
- next = rb_next(&rsv->rsv_node);
-- rsv = list_entry(next, struct reserve_window_node, rsv_node);
-+ rsv = list_entry(next, struct ext3_reserve_window_node, rsv_node);
-
- /*
- * Reached the last reservation, we can just append to the
-@@ -801,15 +801,15 @@
- * @group: the group we are trying to allocate in
- * @bitmap_bh: the block group block bitmap
- */
--static int alloc_new_reservation(struct reserve_window_node *my_rsv,
-+static int alloc_new_reservation(struct ext3_reserve_window_node *my_rsv,
- int goal, struct super_block *sb,
- unsigned int group, struct buffer_head *bitmap_bh)
- {
-- struct reserve_window_node *search_head;
-+ struct ext3_reserve_window_node *search_head;
- int group_first_block, group_end_block, start_block;
- int first_free_block;
- int reservable_space_start;
-- struct reserve_window_node *prev_rsv;
-+ struct ext3_reserve_window_node *prev_rsv;
- struct rb_root *fs_rsv_root = &EXT3_SB(sb)->s_rsv_window_root;
- unsigned long size;
-
-@@ -859,7 +859,7 @@
- /*
- * shift the search start to the window near the goal block
- */
-- search_head = search_reserve_window(fs_rsv_root, start_block);
-+ search_head = search_ext3_reserve_window(fs_rsv_root, start_block);
-
- /*
- * find_next_reservable_window() simply finds a reservable window
-@@ -968,7 +968,7 @@
- static int
- ext3_try_to_allocate_with_rsv(struct super_block *sb, handle_t *handle,
- unsigned int group, struct buffer_head *bitmap_bh,
-- int goal, struct reserve_window_node * my_rsv,
-+ int goal, struct ext3_reserve_window_node * my_rsv,
- int *errp)
- {
- spinlock_t *rsv_lock;
-@@ -1027,7 +1027,7 @@
- * then we could go to allocate from the reservation window directly.
- */
- while (1) {
-- struct reserve_window rsv_copy;
-+ struct ext3_reserve_window rsv_copy;
- unsigned int seq;
-
- do {
-@@ -1159,8 +1159,8 @@
- struct ext3_group_desc *gdp;
- struct ext3_super_block *es;
- struct ext3_sb_info *sbi;
-- struct reserve_window_node *my_rsv = NULL;
-- struct reserve_window_node *rsv = &EXT3_I(inode)->i_rsv_window;
-+ struct ext3_reserve_window_node *my_rsv = NULL;
-+ struct ext3_reserve_window_node *rsv = &EXT3_I(inode)->i_rsv_window;
- unsigned short windowsz = 0;
- #ifdef EXT3FS_DEBUG
- static int goal_hits, goal_attempts;
+++ /dev/null
- fs/ext3/inode.c | 81 ++++++++++++++++++++++++++++++++++++++++++++++++++++++++
- fs/ext3/super.c | 4 ++
- 2 files changed, 85 insertions(+)
-
---- linux-2.5.73/fs/ext3/inode.c~ext3-san-jdike-2.5.73 2003-06-22 12:32:58.000000000 -0600
-+++ linux-2.5.73-braam/fs/ext3/inode.c 2003-06-30 12:19:21.000000000 -0600
-@@ -2945,3 +2945,84 @@ int ext3_change_inode_journal_flag(struc
-
- return err;
- }
-+
-+/* for each block: 1 ind + 1 dind + 1 tind
-+ * for each block: 3 bitmap blocks
-+ * for each block: 3 group descriptor blocks
-+ * 1 inode block
-+ * 1 superblock
-+ * 2 * EXT3_SINGLEDATA_TRANS_BLOCKS for the quota files
-+ * ((1+1+1) * 3 * nblocks) + 1 + 1 + 2 * EXT3_SINGLEDATA_TRANS_BLOCKS
-+ *
-+ * XXX assuming:
-+ * (1) fs logical block size == page size
-+ * (2) ext3 in writeback mode
-+ */
-+static inline int ext3_san_write_trans_blocks(int nblocks)
-+{
-+ int ret;
-+
-+ ret = (1 + 1 + 1) * 3 * nblocks + 1 + 1;
-+
-+#ifdef CONFIG_QUOTA
-+ ret += 2 * EXT3_SINGLEDATA_TRANS_BLOCKS;
-+#endif
-+
-+ return ret;
-+}
-+
-+/* Allocate blocks for an inode without creating any buffer/page
-+ * for data I/O; set the inode size if the file is extended.
-+ *
-+ * @inode: target inode
-+ * @blocks: array of logical block numbers
-+ * @nblocks: how many blocks need to be allocated
-+ * @newsize: new file size to set
-+ *
-+ * return: 0 on success, otherwise an error
-+ * (*blocks) contains the physical block numbers allocated
-+ *
-+ * XXX this assumes the fs block size == page size
-+ */
-+int ext3_prep_san_write(struct inode *inode, long *blocks,
-+ int nblocks, loff_t newsize)
-+{
-+ handle_t *handle;
-+ struct buffer_head bh_tmp;
-+ int needed_blocks;
-+ int i, ret = 0, ret2;
-+
-+ needed_blocks = ext3_san_write_trans_blocks(nblocks);
-+
-+ lock_kernel();
-+ handle = ext3_journal_start(inode, needed_blocks);
-+ if (IS_ERR(handle)) {
-+ unlock_kernel();
-+ return PTR_ERR(handle);
-+ }
-+ unlock_kernel();
-+
-+ /* alloc blocks one by one */
-+ for (i = 0; i < nblocks; i++) {
-+ ret = ext3_get_block_handle(handle, inode, blocks[i],
-+ &bh_tmp, 1, 1);
-+ if (ret)
-+ break;
-+
-+ blocks[i] = bh_tmp.b_blocknr;
-+ }
-+
-+ /* set inode size if needed */
-+ if (!ret && (newsize > inode->i_size)) {
-+ inode->i_size = newsize;
-+ ext3_mark_inode_dirty(handle, inode);
-+ }
-+
-+ lock_kernel();
-+ ret2 = ext3_journal_stop(handle);
-+ unlock_kernel();
-+
-+ if (!ret)
-+ ret = ret2;
-+ return ret;
-+}
---- linux-2.5.73/fs/ext3/super.c~ext3-san-jdike-2.5.73 2003-06-22 12:33:16.000000000 -0600
-+++ linux-2.5.73-braam/fs/ext3/super.c 2003-06-30 12:16:36.000000000 -0600
-@@ -2080,6 +2080,10 @@ static void __exit exit_ext3_fs(void)
- exit_ext3_xattr();
- }
-
-+int ext3_prep_san_write(struct inode *inode, long *blocks,
-+ int nblocks, loff_t newsize);
-+EXPORT_SYMBOL(ext3_prep_san_write);
-+
- MODULE_AUTHOR("Remy Card, Stephen Tweedie, Andrew Morton, Andreas Dilger, Theodore Ts'o and others");
- MODULE_DESCRIPTION("Second Extended Filesystem with journaling extensions");
- MODULE_LICENSE("GPL");
-
-_
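The credit estimate in ext3_san_write_trans_blocks() above budgets, for every data block, one indirect, one double-indirect and one triple-indirect block, each of which may also dirty a block bitmap and a group descriptor, plus one inode block, one superblock and two quota-file updates. A standalone sanity check of that arithmetic; the EXT3_SINGLEDATA_TRANS_BLOCKS value below is the usual constant, assumed here rather than taken from a kernel header:

#include <stdio.h>

#define EXT3_SINGLEDATA_TRANS_BLOCKS 8  /* typical value; assumed for illustration */

/* Mirror of the patch's estimate: (1 ind + 1 dind + 1 tind) per data
 * block, each possibly dirtying a bitmap and a group descriptor (x3),
 * plus inode block + superblock, plus quota overhead. */
static int san_write_trans_blocks(int nblocks)
{
    return (1 + 1 + 1) * 3 * nblocks + 1 + 1
           + 2 * EXT3_SINGLEDATA_TRANS_BLOCKS;
}

int main(void)
{
    /* e.g. a 4-block write reserves 9*4 + 2 + 16 = 54 journal credits */
    printf("%d\n", san_write_trans_blocks(4));
    return 0;
}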
+++ /dev/null
-Subject: Avoid disk sector_t overflow for >2TB ext3 filesystem
-From: Mingming Cao <cmm@us.ibm.com>
-
-
-If an ext3 filesystem is larger than 2TB and sector_t is a u32 (i.e.
-CONFIG_LBD is not defined in the kernel), the calculation of the disk
-sector will overflow. Add checks at ext3_fill_super() and
-ext3_group_extend() to prevent mounting/remounting/resizing a >2TB ext3
-filesystem if sector_t is only 4 bytes.
-
-Verified this patch on a 32-bit platform without CONFIG_LBD defined
-(sector_t is 32 bits long): mount refuses to mount a 10TB ext3 filesystem.
-
-Signed-off-by: Mingming Cao<cmm@us.ibm.com>
-Acked-by: Andreas Dilger <adilger@clusterfs.com>
-Signed-off-by: Andrew Morton <akpm@osdl.org>
----
-
- fs/ext3/resize.c | 10 ++++++++++
- fs/ext3/super.c | 10 ++++++++++
- 2 files changed, 20 insertions(+)
-
-diff -puN fs/ext3/resize.c~avoid-disk-sector_t-overflow-for-2tb-ext3-filesystem fs/ext3/resize.c
---- devel/fs/ext3/resize.c~avoid-disk-sector_t-overflow-for-2tb-ext3-filesystem 2006-05-22 14:09:53.000000000 -0700
-+++ devel-akpm/fs/ext3/resize.c 2006-05-22 14:10:56.000000000 -0700
-@@ -926,6 +926,16 @@ int ext3_group_extend(struct super_block
- if (n_blocks_count == 0 || n_blocks_count == o_blocks_count)
- return 0;
-
-+ if (n_blocks_count > (sector_t)(~0ULL) >> (sb->s_blocksize_bits - 9)) {
-+ printk(KERN_ERR "EXT3-fs: filesystem on %s: "
-+ "too large to resize to %lu blocks safely\n",
-+ sb->s_id, n_blocks_count);
-+ if (sizeof(sector_t) < 8)
-+ ext3_warning(sb, __FUNCTION__,
-+ "CONFIG_LBD not enabled\n");
-+ return -EINVAL;
-+ }
-+
- if (n_blocks_count < o_blocks_count) {
- ext3_warning(sb, __FUNCTION__,
- "can't shrink FS - resize aborted");
-diff -puN fs/ext3/super.c~avoid-disk-sector_t-overflow-for-2tb-ext3-filesystem fs/ext3/super.c
---- devel/fs/ext3/super.c~avoid-disk-sector_t-overflow-for-2tb-ext3-filesystem 2006-05-22 14:09:53.000000000 -0700
-+++ devel-akpm/fs/ext3/super.c 2006-05-22 14:11:10.000000000 -0700
-@@ -1565,6 +1565,17 @@ static int ext3_fill_super (struct super
- goto failed_mount;
- }
-
-+ if (le32_to_cpu(es->s_blocks_count) >
-+ (sector_t)(~0ULL) >> (sb->s_blocksize_bits - 9)) {
-+ printk(KERN_ERR "EXT3-fs: filesystem on %s: "
-+ "too large to mount safely - %u blocks\n", sb->s_id,
-+ le32_to_cpu(es->s_blocks_count));
-+ if (sizeof(sector_t) < 8)
-+ printk(KERN_WARNING
-+ "EXT3-fs: CONFIG_LBD not enabled\n");
-+ goto failed_mount;
-+ }
-+
- if (EXT3_BLOCKS_PER_GROUP(sb) == 0)
- goto cantfind_ext3;
- sbi->s_groups_count = (le32_to_cpu(es->s_blocks_count) -
-_
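The guard added above rejects any filesystem whose block count exceeds the largest block index whose byte offset still fits in sector_t, i.e. (~(sector_t)0) >> (s_blocksize_bits - 9) with 512-byte sectors. The following standalone snippet (illustrative, not kernel code) shows why a 32-bit sector_t caps a 4KB-block filesystem just under 2TiB:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
    uint32_t max_sector = ~0U;         /* 32-bit sector_t, in 512-byte units */
    unsigned blocksize_bits = 12;      /* 4096-byte filesystem blocks */
    uint32_t max_blocks = max_sector >> (blocksize_bits - 9);

    /* 2^32 sectors * 512 bytes = 2 TiB; in 4KiB blocks that is just under
     * 2^29 blocks, so any block count above max_blocks is unaddressable. */
    printf("max addressable blocks: %u (%llu bytes)\n",
           max_blocks, (unsigned long long)max_blocks << blocksize_bits);
    return 0;
}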
+++ /dev/null
-Subject: Avoid disk sector_t overflow for >2TB ext3 filesystem
-From: Mingming Cao <cmm@us.ibm.com>
-
-
-If an ext3 filesystem is larger than 2TB and sector_t is a u32 (i.e.
-CONFIG_LBD is not defined in the kernel), the calculation of the disk
-sector will overflow. Add checks at ext3_fill_super() and
-ext3_group_extend() to prevent mounting/remounting/resizing a >2TB ext3
-filesystem if sector_t is only 4 bytes.
-
-Verified this patch on a 32-bit platform without CONFIG_LBD defined
-(sector_t is 32 bits long): mount refuses to mount a 10TB ext3 filesystem.
-
-Signed-off-by: Mingming Cao<cmm@us.ibm.com>
-Acked-by: Andreas Dilger <adilger@clusterfs.com>
-Signed-off-by: Andrew Morton <akpm@osdl.org>
----
-
- fs/ext3/resize.c | 10 ++++++++++
- fs/ext3/super.c | 10 ++++++++++
- 2 files changed, 20 insertions(+)
-
-diff -puN fs/ext3/super.c~avoid-disk-sector_t-overflow-for-2tb-ext3-filesystem fs/ext3/super.c
---- devel/fs/ext3/super.c~avoid-disk-sector_t-overflow-for-2tb-ext3-filesystem 2006-05-22 14:09:53.000000000 -0700
-+++ devel-akpm/fs/ext3/super.c 2006-05-22 14:11:10.000000000 -0700
-@@ -1565,6 +1565,17 @@ static int ext3_fill_super (struct super
- goto failed_mount;
- }
-
-+ if (le32_to_cpu(es->s_blocks_count) >
-+ (sector_t)(~0ULL) >> (sb->s_blocksize_bits - 9)) {
-+ printk(KERN_ERR "EXT3-fs: filesystem on %s: "
-+ "too large to mount safely - %u blocks\n", sb->s_id,
-+ le32_to_cpu(es->s_blocks_count));
-+ if (sizeof(sector_t) < 8)
-+ printk(KERN_WARNING
-+ "EXT3-fs: CONFIG_LBD not enabled\n");
-+ goto failed_mount;
-+ }
-+
- sbi->s_groups_count = (le32_to_cpu(es->s_blocks_count) -
- le32_to_cpu(es->s_first_data_block) +
- EXT3_BLOCKS_PER_GROUP(sb) - 1) /
-_
+++ /dev/null
-Subject: Avoid disk sector_t overflow for >2TB ext3 filesystem
-From: Mingming Cao <cmm@us.ibm.com>
-
-
-If an ext3 filesystem is larger than 2TB and sector_t is a u32 (i.e.
-CONFIG_LBD is not defined in the kernel), the calculation of the disk
-sector will overflow. Add checks at ext3_fill_super() and
-ext3_group_extend() to prevent mounting/remounting/resizing a >2TB ext3
-filesystem if sector_t is only 4 bytes.
-
-Verified this patch on a 32-bit platform without CONFIG_LBD defined
-(sector_t is 32 bits long): mount refuses to mount a 10TB ext3 filesystem.
-
-Signed-off-by: Mingming Cao<cmm@us.ibm.com>
-Acked-by: Andreas Dilger <adilger@clusterfs.com>
-Signed-off-by: Andrew Morton <akpm@osdl.org>
----
-
- fs/ext3/resize.c | 10 ++++++++++
- fs/ext3/super.c | 10 ++++++++++
- 2 files changed, 20 insertions(+)
-
-diff -puN fs/ext3/resize.c~avoid-disk-sector_t-overflow-for-2tb-ext3-filesystem fs/ext3/resize.c
---- devel/fs/ext3/resize.c~avoid-disk-sector_t-overflow-for-2tb-ext3-filesystem 2006-05-22 14:09:53.000000000 -0700
-+++ devel-akpm/fs/ext3/resize.c 2006-05-22 14:10:56.000000000 -0700
-@@ -926,6 +926,16 @@ int ext3_group_extend(struct super_block
- if (n_blocks_count == 0 || n_blocks_count == o_blocks_count)
- return 0;
-
-+ if (n_blocks_count > (sector_t)(~0ULL) >> (sb->s_blocksize_bits - 9)) {
-+ printk(KERN_ERR "EXT3-fs: filesystem on %s: "
-+ "too large to resize to %lu blocks safely\n",
-+ sb->s_id, n_blocks_count);
-+ if (sizeof(sector_t) < 8)
-+ ext3_warning(sb, __FUNCTION__,
-+ "CONFIG_LBD not enabled\n");
-+ return -EINVAL;
-+ }
-+
- if (n_blocks_count < o_blocks_count) {
- ext3_warning(sb, __FUNCTION__,
- "can't shrink FS - resize aborted");
-diff -puN fs/ext3/super.c~avoid-disk-sector_t-overflow-for-2tb-ext3-filesystem fs/ext3/super.c
---- devel/fs/ext3/super.c~avoid-disk-sector_t-overflow-for-2tb-ext3-filesystem 2006-05-22 14:09:53.000000000 -0700
-+++ devel-akpm/fs/ext3/super.c 2006-05-22 14:11:10.000000000 -0700
-@@ -1565,6 +1565,17 @@ static int ext3_fill_super (struct super
- goto failed_mount;
- }
-
-+ if (le32_to_cpu(es->s_blocks_count) >
-+ (sector_t)(~0ULL) >> (sb->s_blocksize_bits - 9)) {
-+ printk(KERN_ERR "EXT3-fs: filesystem on %s: "
-+ "too large to mount safely - %u blocks\n", sb->s_id,
-+ le32_to_cpu(es->s_blocks_count));
-+ if (sizeof(sector_t) < 8)
-+ printk(KERN_WARNING
-+ "EXT3-fs: CONFIG_LBD not enabled\n");
-+ goto failed_mount;
-+ }
-+
- sbi->s_groups_count = (le32_to_cpu(es->s_blocks_count) -
- le32_to_cpu(es->s_first_data_block) +
- EXT3_BLOCKS_PER_GROUP(sb) - 1) /
-_
+++ /dev/null
-diff -urp RH_2_6_9_42_0_3.orig/fs/ext3/ialloc.c RH_2_6_9_42_0_3/fs/ext3/ialloc.c
---- RH_2_6_9_42_0_3.orig/fs/ext3/ialloc.c 2006-10-23 13:32:46.000000000 +0300
-+++ RH_2_6_9_42_0_3/fs/ext3/ialloc.c 2007-02-16 07:22:28.000000000 +0200
-@@ -419,7 +419,8 @@ static int find_group_other(struct super
- * For other inodes, search forward from the parent directory's block
- * group to find a free inode.
- */
--struct inode *ext3_new_inode(handle_t *handle, struct inode * dir, int mode)
-+struct inode *ext3_new_inode(handle_t *handle, struct inode * dir, int mode,
-+ unsigned long goal)
- {
- struct super_block *sb;
- struct buffer_head *bitmap_bh = NULL;
-@@ -447,6 +448,41 @@ struct inode *ext3_new_inode(handle_t *h
-
- sbi = EXT3_SB(sb);
- es = sbi->s_es;
-+ if (goal) {
-+ group = (goal - 1) / EXT3_INODES_PER_GROUP(sb);
-+ ino = (goal - 1) % EXT3_INODES_PER_GROUP(sb);
-+ err = -EIO;
-+
-+ gdp = ext3_get_group_desc(sb, group, &bh2);
-+ if (!gdp)
-+ goto fail;
-+
-+ bitmap_bh = read_inode_bitmap (sb, group);
-+ if (!bitmap_bh)
-+ goto fail;
-+
-+ BUFFER_TRACE(bh, "get_write_access");
-+ err = ext3_journal_get_write_access(handle, bitmap_bh);
-+ if (err) goto fail;
-+
-+ if (ext3_set_bit_atomic(sb_bgl_lock(sbi, group),
-+ ino, bitmap_bh->b_data)) {
-+ printk(KERN_ERR "goal inode %lu unavailable\n", goal);
-+ /* Oh well, we tried. */
-+ goto continue_allocation;
-+ }
-+
-+ BUFFER_TRACE(bh, "call ext3_journal_dirty_metadata");
-+ err = ext3_journal_dirty_metadata(handle, bitmap_bh);
-+ if (err) goto fail;
-+
-+ /* We've short-circuited the allocation system successfully,
-+ * now finish filling in the inode.
-+ */
-+ goto got;
-+ }
-+
-+continue_allocation:
- if (S_ISDIR(mode)) {
- if (test_opt (sb, OLDALLOC))
- group = find_group_dir(sb, dir);
-diff -urp RH_2_6_9_42_0_3.orig/fs/ext3/ioctl.c RH_2_6_9_42_0_3/fs/ext3/ioctl.c
---- RH_2_6_9_42_0_3.orig/fs/ext3/ioctl.c 2006-10-23 13:32:46.000000000 +0300
-+++ RH_2_6_9_42_0_3/fs/ext3/ioctl.c 2007-02-16 07:22:28.000000000 +0200
-@@ -25,6 +25,31 @@ int ext3_ioctl (struct inode * inode, st
- ext3_debug ("cmd = %u, arg = %lu\n", cmd, arg);
-
- switch (cmd) {
-+ case EXT3_IOC_CREATE_INUM: {
-+ char name[32];
-+ struct dentry *dchild, *dparent;
-+ int rc = 0;
-+
-+ dparent = list_entry(inode->i_dentry.next, struct dentry,
-+ d_alias);
-+ snprintf(name, sizeof name, "%lu", arg);
-+ dchild = lookup_one_len(name, dparent, strlen(name));
-+ if (dchild->d_inode) {
-+ printk(KERN_ERR "%*s/%lu already exists (ino %lu)\n",
-+ dparent->d_name.len, dparent->d_name.name, arg,
-+ dchild->d_inode->i_ino);
-+ rc = -EEXIST;
-+ } else {
-+ dchild->d_fsdata = (void *)arg;
-+ rc = vfs_create(inode, dchild, 0644, NULL);
-+ if (rc)
-+ printk(KERN_ERR "vfs_create: %d\n", rc);
-+ else if (dchild->d_inode->i_ino != arg)
-+ rc = -EEXIST;
-+ }
-+ dput(dchild);
-+ return rc;
-+ }
- case EXT3_IOC_GETFLAGS:
- flags = ei->i_flags & EXT3_FL_USER_VISIBLE;
- return put_user(flags, (int __user *) arg);
-diff -urp RH_2_6_9_42_0_3.orig/fs/ext3/namei.c RH_2_6_9_42_0_3/fs/ext3/namei.c
---- RH_2_6_9_42_0_3.orig/fs/ext3/namei.c 2006-10-23 13:32:59.000000000 +0300
-+++ RH_2_6_9_42_0_3/fs/ext3/namei.c 2007-02-22 18:58:13.000000000 +0200
-@@ -97,6 +97,7 @@ struct dx_entry
- __le32 block;
- };
-
-+
- /*
- * dx_root_info is laid out so that if it should somehow get overlaid by a
- * dirent the two low bits of the hash version will be zero. Therefore, the
-@@ -141,6 +142,14 @@ struct dx_map_entry
- u32 offs;
- };
-
-+#define LVFS_DENTRY_PARAM_MAGIC 20070216UL
-+struct lvfs_dentry_params
-+{
-+ unsigned long p_inum;
-+ void *p_ptr;
-+ u32 magic;
-+};
-+
- #ifdef CONFIG_EXT3_INDEX
- static inline unsigned dx_get_block (struct dx_entry *entry);
- static void dx_set_block (struct dx_entry *entry, unsigned value);
-@@ -1624,6 +1633,20 @@ static int ext3_add_nondir(handle_t *han
- return err;
- }
-
-+static struct inode * ext3_new_inode_wantedi(handle_t *handle, struct inode *dir,
-+ int mode, struct dentry *dentry)
-+{
-+ unsigned long inum = 0;
-+
-+ if (dentry->d_fsdata != NULL) {
-+ struct lvfs_dentry_params *param = dentry->d_fsdata;
-+
-+ if (param->magic == LVFS_DENTRY_PARAM_MAGIC)
-+ inum = param->p_inum;
-+ }
-+ return ext3_new_inode(handle, dir, mode, inum);
-+}
-+
- /*
- * By the time this is called, we already have created
- * the directory cache entry for the new file, but it
-@@ -1649,7 +1672,7 @@ retry:
- if (IS_DIRSYNC(dir))
- handle->h_sync = 1;
-
-- inode = ext3_new_inode (handle, dir, mode);
-+ inode = ext3_new_inode_wantedi (handle, dir, mode, dentry);
- err = PTR_ERR(inode);
- if (!IS_ERR(inode)) {
- inode->i_op = &ext3_file_inode_operations;
-@@ -1683,7 +1706,7 @@ retry:
- if (IS_DIRSYNC(dir))
- handle->h_sync = 1;
-
-- inode = ext3_new_inode (handle, dir, mode);
-+ inode = ext3_new_inode_wantedi (handle, dir, mode, dentry);
- err = PTR_ERR(inode);
- if (!IS_ERR(inode)) {
- init_special_inode(inode, inode->i_mode, rdev);
-@@ -1719,7 +1742,7 @@ retry:
- if (IS_DIRSYNC(dir))
- handle->h_sync = 1;
-
-- inode = ext3_new_inode (handle, dir, S_IFDIR | mode);
-+ inode = ext3_new_inode_wantedi (handle, dir, S_IFDIR | mode, dentry);
- err = PTR_ERR(inode);
- if (IS_ERR(inode))
- goto out_stop;
-@@ -2124,7 +2147,7 @@ retry:
- if (IS_DIRSYNC(dir))
- handle->h_sync = 1;
-
-- inode = ext3_new_inode (handle, dir, S_IFLNK|S_IRWXUGO);
-+ inode = ext3_new_inode_wantedi (handle, dir, S_IFLNK|S_IRWXUGO, dentry);
- err = PTR_ERR(inode);
- if (IS_ERR(inode))
- goto out_stop;
-diff -urp RH_2_6_9_42_0_3.orig/include/linux/ext3_fs.h RH_2_6_9_42_0_3/include/linux/ext3_fs.h
---- RH_2_6_9_42_0_3.orig/include/linux/ext3_fs.h 2006-10-23 13:32:46.000000000 +0300
-+++ RH_2_6_9_42_0_3/include/linux/ext3_fs.h 2007-02-16 07:22:28.000000000 +0200
-@@ -741,7 +741,8 @@ extern int ext3fs_dirhash(const char *na
- dx_hash_info *hinfo);
-
- /* ialloc.c */
--extern struct inode * ext3_new_inode (handle_t *, struct inode *, int);
-+extern struct inode * ext3_new_inode (handle_t *, struct inode *, int,
-+ unsigned long);
- extern void ext3_free_inode (handle_t *, struct inode *);
- extern struct inode * ext3_orphan_get (struct super_block *, unsigned long);
- extern unsigned long ext3_count_free_inodes (struct super_block *);
-@@ -833,4 +834,6 @@ extern struct inode_operations ext3_fast
-
- #endif /* __KERNEL__ */
-
-+/* EXT3_IOC_CREATE_INUM at bottom of file (visible to kernel and user). */
-+#define EXT3_IOC_CREATE_INUM _IOW('f', 5, long)
- #endif /* _LINUX_EXT3_FS_H */
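The EXT3_IOC_CREATE_INUM ioctl defined above takes the desired inode number as its argument, creates an entry named after that number (in decimal) in the directory the ioctl is issued on, and fails with EEXIST if either the name or the inode is already in use. A hedged sketch of how user space would call it; the mount point and inode number are made up:

#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <unistd.h>

#define EXT3_IOC_CREATE_INUM _IOW('f', 5, long)

int main(void)
{
    /* Hypothetical target directory and goal inode number. */
    int dirfd = open("/mnt/ext3dir", O_RDONLY);
    unsigned long wanted = 123456;

    if (dirfd < 0)
        return 1;
    /* Creates "<dir>/123456" and tries to give it inode 123456;
     * fails with EEXIST if the name or the inode is already taken. */
    if (ioctl(dirfd, EXT3_IOC_CREATE_INUM, wanted) != 0)
        perror("EXT3_IOC_CREATE_INUM");
    close(dirfd);
    return 0;
}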
+++ /dev/null
-diff -urp linux-2.6.5-7.282.orig/fs/ext3/ialloc.c linux-2.6.5-7.282/fs/ext3/ialloc.c
---- linux-2.6.5-7.282.orig/fs/ext3/ialloc.c 2006-08-30 17:12:13.000000000 +0300
-+++ linux-2.6.5-7.282/fs/ext3/ialloc.c 2007-02-16 07:43:08.000000000 +0200
-@@ -420,7 +420,8 @@ static int find_group_other(struct super
- * For other inodes, search forward from the parent directory's block
- * group to find a free inode.
- */
--struct inode *ext3_new_inode(handle_t *handle, struct inode * dir, int mode)
-+struct inode *ext3_new_inode(handle_t *handle, struct inode * dir, int mode,
-+ unsigned long goal)
- {
- struct super_block *sb;
- struct buffer_head *bitmap_bh = NULL;
-@@ -448,6 +449,41 @@ struct inode *ext3_new_inode(handle_t *h
-
- sbi = EXT3_SB(sb);
- es = sbi->s_es;
-+ if (goal) {
-+ group = (goal - 1) / EXT3_INODES_PER_GROUP(sb);
-+ ino = (goal - 1) % EXT3_INODES_PER_GROUP(sb);
-+ err = -EIO;
-+
-+ gdp = ext3_get_group_desc(sb, group, &bh2);
-+ if (!gdp)
-+ goto fail;
-+
-+ bitmap_bh = read_inode_bitmap (sb, group);
-+ if (!bitmap_bh)
-+ goto fail;
-+
-+ BUFFER_TRACE(bh, "get_write_access");
-+ err = ext3_journal_get_write_access(handle, bitmap_bh);
-+ if (err) goto fail;
-+
-+ if (ext3_set_bit_atomic(sb_bgl_lock(sbi, group),
-+ ino, bitmap_bh->b_data)) {
-+ printk(KERN_ERR "goal inode %lu unavailable\n", goal);
-+ /* Oh well, we tried. */
-+ goto continue_allocation;
-+ }
-+
-+ BUFFER_TRACE(bh, "call ext3_journal_dirty_metadata");
-+ err = ext3_journal_dirty_metadata(handle, bitmap_bh);
-+ if (err) goto fail;
-+
-+ /* We've short-circuited the allocation system successfully,
-+ * now finish filling in the inode.
-+ */
-+ goto got;
-+ }
-+
-+continue_allocation:
- if (S_ISDIR(mode)) {
- if (test_opt (sb, OLDALLOC))
- group = find_group_dir(sb, dir);
-diff -urp linux-2.6.5-7.282.orig/fs/ext3/ioctl.c linux-2.6.5-7.282/fs/ext3/ioctl.c
---- linux-2.6.5-7.282.orig/fs/ext3/ioctl.c 2006-08-30 17:12:13.000000000 +0300
-+++ linux-2.6.5-7.282/fs/ext3/ioctl.c 2007-02-16 07:43:08.000000000 +0200
-@@ -25,6 +25,31 @@ int ext3_ioctl (struct inode * inode, st
- ext3_debug ("cmd = %u, arg = %lu\n", cmd, arg);
-
- switch (cmd) {
-+ case EXT3_IOC_CREATE_INUM: {
-+ char name[32];
-+ struct dentry *dchild, *dparent;
-+ int rc = 0;
-+
-+ dparent = list_entry(inode->i_dentry.next, struct dentry,
-+ d_alias);
-+ snprintf(name, sizeof name, "%lu", arg);
-+ dchild = lookup_one_len(name, dparent, strlen(name));
-+ if (dchild->d_inode) {
-+ printk(KERN_ERR "%*s/%lu already exists (ino %lu)\n",
-+ dparent->d_name.len, dparent->d_name.name, arg,
-+ dchild->d_inode->i_ino);
-+ rc = -EEXIST;
-+ } else {
-+ dchild->d_fsdata = (void *)arg;
-+ rc = vfs_create(inode, dchild, 0644, NULL);
-+ if (rc)
-+ printk(KERN_ERR "vfs_create: %d\n", rc);
-+ else if (dchild->d_inode->i_ino != arg)
-+ rc = -EEXIST;
-+ }
-+ dput(dchild);
-+ return rc;
-+ }
- case EXT3_IOC_GETFLAGS:
- flags = ei->i_flags & EXT3_FL_USER_VISIBLE;
- return put_user(flags, (int *) arg);
-diff -urp linux-2.6.5-7.282.orig/fs/ext3/namei.c linux-2.6.5-7.282/fs/ext3/namei.c
---- linux-2.6.5-7.282.orig/fs/ext3/namei.c 2006-08-30 17:12:34.000000000 +0300
-+++ linux-2.6.5-7.282/fs/ext3/namei.c 2007-02-16 07:46:13.000000000 +0200
-@@ -144,6 +144,14 @@ struct dx_map_entry
- u32 offs;
- };
-
-+#define LVFS_DENTRY_PARAM_MAGIC 20070216UL
-+struct lvfs_dentry_params
-+{
-+ unsigned long p_inum;
-+ void *p_ptr;
-+ u32 magic;
-+};
-+
- #ifdef CONFIG_EXT3_INDEX
- static inline unsigned dx_get_block (struct dx_entry *entry);
- static void dx_set_block (struct dx_entry *entry, unsigned value);
-@@ -1625,6 +1633,20 @@ static int ext3_add_nondir(handle_t *han
- return err;
- }
-
-+static struct inode * ext3_new_inode_wantedi(handle_t *handle, struct inode *dir,
-+ int mode, struct dentry *dentry)
-+{
-+ unsigned long inum = 0;
-+
-+ if (dentry->d_fsdata != NULL) {
-+ struct lvfs_dentry_params *param = dentry->d_fsdata;
-+
-+ if (param->magic == LVFS_DENTRY_PARAM_MAGIC)
-+ inum = param->p_inum;
-+ }
-+ return ext3_new_inode(handle, dir, mode, inum);
-+}
-+
- /*
- * By the time this is called, we already have created
- * the directory cache entry for the new file, but it
-@@ -1649,7 +1671,7 @@ retry:
- if (IS_DIRSYNC(dir))
- handle->h_sync = 1;
-
-- inode = ext3_new_inode (handle, dir, mode);
-+ inode = ext3_new_inode_wantedi (handle, dir, mode, dentry);
- err = PTR_ERR(inode);
- if (!IS_ERR(inode)) {
- inode->i_op = &ext3_file_inode_operations;
-@@ -1682,7 +1704,7 @@ retry:
- if (IS_DIRSYNC(dir))
- handle->h_sync = 1;
-
-- inode = ext3_new_inode (handle, dir, mode);
-+ inode = ext3_new_inode_wantedi (handle, dir, mode, dentry);
- err = PTR_ERR(inode);
- if (!IS_ERR(inode)) {
- init_special_inode(inode, inode->i_mode, rdev);
-@@ -1718,7 +1740,7 @@ retry:
- if (IS_DIRSYNC(dir))
- handle->h_sync = 1;
-
-- inode = ext3_new_inode (handle, dir, S_IFDIR | mode);
-+ inode = ext3_new_inode_wantedi (handle, dir, S_IFDIR | mode, dentry);
- err = PTR_ERR(inode);
- if (IS_ERR(inode))
- goto out_stop;
-@@ -2113,7 +2135,7 @@ retry:
- if (IS_DIRSYNC(dir))
- handle->h_sync = 1;
-
-- inode = ext3_new_inode (handle, dir, S_IFLNK|S_IRWXUGO);
-+ inode = ext3_new_inode_wantedi (handle, dir, S_IFLNK|S_IRWXUGO, dentry);
- err = PTR_ERR(inode);
- if (IS_ERR(inode))
- goto out_stop;
-diff -urp linux-2.6.5-7.282.orig/include/linux/ext3_fs.h linux-2.6.5-7.282/include/linux/ext3_fs.h
---- linux-2.6.5-7.282.orig/include/linux/ext3_fs.h 2006-08-30 17:12:13.000000000 +0300
-+++ linux-2.6.5-7.282/include/linux/ext3_fs.h 2007-02-16 07:43:08.000000000 +0200
-@@ -203,6 +203,7 @@ struct ext3_group_desc
- #define EXT3_IOC_SETFLAGS _IOW('f', 2, long)
- #define EXT3_IOC_GETVERSION _IOR('f', 3, long)
- #define EXT3_IOC_SETVERSION _IOW('f', 4, long)
-+/* EXT3_IOC_CREATE_INUM at bottom of file (visible to kernel and user). */
- #define EXT3_IOC_GETVERSION_OLD _IOR('v', 1, long)
- #define EXT3_IOC_SETVERSION_OLD _IOW('v', 2, long)
- #ifdef CONFIG_JBD_DEBUG
-@@ -712,7 +713,8 @@ extern int ext3fs_dirhash(const char *na
- dx_hash_info *hinfo);
-
- /* ialloc.c */
--extern struct inode * ext3_new_inode (handle_t *, struct inode *, int);
-+extern struct inode * ext3_new_inode (handle_t *, struct inode *, int,
-+ unsigned long);
- extern void ext3_free_inode (handle_t *, struct inode *);
- extern struct inode * ext3_orphan_get (struct super_block *, unsigned long);
- extern unsigned long ext3_count_free_inodes (struct super_block *);
-@@ -797,4 +799,5 @@ extern struct inode_operations ext3_fast
-
- #endif /* __KERNEL__ */
-
-+#define EXT3_IOC_CREATE_INUM _IOW('f', 5, long)
- #endif /* _LINUX_EXT3_FS_H */
+++ /dev/null
-Index: linux-2.6.16.i686/fs/ext3/iopen.c
-===================================================================
---- linux-2.6.16.i686.orig/fs/ext3/iopen.c 2006-05-31 04:14:15.752410384 +0800
-+++ linux-2.6.16.i686/fs/ext3/iopen.c 2006-05-30 22:52:38.000000000 +0800
-@@ -0,0 +1,259 @@
-+/*
-+ * linux/fs/ext3/iopen.c
-+ *
-+ * Special support for open by inode number
-+ *
-+ * Copyright (C) 2001 by Theodore Ts'o (tytso@alum.mit.edu).
-+ *
-+ * This file may be redistributed under the terms of the GNU General
-+ * Public License.
-+ *
-+ *
-+ * Invariants:
-+ * - there is only ever a single DCACHE_NFSD_DISCONNECTED dentry alias
-+ * for an inode at one time.
-+ * - there are never both connected and DCACHE_NFSD_DISCONNECTED dentry
-+ * aliases on an inode at the same time.
-+ *
-+ * If we have any connected dentry aliases for an inode, use one of those
-+ * in iopen_lookup(). Otherwise, we instantiate a single NFSD_DISCONNECTED
-+ * dentry for this inode, which thereafter will be found by the dcache
-+ * when looking up this inode number in __iopen__, so we don't return here
-+ * until it is gone.
-+ *
-+ * If we get an inode via a regular name lookup, then we "rename" the
-+ * NFSD_DISCONNECTED dentry to the proper name and parent. This ensures
-+ * existing users of the disconnected dentry will continue to use the same
-+ * dentry as the connected users, and there will never be both kinds of
-+ * dentry aliases at one time.
-+ */
-+
-+#include <linux/sched.h>
-+#include <linux/fs.h>
-+#include <linux/ext3_jbd.h>
-+#include <linux/jbd.h>
-+#include <linux/ext3_fs.h>
-+#include <linux/smp_lock.h>
-+#include <linux/dcache.h>
-+#include <linux/security.h>
-+#include "iopen.h"
-+
-+#ifndef assert
-+#define assert(test) J_ASSERT(test)
-+#endif
-+
-+#define IOPEN_NAME_LEN 32
-+
-+/*
-+ * This implements looking up an inode by number.
-+ */
-+static struct dentry *iopen_lookup(struct inode * dir, struct dentry *dentry,
-+ struct nameidata *nd)
-+{
-+ struct inode *inode;
-+ unsigned long ino;
-+ struct list_head *lp;
-+ struct dentry *alternate;
-+ char buf[IOPEN_NAME_LEN];
-+
-+ if (dentry->d_name.len >= IOPEN_NAME_LEN)
-+ return ERR_PTR(-ENAMETOOLONG);
-+
-+ memcpy(buf, dentry->d_name.name, dentry->d_name.len);
-+ buf[dentry->d_name.len] = 0;
-+
-+ if (strcmp(buf, ".") == 0)
-+ ino = dir->i_ino;
-+ else if (strcmp(buf, "..") == 0)
-+ ino = EXT3_ROOT_INO;
-+ else
-+ ino = simple_strtoul(buf, 0, 0);
-+
-+ if ((ino != EXT3_ROOT_INO &&
-+ //ino != EXT3_ACL_IDX_INO &&
-+ //ino != EXT3_ACL_DATA_INO &&
-+ ino < EXT3_FIRST_INO(dir->i_sb)) ||
-+ ino > le32_to_cpu(EXT3_SB(dir->i_sb)->s_es->s_inodes_count))
-+ return ERR_PTR(-ENOENT);
-+
-+ inode = iget(dir->i_sb, ino);
-+ if (!inode)
-+ return ERR_PTR(-EACCES);
-+ if (is_bad_inode(inode)) {
-+ iput(inode);
-+ return ERR_PTR(-ENOENT);
-+ }
-+
-+ assert(list_empty(&dentry->d_alias)); /* d_instantiate */
-+ assert(d_unhashed(dentry)); /* d_rehash */
-+
-+ /* preferably return a connected dentry */
-+ spin_lock(&dcache_lock);
-+ list_for_each(lp, &inode->i_dentry) {
-+ alternate = list_entry(lp, struct dentry, d_alias);
-+ assert(!(alternate->d_flags & DCACHE_DISCONNECTED));
-+ }
-+
-+ if (!list_empty(&inode->i_dentry)) {
-+ alternate = list_entry(inode->i_dentry.next,
-+ struct dentry, d_alias);
-+ dget_locked(alternate);
-+ spin_lock(&alternate->d_lock);
-+ alternate->d_flags |= DCACHE_REFERENCED;
-+ spin_unlock(&alternate->d_lock);
-+ iput(inode);
-+ spin_unlock(&dcache_lock);
-+ return alternate;
-+ }
-+ dentry->d_flags |= DCACHE_DISCONNECTED;
-+
-+ /* d_add(), but don't drop dcache_lock before adding dentry to inode */
-+ list_add(&dentry->d_alias, &inode->i_dentry); /* d_instantiate */
-+ dentry->d_inode = inode;
-+ spin_unlock(&dcache_lock);
-+
-+ d_rehash(dentry);
-+
-+ return NULL;
-+}
-+
-+/* This function is spliced into ext3_lookup and does the move of a
-+ * disconnected dentry (if it exists) to a connected dentry.
-+ */
-+struct dentry *iopen_connect_dentry(struct dentry *dentry, struct inode *inode,
-+ int rehash)
-+{
-+ struct dentry *tmp, *goal = NULL;
-+ struct list_head *lp;
-+
-+ /* verify this dentry is really new */
-+ assert(dentry->d_inode == NULL);
-+ assert(list_empty(&dentry->d_alias)); /* d_instantiate */
-+ if (rehash)
-+ assert(d_unhashed(dentry)); /* d_rehash */
-+ assert(list_empty(&dentry->d_subdirs));
-+
-+ spin_lock(&dcache_lock);
-+ if (!inode)
-+ goto do_rehash;
-+
-+ if (!test_opt(inode->i_sb, IOPEN))
-+ goto do_instantiate;
-+
-+ /* preferably return a connected dentry */
-+ list_for_each(lp, &inode->i_dentry) {
-+ tmp = list_entry(lp, struct dentry, d_alias);
-+ if (tmp->d_flags & DCACHE_DISCONNECTED) {
-+ assert(tmp->d_alias.next == &inode->i_dentry);
-+ assert(tmp->d_alias.prev == &inode->i_dentry);
-+ goal = tmp;
-+ dget_locked(goal);
-+ break;
-+ }
-+ }
-+
-+ if (!goal)
-+ goto do_instantiate;
-+
-+ /* Move the goal dentry to the dentry hash queue */
-+ goal->d_flags &= ~DCACHE_DISCONNECTED;
-+ security_d_instantiate(goal, inode);
-+ __d_drop(dentry);
-+ spin_unlock(&dcache_lock);
-+ d_rehash(dentry);
-+ d_move(goal, dentry);
-+ iput(inode);
-+
-+ return goal;
-+
-+ /* d_add(), but don't drop dcache_lock before adding dentry to inode */
-+do_instantiate:
-+ list_add(&dentry->d_alias, &inode->i_dentry); /* d_instantiate */
-+ dentry->d_inode = inode;
-+do_rehash:
-+ spin_unlock(&dcache_lock);
-+ if (rehash)
-+ d_rehash(dentry);
-+
-+ return NULL;
-+}
-+
-+/*
-+ * These are the special structures for the iopen pseudo directory.
-+ */
-+
-+static struct inode_operations iopen_inode_operations = {
-+ lookup: iopen_lookup, /* BKL held */
-+};
-+
-+static struct file_operations iopen_file_operations = {
-+ read: generic_read_dir,
-+};
-+
-+static int match_dentry(struct dentry *dentry, const char *name)
-+{
-+ int len;
-+
-+ len = strlen(name);
-+ if (dentry->d_name.len != len)
-+ return 0;
-+ if (strncmp(dentry->d_name.name, name, len))
-+ return 0;
-+ return 1;
-+}
-+
-+/*
-+ * This function is spliced into ext3_lookup and returns 1 if the file
-+ * name is __iopen__ and the dentry has been filled in appropriately.
-+ */
-+int ext3_check_for_iopen(struct inode *dir, struct dentry *dentry)
-+{
-+ struct inode *inode;
-+
-+ if (dir->i_ino != EXT3_ROOT_INO ||
-+ !test_opt(dir->i_sb, IOPEN) ||
-+ !match_dentry(dentry, "__iopen__"))
-+ return 0;
-+
-+ inode = iget(dir->i_sb, EXT3_BAD_INO);
-+
-+ if (!inode)
-+ return 0;
-+ d_add(dentry, inode);
-+ return 1;
-+}
-+
-+/*
-+ * This function is spliced into read_inode; it returns 1 if the inode
-+ * number is the one for /__iopen__, in which case the inode is filled
-+ * in appropriately. Otherwise, this function returns 0.
-+ */
-+int ext3_iopen_get_inode(struct inode *inode)
-+{
-+ if (inode->i_ino != EXT3_BAD_INO)
-+ return 0;
-+
-+ inode->i_mode = S_IFDIR | S_IRUSR | S_IXUSR;
-+ if (test_opt(inode->i_sb, IOPEN_NOPRIV))
-+ inode->i_mode |= 0777;
-+ inode->i_uid = 0;
-+ inode->i_gid = 0;
-+ inode->i_nlink = 1;
-+ inode->i_size = 4096;
-+ inode->i_atime = CURRENT_TIME;
-+ inode->i_ctime = CURRENT_TIME;
-+ inode->i_mtime = CURRENT_TIME;
-+ EXT3_I(inode)->i_dtime = 0;
-+ inode->i_blksize = PAGE_SIZE; /* This is the optimal IO size
-+ * (for stat), not the fs block
-+ * size */
-+ inode->i_blocks = 0;
-+ inode->i_version = 1;
-+ inode->i_generation = 0;
-+
-+ inode->i_op = &iopen_inode_operations;
-+ inode->i_fop = &iopen_file_operations;
-+ inode->i_mapping->a_ops = 0;
-+
-+ return 1;
-+}
-Index: linux-2.6.16.i686/fs/ext3/iopen.h
-===================================================================
---- linux-2.6.16.i686.orig/fs/ext3/iopen.h 2006-05-31 04:14:15.752410384 +0800
-+++ linux-2.6.16.i686/fs/ext3/iopen.h 2006-05-30 22:52:38.000000000 +0800
-@@ -0,0 +1,15 @@
-+/*
-+ * iopen.h
-+ *
-+ * Special support for opening files by inode number.
-+ *
-+ * Copyright (C) 2001 by Theodore Ts'o (tytso@alum.mit.edu).
-+ *
-+ * This file may be redistributed under the terms of the GNU General
-+ * Public License.
-+ */
-+
-+extern int ext3_check_for_iopen(struct inode *dir, struct dentry *dentry);
-+extern int ext3_iopen_get_inode(struct inode *inode);
-+extern struct dentry *iopen_connect_dentry(struct dentry *dentry,
-+ struct inode *inode, int rehash);
-Index: linux-2.6.16.i686/fs/ext3/inode.c
-===================================================================
---- linux-2.6.16.i686.orig/fs/ext3/inode.c 2006-05-30 22:52:03.000000000 +0800
-+++ linux-2.6.16.i686/fs/ext3/inode.c 2006-05-30 22:52:38.000000000 +0800
-@@ -37,6 +37,7 @@
- #include <linux/mpage.h>
- #include <linux/uio.h>
- #include "xattr.h"
-+#include "iopen.h"
- #include "acl.h"
-
- static int ext3_writepage_trans_blocks(struct inode *inode);
-@@ -2448,6 +2449,8 @@
- ei->i_default_acl = EXT3_ACL_NOT_CACHED;
- #endif
- ei->i_block_alloc_info = NULL;
-+ if (ext3_iopen_get_inode(inode))
-+ return;
-
- if (__ext3_get_inode_loc(inode, &iloc, 0))
- goto bad_inode;
-Index: linux-2.6.16.i686/fs/ext3/super.c
-===================================================================
---- linux-2.6.16.i686.orig/fs/ext3/super.c 2006-05-30 22:52:03.000000000 +0800
-+++ linux-2.6.16.i686/fs/ext3/super.c 2006-05-30 22:52:38.000000000 +0800
-@@ -634,6 +634,7 @@
- Opt_usrjquota, Opt_grpjquota, Opt_offusrjquota, Opt_offgrpjquota,
- Opt_jqfmt_vfsold, Opt_jqfmt_vfsv0, Opt_quota, Opt_noquota,
- Opt_ignore, Opt_barrier, Opt_err, Opt_resize, Opt_usrquota,
-+ Opt_iopen, Opt_noiopen, Opt_iopen_nopriv,
- Opt_grpquota
- };
-
-@@ -682,6 +683,9 @@
- {Opt_noquota, "noquota"},
- {Opt_quota, "quota"},
- {Opt_usrquota, "usrquota"},
-+ {Opt_iopen, "iopen"},
-+ {Opt_noiopen, "noiopen"},
-+ {Opt_iopen_nopriv, "iopen_nopriv"},
- {Opt_barrier, "barrier=%u"},
- {Opt_err, NULL},
- {Opt_resize, "resize"},
-@@ -996,6 +1000,18 @@
- else
- clear_opt(sbi->s_mount_opt, BARRIER);
- break;
-+ case Opt_iopen:
-+ set_opt (sbi->s_mount_opt, IOPEN);
-+ clear_opt (sbi->s_mount_opt, IOPEN_NOPRIV);
-+ break;
-+ case Opt_noiopen:
-+ clear_opt (sbi->s_mount_opt, IOPEN);
-+ clear_opt (sbi->s_mount_opt, IOPEN_NOPRIV);
-+ break;
-+ case Opt_iopen_nopriv:
-+ set_opt (sbi->s_mount_opt, IOPEN);
-+ set_opt (sbi->s_mount_opt, IOPEN_NOPRIV);
-+ break;
- case Opt_ignore:
- break;
- case Opt_resize:
-Index: linux-2.6.16.i686/fs/ext3/namei.c
-===================================================================
---- linux-2.6.16.i686.orig/fs/ext3/namei.c 2006-05-30 22:52:00.000000000 +0800
-+++ linux-2.6.16.i686/fs/ext3/namei.c 2006-05-30 22:55:19.000000000 +0800
-@@ -39,6 +39,7 @@
-
- #include "namei.h"
- #include "xattr.h"
-+#include "iopen.h"
- #include "acl.h"
-
- /*
-@@ -995,6 +996,9 @@
- if (dentry->d_name.len > EXT3_NAME_LEN)
- return ERR_PTR(-ENAMETOOLONG);
-
-+ if (ext3_check_for_iopen(dir, dentry))
-+ return NULL;
-+
- bh = ext3_find_entry(dentry, &de);
- inode = NULL;
- if (bh) {
-@@ -1005,7 +1009,7 @@
- if (!inode)
- return ERR_PTR(-EACCES);
- }
-- return d_splice_alias(inode, dentry);
-+ return iopen_connect_dentry(dentry, inode, 1);
- }
-
-
-@@ -2046,10 +2050,6 @@
- inode->i_nlink);
- inode->i_version++;
- inode->i_nlink = 0;
-- /* There's no need to set i_disksize: the fact that i_nlink is
-- * zero will ensure that the right thing happens during any
-- * recovery. */
-- inode->i_size = 0;
- ext3_orphan_add(handle, inode);
- inode->i_ctime = dir->i_ctime = dir->i_mtime = CURRENT_TIME_SEC;
- ext3_mark_inode_dirty(handle, inode);
-@@ -2173,6 +2173,23 @@
- return err;
- }
-
-+/* Like ext3_add_nondir() except for call to iopen_connect_dentry */
-+static int ext3_add_link(handle_t *handle, struct dentry *dentry,
-+ struct inode *inode)
-+{
-+ int err = ext3_add_entry(handle, dentry, inode);
-+ if (!err) {
-+ err = ext3_mark_inode_dirty(handle, inode);
-+ if (err == 0) {
-+ dput(iopen_connect_dentry(dentry, inode, 0));
-+ return 0;
-+ }
-+ }
-+ ext3_dec_count(handle, inode);
-+ iput(inode);
-+ return err;
-+}
-+
- static int ext3_link (struct dentry * old_dentry,
- struct inode * dir, struct dentry *dentry)
- {
-@@ -2196,7 +2213,8 @@
- ext3_inc_count(handle, inode);
- atomic_inc(&inode->i_count);
-
-- err = ext3_add_nondir(handle, dentry, inode);
-+ err = ext3_add_link(handle, dentry, inode);
-+ ext3_orphan_del(handle, inode);
- ext3_journal_stop(handle);
- if (err == -ENOSPC && ext3_should_retry_alloc(dir->i_sb, &retries))
- goto retry;
-Index: linux-2.6.16.i686/fs/ext3/Makefile
-===================================================================
---- linux-2.6.16.i686.orig/fs/ext3/Makefile 2006-03-20 13:53:29.000000000 +0800
-+++ linux-2.6.16.i686/fs/ext3/Makefile 2006-05-30 22:52:38.000000000 +0800
-@@ -4,7 +4,7 @@
-
- obj-$(CONFIG_EXT3_FS) += ext3.o
-
--ext3-y := balloc.o bitmap.o dir.o file.o fsync.o ialloc.o inode.o \
-+ext3-y := balloc.o bitmap.o dir.o file.o fsync.o ialloc.o inode.o iopen.o \
- ioctl.o namei.o super.o symlink.o hash.o resize.o
-
- ext3-$(CONFIG_EXT3_FS_XATTR) += xattr.o xattr_user.o xattr_trusted.o
-Index: linux-2.6.16.i686/include/linux/ext3_fs.h
-===================================================================
---- linux-2.6.16.i686.orig/include/linux/ext3_fs.h 2006-05-30 22:52:00.000000000 +0800
-+++ linux-2.6.16.i686/include/linux/ext3_fs.h 2006-05-30 22:52:38.000000000 +0800
-@@ -375,6 +375,8 @@
- #define EXT3_MOUNT_QUOTA 0x80000 /* Some quota option set */
- #define EXT3_MOUNT_USRQUOTA 0x100000 /* "old" user quota */
- #define EXT3_MOUNT_GRPQUOTA 0x200000 /* "old" group quota */
-+#define EXT3_MOUNT_IOPEN 0x400000 /* Allow access via iopen */
-+#define EXT3_MOUNT_IOPEN_NOPRIV 0x800000/* Make iopen world-readable */
-
- /* Compatibility, for having both ext2_fs.h and ext3_fs.h included at once */
- #ifndef _LINUX_EXT2_FS_H
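With the patch above applied and the filesystem mounted with -o iopen (or iopen_nopriv for unprivileged access), the root of the filesystem gains a virtual __iopen__ directory whose entries are decimal inode numbers, so any inode can be opened without knowing a path to it. A hedged user-space sketch; the mount point and inode number are assumptions:

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

/* Open an inode on an ext3 filesystem mounted with -o iopen.
 * 'mnt' is the mount point, 'ino' the inode number. */
static int open_by_inum(const char *mnt, unsigned long ino, int flags)
{
    char path[4096];

    snprintf(path, sizeof(path), "%s/__iopen__/%lu", mnt, ino);
    return open(path, flags);
}

int main(void)
{
    int fd = open_by_inum("/mnt/lustre-ost", 12, O_RDONLY); /* hypothetical */

    if (fd < 0) {
        perror("open_by_inum");
        return 1;
    }
    close(fd);
    return 0;
}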
+++ /dev/null
-Index: linux-stage/fs/ext3/Makefile
-===================================================================
---- linux-stage.orig/fs/ext3/Makefile 2005-02-25 14:31:53.151076368 +0200
-+++ linux-stage/fs/ext3/Makefile 2005-02-25 14:41:51.259150120 +0200
-@@ -4,7 +4,7 @@
-
- obj-$(CONFIG_EXT3_FS) += ext3.o
-
--ext3-y := balloc.o bitmap.o dir.o file.o fsync.o ialloc.o inode.o \
-+ext3-y := balloc.o bitmap.o dir.o file.o fsync.o ialloc.o inode.o iopen.o \
- ioctl.o namei.o super.o symlink.o hash.o resize.o
-
- ext3-$(CONFIG_EXT3_FS_XATTR) += xattr.o xattr_user.o xattr_trusted.o
-Index: linux-stage/fs/ext3/inode.c
-===================================================================
---- linux-stage.orig/fs/ext3/inode.c 2005-02-25 14:37:30.983718000 +0200
-+++ linux-stage/fs/ext3/inode.c 2005-02-25 14:47:42.069818792 +0200
-@@ -37,6 +37,7 @@
- #include <linux/mpage.h>
- #include <linux/uio.h>
- #include "xattr.h"
-+#include "iopen.h"
- #include "acl.h"
-
- /*
-@@ -2408,6 +2409,8 @@
- ei->i_default_acl = EXT3_ACL_NOT_CACHED;
- #endif
- ei->i_rsv_window.rsv_end = EXT3_RESERVE_WINDOW_NOT_ALLOCATED;
-+ if (ext3_iopen_get_inode(inode))
-+ return;
-
- if (ext3_get_inode_loc(inode, &iloc, 0))
- goto bad_inode;
-Index: linux-stage/fs/ext3/iopen.c
-===================================================================
---- linux-stage.orig/fs/ext3/iopen.c 2005-02-25 14:41:01.017787968 +0200
-+++ linux-stage/fs/ext3/iopen.c 2005-02-25 14:41:01.045783712 +0200
-@@ -0,0 +1,278 @@
-+/*
-+ * linux/fs/ext3/iopen.c
-+ *
-+ * Special support for open by inode number
-+ *
-+ * Copyright (C) 2001 by Theodore Ts'o (tytso@alum.mit.edu).
-+ *
-+ * This file may be redistributed under the terms of the GNU General
-+ * Public License.
-+ *
-+ *
-+ * Invariants:
-+ * - there is only ever a single DCACHE_NFSD_DISCONNECTED dentry alias
-+ * for an inode at one time.
-+ * - there are never both connected and DCACHE_NFSD_DISCONNECTED dentry
-+ * aliases on an inode at the same time.
-+ *
-+ * If we have any connected dentry aliases for an inode, use one of those
-+ * in iopen_lookup(). Otherwise, we instantiate a single NFSD_DISCONNECTED
-+ * dentry for this inode, which thereafter will be found by the dcache
-+ * when looking up this inode number in __iopen__, so we don't return here
-+ * until it is gone.
-+ *
-+ * If we get an inode via a regular name lookup, then we "rename" the
-+ * NFSD_DISCONNECTED dentry to the proper name and parent. This ensures
-+ * existing users of the disconnected dentry will continue to use the same
-+ * dentry as the connected users, and there will never be both kinds of
-+ * dentry aliases at one time.
-+ */
-+
-+#include <linux/sched.h>
-+#include <linux/fs.h>
-+#include <linux/ext3_jbd.h>
-+#include <linux/jbd.h>
-+#include <linux/ext3_fs.h>
-+#include <linux/smp_lock.h>
-+#include <linux/dcache.h>
-+#include <linux/security.h>
-+#include "iopen.h"
-+
-+#ifndef assert
-+#define assert(test) J_ASSERT(test)
-+#endif
-+
-+#define IOPEN_NAME_LEN 32
-+
-+/*
-+ * This implements looking up an inode by number.
-+ */
-+static struct dentry *iopen_lookup(struct inode * dir, struct dentry *dentry,
-+ struct nameidata *nd)
-+{
-+ struct inode *inode;
-+ unsigned long ino;
-+ struct list_head *lp;
-+ struct dentry *alternate;
-+ char buf[IOPEN_NAME_LEN];
-+
-+ if (dentry->d_name.len >= IOPEN_NAME_LEN)
-+ return ERR_PTR(-ENAMETOOLONG);
-+
-+ memcpy(buf, dentry->d_name.name, dentry->d_name.len);
-+ buf[dentry->d_name.len] = 0;
-+
-+ if (strcmp(buf, ".") == 0)
-+ ino = dir->i_ino;
-+ else if (strcmp(buf, "..") == 0)
-+ ino = EXT3_ROOT_INO;
-+ else
-+ ino = simple_strtoul(buf, 0, 0);
-+
-+ if ((ino != EXT3_ROOT_INO &&
-+ //ino != EXT3_ACL_IDX_INO &&
-+ //ino != EXT3_ACL_DATA_INO &&
-+ ino < EXT3_FIRST_INO(dir->i_sb)) ||
-+ ino > le32_to_cpu(EXT3_SB(dir->i_sb)->s_es->s_inodes_count))
-+ return ERR_PTR(-ENOENT);
-+
-+ inode = iget(dir->i_sb, ino);
-+ if (!inode)
-+ return ERR_PTR(-EACCES);
-+ if (is_bad_inode(inode)) {
-+ iput(inode);
-+ return ERR_PTR(-ENOENT);
-+ }
-+
-+ assert(list_empty(&dentry->d_alias)); /* d_instantiate */
-+ assert(d_unhashed(dentry)); /* d_rehash */
-+
-+ /* preferably return a connected dentry */
-+ spin_lock(&dcache_lock);
-+ list_for_each(lp, &inode->i_dentry) {
-+ alternate = list_entry(lp, struct dentry, d_alias);
-+ assert(!(alternate->d_flags & DCACHE_DISCONNECTED));
-+ }
-+
-+ if (!list_empty(&inode->i_dentry)) {
-+ alternate = list_entry(inode->i_dentry.next,
-+ struct dentry, d_alias);
-+ dget_locked(alternate);
-+ spin_lock(&alternate->d_lock);
-+ alternate->d_flags |= DCACHE_REFERENCED;
-+ spin_unlock(&alternate->d_lock);
-+ iput(inode);
-+ spin_unlock(&dcache_lock);
-+ return alternate;
-+ }
-+ dentry->d_flags |= DCACHE_DISCONNECTED;
-+
-+ /* d_add(), but don't drop dcache_lock before adding dentry to inode */
-+ list_add(&dentry->d_alias, &inode->i_dentry); /* d_instantiate */
-+ dentry->d_inode = inode;
-+
-+ __d_rehash(dentry, 0); /* d_rehash */
-+ spin_unlock(&dcache_lock);
-+
-+ return NULL;
-+}
-+
-+#define do_switch(x,y) do { \
-+ __typeof__ (x) __tmp = x; \
-+ x = y; y = __tmp; } while (0)
-+
-+static inline void switch_names(struct dentry *dentry, struct dentry *target)
-+{
-+ const unsigned char *old_name, *new_name;
-+
-+ memcpy(dentry->d_iname, target->d_iname, DNAME_INLINE_LEN_MIN);
-+ old_name = target->d_name.name;
-+ new_name = dentry->d_name.name;
-+ if (old_name == target->d_iname)
-+ old_name = dentry->d_iname;
-+ if (new_name == dentry->d_iname)
-+ new_name = target->d_iname;
-+ target->d_name.name = new_name;
-+ dentry->d_name.name = old_name;
-+}
-+
-+/* This function is spliced into ext3_lookup and does the move of a
-+ * disconnected dentry (if it exists) to a connected dentry.
-+ */
-+struct dentry *iopen_connect_dentry(struct dentry *dentry, struct inode *inode,
-+ int rehash)
-+{
-+ struct dentry *tmp, *goal = NULL;
-+ struct list_head *lp;
-+
-+ /* verify this dentry is really new */
-+ assert(dentry->d_inode == NULL);
-+ assert(list_empty(&dentry->d_alias)); /* d_instantiate */
-+ if (rehash)
-+ assert(d_unhashed(dentry)); /* d_rehash */
-+ assert(list_empty(&dentry->d_subdirs));
-+
-+ spin_lock(&dcache_lock);
-+ if (!inode)
-+ goto do_rehash;
-+
-+ if (!test_opt(inode->i_sb, IOPEN))
-+ goto do_instantiate;
-+
-+ /* preferably return a connected dentry */
-+ list_for_each(lp, &inode->i_dentry) {
-+ tmp = list_entry(lp, struct dentry, d_alias);
-+ if (tmp->d_flags & DCACHE_DISCONNECTED) {
-+ assert(tmp->d_alias.next == &inode->i_dentry);
-+ assert(tmp->d_alias.prev == &inode->i_dentry);
-+ goal = tmp;
-+ dget_locked(goal);
-+ break;
-+ }
-+ }
-+
-+ if (!goal)
-+ goto do_instantiate;
-+
-+ /* Move the goal to the de hash queue */
-+ goal->d_flags &= ~DCACHE_DISCONNECTED;
-+ security_d_instantiate(goal, inode);
-+ __d_drop(dentry);
-+ __d_rehash(dentry, 0);
-+ __d_move(goal, dentry);
-+ spin_unlock(&dcache_lock);
-+ iput(inode);
-+
-+ return goal;
-+
-+ /* d_add(), but don't drop dcache_lock before adding dentry to inode */
-+do_instantiate:
-+ list_add(&dentry->d_alias, &inode->i_dentry); /* d_instantiate */
-+ dentry->d_inode = inode;
-+do_rehash:
-+ if (rehash)
-+ __d_rehash(dentry, 0); /* d_rehash */
-+ spin_unlock(&dcache_lock);
-+
-+ return NULL;
-+}
-+
-+/*
-+ * These are the special structures for the iopen pseudo directory.
-+ */
-+
-+static struct inode_operations iopen_inode_operations = {
-+ lookup: iopen_lookup, /* BKL held */
-+};
-+
-+static struct file_operations iopen_file_operations = {
-+ read: generic_read_dir,
-+};
-+
-+static int match_dentry(struct dentry *dentry, const char *name)
-+{
-+ int len;
-+
-+ len = strlen(name);
-+ if (dentry->d_name.len != len)
-+ return 0;
-+ if (strncmp(dentry->d_name.name, name, len))
-+ return 0;
-+ return 1;
-+}
-+
-+/*
-+ * This function is spliced into ext3_lookup and returns 1 if the file
-+ * name is __iopen__ and the dentry has been filled in appropriately.
-+ */
-+int ext3_check_for_iopen(struct inode *dir, struct dentry *dentry)
-+{
-+ struct inode *inode;
-+
-+ if (dir->i_ino != EXT3_ROOT_INO ||
-+ !test_opt(dir->i_sb, IOPEN) ||
-+ !match_dentry(dentry, "__iopen__"))
-+ return 0;
-+
-+ inode = iget(dir->i_sb, EXT3_BAD_INO);
-+
-+ if (!inode)
-+ return 0;
-+ d_add(dentry, inode);
-+ return 1;
-+}
-+
-+/*
-+ * This function is spliced into read_inode; it returns 1 if inode
-+ * number is the one for /__iopen__, in which case the inode is filled
-+ * in appropriately. Otherwise, this function returns 0.
-+ */
-+int ext3_iopen_get_inode(struct inode *inode)
-+{
-+ if (inode->i_ino != EXT3_BAD_INO)
-+ return 0;
-+
-+ inode->i_mode = S_IFDIR | S_IRUSR | S_IXUSR;
-+ if (test_opt(inode->i_sb, IOPEN_NOPRIV))
-+ inode->i_mode |= 0777;
-+ inode->i_uid = 0;
-+ inode->i_gid = 0;
-+ inode->i_nlink = 1;
-+ inode->i_size = 4096;
-+ inode->i_atime = CURRENT_TIME;
-+ inode->i_ctime = CURRENT_TIME;
-+ inode->i_mtime = CURRENT_TIME;
-+ EXT3_I(inode)->i_dtime = 0;
-+ inode->i_blksize = PAGE_SIZE; /* This is the optimal IO size
-+ * (for stat), not the fs block
-+ * size */
-+ inode->i_blocks = 0;
-+ inode->i_version = 1;
-+ inode->i_generation = 0;
-+
-+ inode->i_op = &iopen_inode_operations;
-+ inode->i_fop = &iopen_file_operations;
-+ inode->i_mapping->a_ops = 0;
-+
-+ return 1;
-+}
-Index: linux-stage/fs/ext3/iopen.h
-===================================================================
---- linux-stage.orig/fs/ext3/iopen.h 2005-02-25 14:41:01.017787968 +0200
-+++ linux-stage/fs/ext3/iopen.h 2005-02-25 14:41:01.045783712 +0200
-@@ -0,0 +1,15 @@
-+/*
-+ * iopen.h
-+ *
-+ * Special support for opening files by inode number.
-+ *
-+ * Copyright (C) 2001 by Theodore Ts'o (tytso@alum.mit.edu).
-+ *
-+ * This file may be redistributed under the terms of the GNU General
-+ * Public License.
-+ */
-+
-+extern int ext3_check_for_iopen(struct inode *dir, struct dentry *dentry);
-+extern int ext3_iopen_get_inode(struct inode *inode);
-+extern struct dentry *iopen_connect_dentry(struct dentry *dentry,
-+ struct inode *inode, int rehash);
-Index: linux-stage/fs/ext3/namei.c
-===================================================================
---- linux-stage.orig/fs/ext3/namei.c 2005-02-25 14:37:28.975023368 +0200
-+++ linux-stage/fs/ext3/namei.c 2005-02-25 14:46:43.090784968 +0200
-@@ -37,6 +37,7 @@
- #include <linux/buffer_head.h>
- #include <linux/smp_lock.h>
- #include "xattr.h"
-+#include "iopen.h"
- #include "acl.h"
-
- /*
-@@ -980,6 +981,9 @@
- if (dentry->d_name.len > EXT3_NAME_LEN)
- return ERR_PTR(-ENAMETOOLONG);
-
-+ if (ext3_check_for_iopen(dir, dentry))
-+ return NULL;
-+
- bh = ext3_find_entry(dentry, &de);
- inode = NULL;
- if (bh) {
-@@ -990,10 +994,8 @@
- if (!inode)
- return ERR_PTR(-EACCES);
- }
-- if (inode)
-- return d_splice_alias(inode, dentry);
-- d_add(dentry, inode);
-- return NULL;
-+
-+ return iopen_connect_dentry(dentry, inode, 1);
- }
-
-
-@@ -2037,10 +2039,6 @@
- inode->i_nlink);
- inode->i_version++;
- inode->i_nlink = 0;
-- /* There's no need to set i_disksize: the fact that i_nlink is
-- * zero will ensure that the right thing happens during any
-- * recovery. */
-- inode->i_size = 0;
- ext3_orphan_add(handle, inode);
- inode->i_ctime = dir->i_ctime = dir->i_mtime = CURRENT_TIME;
- ext3_mark_inode_dirty(handle, inode);
-@@ -2163,6 +2161,23 @@
- return err;
- }
-
-+/* Like ext3_add_nondir() except for call to iopen_connect_dentry */
-+static int ext3_add_link(handle_t *handle, struct dentry *dentry,
-+ struct inode *inode)
-+{
-+ int err = ext3_add_entry(handle, dentry, inode);
-+ if (!err) {
-+ err = ext3_mark_inode_dirty(handle, inode);
-+ if (err == 0) {
-+ dput(iopen_connect_dentry(dentry, inode, 0));
-+ return 0;
-+ }
-+ }
-+ ext3_dec_count(handle, inode);
-+ iput(inode);
-+ return err;
-+}
-+
- static int ext3_link (struct dentry * old_dentry,
- struct inode * dir, struct dentry *dentry)
- {
-@@ -2186,7 +2201,8 @@
- ext3_inc_count(handle, inode);
- atomic_inc(&inode->i_count);
-
-- err = ext3_add_nondir(handle, dentry, inode);
-+ err = ext3_add_link(handle, dentry, inode);
-+ ext3_orphan_del(handle, inode);
- ext3_journal_stop(handle);
- if (err == -ENOSPC && ext3_should_retry_alloc(dir->i_sb, &retries))
- goto retry;
-Index: linux-stage/fs/ext3/super.c
-===================================================================
---- linux-stage.orig/fs/ext3/super.c 2005-02-25 14:37:30.987717392 +0200
-+++ linux-stage/fs/ext3/super.c 2005-02-25 14:44:50.495901992 +0200
-@@ -586,6 +586,7 @@
- Opt_usrjquota, Opt_grpjquota, Opt_offusrjquota, Opt_offgrpjquota,
- Opt_jqfmt_vfsold, Opt_jqfmt_vfsv0,
- Opt_ignore, Opt_barrier, Opt_err, Opt_resize,
-+ Opt_iopen, Opt_noiopen, Opt_iopen_nopriv,
- };
-
- static match_table_t tokens = {
-@@ -633,6 +634,9 @@
- {Opt_ignore, "noquota"},
- {Opt_ignore, "quota"},
- {Opt_ignore, "usrquota"},
-+ {Opt_iopen, "iopen"},
-+ {Opt_noiopen, "noiopen"},
-+ {Opt_iopen_nopriv, "iopen_nopriv"},
- {Opt_barrier, "barrier=%u"},
- {Opt_err, NULL},
- {Opt_resize, "resize"},
-@@ -914,6 +918,18 @@
- else
- clear_opt(sbi->s_mount_opt, BARRIER);
- break;
-+ case Opt_iopen:
-+ set_opt (sbi->s_mount_opt, IOPEN);
-+ clear_opt (sbi->s_mount_opt, IOPEN_NOPRIV);
-+ break;
-+ case Opt_noiopen:
-+ clear_opt (sbi->s_mount_opt, IOPEN);
-+ clear_opt (sbi->s_mount_opt, IOPEN_NOPRIV);
-+ break;
-+ case Opt_iopen_nopriv:
-+ set_opt (sbi->s_mount_opt, IOPEN);
-+ set_opt (sbi->s_mount_opt, IOPEN_NOPRIV);
-+ break;
- case Opt_ignore:
- break;
- case Opt_resize:
-Index: linux-stage/include/linux/ext3_fs.h
-===================================================================
---- linux-stage.orig/include/linux/ext3_fs.h 2005-02-25 14:37:28.977023064 +0200
-+++ linux-stage/include/linux/ext3_fs.h 2005-02-25 14:49:00.569884968 +0200
-@@ -355,6 +355,8 @@
- #define EXT3_MOUNT_POSIX_ACL 0x08000 /* POSIX Access Control Lists */
- #define EXT3_MOUNT_BARRIER 0x10000 /* Use block barriers */
- #define EXT3_MOUNT_RESERVATION 0x20000 /* Preallocation */
-+#define EXT3_MOUNT_IOPEN 0x80000 /* Allow access via iopen */
-+#define EXT3_MOUNT_IOPEN_NOPRIV 0x100000 /* Make iopen world-readable */
-
- /* Compatibility, for having both ext2_fs.h and ext3_fs.h included at once */
- #ifndef _LINUX_EXT2_FS_H
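The iopen patch above adds a __iopen__ pseudo directory at the filesystem root whose entries are plain inode numbers. A minimal userspace sketch of how such a filesystem could be used once it is mounted with -o iopen; the mount point, inode number, and helper name here are illustrative and not taken from the patches:

/*
 * Userspace sketch (illustrative, not part of the patches): open a file
 * on an ext3 filesystem mounted with "-o iopen" by naming its inode
 * number inside the __iopen__ pseudo directory.
 */
#include <stdio.h>
#include <stdlib.h>
#include <fcntl.h>
#include <unistd.h>

static int open_by_ino(const char *mntpt, unsigned long ino)
{
	char path[4096];

	/* iopen_lookup() parses the entry name with simple_strtoul(),
	 * so a plain decimal inode number is a valid name here. */
	snprintf(path, sizeof(path), "%s/__iopen__/%lu", mntpt, ino);
	return open(path, O_RDONLY);
}

int main(int argc, char **argv)
{
	unsigned long ino;
	int fd;

	if (argc != 3) {
		fprintf(stderr, "usage: %s <mountpoint> <inode-number>\n", argv[0]);
		return 1;
	}
	ino = strtoul(argv[2], NULL, 0);

	fd = open_by_ino(argv[1], ino);
	if (fd < 0) {
		perror("open");
		return 1;
	}
	printf("opened inode %lu as fd %d\n", ino, fd);
	close(fd);
	return 0;
}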
+++ /dev/null
-Index: linux-stage/fs/ext3/Makefile
-===================================================================
---- linux-stage.orig/fs/ext3/Makefile 2005-02-25 14:31:53.151076368 +0200
-+++ linux-stage/fs/ext3/Makefile 2005-02-25 14:41:51.259150120 +0200
-@@ -4,7 +4,7 @@
-
- obj-$(CONFIG_EXT3_FS) += ext3.o
-
--ext3-y := balloc.o bitmap.o dir.o file.o fsync.o ialloc.o inode.o \
-+ext3-y := balloc.o bitmap.o dir.o file.o fsync.o ialloc.o inode.o iopen.o \
- ioctl.o namei.o super.o symlink.o hash.o
-
- ext3-$(CONFIG_EXT3_FS_XATTR) += xattr.o xattr_user.o xattr_trusted.o
-Index: linux-stage/fs/ext3/inode.c
-===================================================================
---- linux-stage.orig/fs/ext3/inode.c 2005-02-25 14:37:30.983718000 +0200
-+++ linux-stage/fs/ext3/inode.c 2005-02-25 14:47:42.069818792 +0200
-@@ -37,6 +37,7 @@
- #include <linux/mpage.h>
- #include <linux/uio.h>
- #include "xattr.h"
-+#include "iopen.h"
- #include "acl.h"
-
- /*
-@@ -2408,6 +2409,9 @@
- #endif
- ei->i_rsv_window.rsv_end = EXT3_RESERVE_WINDOW_NOT_ALLOCATED;
-
-+ if (ext3_iopen_get_inode(inode))
-+ return;
-+
- if (ext3_get_inode_loc(inode, &iloc, 0))
- goto bad_inode;
- bh = iloc.bh;
-Index: linux-stage/fs/ext3/iopen.c
-===================================================================
---- linux-2.6.5-sles9.orig/fs/ext3/iopen.c 2003-01-30 13:24:37.000000000 +0300
-+++ linux-2.6.5-sles9/fs/ext3/iopen.c 2004-11-09 02:18:27.611913312 +0300
-@@ -0,0 +1,278 @@
-+/*
-+ * linux/fs/ext3/iopen.c
-+ *
-+ * Special support for open by inode number
-+ *
-+ * Copyright (C) 2001 by Theodore Ts'o (tytso@alum.mit.edu).
-+ *
-+ * This file may be redistributed under the terms of the GNU General
-+ * Public License.
-+ *
-+ *
-+ * Invariants:
-+ * - there is only ever a single DCACHE_NFSD_DISCONNECTED dentry alias
-+ * for an inode at one time.
-+ * - there are never both connected and DCACHE_NFSD_DISCONNECTED dentry
-+ * aliases on an inode at the same time.
-+ *
-+ * If we have any connected dentry aliases for an inode, use one of those
-+ * in iopen_lookup(). Otherwise, we instantiate a single NFSD_DISCONNECTED
-+ * dentry for this inode, which thereafter will be found by the dcache
-+ * when looking up this inode number in __iopen__, so we don't return here
-+ * until it is gone.
-+ *
-+ * If we get an inode via a regular name lookup, then we "rename" the
-+ * NFSD_DISCONNECTED dentry to the proper name and parent. This ensures
-+ * existing users of the disconnected dentry will continue to use the same
-+ * dentry as the connected users, and there will never be both kinds of
-+ * dentry aliases at one time.
-+ */
-+
-+#include <linux/sched.h>
-+#include <linux/fs.h>
-+#include <linux/ext3_jbd.h>
-+#include <linux/jbd.h>
-+#include <linux/ext3_fs.h>
-+#include <linux/smp_lock.h>
-+#include <linux/dcache.h>
-+#include <linux/security.h>
-+#include "iopen.h"
-+
-+#ifndef assert
-+#define assert(test) J_ASSERT(test)
-+#endif
-+
-+#define IOPEN_NAME_LEN 32
-+
-+/*
-+ * This implements looking up an inode by number.
-+ */
-+static struct dentry *iopen_lookup(struct inode * dir, struct dentry *dentry,
-+ struct nameidata *nd)
-+{
-+ struct inode *inode;
-+ unsigned long ino;
-+ struct list_head *lp;
-+ struct dentry *alternate;
-+ char buf[IOPEN_NAME_LEN];
-+
-+ if (dentry->d_name.len >= IOPEN_NAME_LEN)
-+ return ERR_PTR(-ENAMETOOLONG);
-+
-+ memcpy(buf, dentry->d_name.name, dentry->d_name.len);
-+ buf[dentry->d_name.len] = 0;
-+
-+ if (strcmp(buf, ".") == 0)
-+ ino = dir->i_ino;
-+ else if (strcmp(buf, "..") == 0)
-+ ino = EXT3_ROOT_INO;
-+ else
-+ ino = simple_strtoul(buf, 0, 0);
-+
-+ if ((ino != EXT3_ROOT_INO &&
-+ //ino != EXT3_ACL_IDX_INO &&
-+ //ino != EXT3_ACL_DATA_INO &&
-+ ino < EXT3_FIRST_INO(dir->i_sb)) ||
-+ ino > le32_to_cpu(EXT3_SB(dir->i_sb)->s_es->s_inodes_count))
-+ return ERR_PTR(-ENOENT);
-+
-+ inode = iget(dir->i_sb, ino);
-+ if (!inode)
-+ return ERR_PTR(-EACCES);
-+ if (is_bad_inode(inode)) {
-+ iput(inode);
-+ return ERR_PTR(-ENOENT);
-+ }
-+
-+ assert(list_empty(&dentry->d_alias)); /* d_instantiate */
-+ assert(d_unhashed(dentry)); /* d_rehash */
-+
-+ /* preferably return a connected dentry */
-+ spin_lock(&dcache_lock);
-+ list_for_each(lp, &inode->i_dentry) {
-+ alternate = list_entry(lp, struct dentry, d_alias);
-+ assert(!(alternate->d_flags & DCACHE_DISCONNECTED));
-+ }
-+
-+ if (!list_empty(&inode->i_dentry)) {
-+ alternate = list_entry(inode->i_dentry.next,
-+ struct dentry, d_alias);
-+ dget_locked(alternate);
-+ spin_lock(&alternate->d_lock);
-+ alternate->d_vfs_flags |= DCACHE_REFERENCED;
-+ spin_unlock(&alternate->d_lock);
-+ iput(inode);
-+ spin_unlock(&dcache_lock);
-+ return alternate;
-+ }
-+ dentry->d_flags |= DCACHE_DISCONNECTED;
-+
-+ /* d_add(), but don't drop dcache_lock before adding dentry to inode */
-+ list_add(&dentry->d_alias, &inode->i_dentry); /* d_instantiate */
-+ dentry->d_inode = inode;
-+
-+ __d_rehash(dentry, 0); /* d_rehash */
-+ spin_unlock(&dcache_lock);
-+
-+ return NULL;
-+}
-+
-+#define do_switch(x,y) do { \
-+ __typeof__ (x) __tmp = x; \
-+ x = y; y = __tmp; } while (0)
-+
-+static inline void switch_names(struct dentry *dentry, struct dentry *target)
-+{
-+ const unsigned char *old_name, *new_name;
-+
-+ memcpy(dentry->d_iname, target->d_iname, DNAME_INLINE_LEN);
-+ old_name = target->d_name.name;
-+ new_name = dentry->d_name.name;
-+ if (old_name == target->d_iname)
-+ old_name = dentry->d_iname;
-+ if (new_name == dentry->d_iname)
-+ new_name = target->d_iname;
-+ target->d_name.name = new_name;
-+ dentry->d_name.name = old_name;
-+}
-+
-+/* This function is spliced into ext3_lookup and does the move of a
-+ * disconnected dentry (if it exists) to a connected dentry.
-+ */
-+struct dentry *iopen_connect_dentry(struct dentry *dentry, struct inode *inode,
-+ int rehash)
-+{
-+ struct dentry *tmp, *goal = NULL;
-+ struct list_head *lp;
-+
-+ /* verify this dentry is really new */
-+ assert(dentry->d_inode == NULL);
-+ assert(list_empty(&dentry->d_alias)); /* d_instantiate */
-+ if (rehash)
-+ assert(d_unhashed(dentry)); /* d_rehash */
-+ assert(list_empty(&dentry->d_subdirs));
-+
-+ spin_lock(&dcache_lock);
-+ if (!inode)
-+ goto do_rehash;
-+
-+ if (!test_opt(inode->i_sb, IOPEN))
-+ goto do_instantiate;
-+
-+ /* preferably return a connected dentry */
-+ list_for_each(lp, &inode->i_dentry) {
-+ tmp = list_entry(lp, struct dentry, d_alias);
-+ if (tmp->d_flags & DCACHE_DISCONNECTED) {
-+ assert(tmp->d_alias.next == &inode->i_dentry);
-+ assert(tmp->d_alias.prev == &inode->i_dentry);
-+ goal = tmp;
-+ dget_locked(goal);
-+ break;
-+ }
-+ }
-+
-+ if (!goal)
-+ goto do_instantiate;
-+
-+ /* Move the goal to the de hash queue */
-+ goal->d_flags &= ~DCACHE_DISCONNECTED;
-+ security_d_instantiate(goal, inode);
-+ __d_drop(dentry);
-+ __d_rehash(dentry, 0);
-+ __d_move(goal, dentry);
-+ spin_unlock(&dcache_lock);
-+ iput(inode);
-+
-+ return goal;
-+
-+ /* d_add(), but don't drop dcache_lock before adding dentry to inode */
-+do_instantiate:
-+ list_add(&dentry->d_alias, &inode->i_dentry); /* d_instantiate */
-+ dentry->d_inode = inode;
-+do_rehash:
-+ if (rehash)
-+ __d_rehash(dentry, 0); /* d_rehash */
-+ spin_unlock(&dcache_lock);
-+
-+ return NULL;
-+}
-+
-+/*
-+ * These are the special structures for the iopen pseudo directory.
-+ */
-+
-+static struct inode_operations iopen_inode_operations = {
-+ lookup: iopen_lookup, /* BKL held */
-+};
-+
-+static struct file_operations iopen_file_operations = {
-+ read: generic_read_dir,
-+};
-+
-+static int match_dentry(struct dentry *dentry, const char *name)
-+{
-+ int len;
-+
-+ len = strlen(name);
-+ if (dentry->d_name.len != len)
-+ return 0;
-+ if (strncmp(dentry->d_name.name, name, len))
-+ return 0;
-+ return 1;
-+}
-+
-+/*
-+ * This function is spliced into ext3_lookup and returns 1 if the file
-+ * name is __iopen__ and the dentry has been filled in appropriately.
-+ */
-+int ext3_check_for_iopen(struct inode *dir, struct dentry *dentry)
-+{
-+ struct inode *inode;
-+
-+ if (dir->i_ino != EXT3_ROOT_INO ||
-+ !test_opt(dir->i_sb, IOPEN) ||
-+ !match_dentry(dentry, "__iopen__"))
-+ return 0;
-+
-+ inode = iget(dir->i_sb, EXT3_BAD_INO);
-+
-+ if (!inode)
-+ return 0;
-+ d_add(dentry, inode);
-+ return 1;
-+}
-+
-+/*
-+ * This function is spliced into read_inode; it returns 1 if inode
-+ * number is the one for /__iopen__, in which case the inode is filled
-+ * in appropriately. Otherwise, this function returns 0.
-+ */
-+int ext3_iopen_get_inode(struct inode *inode)
-+{
-+ if (inode->i_ino != EXT3_BAD_INO)
-+ return 0;
-+
-+ inode->i_mode = S_IFDIR | S_IRUSR | S_IXUSR;
-+ if (test_opt(inode->i_sb, IOPEN_NOPRIV))
-+ inode->i_mode |= 0777;
-+ inode->i_uid = 0;
-+ inode->i_gid = 0;
-+ inode->i_nlink = 1;
-+ inode->i_size = 4096;
-+ inode->i_atime = CURRENT_TIME;
-+ inode->i_ctime = CURRENT_TIME;
-+ inode->i_mtime = CURRENT_TIME;
-+ EXT3_I(inode)->i_dtime = 0;
-+ inode->i_blksize = PAGE_SIZE; /* This is the optimal IO size
-+ * (for stat), not the fs block
-+ * size */
-+ inode->i_blocks = 0;
-+ inode->i_version = 1;
-+ inode->i_generation = 0;
-+
-+ inode->i_op = &iopen_inode_operations;
-+ inode->i_fop = &iopen_file_operations;
-+ inode->i_mapping->a_ops = 0;
-+
-+ return 1;
-+}
-Index: linux-stage/fs/ext3/iopen.h
-===================================================================
---- linux-stage.orig/fs/ext3/iopen.h 2005-02-25 14:41:01.017787968 +0200
-+++ linux-stage/fs/ext3/iopen.h 2005-02-25 14:41:01.045783712 +0200
-@@ -0,0 +1,15 @@
-+/*
-+ * iopen.h
-+ *
-+ * Special support for opening files by inode number.
-+ *
-+ * Copyright (C) 2001 by Theodore Ts'o (tytso@alum.mit.edu).
-+ *
-+ * This file may be redistributed under the terms of the GNU General
-+ * Public License.
-+ */
-+
-+extern int ext3_check_for_iopen(struct inode *dir, struct dentry *dentry);
-+extern int ext3_iopen_get_inode(struct inode *inode);
-+extern struct dentry *iopen_connect_dentry(struct dentry *dentry,
-+ struct inode *inode, int rehash);
-Index: linux-stage/fs/ext3/namei.c
-===================================================================
---- linux-stage.orig/fs/ext3/namei.c 2005-02-25 14:37:28.975023368 +0200
-+++ linux-stage/fs/ext3/namei.c 2005-02-25 14:46:43.090784968 +0200
-@@ -37,6 +37,7 @@
- #include <linux/buffer_head.h>
- #include <linux/smp_lock.h>
- #include "xattr.h"
-+#include "iopen.h"
- #include "acl.h"
-
- /*
-@@ -980,6 +981,9 @@
- if (dentry->d_name.len > EXT3_NAME_LEN)
- return ERR_PTR(-ENAMETOOLONG);
-
-+ if (ext3_check_for_iopen(dir, dentry))
-+ return NULL;
-+
- bh = ext3_find_entry(dentry, &de);
- inode = NULL;
- if (bh) {
-@@ -990,10 +994,8 @@
- if (!inode)
- return ERR_PTR(-EACCES);
- }
-- if (inode)
-- return d_splice_alias(inode, dentry);
-- d_add(dentry, inode);
-- return NULL;
-+
-+ return iopen_connect_dentry(dentry, inode, 1);
- }
-
-
-@@ -2037,10 +2039,6 @@
- inode->i_nlink);
- inode->i_version++;
- inode->i_nlink = 0;
-- /* There's no need to set i_disksize: the fact that i_nlink is
-- * zero will ensure that the right thing happens during any
-- * recovery. */
-- inode->i_size = 0;
- ext3_orphan_add(handle, inode);
- inode->i_ctime = dir->i_ctime = dir->i_mtime = CURRENT_TIME;
- ext3_mark_inode_dirty(handle, inode);
-@@ -2163,6 +2161,23 @@
- return err;
- }
-
-+/* Like ext3_add_nondir() except for call to iopen_connect_dentry */
-+static int ext3_add_link(handle_t *handle, struct dentry *dentry,
-+ struct inode *inode)
-+{
-+ int err = ext3_add_entry(handle, dentry, inode);
-+ if (!err) {
-+ err = ext3_mark_inode_dirty(handle, inode);
-+ if (err == 0) {
-+ dput(iopen_connect_dentry(dentry, inode, 0));
-+ return 0;
-+ }
-+ }
-+ ext3_dec_count(handle, inode);
-+ iput(inode);
-+ return err;
-+}
-+
- static int ext3_link (struct dentry * old_dentry,
- struct inode * dir, struct dentry *dentry)
- {
-@@ -2186,7 +2201,8 @@
- ext3_inc_count(handle, inode);
- atomic_inc(&inode->i_count);
-
-- err = ext3_add_nondir(handle, dentry, inode);
-+ err = ext3_add_link(handle, dentry, inode);
-+ ext3_orphan_del(handle, inode);
- ext3_journal_stop(handle);
- if (err == -ENOSPC && ext3_should_retry_alloc(dir->i_sb, &retries))
- goto retry;
-Index: linux-stage/fs/ext3/super.c
-===================================================================
---- linux-stage.orig/fs/ext3/super.c 2005-02-25 14:37:30.987717392 +0200
-+++ linux-stage/fs/ext3/super.c 2005-02-25 14:44:50.495901992 +0200
-@@ -586,6 +586,7 @@
- Opt_abort, Opt_data_journal, Opt_data_ordered, Opt_data_writeback,
- Opt_ignore, Opt_barrier,
- Opt_err,
-+ Opt_iopen, Opt_noiopen, Opt_iopen_nopriv,
- };
-
- static match_table_t tokens = {
-@@ -633,6 +634,9 @@
- {Opt_ignore, "noquota"},
- {Opt_ignore, "quota"},
- {Opt_ignore, "usrquota"},
-+ {Opt_iopen, "iopen"},
-+ {Opt_noiopen, "noiopen"},
-+ {Opt_iopen_nopriv, "iopen_nopriv"},
- {Opt_barrier, "barrier=%u"},
- {Opt_err, NULL}
- };
-@@ -914,6 +918,18 @@
- else
- clear_opt(sbi->s_mount_opt, BARRIER);
- break;
-+ case Opt_iopen:
-+ set_opt (sbi->s_mount_opt, IOPEN);
-+ clear_opt (sbi->s_mount_opt, IOPEN_NOPRIV);
-+ break;
-+ case Opt_noiopen:
-+ clear_opt (sbi->s_mount_opt, IOPEN);
-+ clear_opt (sbi->s_mount_opt, IOPEN_NOPRIV);
-+ break;
-+ case Opt_iopen_nopriv:
-+ set_opt (sbi->s_mount_opt, IOPEN);
-+ set_opt (sbi->s_mount_opt, IOPEN_NOPRIV);
-+ break;
- case Opt_ignore:
- break;
- default:
-Index: linux-stage/include/linux/ext3_fs.h
-===================================================================
---- linux-stage.orig/include/linux/ext3_fs.h 2005-02-25 14:37:28.977023064 +0200
-+++ linux-stage/include/linux/ext3_fs.h 2005-02-25 14:49:00.569884968 +0200
-@@ -355,6 +355,8 @@
- #define EXT3_MOUNT_POSIX_ACL 0x08000 /* POSIX Access Control Lists */
- #define EXT3_MOUNT_RESERVATION 0x10000 /* Preallocation */
- #define EXT3_MOUNT_BARRIER 0x20000 /* Use block barriers */
-+#define EXT3_MOUNT_IOPEN 0x80000 /* Allow access via iopen */
-+#define EXT3_MOUNT_IOPEN_NOPRIV 0x100000 /* Make iopen world-readable */
-
- /* Compatibility, for having both ext2_fs.h and ext3_fs.h included at once */
- #ifndef _LINUX_EXT2_FS_H
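The three mount options added in super.c map onto two bits in s_mount_opt: "iopen" sets only EXT3_MOUNT_IOPEN, "iopen_nopriv" sets both bits, and "noiopen" clears both. A standalone sketch of that bit logic; set_opt/clear_opt/test_opt below are simplified stand-ins for the kernel macros, and the flag values are the ones from the ext3_fs.h hunk directly above (the 2.6.16 variant earlier in this series uses 0x400000/0x800000 instead):

/* Standalone model of the iopen mount-option bits; the helpers are
 * simplified stand-ins for the kernel's set_opt/clear_opt/test_opt. */
#include <stdio.h>
#include <string.h>

#define EXT3_MOUNT_IOPEN	0x80000		/* Allow access via iopen */
#define EXT3_MOUNT_IOPEN_NOPRIV	0x100000	/* Make iopen world-readable */

#define set_opt(opts, flag)	((opts) |= (flag))
#define clear_opt(opts, flag)	((opts) &= ~(flag))
#define test_opt(opts, flag)	(((opts) & (flag)) != 0)

static void parse_iopen_option(unsigned long *opts, const char *option)
{
	if (strcmp(option, "iopen") == 0) {
		set_opt(*opts, EXT3_MOUNT_IOPEN);
		clear_opt(*opts, EXT3_MOUNT_IOPEN_NOPRIV);
	} else if (strcmp(option, "noiopen") == 0) {
		clear_opt(*opts, EXT3_MOUNT_IOPEN);
		clear_opt(*opts, EXT3_MOUNT_IOPEN_NOPRIV);
	} else if (strcmp(option, "iopen_nopriv") == 0) {
		set_opt(*opts, EXT3_MOUNT_IOPEN);
		set_opt(*opts, EXT3_MOUNT_IOPEN_NOPRIV);
	}
}

int main(void)
{
	unsigned long opts = 0;

	parse_iopen_option(&opts, "iopen_nopriv");
	printf("iopen=%d nopriv=%d\n",
	       test_opt(opts, EXT3_MOUNT_IOPEN),
	       test_opt(opts, EXT3_MOUNT_IOPEN_NOPRIV));
	return 0;
}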
+++ /dev/null
-Index: linux-2.6.12-rc6/fs/ext3/Makefile
-===================================================================
---- linux-2.6.12-rc6.orig/fs/ext3/Makefile 2005-06-14 16:00:45.206720992 +0200
-+++ linux-2.6.12-rc6/fs/ext3/Makefile 2005-06-14 16:14:33.595382720 +0200
-@@ -4,7 +4,7 @@
-
- obj-$(CONFIG_EXT3_FS) += ext3.o
-
--ext3-y := balloc.o bitmap.o dir.o file.o fsync.o ialloc.o inode.o \
-+ext3-y := balloc.o bitmap.o dir.o file.o fsync.o ialloc.o inode.o iopen.o \
- ioctl.o namei.o super.o symlink.o hash.o resize.o
-
- ext3-$(CONFIG_EXT3_FS_XATTR) += xattr.o xattr_user.o xattr_trusted.o
-Index: linux-2.6.12-rc6/fs/ext3/inode.c
-===================================================================
---- linux-2.6.12-rc6.orig/fs/ext3/inode.c 2005-06-14 16:01:16.272150299 +0200
-+++ linux-2.6.12-rc6/fs/ext3/inode.c 2005-06-14 16:24:55.686195412 +0200
-@@ -37,6 +37,7 @@
- #include <linux/mpage.h>
- #include <linux/uio.h>
- #include "xattr.h"
-+#include "iopen.h"
- #include "acl.h"
-
- static int ext3_writepage_trans_blocks(struct inode *inode);
-@@ -2437,6 +2438,8 @@
- ei->i_default_acl = EXT3_ACL_NOT_CACHED;
- #endif
- ei->i_block_alloc_info = NULL;
-+ if (ext3_iopen_get_inode(inode))
-+ return;
-
- if (__ext3_get_inode_loc(inode, &iloc, 0))
- goto bad_inode;
-Index: linux-2.6.12-rc6/fs/ext3/iopen.c
-===================================================================
---- linux-2.6.12-rc6.orig/fs/ext3/iopen.c 2005-06-14 16:14:33.530929595 +0200
-+++ linux-2.6.12-rc6/fs/ext3/iopen.c 2005-06-14 16:14:33.626632719 +0200
-@@ -0,0 +1,278 @@
-+/*
-+ * linux/fs/ext3/iopen.c
-+ *
-+ * Special support for open by inode number
-+ *
-+ * Copyright (C) 2001 by Theodore Ts'o (tytso@alum.mit.edu).
-+ *
-+ * This file may be redistributed under the terms of the GNU General
-+ * Public License.
-+ *
-+ *
-+ * Invariants:
-+ * - there is only ever a single DCACHE_NFSD_DISCONNECTED dentry alias
-+ * for an inode at one time.
-+ * - there are never both connected and DCACHE_NFSD_DISCONNECTED dentry
-+ * aliases on an inode at the same time.
-+ *
-+ * If we have any connected dentry aliases for an inode, use one of those
-+ * in iopen_lookup(). Otherwise, we instantiate a single NFSD_DISCONNECTED
-+ * dentry for this inode, which thereafter will be found by the dcache
-+ * when looking up this inode number in __iopen__, so we don't return here
-+ * until it is gone.
-+ *
-+ * If we get an inode via a regular name lookup, then we "rename" the
-+ * NFSD_DISCONNECTED dentry to the proper name and parent. This ensures
-+ * existing users of the disconnected dentry will continue to use the same
-+ * dentry as the connected users, and there will never be both kinds of
-+ * dentry aliases at one time.
-+ */
-+
-+#include <linux/sched.h>
-+#include <linux/fs.h>
-+#include <linux/ext3_jbd.h>
-+#include <linux/jbd.h>
-+#include <linux/ext3_fs.h>
-+#include <linux/smp_lock.h>
-+#include <linux/dcache.h>
-+#include <linux/security.h>
-+#include "iopen.h"
-+
-+#ifndef assert
-+#define assert(test) J_ASSERT(test)
-+#endif
-+
-+#define IOPEN_NAME_LEN 32
-+
-+/*
-+ * This implements looking up an inode by number.
-+ */
-+static struct dentry *iopen_lookup(struct inode * dir, struct dentry *dentry,
-+ struct nameidata *nd)
-+{
-+ struct inode *inode;
-+ unsigned long ino;
-+ struct list_head *lp;
-+ struct dentry *alternate;
-+ char buf[IOPEN_NAME_LEN];
-+
-+ if (dentry->d_name.len >= IOPEN_NAME_LEN)
-+ return ERR_PTR(-ENAMETOOLONG);
-+
-+ memcpy(buf, dentry->d_name.name, dentry->d_name.len);
-+ buf[dentry->d_name.len] = 0;
-+
-+ if (strcmp(buf, ".") == 0)
-+ ino = dir->i_ino;
-+ else if (strcmp(buf, "..") == 0)
-+ ino = EXT3_ROOT_INO;
-+ else
-+ ino = simple_strtoul(buf, 0, 0);
-+
-+ if ((ino != EXT3_ROOT_INO &&
-+ //ino != EXT3_ACL_IDX_INO &&
-+ //ino != EXT3_ACL_DATA_INO &&
-+ ino < EXT3_FIRST_INO(dir->i_sb)) ||
-+ ino > le32_to_cpu(EXT3_SB(dir->i_sb)->s_es->s_inodes_count))
-+ return ERR_PTR(-ENOENT);
-+
-+ inode = iget(dir->i_sb, ino);
-+ if (!inode)
-+ return ERR_PTR(-EACCES);
-+ if (is_bad_inode(inode)) {
-+ iput(inode);
-+ return ERR_PTR(-ENOENT);
-+ }
-+
-+ assert(list_empty(&dentry->d_alias)); /* d_instantiate */
-+ assert(d_unhashed(dentry)); /* d_rehash */
-+
-+ /* preferably return a connected dentry */
-+ spin_lock(&dcache_lock);
-+ list_for_each(lp, &inode->i_dentry) {
-+ alternate = list_entry(lp, struct dentry, d_alias);
-+ assert(!(alternate->d_flags & DCACHE_DISCONNECTED));
-+ }
-+
-+ if (!list_empty(&inode->i_dentry)) {
-+ alternate = list_entry(inode->i_dentry.next,
-+ struct dentry, d_alias);
-+ dget_locked(alternate);
-+ spin_lock(&alternate->d_lock);
-+ alternate->d_flags |= DCACHE_REFERENCED;
-+ spin_unlock(&alternate->d_lock);
-+ iput(inode);
-+ spin_unlock(&dcache_lock);
-+ return alternate;
-+ }
-+ dentry->d_flags |= DCACHE_DISCONNECTED;
-+
-+ /* d_add(), but don't drop dcache_lock before adding dentry to inode */
-+ list_add(&dentry->d_alias, &inode->i_dentry); /* d_instantiate */
-+ dentry->d_inode = inode;
-+
-+ d_rehash_cond(dentry, 0); /* d_rehash */
-+ spin_unlock(&dcache_lock);
-+
-+ return NULL;
-+}
-+
-+#define do_switch(x,y) do { \
-+ __typeof__ (x) __tmp = x; \
-+ x = y; y = __tmp; } while (0)
-+
-+static inline void switch_names(struct dentry *dentry, struct dentry *target)
-+{
-+ const unsigned char *old_name, *new_name;
-+
-+ memcpy(dentry->d_iname, target->d_iname, DNAME_INLINE_LEN_MIN);
-+ old_name = target->d_name.name;
-+ new_name = dentry->d_name.name;
-+ if (old_name == target->d_iname)
-+ old_name = dentry->d_iname;
-+ if (new_name == dentry->d_iname)
-+ new_name = target->d_iname;
-+ target->d_name.name = new_name;
-+ dentry->d_name.name = old_name;
-+}
-+
-+/* This function is spliced into ext3_lookup and does the move of a
-+ * disconnected dentry (if it exists) to a connected dentry.
-+ */
-+struct dentry *iopen_connect_dentry(struct dentry *dentry, struct inode *inode,
-+ int rehash)
-+{
-+ struct dentry *tmp, *goal = NULL;
-+ struct list_head *lp;
-+
-+ /* verify this dentry is really new */
-+ assert(dentry->d_inode == NULL);
-+ assert(list_empty(&dentry->d_alias)); /* d_instantiate */
-+ if (rehash)
-+ assert(d_unhashed(dentry)); /* d_rehash */
-+ assert(list_empty(&dentry->d_subdirs));
-+
-+ spin_lock(&dcache_lock);
-+ if (!inode)
-+ goto do_rehash;
-+
-+ if (!test_opt(inode->i_sb, IOPEN))
-+ goto do_instantiate;
-+
-+ /* preferably return a connected dentry */
-+ list_for_each(lp, &inode->i_dentry) {
-+ tmp = list_entry(lp, struct dentry, d_alias);
-+ if (tmp->d_flags & DCACHE_DISCONNECTED) {
-+ assert(tmp->d_alias.next == &inode->i_dentry);
-+ assert(tmp->d_alias.prev == &inode->i_dentry);
-+ goal = tmp;
-+ dget_locked(goal);
-+ break;
-+ }
-+ }
-+
-+ if (!goal)
-+ goto do_instantiate;
-+
-+ /* Move the goal to the de hash queue */
-+ goal->d_flags &= ~DCACHE_DISCONNECTED;
-+ security_d_instantiate(goal, inode);
-+ __d_drop(dentry);
-+ d_rehash_cond(dentry, 0);
-+ __d_move(goal, dentry);
-+ spin_unlock(&dcache_lock);
-+ iput(inode);
-+
-+ return goal;
-+
-+ /* d_add(), but don't drop dcache_lock before adding dentry to inode */
-+do_instantiate:
-+ list_add(&dentry->d_alias, &inode->i_dentry); /* d_instantiate */
-+ dentry->d_inode = inode;
-+do_rehash:
-+ if (rehash)
-+ d_rehash_cond(dentry, 0); /* d_rehash */
-+ spin_unlock(&dcache_lock);
-+
-+ return NULL;
-+}
-+
-+/*
-+ * These are the special structures for the iopen pseudo directory.
-+ */
-+
-+static struct inode_operations iopen_inode_operations = {
-+ lookup: iopen_lookup, /* BKL held */
-+};
-+
-+static struct file_operations iopen_file_operations = {
-+ read: generic_read_dir,
-+};
-+
-+static int match_dentry(struct dentry *dentry, const char *name)
-+{
-+ int len;
-+
-+ len = strlen(name);
-+ if (dentry->d_name.len != len)
-+ return 0;
-+ if (strncmp(dentry->d_name.name, name, len))
-+ return 0;
-+ return 1;
-+}
-+
-+/*
-+ * This function is spliced into ext3_lookup and returns 1 if the file
-+ * name is __iopen__ and the dentry has been filled in appropriately.
-+ */
-+int ext3_check_for_iopen(struct inode *dir, struct dentry *dentry)
-+{
-+ struct inode *inode;
-+
-+ if (dir->i_ino != EXT3_ROOT_INO ||
-+ !test_opt(dir->i_sb, IOPEN) ||
-+ !match_dentry(dentry, "__iopen__"))
-+ return 0;
-+
-+ inode = iget(dir->i_sb, EXT3_BAD_INO);
-+
-+ if (!inode)
-+ return 0;
-+ d_add(dentry, inode);
-+ return 1;
-+}
-+
-+/*
-+ * This function is spliced into read_inode; it returns 1 if inode
-+ * number is the one for /__iopen__, in which case the inode is filled
-+ * in appropriately. Otherwise, this function returns 0.
-+ */
-+int ext3_iopen_get_inode(struct inode *inode)
-+{
-+ if (inode->i_ino != EXT3_BAD_INO)
-+ return 0;
-+
-+ inode->i_mode = S_IFDIR | S_IRUSR | S_IXUSR;
-+ if (test_opt(inode->i_sb, IOPEN_NOPRIV))
-+ inode->i_mode |= 0777;
-+ inode->i_uid = 0;
-+ inode->i_gid = 0;
-+ inode->i_nlink = 1;
-+ inode->i_size = 4096;
-+ inode->i_atime = CURRENT_TIME;
-+ inode->i_ctime = CURRENT_TIME;
-+ inode->i_mtime = CURRENT_TIME;
-+ EXT3_I(inode)->i_dtime = 0;
-+ inode->i_blksize = PAGE_SIZE; /* This is the optimal IO size
-+ * (for stat), not the fs block
-+ * size */
-+ inode->i_blocks = 0;
-+ inode->i_version = 1;
-+ inode->i_generation = 0;
-+
-+ inode->i_op = &iopen_inode_operations;
-+ inode->i_fop = &iopen_file_operations;
-+ inode->i_mapping->a_ops = 0;
-+
-+ return 1;
-+}
-Index: linux-2.6.12-rc6/fs/ext3/iopen.h
-===================================================================
---- linux-2.6.12-rc6.orig/fs/ext3/iopen.h 2005-06-14 16:14:33.534835845 +0200
-+++ linux-2.6.12-rc6/fs/ext3/iopen.h 2005-06-14 16:14:33.633468657 +0200
-@@ -0,0 +1,15 @@
-+/*
-+ * iopen.h
-+ *
-+ * Special support for opening files by inode number.
-+ *
-+ * Copyright (C) 2001 by Theodore Ts'o (tytso@alum.mit.edu).
-+ *
-+ * This file may be redistributed under the terms of the GNU General
-+ * Public License.
-+ */
-+
-+extern int ext3_check_for_iopen(struct inode *dir, struct dentry *dentry);
-+extern int ext3_iopen_get_inode(struct inode *inode);
-+extern struct dentry *iopen_connect_dentry(struct dentry *dentry,
-+ struct inode *inode, int rehash);
-Index: linux-2.6.12-rc6/fs/ext3/namei.c
-===================================================================
---- linux-2.6.12-rc6.orig/fs/ext3/namei.c 2005-06-14 16:01:14.701837819 +0200
-+++ linux-2.6.12-rc6/fs/ext3/namei.c 2005-06-14 16:14:33.644210844 +0200
-@@ -37,6 +37,7 @@
- #include <linux/buffer_head.h>
- #include <linux/smp_lock.h>
- #include "xattr.h"
-+#include "iopen.h"
- #include "acl.h"
-
- /*
-@@ -985,6 +986,9 @@
- if (dentry->d_name.len > EXT3_NAME_LEN)
- return ERR_PTR(-ENAMETOOLONG);
-
-+ if (ext3_check_for_iopen(dir, dentry))
-+ return NULL;
-+
- bh = ext3_find_entry(dentry, &de);
- inode = NULL;
- if (bh) {
-@@ -995,10 +999,8 @@
- if (!inode)
- return ERR_PTR(-EACCES);
- }
-- if (inode)
-- return d_splice_alias(inode, dentry);
-- d_add(dentry, inode);
-- return NULL;
-+
-+ return iopen_connect_dentry(dentry, inode, 1);
- }
-
-
-@@ -2042,10 +2044,6 @@
- inode->i_nlink);
- inode->i_version++;
- inode->i_nlink = 0;
-- /* There's no need to set i_disksize: the fact that i_nlink is
-- * zero will ensure that the right thing happens during any
-- * recovery. */
-- inode->i_size = 0;
- ext3_orphan_add(handle, inode);
- inode->i_ctime = dir->i_ctime = dir->i_mtime = CURRENT_TIME_SEC;
- ext3_mark_inode_dirty(handle, inode);
-@@ -2168,6 +2166,23 @@
- return err;
- }
-
-+/* Like ext3_add_nondir() except for call to iopen_connect_dentry */
-+static int ext3_add_link(handle_t *handle, struct dentry *dentry,
-+ struct inode *inode)
-+{
-+ int err = ext3_add_entry(handle, dentry, inode);
-+ if (!err) {
-+ err = ext3_mark_inode_dirty(handle, inode);
-+ if (err == 0) {
-+ dput(iopen_connect_dentry(dentry, inode, 0));
-+ return 0;
-+ }
-+ }
-+ ext3_dec_count(handle, inode);
-+ iput(inode);
-+ return err;
-+}
-+
- static int ext3_link (struct dentry * old_dentry,
- struct inode * dir, struct dentry *dentry)
- {
-@@ -2191,7 +2206,8 @@
- ext3_inc_count(handle, inode);
- atomic_inc(&inode->i_count);
-
-- err = ext3_add_nondir(handle, dentry, inode);
-+ err = ext3_add_link(handle, dentry, inode);
-+ ext3_orphan_del(handle, inode);
- ext3_journal_stop(handle);
- if (err == -ENOSPC && ext3_should_retry_alloc(dir->i_sb, &retries))
- goto retry;
-Index: linux-2.6.12-rc6/fs/ext3/super.c
-===================================================================
---- linux-2.6.12-rc6.orig/fs/ext3/super.c 2005-06-14 16:01:16.287775299 +0200
-+++ linux-2.6.12-rc6/fs/ext3/super.c 2005-06-14 16:14:33.656906156 +0200
-@@ -590,6 +590,7 @@
- Opt_usrjquota, Opt_grpjquota, Opt_offusrjquota, Opt_offgrpjquota,
- Opt_jqfmt_vfsold, Opt_jqfmt_vfsv0,
- Opt_ignore, Opt_barrier, Opt_err, Opt_resize,
-+ Opt_iopen, Opt_noiopen, Opt_iopen_nopriv,
- };
-
- static match_table_t tokens = {
-@@ -638,6 +639,9 @@
- {Opt_ignore, "noquota"},
- {Opt_ignore, "quota"},
- {Opt_ignore, "usrquota"},
-+ {Opt_iopen, "iopen"},
-+ {Opt_noiopen, "noiopen"},
-+ {Opt_iopen_nopriv, "iopen_nopriv"},
- {Opt_barrier, "barrier=%u"},
- {Opt_err, NULL},
- {Opt_resize, "resize"},
-@@ -921,6 +925,18 @@
- else
- clear_opt(sbi->s_mount_opt, BARRIER);
- break;
-+ case Opt_iopen:
-+ set_opt (sbi->s_mount_opt, IOPEN);
-+ clear_opt (sbi->s_mount_opt, IOPEN_NOPRIV);
-+ break;
-+ case Opt_noiopen:
-+ clear_opt (sbi->s_mount_opt, IOPEN);
-+ clear_opt (sbi->s_mount_opt, IOPEN_NOPRIV);
-+ break;
-+ case Opt_iopen_nopriv:
-+ set_opt (sbi->s_mount_opt, IOPEN);
-+ set_opt (sbi->s_mount_opt, IOPEN_NOPRIV);
-+ break;
- case Opt_ignore:
- break;
- case Opt_resize:
-Index: linux-2.6.12-rc6/include/linux/ext3_fs.h
-===================================================================
---- linux-2.6.12-rc6.orig/include/linux/ext3_fs.h 2005-06-14 16:01:14.709650318 +0200
-+++ linux-2.6.12-rc6/include/linux/ext3_fs.h 2005-06-14 16:28:38.452794245 +0200
-@@ -358,6 +358,8 @@
- #define EXT3_MOUNT_RESERVATION 0x10000 /* Preallocation */
- #define EXT3_MOUNT_BARRIER 0x20000 /* Use block barriers */
- #define EXT3_MOUNT_NOBH 0x40000 /* No bufferheads */
-+#define EXT3_MOUNT_IOPEN 0x80000 /* Allow access via iopen */
-+#define EXT3_MOUNT_IOPEN_NOPRIV 0x100000 /* Make iopen world-readable */
-
- /* Compatibility, for having both ext2_fs.h and ext3_fs.h included at once */
- #ifndef _LINUX_EXT2_FS_H
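ext3_iopen_get_inode() synthesizes the /__iopen__ inode at read_inode time as a root-owned directory with mode 0500, widened to world access when iopen_nopriv is set. A small userspace check of those attributes; the default mount point and the program itself are illustrative, not part of the patches:

/* Illustrative check of the synthesized __iopen__ directory: with plain
 * "iopen" it should appear as a root-owned dr-x------ directory; with
 * "iopen_nopriv" the mode is widened to world access. */
#include <stdio.h>
#include <sys/types.h>
#include <sys/stat.h>

int main(int argc, char **argv)
{
	struct stat st;
	char path[4096];

	snprintf(path, sizeof(path), "%s/__iopen__",
		 argc > 1 ? argv[1] : "/mnt");

	if (stat(path, &st) != 0) {
		perror("stat");
		return 1;
	}
	printf("%s: mode %04o uid %u nlink %lu size %lld\n",
	       path, (unsigned)(st.st_mode & 07777), (unsigned)st.st_uid,
	       (unsigned long)st.st_nlink, (long long)st.st_size);
	return 0;
}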
+++ /dev/null
-ext3-wantedi-2.6-rhel4.patch
-ext3-san-jdike-2.6-suse.patch
-iopen-2.6-rhel4.patch
-export_symbols-ext3-2.6-suse.patch
-ext3-map_inode_page-2.6-suse.patch
-ext3-ea-in-inode-2.6-rhel4.patch
-export-ext3-2.6-rhel4.patch
-ext3-include-fixes-2.6-rhel4.patch
-ext3-extents-2.6.9-rhel4.patch
-ext3-mballoc2-2.6.9-rhel4.patch
-ext3-nlinks-2.6.9.patch
-ext3-ialloc-2.6.patch
-ext3-lookup-dotdot-2.6.9.patch
+++ /dev/null
-ext3-wantedi-2.6-rhel4.patch
-ext3-san-jdike-2.6-suse.patch
-iopen-2.6-fc5.patch
-ext3-map_inode_page-2.6-suse.patch
-export-ext3-2.6-rhel4.patch
-ext3-include-fixes-2.6-rhel4.patch
-ext3-extents-2.6.15.patch
-ext3-mballoc2-2.6-fc5.patch
-ext3-nlinks-2.6.9.patch
-ext3-ialloc-2.6.patch
-ext3-remove-cond_resched-calls-2.6.12.patch
-ext3-filterdata-2.6.15.patch
+++ /dev/null
-ext3-wantedi-2.6-rhel4.patch
-ext3-san-jdike-2.6-suse.patch
-iopen-2.6-rhel4.patch
-export_symbols-ext3-2.6-suse.patch
-ext3-map_inode_page-2.6-suse.patch
-ext3-ea-in-inode-2.6-rhel4.patch
-export-ext3-2.6-rhel4.patch
-ext3-include-fixes-2.6-rhel4.patch
-ext3-extents-2.6.9-rhel4.patch
-ext3-mballoc2-2.6.9-rhel4.patch
-ext3-nlinks-2.6.9.patch
-ext3-ialloc-2.6.patch
-ext3-lookup-dotdot-2.6.9.patch
-ext3-sector_t-overflow-2.6.9-rhel4.patch
-ext3-check-jbd-errors-2.6.9.patch
-ext3-nanosecond-2.6-rhel4.patch
-ext3-extents-bug11324.patch
+++ /dev/null
-ext3-wantedi-2.6-rhel4.patch
-ext3-san-jdike-2.6-suse.patch
-iopen-2.6-fc5.patch
-ext3-map_inode_page-2.6-suse.patch
-export-ext3-2.6-rhel4.patch
-ext3-include-fixes-2.6-rhel4.patch
-ext3-extents-2.6.16-sles10.patch
-ext3-mballoc2-2.6-fc5.patch
-ext3-nlinks-2.6.9.patch
-ext3-ialloc-2.6.patch
-ext3-remove-cond_resched-calls-2.6.12.patch
-ext3-filterdata-2.6.15.patch
-ext3-disable-write-bar-by-default-2.6-sles10.patch
-ext3-nanosecond-2.6-sles10.patch
-ext3-inode-version-2.6-sles10.patch
+++ /dev/null
-ext3-wantedi-2.6-suse.patch
-ext3-san-jdike-2.6-suse.patch
-iopen-2.6-suse.patch
-export_symbols-ext3-2.6-suse.patch
-ext3-map_inode_page-2.6-suse.patch
-ext3-ea-in-inode-2.6-suse.patch
-export-ext3-2.6-suse.patch
-ext3-include-fixes-2.6-suse.patch
-ext3-extents-2.6.5.patch
-ext3-mballoc2-2.6-suse.patch
-ext3-nlinks-2.6.7.patch
-ext3-rename-reserve-2.6-suse.patch
-ext3-ialloc-2.6.patch
-ext3-lookup-dotdot-2.6.9.patch
-ext3-sector_t-overflow-2.6.5-suse.patch
-ext3-check-jbd-errors-2.6.5.patch
-ext3-nanosecond-2.6-suse.patch
+++ /dev/null
-ext3-wantedi-2.6-rhel4.patch
-ext3-san-jdike-2.6-suse.patch
-iopen-2.6.12.patch
-ext3-map_inode_page-2.6-suse.patch
-export-ext3-2.6-rhel4.patch
-ext3-include-fixes-2.6-rhel4.patch
-ext3-extents-2.6.12.patch
-ext3-mballoc2-2.6.12.patch
-ext3-nlinks-2.6.9.patch
-ext3-ialloc-2.6.patch
-ext3-remove-cond_resched-calls-2.6.12.patch
-ext3-htree-dot-2.6.patch
-ext3-external-journal-2.6.12.patch
-ext3-lookup-dotdot-2.6.9.patch
-ext3-sector_t-overflow-2.6.12.patch
+++ /dev/null
-ext3-wantedi-2.6-rhel4.patch
-ext3-san-jdike-2.6-suse.patch
-iopen-2.6-fc5.patch
-ext3-map_inode_page-2.6-suse.patch
-export-ext3-2.6-rhel4.patch
-ext3-include-fixes-2.6-rhel4.patch
-ext3-extents-2.6.18-vanilla.patch
-ext3-mballoc2-2.6.18-vanilla.patch
-ext3-nlinks-2.6.9.patch
-ext3-ialloc-2.6.patch
-ext3-remove-cond_resched-calls-2.6.12.patch
-ext3-filterdata-2.6.15.patch
-ext3-nanosecond-2.6.18-vanilla.patch
-ext3-inode-version-2.6.18-vanilla.patch
+++ /dev/null
-ext3-wantedi-2.6-rhel4.patch
-ext3-san-jdike-2.6-suse.patch
-iopen-2.6-rhel4.patch
-export_symbols-ext3-2.6-suse.patch
-ext3-map_inode_page-2.6-suse.patch
-ext3-ea-in-inode-2.6-rhel4.patch
-export-ext3-2.6-rhel4.patch
-ext3-include-fixes-2.6-rhel4.patch
-ext3-extents-2.6.9-rhel4.patch
-ext3-mballoc2-2.6.9-rhel4.patch
-ext3-nlinks-2.6.9.patch
-ext3-ialloc-2.6.patch
-ext3-lookup-dotdot-2.6.9.patch
+++ /dev/null
-ext3-wantedi-2.6-rhel4.patch
-ext3-san-jdike-2.6-suse.patch
-iopen-2.6-fc5.patch
-ext3-map_inode_page-2.6-suse.patch
-export-ext3-2.6-rhel4.patch
-ext3-include-fixes-2.6-rhel4.patch
-ext3-extents-2.6.15.patch
-ext3-mballoc2-2.6-fc5.patch
-ext3-nlinks-2.6.9.patch
-ext3-ialloc-2.6.patch
-ext3-remove-cond_resched-calls-2.6.12.patch
-ext3-filterdata-2.6.15.patch
+++ /dev/null
-ext3-wantedi-2.6-rhel4.patch
-ext3-san-jdike-2.6-suse.patch
-iopen-2.6-rhel4.patch
-export_symbols-ext3-2.6-suse.patch
-ext3-map_inode_page-2.6-suse.patch
-ext3-ea-in-inode-2.6-rhel4.patch
-export-ext3-2.6-rhel4.patch
-ext3-include-fixes-2.6-rhel4.patch
-ext3-extents-2.6.9-rhel4.patch
-ext3-extents-fixes-2.6.9-rhel4.patch
-ext3-extents-multiblock-directio-2.6.9-rhel4.patch
-ext3-extents-search-2.6.9-rhel4.patch
-ext3-mballoc3-core.patch
-ext3-mballoc3-rhel4.patch
-ext3-nlinks-2.6.9.patch
-ext3-ialloc-2.6.patch
-ext3-lookup-dotdot-2.6.9.patch
-ext3-sector_t-overflow-2.6.9-rhel4.patch
-ext3-check-jbd-errors-2.6.9.patch
-ext3-uninit-2.6.9.patch
-ext3-nanosecond-2.6-rhel4.patch
+++ /dev/null
-ext3-wantedi-2.6-rhel4.patch
-ext3-san-jdike-2.6-suse.patch
-iopen-2.6-fc5.patch
-ext3-map_inode_page-2.6-suse.patch
-export-ext3-2.6-rhel4.patch
-ext3-include-fixes-2.6-rhel4.patch
-ext3-extents-2.6.16-sles10.patch
-ext3-extents-fixes-2.6.9-rhel4.patch
-ext3-extents-multiblock-directio-2.6.9-rhel4.patch
-ext3-extents-search-2.6.9-rhel4.patch
-ext3-mballoc3-core.patch
-ext3-mballoc3-sles10.patch
-ext3-nlinks-2.6.9.patch
-ext3-ialloc-2.6.patch
-ext3-remove-cond_resched-calls-2.6.12.patch
-ext3-filterdata-sles10.patch
-ext3-disable-write-bar-by-default-2.6-sles10.patch
-ext3-uninit-2.6-sles10.patch
-ext3-nanosecond-2.6-sles10.patch
-ext3-inode-version-2.6-sles10.patch
+++ /dev/null
-ext3-wantedi-2.6-suse.patch
-ext3-san-jdike-2.6-suse.patch
-iopen-2.6-suse.patch
-export_symbols-ext3-2.6-suse.patch
-ext3-map_inode_page-2.6-suse.patch
-ext3-ea-in-inode-2.6-suse.patch
-export-ext3-2.6-suse.patch
-ext3-include-fixes-2.6-suse.patch
-ext3-extents-2.6.5.patch
-ext3-extents-fixes-2.6.9-rhel4.patch
-ext3-extents-multiblock-directio-2.6.5-suse.patch
-ext3-extents-search-2.6.9-rhel4.patch
-ext3-mballoc3-core.patch
-ext3-mballoc3-suse.patch
-ext3-nlinks-2.6.7.patch
-ext3-rename-reserve-2.6-suse.patch
-ext3-ialloc-2.6.patch
-ext3-lookup-dotdot-2.6.9.patch
-ext3-sector_t-overflow-2.6.5-suse.patch
-ext3-check-jbd-errors-2.6.5.patch
-ext3-uninit-2.6-suse.patch
-ext3-nanosecond-2.6-suse.patch
+++ /dev/null
-ext3-wantedi-2.6-rhel4.patch
-ext3-san-jdike-2.6-suse.patch
-iopen-2.6.12.patch
-ext3-map_inode_page-2.6-suse.patch
-export-ext3-2.6-rhel4.patch
-ext3-include-fixes-2.6-rhel4.patch
-ext3-extents-2.6.12.patch
-ext3-mballoc2-2.6.12.patch
-ext3-nlinks-2.6.9.patch
-ext3-ialloc-2.6.patch
-ext3-remove-cond_resched-calls-2.6.12.patch
-ext3-htree-dot-2.6.patch
-ext3-external-journal-2.6.12.patch
-ext3-lookup-dotdot-2.6.9.patch
-ext3-sector_t-overflow-2.6.12.patch
+++ /dev/null
-ext3-wantedi-2.6-rhel4.patch
-ext3-san-jdike-2.6-suse.patch
-iopen-2.6-fc5.patch
-ext3-map_inode_page-2.6-suse.patch
-export-ext3-2.6-rhel4.patch
-ext3-include-fixes-2.6-rhel4.patch
-ext3-extents-2.6.18-vanilla.patch
-ext3-mballoc2-2.6.18-vanilla.patch
-ext3-nlinks-2.6.9.patch
-ext3-ialloc-2.6.patch
-ext3-remove-cond_resched-calls-2.6.12.patch
-ext3-filterdata-2.6.15.patch
-ext3-nanosecond-2.6.18-vanilla.patch
-ext3-inode-version-2.6.18-vanilla.patch
kernel. The patches in the 2.6-suse-newer series were created after the
SP1 kernel was released and should be applied on top of the
already-patched SP1 kernel.
-
-NB - The patches in the ldiskfs series should not be applied to the kernel.
- They are applied by the Lustre build process to create the ldiskfs
- kernel module rather than modifying the core ext3 code.
-
-