Whamcloud - gitweb
author yangsheng <yangsheng>
Mon, 19 Nov 2007 07:16:56 +0000 (07:16 +0000)
committer yangsheng <yangsheng>
Mon, 19 Nov 2007 07:16:56 +0000 (07:16 +0000)
b=13397
i=adilger
i=johann

Add support for vanilla-2.6.22 kernel.

14 files changed:
ldiskfs/ChangeLog
ldiskfs/configure.ac
ldiskfs/kernel_patches/patches/ext3-extents-2.6.22-vanilla.patch [new file with mode: 0644]
ldiskfs/kernel_patches/patches/ext3-fiemap-2.6.22-vanilla.patch [new file with mode: 0644]
ldiskfs/kernel_patches/patches/ext3-ialloc-2.6.22-vanilla.patch [new file with mode: 0644]
ldiskfs/kernel_patches/patches/ext3-mballoc3-2.6.22.patch [new file with mode: 0644]
ldiskfs/kernel_patches/patches/ext3-mballoc3-core-2.6.22-vanilla.patch [new file with mode: 0644]
ldiskfs/kernel_patches/patches/ext3-mmp-2.6.22-vanilla.patch [new file with mode: 0644]
ldiskfs/kernel_patches/patches/ext3-nanosecond-2.6.22-vanilla.patch [new file with mode: 0644]
ldiskfs/kernel_patches/patches/ext3-nlinks-2.6.22-vanilla.patch [new file with mode: 0644]
ldiskfs/kernel_patches/patches/ext3-statfs-2.6.22.patch [new file with mode: 0644]
ldiskfs/kernel_patches/patches/ext3-uninit-2.6.22-vanilla.patch [new file with mode: 0644]
ldiskfs/kernel_patches/patches/iopen-2.6.22-vanilla.patch [new file with mode: 0644]
ldiskfs/kernel_patches/series/ldiskfs-2.6.22-vanilla.series [new file with mode: 0644]

diff --git a/ldiskfs/ChangeLog b/ldiskfs/ChangeLog
index b088ea2..9462d7e 100644
@@ -1,6 +1,10 @@
 tbd  Cluster File Systems, Inc. <info@clusterfs.com>
        * version 3.0.4
 
+Severity   : normal
+Bugzilla   : 13397
+Description: Add support for vanilla-2.6.22 kernel.
+
 --------------------------------------------------------------------------------
 
 2007-10-26  Cluster File Systems, Inc. <info@clusterfs.com>
diff --git a/ldiskfs/configure.ac b/ldiskfs/configure.ac
index fa3d02d..1455213 100644
@@ -97,6 +97,7 @@ case $LINUXRELEASE in
 2.6.18-*el5*) LDISKFS_SERIES="2.6-rhel5.series";;
 2.6.18-*prep*) LDISKFS_SERIES="2.6-rhel5.series";;
 2.6.18*) LDISKFS_SERIES="2.6.18-vanilla.series";;
+2.6.22*) LDISKFS_SERIES="2.6.22-vanilla.series";;
 *) AC_MSG_WARN([Unknown kernel version $LINUXRELEASE, fix ldiskfs/configure.ac])
 esac
 AC_MSG_RESULT([$LDISKFS_SERIES])
diff --git a/ldiskfs/kernel_patches/patches/ext3-extents-2.6.22-vanilla.patch b/ldiskfs/kernel_patches/patches/ext3-extents-2.6.22-vanilla.patch
new file mode 100644
index 0000000..11e889f
--- /dev/null
@@ -0,0 +1,2896 @@
+Index: linux-2.6.18.8/fs/ext3/dir.c
+===================================================================
+--- linux-2.6.18.8.orig/fs/ext3/dir.c  2007-02-24 00:52:30.000000000 +0100
++++ linux-2.6.18.8/fs/ext3/dir.c       2007-07-17 09:18:14.000000000 +0200
+@@ -131,8 +131,7 @@ static int ext3_readdir(struct file * fi
+               struct buffer_head *bh = NULL;
+               map_bh.b_state = 0;
+-              err = ext3_get_blocks_handle(NULL, inode, blk, 1,
+-                                              &map_bh, 0, 0);
++              err = ext3_get_blocks_wrap(NULL, inode, blk, 1, &map_bh, 0, 0);
+               if (err > 0) {
+                       page_cache_readahead(sb->s_bdev->bd_inode->i_mapping,
+                               &filp->f_ra,
+Index: linux-2.6.18.8/fs/ext3/extents.c
+===================================================================
+--- /dev/null  1970-01-01 00:00:00.000000000 +0000
++++ linux-2.6.18.8/fs/ext3/extents.c   2007-07-17 11:08:59.000000000 +0200
+@@ -0,0 +1,2272 @@
++/*
++ * Copyright (c) 2003-2006, Cluster File Systems, Inc, info@clusterfs.com
++ * Written by Alex Tomas <alex@clusterfs.com>
++ *
++ * Architecture independence:
++ *   Copyright (c) 2005, Bull S.A.
++ *   Written by Pierre Peiffer <pierre.peiffer@bull.net>
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License version 2 as
++ * published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software
++ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307, USA.
++ */
++
++/*
++ * Extents support for EXT3
++ *
++ * TODO:
++ *   - ext3*_error() should be used in some situations
++ *   - analyze all BUG()/BUG_ON(), use -EIO where appropriate
++ *   - smart tree reduction
++ */
++
++#include <linux/module.h>
++#include <linux/fs.h>
++#include <linux/time.h>
++#include <linux/ext3_jbd.h>
++#include <linux/jbd.h>
++#include <linux/smp_lock.h>
++#include <linux/highuid.h>
++#include <linux/pagemap.h>
++#include <linux/quotaops.h>
++#include <linux/string.h>
++#include <linux/slab.h>
++#include <linux/ext3_extents.h>
++#include <asm/uaccess.h>
++
++
++static handle_t *ext3_ext_journal_restart(handle_t *handle, int needed)
++{
++      int err;
++
++      if (handle->h_buffer_credits > needed)
++              return handle;
++      if (!ext3_journal_extend(handle, needed))
++              return handle;
++      err = ext3_journal_restart(handle, needed);
++
++      return handle;
++}
++
++/*
++ * could return:
++ *  - EROFS
++ *  - ENOMEM
++ */
++static int ext3_ext_get_access(handle_t *handle, struct inode *inode,
++                              struct ext3_ext_path *path)
++{
++      if (path->p_bh) {
++              /* path points to block */
++              return ext3_journal_get_write_access(handle, path->p_bh);
++      }
++      /* path points to leaf/index in inode body */
++      /* we use in-core data, no need to protect them */
++      return 0;
++}
++
++/*
++ * could return:
++ *  - EROFS
++ *  - ENOMEM
++ *  - EIO
++ */
++static int ext3_ext_dirty(handle_t *handle, struct inode *inode,
++                              struct ext3_ext_path *path)
++{
++      int err;
++      if (path->p_bh) {
++              /* path points to block */
++              err = ext3_journal_dirty_metadata(handle, path->p_bh);
++      } else {
++              /* path points to leaf/index in inode body */
++              err = ext3_mark_inode_dirty(handle, inode);
++      }
++      return err;
++}
++
++static int ext3_ext_find_goal(struct inode *inode,
++                            struct ext3_ext_path *path,
++                            unsigned long block)
++{
++      struct ext3_inode_info *ei = EXT3_I(inode);
++      unsigned long bg_start;
++      unsigned long colour;
++      int depth;
++
++      if (path) {
++              struct ext3_extent *ex;
++              depth = path->p_depth;
++
++              /* try to predict block placement */
++              if ((ex = path[depth].p_ext))
++                      return le32_to_cpu(ex->ee_start)
++                                      + (block - le32_to_cpu(ex->ee_block));
++
++              /* it looks like the index is empty;
++               * try to find a goal starting from the index itself */
++              if (path[depth].p_bh)
++                      return path[depth].p_bh->b_blocknr;
++      }
++
++      /* OK. use inode's group */
++      bg_start = (ei->i_block_group * EXT3_BLOCKS_PER_GROUP(inode->i_sb)) +
++              le32_to_cpu(EXT3_SB(inode->i_sb)->s_es->s_first_data_block);
++      colour = (current->pid % 16) *
++                      (EXT3_BLOCKS_PER_GROUP(inode->i_sb) / 16);
++      return bg_start + colour + block;
++}
++
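A worked instance of the goal heuristic above may help; all numbers are hypothetical (1 KB blocks, 8192 blocks per group, s_first_data_block = 1), and the sketch is standalone, not part of the patch:

/* Hypothetical standalone sketch of ext3_ext_find_goal()'s fallback
 * arithmetic; none of these constants come from the patch itself. */
#include <stdio.h>

int main(void)
{
        unsigned long blocks_per_group = 8192;  /* 1 KB block size */
        unsigned long first_data_block = 1;     /* for 1 KB blocks */
        unsigned long group = 3, pid = 4242, block = 7;

        unsigned long bg_start = group * blocks_per_group + first_data_block;
        unsigned long colour = (pid % 16) * (blocks_per_group / 16);

        /* prints "goal = 25608" */
        printf("goal = %lu\n", bg_start + colour + block);
        return 0;
}
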
++static int
++ext3_ext_new_block(handle_t *handle, struct inode *inode,
++                      struct ext3_ext_path *path,
++                      struct ext3_extent *ex, int *err)
++{
++      int goal, newblock;
++
++      goal = ext3_ext_find_goal(inode, path, le32_to_cpu(ex->ee_block));
++      newblock = ext3_new_block(handle, inode, goal, err);
++      return newblock;
++}
++
++static inline int ext3_ext_space_block(struct inode *inode)
++{
++      int size;
++
++      size = (inode->i_sb->s_blocksize - sizeof(struct ext3_extent_header))
++                      / sizeof(struct ext3_extent);
++#ifdef AGRESSIVE_TEST
++      if (size > 6)
++              size = 6;
++#endif
++      return size;
++}
++
++static inline int ext3_ext_space_block_idx(struct inode *inode)
++{
++      int size;
++
++      size = (inode->i_sb->s_blocksize - sizeof(struct ext3_extent_header))
++                      / sizeof(struct ext3_extent_idx);
++#ifdef AGRESSIVE_TEST
++      if (size > 5)
++              size = 5;
++#endif
++      return size;
++}
++
++static inline int ext3_ext_space_root(struct inode *inode)
++{
++      int size;
++
++      size = sizeof(EXT3_I(inode)->i_data);
++      size -= sizeof(struct ext3_extent_header);
++      size /= sizeof(struct ext3_extent);
++#ifdef AGRESSIVE_TEST
++      if (size > 3)
++              size = 3;
++#endif
++      return size;
++}
++
++static inline int ext3_ext_space_root_idx(struct inode *inode)
++{
++      int size;
++
++      size = sizeof(EXT3_I(inode)->i_data);
++      size -= sizeof(struct ext3_extent_header);
++      size /= sizeof(struct ext3_extent_idx);
++#ifdef AGRESSIVE_TEST
++      if (size > 4)
++              size = 4;
++#endif
++      return size;
++}
++
++static inline int
++ext3_ext_max_entries(struct inode *inode, int depth)
++{
++      int max;
++
++      if (depth == ext_depth(inode)) {
++              if (depth == 0)
++                      max = ext3_ext_space_root(inode);
++              else
++                      max = ext3_ext_space_root_idx(inode);
++      } else {
++              if (depth == 0)
++                      max = ext3_ext_space_block(inode);
++              else
++                      max = ext3_ext_space_block_idx(inode);
++      }
++
++      return max;
++}
++
++static int __ext3_ext_check_header(const char *function, struct inode *inode,
++                                      struct ext3_extent_header *eh,
++                                      int depth)
++{
++      const char *error_msg = NULL;
++      int max = 0;
++
++      if (unlikely(eh->eh_magic != cpu_to_le16(EXT3_EXT_MAGIC))) {
++              error_msg = "invalid magic";
++              goto corrupted;
++      }
++      if (unlikely(le16_to_cpu(eh->eh_depth) != depth)) {
++              error_msg = "unexpected eh_depth";
++              goto corrupted;
++      }
++      if (unlikely(eh->eh_max == 0)) {
++              error_msg = "invalid eh_max";
++              goto corrupted;
++      }
++      max = ext3_ext_max_entries(inode, depth);
++#ifdef AGRESSIVE_TEST
++      if (eh->eh_max > 3) {
++              /* inode probably got extent without defining AGRESSIVE_TEST */
++              max = eh->eh_max;
++      }
++#endif
++      if (unlikely(le16_to_cpu(eh->eh_max) > max)) {
++              error_msg = "too large eh_max";
++              goto corrupted;
++      }
++      if (unlikely(le16_to_cpu(eh->eh_entries) > le16_to_cpu(eh->eh_max))) {
++              error_msg = "invalid eh_entries";
++              goto corrupted;
++      }
++      return 0;
++
++corrupted:
++      ext3_error(inode->i_sb, function,
++                      "bad header in inode #%lu: %s - magic %x, "
++                      "entries %u, max %u(%u), depth %u(%u)",
++                      inode->i_ino, error_msg, le16_to_cpu(eh->eh_magic),
++                      le16_to_cpu(eh->eh_entries), le16_to_cpu(eh->eh_max),
++                      max, le16_to_cpu(eh->eh_depth), depth);
++
++      return -EIO;
++}
++
++#define ext3_ext_check_header(inode,eh,depth) \
++      __ext3_ext_check_header(__FUNCTION__,inode,eh,depth)
++
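For reference, a sketch of the on-disk records this checker validates, reconstructed from the field accesses in this patch; only the fields the code above touches are listed, and the authoritative definitions live in the series' include/linux/ext3_extents.h:

/* Reconstructed from usage in this patch; see ext3_extents.h for the
 * real definitions.  The *_hi fields carry the high bits of 48-bit
 * physical block numbers and are zeroed throughout this code. */
struct ext3_extent_header {
        __le16  eh_magic;       /* EXT3_EXT_MAGIC */
        __le16  eh_entries;     /* number of valid entries */
        __le16  eh_max;         /* capacity in entries */
        __le16  eh_depth;       /* depth of the tree below this node */
};

struct ext3_extent {            /* leaf entries */
        __le32  ee_block;       /* first logical block covered */
        __le16  ee_len;         /* number of blocks covered */
        __le16  ee_start_hi;    /* high bits of physical block */
        __le32  ee_start;       /* low 32 bits of physical block */
};

struct ext3_extent_idx {        /* index entries */
        __le32  ei_block;       /* covers logical blocks from here on */
        __le32  ei_leaf;        /* block holding the next-level node */
        __le16  ei_leaf_hi;     /* high bits of that block */
        __u16   ei_unused;
};
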
++#ifdef EXT_DEBUG
++static void ext3_ext_show_path(struct inode *inode, struct ext3_ext_path *path)
++{
++      int k, l = path->p_depth;
++
++      ext_debug(inode, "path:");
++      for (k = 0; k <= l; k++, path++) {
++              if (path->p_idx) {
++                ext_debug(inode, "  %d->%d", le32_to_cpu(path->p_idx->ei_block),
++                          le32_to_cpu(path->p_idx->ei_leaf));
++              } else if (path->p_ext) {
++                      ext_debug(inode, "  %d:%d:%d",
++                                le32_to_cpu(path->p_ext->ee_block),
++                                le16_to_cpu(path->p_ext->ee_len),
++                                le32_to_cpu(path->p_ext->ee_start));
++              } else
++                      ext_debug(inode, "  []");
++      }
++      ext_debug(inode, "\n");
++}
++
++static void ext3_ext_show_leaf(struct inode *inode, struct ext3_ext_path *path)
++{
++      int depth = ext_depth(inode);
++      struct ext3_extent_header *eh;
++      struct ext3_extent *ex;
++      int i;
++
++      if (!path)
++              return;
++
++      eh = path[depth].p_hdr;
++      ex = EXT_FIRST_EXTENT(eh);
++
++      for (i = 0; i < le16_to_cpu(eh->eh_entries); i++, ex++) {
++              ext_debug(inode, "%d:%d:%d ", le32_to_cpu(ex->ee_block),
++                        le16_to_cpu(ex->ee_len),
++                        le32_to_cpu(ex->ee_start));
++      }
++      ext_debug(inode, "\n");
++}
++#else
++#define ext3_ext_show_path(inode,path)
++#define ext3_ext_show_leaf(inode,path)
++#endif
++
++static void ext3_ext_drop_refs(struct ext3_ext_path *path)
++{
++      int depth = path->p_depth;
++      int i;
++
++      for (i = 0; i <= depth; i++, path++)
++              if (path->p_bh) {
++                      brelse(path->p_bh);
++                      path->p_bh = NULL;
++              }
++}
++
++/*
++ * binary search for the closest index for the given block;
++ * the header must be checked before calling this
++ */
++static void
++ext3_ext_binsearch_idx(struct inode *inode, struct ext3_ext_path *path, int block)
++{
++      struct ext3_extent_header *eh = path->p_hdr;
++      struct ext3_extent_idx *r, *l, *m;
++
++      ext_debug(inode, "binsearch for %d(idx):  ", block);
++
++      l = EXT_FIRST_INDEX(eh) + 1;
++      r = EXT_FIRST_INDEX(eh) + le16_to_cpu(eh->eh_entries) - 1;
++      while (l <= r) {
++              m = l + (r - l) / 2;
++              if (block < le32_to_cpu(m->ei_block))
++                      r = m - 1;
++              else
++                      l = m + 1;
++              ext_debug(inode, "%p(%u):%p(%u):%p(%u) ", l, l->ei_block,
++                              m, m->ei_block, r, r->ei_block);
++      }
++
++      path->p_idx = l - 1;
++      ext_debug(inode, "  -> %d->%d ", le32_to_cpu(path->p_idx->ei_block),
++                le32_to_cpu(path->p_idx->ei_leaf));
++
++#ifdef CHECK_BINSEARCH
++      {
++              struct ext3_extent_idx *chix, *ix;
++              int k;
++
++              chix = ix = EXT_FIRST_INDEX(eh);
++              for (k = 0; k < le16_to_cpu(eh->eh_entries); k++, ix++) {
++                if (k != 0 &&
++                    le32_to_cpu(ix->ei_block) <= le32_to_cpu(ix[-1].ei_block)) {
++                              printk("k=%d, ix=0x%p, first=0x%p\n", k,
++                                      ix, EXT_FIRST_INDEX(eh));
++                              printk("%u <= %u\n",
++                                     le32_to_cpu(ix->ei_block),
++                                     le32_to_cpu(ix[-1].ei_block));
++                      }
++                      BUG_ON(k && le32_to_cpu(ix->ei_block)
++                                         <= le32_to_cpu(ix[-1].ei_block));
++                      if (block < le32_to_cpu(ix->ei_block))
++                              break;
++                      chix = ix;
++              }
++              BUG_ON(chix != path->p_idx);
++      }
++#endif
++
++}
++
++/*
++ * binary search for the closest extent for the given block;
++ * the header must be checked before calling this
++ */
++static void
++ext3_ext_binsearch(struct inode *inode, struct ext3_ext_path *path, int block)
++{
++      struct ext3_extent_header *eh = path->p_hdr;
++      struct ext3_extent *r, *l, *m;
++
++      if (eh->eh_entries == 0) {
++              /*
++               * this leaf is still empty:
++               *  we get such a leaf in the split/add case
++               */
++              return;
++      }
++
++      ext_debug(inode, "binsearch for %d:  ", block);
++
++      l = EXT_FIRST_EXTENT(eh) + 1;
++      r = EXT_FIRST_EXTENT(eh) + le16_to_cpu(eh->eh_entries) - 1;
++
++      while (l <= r) {
++              m = l + (r - l) / 2;
++              if (block < le32_to_cpu(m->ee_block))
++                      r = m - 1;
++              else
++                      l = m + 1;
++              ext_debug(inode, "%p(%u):%p(%u):%p(%u) ", l, l->ee_block,
++                              m, m->ee_block, r, r->ee_block);
++      }
++
++      path->p_ext = l - 1;
++      ext_debug(inode, "  -> %d:%d:%d ",
++                      le32_to_cpu(path->p_ext->ee_block),
++                      le32_to_cpu(path->p_ext->ee_start),
++                      le16_to_cpu(path->p_ext->ee_len));
++
++#ifdef CHECK_BINSEARCH
++      {
++              struct ext3_extent *chex, *ex;
++              int k;
++
++              chex = ex = EXT_FIRST_EXTENT(eh);
++              for (k = 0; k < le16_to_cpu(eh->eh_entries); k++, ex++) {
++                      BUG_ON(k && le32_to_cpu(ex->ee_block)
++                                        <= le32_to_cpu(ex[-1].ee_block));
++                      if (block < le32_to_cpu(ex->ee_block))
++                              break;
++                      chex = ex;
++              }
++              BUG_ON(chex != path->p_ext);
++      }
++#endif
++
++}
++
++int ext3_ext_tree_init(handle_t *handle, struct inode *inode)
++{
++      struct ext3_extent_header *eh;
++
++      eh = ext_inode_hdr(inode);
++      eh->eh_depth = 0;
++      eh->eh_entries = 0;
++      eh->eh_magic = cpu_to_le16(EXT3_EXT_MAGIC);
++      eh->eh_max = cpu_to_le16(ext3_ext_space_root(inode));
++      ext3_mark_inode_dirty(handle, inode);
++      ext3_ext_invalidate_cache(inode);
++      return 0;
++}
++
++struct ext3_ext_path *
++ext3_ext_find_extent(struct inode *inode, int block, struct ext3_ext_path *path)
++{
++      struct ext3_extent_header *eh;
++      struct buffer_head *bh;
++      short int depth, i, ppos = 0, alloc = 0;
++
++      eh = ext_inode_hdr(inode);
++      i = depth = ext_depth(inode);
++      if (ext3_ext_check_header(inode, eh, depth))
++              return ERR_PTR(-EIO);
++
++      /* account possible depth increase */
++      if (!path) {
++              path = kmalloc(sizeof(struct ext3_ext_path) * (depth + 2),
++                              GFP_NOFS);
++              if (!path)
++                      return ERR_PTR(-ENOMEM);
++              alloc = 1;
++      }
++      memset(path, 0, sizeof(struct ext3_ext_path) * (depth + 1));
++      path[0].p_hdr = eh;
++
++      /* walk through the tree */
++      while (i) {
++              ext_debug(inode, "depth %d: num %d, max %d\n",
++                        ppos, le16_to_cpu(eh->eh_entries), le16_to_cpu(eh->eh_max));
++
++              ext3_ext_binsearch_idx(inode, path + ppos, block);
++              path[ppos].p_block = le32_to_cpu(path[ppos].p_idx->ei_leaf);
++              path[ppos].p_depth = i;
++              path[ppos].p_ext = NULL;
++
++              bh = sb_bread(inode->i_sb, path[ppos].p_block);
++              if (!bh)
++                      goto err;
++
++              eh = ext_block_hdr(bh);
++              ppos++;
++              BUG_ON(ppos > depth);
++              path[ppos].p_bh = bh;
++              path[ppos].p_hdr = eh;
++              i--;
++
++              if (ext3_ext_check_header(inode, eh, i))
++                      goto err;
++      }
++
++      path[ppos].p_depth = i;
++      path[ppos].p_hdr = eh;
++      path[ppos].p_ext = NULL;
++      path[ppos].p_idx = NULL;
++
++      /* find extent */
++      ext3_ext_binsearch(inode, path + ppos, block);
++
++      ext3_ext_show_path(inode, path);
++
++      return path;
++
++err:
++      ext3_ext_drop_refs(path);
++      if (alloc)
++              kfree(path);
++      return ERR_PTR(-EIO);
++}
++
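The lookup above hands back a path array whose entries pin buffer heads; the caller-side protocol (used later by ext3_ext_walk_space() in this file) is to drop those references with ext3_ext_drop_refs() and then kfree() the array. A minimal, hypothetical caller sketch:

/* Hypothetical caller of ext3_ext_find_extent(); the function name is
 * made up, and the cleanup pattern mirrors ext3_ext_walk_space() below. */
static int ext3_ext_lookup_example(struct inode *inode, int block)
{
        struct ext3_ext_path *path;
        int depth, found;

        path = ext3_ext_find_extent(inode, block, NULL); /* NULL: allocate */
        if (IS_ERR(path))
                return PTR_ERR(path);

        depth = ext_depth(inode);
        found = path[depth].p_ext != NULL;  /* closest extent, if any */

        ext3_ext_drop_refs(path);  /* release pinned buffer heads... */
        kfree(path);               /* ...then free the array itself */
        return found ? 0 : -ENOENT;
}
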
++/*
++ * insert new index [logical;ptr] into the block at curp;
++ * it checks where to insert: before curp or after curp
++ */
++static int ext3_ext_insert_index(handle_t *handle, struct inode *inode,
++                              struct ext3_ext_path *curp,
++                              int logical, int ptr)
++{
++      struct ext3_extent_idx *ix;
++      int len, err;
++
++      if ((err = ext3_ext_get_access(handle, inode, curp)))
++              return err;
++
++      BUG_ON(logical == le32_to_cpu(curp->p_idx->ei_block));
++      len = EXT_MAX_INDEX(curp->p_hdr) - curp->p_idx;
++      if (logical > le32_to_cpu(curp->p_idx->ei_block)) {
++              /* insert after */
++              if (curp->p_idx != EXT_LAST_INDEX(curp->p_hdr)) {
++                      len = (len - 1) * sizeof(struct ext3_extent_idx);
++                      len = len < 0 ? 0 : len;
++                      ext_debug(inode, "insert new index %d after: %d. "
++                                      "move %d from 0x%p to 0x%p\n",
++                                      logical, ptr, len,
++                                      (curp->p_idx + 1), (curp->p_idx + 2));
++                      memmove(curp->p_idx + 2, curp->p_idx + 1, len);
++              }
++              ix = curp->p_idx + 1;
++      } else {
++              /* insert before */
++              len = len * sizeof(struct ext3_extent_idx);
++              len = len < 0 ? 0 : len;
++              ext_debug(inode, "insert new index %d before: %d. "
++                              "move %d from 0x%p to 0x%p\n",
++                              logical, ptr, len,
++                              curp->p_idx, (curp->p_idx + 1));
++              memmove(curp->p_idx + 1, curp->p_idx, len);
++              ix = curp->p_idx;
++      }
++
++      ix->ei_block = cpu_to_le32(logical);
++      ix->ei_leaf = cpu_to_le32(ptr);
++      ix->ei_leaf_hi = ix->ei_unused = 0;
++      curp->p_hdr->eh_entries = cpu_to_le16(le16_to_cpu(curp->p_hdr->eh_entries)+1);
++
++      BUG_ON(le16_to_cpu(curp->p_hdr->eh_entries)
++                           > le16_to_cpu(curp->p_hdr->eh_max));
++      BUG_ON(ix > EXT_LAST_INDEX(curp->p_hdr));
++
++      err = ext3_ext_dirty(handle, inode, curp);
++      ext3_std_error(inode->i_sb, err);
++
++      return err;
++}
++
++/*
++ * routine inserts new subtree into the path, using free index entry
++ * at depth 'at':
++ *  - allocates all needed blocks (new leaf and all intermediate index blocks)
++ *  - makes decision where to split
++ *  - moves remaining extents and index entries (right to the split point)
++ *    into the newly allocated blocks
++ *  - initializes the subtree
++ */
++static int ext3_ext_split(handle_t *handle, struct inode *inode,
++                              struct ext3_ext_path *path,
++                              struct ext3_extent *newext, int at)
++{
++      struct buffer_head *bh = NULL;
++      int depth = ext_depth(inode);
++      struct ext3_extent_header *neh;
++      struct ext3_extent_idx *fidx;
++      struct ext3_extent *ex;
++      int i = at, k, m, a;
++      unsigned long newblock, oldblock;
++      __le32 border;
++      int *ablocks = NULL; /* array of allocated blocks */
++      int err = 0;
++
++      /* make decision: where to split? */
++      /* FIXME: now the decision is the simplest: at the current extent */
++
++      /* if the current leaf is going to be split, then we should use
++       * the border from the split point */
++      BUG_ON(path[depth].p_ext > EXT_MAX_EXTENT(path[depth].p_hdr));
++      if (path[depth].p_ext != EXT_MAX_EXTENT(path[depth].p_hdr)) {
++              border = path[depth].p_ext[1].ee_block;
++              ext_debug(inode, "leaf will be splitted."
++                              " next leaf starts at %d\n",
++                                le32_to_cpu(border));
++      } else {
++              border = newext->ee_block;
++              ext_debug(inode, "leaf will be added."
++                              " next leaf starts at %d\n",
++                              le32_to_cpu(border));
++      }
++
++      /*
++       * if error occurs, then we break processing
++       * and turn filesystem read-only. so, index won't
++       * be inserted and tree will be in consistent
++       * state. next mount will repair buffers too
++       */
++
++      /*
++       * get array to track all allocated blocks
++       * we need this to handle errors and free blocks
++       * upon them
++       */
++      ablocks = kmalloc(sizeof(unsigned long) * depth, GFP_NOFS);
++      if (!ablocks)
++              return -ENOMEM;
++      memset(ablocks, 0, sizeof(unsigned long) * depth);
++
++      /* allocate all needed blocks */
++      ext_debug(inode, "allocate %d blocks for indexes/leaf\n", depth - at);
++      for (a = 0; a < depth - at; a++) {
++              newblock = ext3_ext_new_block(handle, inode, path, newext, &err);
++              if (newblock == 0)
++                      goto cleanup;
++              ablocks[a] = newblock;
++      }
++
++      /* initialize new leaf */
++      newblock = ablocks[--a];
++      BUG_ON(newblock == 0);
++      bh = sb_getblk(inode->i_sb, newblock);
++      if (!bh) {
++              err = -EIO;
++              goto cleanup;
++      }
++      lock_buffer(bh);
++
++      if ((err = ext3_journal_get_create_access(handle, bh)))
++              goto cleanup;
++
++      neh = ext_block_hdr(bh);
++      neh->eh_entries = 0;
++      neh->eh_max = cpu_to_le16(ext3_ext_space_block(inode));
++      neh->eh_magic = cpu_to_le16(EXT3_EXT_MAGIC);
++      neh->eh_depth = 0;
++      ex = EXT_FIRST_EXTENT(neh);
++
++      /* move the remainder of path[depth] to the new leaf */
++      BUG_ON(path[depth].p_hdr->eh_entries != path[depth].p_hdr->eh_max);
++      /* start copy from next extent */
++      /* TODO: we could do it by single memmove */
++      m = 0;
++      path[depth].p_ext++;
++      while (path[depth].p_ext <=
++                      EXT_MAX_EXTENT(path[depth].p_hdr)) {
++              ext_debug(inode, "move %d:%d:%d in new leaf %lu\n",
++                              le32_to_cpu(path[depth].p_ext->ee_block),
++                              le32_to_cpu(path[depth].p_ext->ee_start),
++                              le16_to_cpu(path[depth].p_ext->ee_len),
++                              newblock);
++              /*memmove(ex++, path[depth].p_ext++,
++                              sizeof(struct ext3_extent));
++              neh->eh_entries++;*/
++              path[depth].p_ext++;
++              m++;
++      }
++      if (m) {
++              memmove(ex, path[depth].p_ext-m, sizeof(struct ext3_extent)*m);
++              neh->eh_entries = cpu_to_le16(le16_to_cpu(neh->eh_entries)+m);
++      }
++
++      set_buffer_uptodate(bh);
++      unlock_buffer(bh);
++
++      if ((err = ext3_journal_dirty_metadata(handle, bh)))
++              goto cleanup;
++      brelse(bh);
++      bh = NULL;
++
++      /* correct old leaf */
++      if (m) {
++              if ((err = ext3_ext_get_access(handle, inode, path + depth)))
++                      goto cleanup;
++              path[depth].p_hdr->eh_entries =
++                   cpu_to_le16(le16_to_cpu(path[depth].p_hdr->eh_entries)-m);
++              if ((err = ext3_ext_dirty(handle, inode, path + depth)))
++                      goto cleanup;
++
++      }
++
++      /* create intermediate indexes */
++      k = depth - at - 1;
++      BUG_ON(k < 0);
++      if (k)
++              ext_debug(inode, "create %d intermediate indices\n", k);
++      /* insert new index into current index block */
++      /* current depth stored in i var */
++      i = depth - 1;
++      while (k--) {
++              oldblock = newblock;
++              newblock = ablocks[--a];
++              bh = sb_getblk(inode->i_sb, newblock);
++              if (!bh) {
++                      err = -EIO;
++                      goto cleanup;
++              }
++              lock_buffer(bh);
++
++              if ((err = ext3_journal_get_create_access(handle, bh)))
++                      goto cleanup;
++
++              neh = ext_block_hdr(bh);
++              neh->eh_entries = cpu_to_le16(1);
++              neh->eh_magic = cpu_to_le16(EXT3_EXT_MAGIC);
++              neh->eh_max = cpu_to_le16(ext3_ext_space_block_idx(inode));
++              neh->eh_depth = cpu_to_le16(depth - i);
++              fidx = EXT_FIRST_INDEX(neh);
++              fidx->ei_block = border;
++              fidx->ei_leaf = cpu_to_le32(oldblock);
++              fidx->ei_leaf_hi = fidx->ei_unused = 0;
++
++              ext_debug(inode, "int.index at %d (block %lu): %lu -> %lu\n", i,
++                              newblock, (unsigned long) le32_to_cpu(border),
++                              oldblock);
++              /* copy indexes */
++              m = 0;
++              path[i].p_idx++;
++
++              ext_debug(inode, "cur 0x%p, last 0x%p\n", path[i].p_idx,
++                              EXT_MAX_INDEX(path[i].p_hdr));
++              BUG_ON(EXT_MAX_INDEX(path[i].p_hdr) !=
++                              EXT_LAST_INDEX(path[i].p_hdr));
++              while (path[i].p_idx <= EXT_MAX_INDEX(path[i].p_hdr)) {
++                      ext_debug(inode, "%d: move %d:%d in new index %lu\n", i,
++                                      le32_to_cpu(path[i].p_idx->ei_block),
++                                      le32_to_cpu(path[i].p_idx->ei_leaf),
++                                      newblock);
++                      /*memmove(++fidx, path[i].p_idx++,
++                                      sizeof(struct ext3_extent_idx));
++                      neh->eh_entries++;
++                      BUG_ON(neh->eh_entries > neh->eh_max);*/
++                      path[i].p_idx++;
++                      m++;
++              }
++              if (m) {
++                      memmove(++fidx, path[i].p_idx - m,
++                              sizeof(struct ext3_extent_idx) * m);
++                      neh->eh_entries =
++                              cpu_to_le16(le16_to_cpu(neh->eh_entries) + m);
++              }
++              set_buffer_uptodate(bh);
++              unlock_buffer(bh);
++
++              if ((err = ext3_journal_dirty_metadata(handle, bh)))
++                      goto cleanup;
++              brelse(bh);
++              bh = NULL;
++
++              /* correct old index */
++              if (m) {
++                      err = ext3_ext_get_access(handle, inode, path + i);
++                      if (err)
++                              goto cleanup;
++                      path[i].p_hdr->eh_entries = cpu_to_le16(le16_to_cpu(path[i].p_hdr->eh_entries)-m);
++                      err = ext3_ext_dirty(handle, inode, path + i);
++                      if (err)
++                              goto cleanup;
++              }
++
++              i--;
++      }
++
++      /* insert new index */
++      if (err)
++              goto cleanup;
++
++      err = ext3_ext_insert_index(handle, inode, path + at,
++                                  le32_to_cpu(border), newblock);
++
++cleanup:
++      if (bh) {
++              if (buffer_locked(bh))
++                      unlock_buffer(bh);
++              brelse(bh);
++      }
++
++      if (err) {
++              /* free all allocated blocks in error case */
++              for (i = 0; i < depth; i++) {
++                      if (!ablocks[i])
++                              continue;
++                      ext3_free_blocks(handle, inode, ablocks[i], 1);
++              }
++      }
++      kfree(ablocks);
++
++      return err;
++}
++
++/*
++ * routine implements the tree-growing procedure:
++ *  - allocates a new block
++ *  - moves top-level data (index block or leaf) into the new block
++ *  - initializes the new top level, creating an index that points to the
++ *    just created block
++ */
++static int ext3_ext_grow_indepth(handle_t *handle, struct inode *inode,
++                                      struct ext3_ext_path *path,
++                                      struct ext3_extent *newext)
++{
++      struct ext3_ext_path *curp = path;
++      struct ext3_extent_header *neh;
++      struct ext3_extent_idx *fidx;
++      struct buffer_head *bh;
++      unsigned long newblock;
++      int err = 0;
++
++      newblock = ext3_ext_new_block(handle, inode, path, newext, &err);
++      if (newblock == 0)
++              return err;
++
++      bh = sb_getblk(inode->i_sb, newblock);
++      if (!bh) {
++              err = -EIO;
++              ext3_std_error(inode->i_sb, err);
++              return err;
++      }
++      lock_buffer(bh);
++
++      if ((err = ext3_journal_get_create_access(handle, bh))) {
++              unlock_buffer(bh);
++              goto out;
++      }
++
++      /* move top-level index/leaf into new block */
++      memmove(bh->b_data, curp->p_hdr, sizeof(EXT3_I(inode)->i_data));
++
++      /* set size of new block */
++      neh = ext_block_hdr(bh);
++      /* old root could have indexes or leaves
++       * so calculate eh_max the right way */
++      if (ext_depth(inode))
++        neh->eh_max = cpu_to_le16(ext3_ext_space_block_idx(inode));
++      else
++        neh->eh_max = cpu_to_le16(ext3_ext_space_block(inode));
++      neh->eh_magic = cpu_to_le16(EXT3_EXT_MAGIC);
++      set_buffer_uptodate(bh);
++      unlock_buffer(bh);
++
++      if ((err = ext3_journal_dirty_metadata(handle, bh)))
++              goto out;
++
++      /* create index in new top-level index: num,max,pointer */
++      if ((err = ext3_ext_get_access(handle, inode, curp)))
++              goto out;
++
++      curp->p_hdr->eh_magic = cpu_to_le16(EXT3_EXT_MAGIC);
++      curp->p_hdr->eh_max = cpu_to_le16(ext3_ext_space_root_idx(inode));
++      curp->p_hdr->eh_entries = cpu_to_le16(1);
++      curp->p_idx = EXT_FIRST_INDEX(curp->p_hdr);
++      /* FIXME: it works, but actually path[0] can be index */
++      curp->p_idx->ei_block = EXT_FIRST_EXTENT(path[0].p_hdr)->ee_block;
++      curp->p_idx->ei_leaf = cpu_to_le32(newblock);
++      curp->p_idx->ei_leaf_hi = curp->p_idx->ei_unused = 0;
++
++      neh = ext_inode_hdr(inode);
++      fidx = EXT_FIRST_INDEX(neh);
++      ext_debug(inode, "new root: num %d(%d), lblock %d, ptr %d\n",
++                le16_to_cpu(neh->eh_entries), le16_to_cpu(neh->eh_max),
++                le32_to_cpu(fidx->ei_block), le32_to_cpu(fidx->ei_leaf));
++
++      neh->eh_depth = cpu_to_le16(path->p_depth + 1);
++      err = ext3_ext_dirty(handle, inode, curp);
++out:
++      brelse(bh);
++
++      return err;
++}
++
++/*
++ * routine finds an empty index and adds a new leaf. if no free index is
++ * found, it requests in-depth growing of the tree
++ */
++static int ext3_ext_create_new_leaf(handle_t *handle, struct inode *inode,
++                                      struct ext3_ext_path *path,
++                                      struct ext3_extent *newext)
++{
++      struct ext3_ext_path *curp;
++      int depth, i, err = 0;
++
++repeat:
++      i = depth = ext_depth(inode);
++
++      /* walk up to the tree and look for free index entry */
++      curp = path + depth;
++      while (i > 0 && !EXT_HAS_FREE_INDEX(curp)) {
++              i--;
++              curp--;
++      }
++
++      /* we use an already allocated block for the index block,
++       * so subsequent data blocks should be contiguous */
++      if (EXT_HAS_FREE_INDEX(curp)) {
++              /* if we found index with free entry, then use that
++               * entry: create all needed subtree and add new leaf */
++              err = ext3_ext_split(handle, inode, path, newext, i);
++              if (err)
++                      goto out;
++
++              /* refill path */
++              ext3_ext_drop_refs(path);
++              path = ext3_ext_find_extent(inode,
++                                          le32_to_cpu(newext->ee_block),
++                                          path);
++              if (IS_ERR(path))
++                      err = PTR_ERR(path);
++      } else {
++              /* tree is full, time to grow in depth */
++              err = ext3_ext_grow_indepth(handle, inode, path, newext);
++              if (err)
++                      goto out;
++
++              /* refill path */
++              ext3_ext_drop_refs(path);
++              path = ext3_ext_find_extent(inode,
++                                          le32_to_cpu(newext->ee_block),
++                                          path);
++              if (IS_ERR(path)) {
++                      err = PTR_ERR(path);
++                      goto out;
++              }
++
++              /*
++               * only first (depth 0 -> 1) produces free space
++               * in all other cases we have to split the grown tree
++               */
++              depth = ext_depth(inode);
++              if (path[depth].p_hdr->eh_entries == path[depth].p_hdr->eh_max) {
++                      /* now we need split */
++                      goto repeat;
++              }
++      }
++
++out:
++      return err;
++}
++
++/*
++ * search for the closest allocated block to the left of *logical
++ * and return it at @logical + its physical address at @phys;
++ * if *logical is the smallest allocated block, the function
++ * returns 0 at @phys
++ * return value contains 0 (success) or error code
++ */
++int
++ext3_ext_search_left(struct inode *inode, struct ext3_ext_path *path,
++                      unsigned long *logical, unsigned long *phys)
++{
++      struct ext3_extent_idx *ix;
++      struct ext3_extent *ex;
++      int depth;
++
++      BUG_ON(path == NULL);
++      depth = path->p_depth;
++      *phys = 0;
++
++      if (depth == 0 && path->p_ext == NULL)
++              return 0;
++
++      /* usually the extent in the path covers blocks smaller
++       * than *logical, but it can be that the extent is the
++       * first one in the file */
++
++      ex = path[depth].p_ext;
++      if (*logical < le32_to_cpu(ex->ee_block)) {
++              BUG_ON(EXT_FIRST_EXTENT(path[depth].p_hdr) != ex);
++              while (--depth >= 0) {
++                      ix = path[depth].p_idx;
++                      BUG_ON(ix != EXT_FIRST_INDEX(path[depth].p_hdr));
++              }
++              return 0;
++      }
++
++      BUG_ON(*logical < le32_to_cpu(ex->ee_block) + le16_to_cpu(ex->ee_len));
++
++      *logical = le32_to_cpu(ex->ee_block) + le16_to_cpu(ex->ee_len) - 1;
++      *phys = le32_to_cpu(ex->ee_start) + le16_to_cpu(ex->ee_len) - 1;
++      return 0;
++}
++EXPORT_SYMBOL(ext3_ext_search_left);
++
++/*
++ * search for the closest allocated block to the right of *logical
++ * and return it at @logical + its physical address at @phys;
++ * if *logical is the largest allocated block, the function
++ * returns 0 at @phys
++ * return value contains 0 (success) or error code
++ */
++int
++ext3_ext_search_right(struct inode *inode, struct ext3_ext_path *path,
++                      unsigned long *logical, unsigned long *phys)
++{
++      struct buffer_head *bh = NULL;
++      struct ext3_extent_header *eh;
++      struct ext3_extent_idx *ix;
++      struct ext3_extent *ex;
++      unsigned long block;
++      int depth;
++
++      BUG_ON(path == NULL);
++      depth = path->p_depth;
++      *phys = 0;
++
++      if (depth == 0 && path->p_ext == NULL)
++              return 0;
++
++      /* usually the extent in the path covers blocks smaller
++       * than *logical, but it can be that the extent is the
++       * first one in the file */
++
++      ex = path[depth].p_ext;
++      if (*logical < le32_to_cpu(ex->ee_block)) {
++              BUG_ON(EXT_FIRST_EXTENT(path[depth].p_hdr) != ex);
++              while (--depth >= 0) {
++                      ix = path[depth].p_idx;
++                      BUG_ON(ix != EXT_FIRST_INDEX(path[depth].p_hdr));
++              }
++              *logical = le32_to_cpu(ex->ee_block);
++              *phys = le32_to_cpu(ex->ee_start);
++              return 0;
++      }
++
++      BUG_ON(*logical < le32_to_cpu(ex->ee_block) + le16_to_cpu(ex->ee_len));
++
++      if (ex != EXT_LAST_EXTENT(path[depth].p_hdr)) {
++              /* next allocated block in this leaf */
++              ex++;
++              *logical = le32_to_cpu(ex->ee_block);
++              *phys = le32_to_cpu(ex->ee_start);
++              return 0;
++      }
++
++      /* go up and search for index to the right */
++      while (--depth >= 0) {
++              ix = path[depth].p_idx;
++              if (ix != EXT_LAST_INDEX(path[depth].p_hdr))
++                      break;
++      }
++
++      if (depth < 0) {
++              /* we've gone up to the root and
++               * found no index to the right */
++              return 0;
++      }
++
++      /* we've found index to the right, let's
++       * follow it and find the closest allocated
++       * block to the right */
++      ix++;
++      block = le32_to_cpu(ix->ei_leaf);
++      while (++depth < path->p_depth) {
++              bh = sb_bread(inode->i_sb, block);
++              if (bh == NULL)
++                      return -EIO;
++              eh = ext_block_hdr(bh);
++              if (ext3_ext_check_header(inode, eh, path->p_depth - depth)) {
++                      brelse(bh);
++                      return -EIO;
++              }
++              ix = EXT_FIRST_INDEX(eh);
++              block = le32_to_cpu(ix->ei_leaf);
++              brelse(bh);
++      }
++
++      bh = sb_bread(inode->i_sb, block);
++      if (bh == NULL)
++              return -EIO;
++      eh = ext_block_hdr(bh);
++      if (ext3_ext_check_header(inode, eh, 0)) {
++              brelse(bh);
++              return -EIO;
++      }
++      ex = EXT_FIRST_EXTENT(eh);
++      *logical = le32_to_cpu(ex->ee_block);
++      *phys = le32_to_cpu(ex->ee_start);
++      brelse(bh);
++      return 0;
++
++}
++EXPORT_SYMBOL(ext3_ext_search_right);
++
++
++
++/*
++ * returns allocated block in subsequent extent or EXT_MAX_BLOCK
++ * NOTE: it considers the block number from an index entry as an
++ * allocated block. thus, index entries have to be consistent
++ * with the leaves
++ */
++static unsigned long
++ext3_ext_next_allocated_block(struct ext3_ext_path *path)
++{
++      int depth;
++
++      BUG_ON(path == NULL);
++      depth = path->p_depth;
++
++      if (depth == 0 && path->p_ext == NULL)
++              return EXT_MAX_BLOCK;
++
++      while (depth >= 0) {
++              if (depth == path->p_depth) {
++                      /* leaf */
++                      if (path[depth].p_ext !=
++                                      EXT_LAST_EXTENT(path[depth].p_hdr))
++                        return le32_to_cpu(path[depth].p_ext[1].ee_block);
++              } else {
++                      /* index */
++                      if (path[depth].p_idx !=
++                                      EXT_LAST_INDEX(path[depth].p_hdr))
++                        return le32_to_cpu(path[depth].p_idx[1].ei_block);
++              }
++              depth--;
++      }
++
++      return EXT_MAX_BLOCK;
++}
++
++/*
++ * returns first allocated block from next leaf or EXT_MAX_BLOCK
++ */
++static unsigned ext3_ext_next_leaf_block(struct inode *inode,
++                                               struct ext3_ext_path *path)
++{
++      int depth;
++
++      BUG_ON(path == NULL);
++      depth = path->p_depth;
++
++      /* zero-tree has no leaf blocks at all */
++      if (depth == 0)
++              return EXT_MAX_BLOCK;
++
++      /* go to index block */
++      depth--;
++
++      while (depth >= 0) {
++              if (path[depth].p_idx !=
++                              EXT_LAST_INDEX(path[depth].p_hdr))
++                return le32_to_cpu(path[depth].p_idx[1].ei_block);
++              depth--;
++      }
++
++      return EXT_MAX_BLOCK;
++}
++
++/*
++ * if leaf gets modified and modified extent is first in the leaf
++ * then we have to correct all indexes above
++ * TODO: do we need to correct tree in all cases?
++ */
++int ext3_ext_correct_indexes(handle_t *handle, struct inode *inode,
++                              struct ext3_ext_path *path)
++{
++      struct ext3_extent_header *eh;
++      int depth = ext_depth(inode);
++      struct ext3_extent *ex;
++      __le32 border;
++      int k, err = 0;
++
++      eh = path[depth].p_hdr;
++      ex = path[depth].p_ext;
++      BUG_ON(ex == NULL);
++      BUG_ON(eh == NULL);
++
++      if (depth == 0) {
++              /* there is no tree at all */
++              return 0;
++      }
++
++      if (ex != EXT_FIRST_EXTENT(eh)) {
++              /* we correct the tree only if the first leaf was modified */
++              return 0;
++      }
++
++      /*
++       * TODO: we need correction if the border is smaller than the current one
++       */
++      k = depth - 1;
++      border = path[depth].p_ext->ee_block;
++      if ((err = ext3_ext_get_access(handle, inode, path + k)))
++              return err;
++      path[k].p_idx->ei_block = border;
++      if ((err = ext3_ext_dirty(handle, inode, path + k)))
++              return err;
++
++      while (k--) {
++              /* change all left-side indexes */
++              if (path[k+1].p_idx != EXT_FIRST_INDEX(path[k+1].p_hdr))
++                      break;
++              if ((err = ext3_ext_get_access(handle, inode, path + k)))
++                      break;
++              path[k].p_idx->ei_block = border;
++              if ((err = ext3_ext_dirty(handle, inode, path + k)))
++                      break;
++      }
++
++      return err;
++}
++
++static int inline
++ext3_can_extents_be_merged(struct inode *inode, struct ext3_extent *ex1,
++                              struct ext3_extent *ex2)
++{
++      /* FIXME: 48bit support */
++        if (le32_to_cpu(ex1->ee_block) + le16_to_cpu(ex1->ee_len)
++          != le32_to_cpu(ex2->ee_block))
++              return 0;
++
++#ifdef AGRESSIVE_TEST
++      if (le16_to_cpu(ex1->ee_len) >= 4)
++              return 0;
++#endif
++
++        if (le32_to_cpu(ex1->ee_start) + le16_to_cpu(ex1->ee_len)
++                      == le32_to_cpu(ex2->ee_start))
++              return 1;
++      return 0;
++}
++
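A concrete instance of the merge test above, with made-up block numbers:

/*
 * Hypothetical example for ext3_can_extents_be_merged(): an extent
 * with ee_block=100, ee_len=5, ee_start=500 (logical 100..104 at
 * physical 500..504) merges with ee_block=105, ee_len=3, ee_start=505,
 * since both the logical and the physical ranges are contiguous; the
 * caller then grows the first extent to ee_len=8.  An extent at
 * ee_block=105 but ee_start=600 would not merge, because the physical
 * ranges are discontiguous.
 */
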
++/*
++ * this routine tries to merge the requested extent into the existing
++ * extent or inserts requested extent as new one into the tree,
++ * creating new leaf in no-space case
++ */
++int ext3_ext_insert_extent(handle_t *handle, struct inode *inode,
++                              struct ext3_ext_path *path,
++                              struct ext3_extent *newext)
++{
++      struct ext3_extent_header * eh;
++      struct ext3_extent *ex, *fex;
++      struct ext3_extent *nearex; /* nearest extent */
++      struct ext3_ext_path *npath = NULL;
++      int depth, len, err, next;
++
++      BUG_ON(newext->ee_len == 0);
++      depth = ext_depth(inode);
++      ex = path[depth].p_ext;
++      BUG_ON(path[depth].p_hdr == NULL);
++
++      /* try to insert block into found extent and return */
++      if (ex && ext3_can_extents_be_merged(inode, ex, newext)) {
++              ext_debug(inode, "append %d block to %d:%d (from %d)\n",
++                              le16_to_cpu(newext->ee_len),
++                              le32_to_cpu(ex->ee_block),
++                              le16_to_cpu(ex->ee_len),
++                              le32_to_cpu(ex->ee_start));
++              if ((err = ext3_ext_get_access(handle, inode, path + depth)))
++                      return err;
++              ex->ee_len = cpu_to_le16(le16_to_cpu(ex->ee_len)
++                                       + le16_to_cpu(newext->ee_len));
++              eh = path[depth].p_hdr;
++              nearex = ex;
++              goto merge;
++      }
++
++repeat:
++      depth = ext_depth(inode);
++      eh = path[depth].p_hdr;
++      if (le16_to_cpu(eh->eh_entries) < le16_to_cpu(eh->eh_max))
++              goto has_space;
++
++      /* probably next leaf has space for us? */
++      fex = EXT_LAST_EXTENT(eh);
++      next = ext3_ext_next_leaf_block(inode, path);
++      if (le32_to_cpu(newext->ee_block) > le32_to_cpu(fex->ee_block)
++          && next != EXT_MAX_BLOCK) {
++              ext_debug(inode, "next leaf block - %d\n", next);
++              BUG_ON(npath != NULL);
++              npath = ext3_ext_find_extent(inode, next, NULL);
++              if (IS_ERR(npath))
++                      return PTR_ERR(npath);
++              BUG_ON(npath->p_depth != path->p_depth);
++              eh = npath[depth].p_hdr;
++              if (le16_to_cpu(eh->eh_entries) < le16_to_cpu(eh->eh_max)) {
++                      ext_debug(inode, "next leaf isnt full(%d)\n",
++                                le16_to_cpu(eh->eh_entries));
++                      path = npath;
++                      goto repeat;
++              }
++              ext_debug(inode, "next leaf has no free space(%d,%d)\n",
++                        le16_to_cpu(eh->eh_entries), le16_to_cpu(eh->eh_max));
++      }
++
++      /*
++       * there is no free space in the found leaf;
++       * we're going to add a new leaf to the tree
++       */
++      err = ext3_ext_create_new_leaf(handle, inode, path, newext);
++      if (err)
++              goto cleanup;
++      depth = ext_depth(inode);
++      eh = path[depth].p_hdr;
++
++has_space:
++      nearex = path[depth].p_ext;
++
++      if ((err = ext3_ext_get_access(handle, inode, path + depth)))
++              goto cleanup;
++
++      if (!nearex) {
++              /* there is no extent in this leaf, create first one */
++              ext_debug(inode, "first extent in the leaf: %d:%d:%d\n",
++                              le32_to_cpu(newext->ee_block),
++                              le32_to_cpu(newext->ee_start),
++                              le16_to_cpu(newext->ee_len));
++              path[depth].p_ext = EXT_FIRST_EXTENT(eh);
++      } else if (le32_to_cpu(newext->ee_block)
++                         > le32_to_cpu(nearex->ee_block)) {
++              /* BUG_ON(newext->ee_block == nearex->ee_block); */
++              if (nearex != EXT_LAST_EXTENT(eh)) {
++                      len = EXT_MAX_EXTENT(eh) - nearex;
++                      len = (len - 1) * sizeof(struct ext3_extent);
++                      len = len < 0 ? 0 : len;
++                      ext_debug(inode, "insert %d:%d:%d after: nearest 0x%p, "
++                                      "move %d from 0x%p to 0x%p\n",
++                                      le32_to_cpu(newext->ee_block),
++                                      le32_to_cpu(newext->ee_start),
++                                      le16_to_cpu(newext->ee_len),
++                                      nearex, len, nearex + 1, nearex + 2);
++                      memmove(nearex + 2, nearex + 1, len);
++              }
++              path[depth].p_ext = nearex + 1;
++      } else {
++              BUG_ON(newext->ee_block == nearex->ee_block);
++              len = (EXT_MAX_EXTENT(eh) - nearex) * sizeof(struct ext3_extent);
++              len = len < 0 ? 0 : len;
++              ext_debug(inode, "insert %d:%d:%d before: nearest 0x%p, "
++                              "move %d from 0x%p to 0x%p\n",
++                              le32_to_cpu(newext->ee_block),
++                              le32_to_cpu(newext->ee_start),
++                              le16_to_cpu(newext->ee_len),
++                              nearex, len, nearex + 1, nearex + 2);
++              memmove(nearex + 1, nearex, len);
++              path[depth].p_ext = nearex;
++      }
++
++      eh->eh_entries = cpu_to_le16(le16_to_cpu(eh->eh_entries)+1);
++      nearex = path[depth].p_ext;
++      nearex->ee_block = newext->ee_block;
++      nearex->ee_start = newext->ee_start;
++      nearex->ee_len = newext->ee_len;
++      /* FIXME: support for large fs */
++      nearex->ee_start_hi = 0;
++
++merge:
++      /* try to merge extents to the right */
++      while (nearex < EXT_LAST_EXTENT(eh)) {
++              if (!ext3_can_extents_be_merged(inode, nearex, nearex + 1))
++                      break;
++              /* merge with next extent! */
++              nearex->ee_len = cpu_to_le16(le16_to_cpu(nearex->ee_len)
++                                           + le16_to_cpu(nearex[1].ee_len));
++              if (nearex + 1 < EXT_LAST_EXTENT(eh)) {
++                      len = (EXT_LAST_EXTENT(eh) - nearex - 1)
++                                      * sizeof(struct ext3_extent);
++                      memmove(nearex + 1, nearex + 2, len);
++              }
++              eh->eh_entries = cpu_to_le16(le16_to_cpu(eh->eh_entries)-1);
++              BUG_ON(eh->eh_entries == 0);
++      }
++
++      /* try to merge extents to the left */
++
++      /* time to correct all indexes above */
++      err = ext3_ext_correct_indexes(handle, inode, path);
++      if (err)
++              goto cleanup;
++
++      err = ext3_ext_dirty(handle, inode, path + depth);
++
++cleanup:
++      if (npath) {
++              ext3_ext_drop_refs(npath);
++              kfree(npath);
++      }
++      ext3_ext_tree_changed(inode);
++      ext3_ext_invalidate_cache(inode);
++      return err;
++}
++
++int ext3_ext_walk_space(struct inode *inode, unsigned long block,
++                      unsigned long num, ext_prepare_callback func,
++                      void *cbdata)
++{
++      struct ext3_ext_path *path = NULL;
++      struct ext3_ext_cache cbex;
++      struct ext3_extent *ex;
++      unsigned long next, start = 0, end = 0;
++      unsigned long last = block + num;
++      int depth, exists, err = 0;
++
++      BUG_ON(func == NULL);
++      BUG_ON(inode == NULL);
++
++      while (block < last && block != EXT_MAX_BLOCK) {
++              num = last - block;
++              /* find extent for this block */
++              path = ext3_ext_find_extent(inode, block, path);
++              if (IS_ERR(path)) {
++                      err = PTR_ERR(path);
++                      path = NULL;
++                      break;
++              }
++
++              depth = ext_depth(inode);
++              BUG_ON(path[depth].p_hdr == NULL);
++              ex = path[depth].p_ext;
++              next = ext3_ext_next_allocated_block(path);
++
++              exists = 0;
++              if (!ex) {
++                      /* there is no extent yet, so try to allocate
++                       * all requested space */
++                      start = block;
++                      end = block + num;
++              } else if (le32_to_cpu(ex->ee_block) > block) {
++                      /* need to allocate space before found extent */
++                      start = block;
++                      end = le32_to_cpu(ex->ee_block);
++                      if (block + num < end)
++                              end = block + num;
++              } else if (block >=
++                           le32_to_cpu(ex->ee_block) + le16_to_cpu(ex->ee_len)) {
++                      /* need to allocate space after found extent */
++                      start = block;
++                      end = block + num;
++                      if (end >= next)
++                              end = next;
++              } else if (block >= le32_to_cpu(ex->ee_block)) {
++                      /*
++                       * some part of requested space is covered
++                       * by found extent
++                       */
++                      start = block;
++                      end = le32_to_cpu(ex->ee_block) + le16_to_cpu(ex->ee_len);
++                      if (block + num < end)
++                              end = block + num;
++                      exists = 1;
++              } else {
++                      BUG();
++              }
++              BUG_ON(end <= start);
++
++              if (!exists) {
++                      cbex.ec_block = start;
++                      cbex.ec_len = end - start;
++                      cbex.ec_start = 0;
++                      cbex.ec_type = EXT3_EXT_CACHE_GAP;
++              } else {
++                      cbex.ec_block = le32_to_cpu(ex->ee_block);
++                      cbex.ec_len = le16_to_cpu(ex->ee_len);
++                      cbex.ec_start = le32_to_cpu(ex->ee_start);
++                      cbex.ec_type = EXT3_EXT_CACHE_EXTENT;
++              }
++
++              BUG_ON(cbex.ec_len == 0);
++              err = func(inode, path, &cbex, cbdata);
++              ext3_ext_drop_refs(path);
++
++              if (err < 0)
++                      break;
++              if (err == EXT_REPEAT)
++                      continue;
++              else if (err == EXT_BREAK) {
++                      err = 0;
++                      break;
++              }
++
++              if (ext_depth(inode) != depth) {
++                      /* depth was changed. we have to realloc path */
++                      kfree(path);
++                      path = NULL;
++              }
++
++              block = cbex.ec_block + cbex.ec_len;
++      }
++
++      if (path) {
++              ext3_ext_drop_refs(path);
++              kfree(path);
++      }
++
++      return err;
++}
++
++static inline void
++ext3_ext_put_in_cache(struct inode *inode, __u32 block,
++                      __u32 len, __u32 start, int type)
++{
++      struct ext3_ext_cache *cex;
++      BUG_ON(len == 0);
++      cex = &EXT3_I(inode)->i_cached_extent;
++      cex->ec_type = type;
++      cex->ec_block = block;
++      cex->ec_len = len;
++      cex->ec_start = start;
++}
++
++/*
++ * this routine calculates the boundaries of the gap the requested
++ * block fits into, and caches this gap
++ */
++static inline void
++ext3_ext_put_gap_in_cache(struct inode *inode, struct ext3_ext_path *path,
++                              unsigned long block)
++{
++      int depth = ext_depth(inode);
++      unsigned long lblock, len;
++      struct ext3_extent *ex;
++
++      ex = path[depth].p_ext;
++      if (ex == NULL) {
++              /* there is no extent yet, so gap is [0;-] */
++              lblock = 0;
++              len = EXT_MAX_BLOCK;
++              ext_debug(inode, "cache gap(whole file):");
++      } else if (block < le32_to_cpu(ex->ee_block)) {
++              lblock = block;
++              len = le32_to_cpu(ex->ee_block) - block;
++              ext_debug(inode, "cache gap(before): %lu [%lu:%lu]",
++                              (unsigned long) block,
++                              (unsigned long) le32_to_cpu(ex->ee_block),
++                              (unsigned long) le16_to_cpu(ex->ee_len));
++      } else if (block >= le32_to_cpu(ex->ee_block)
++                          + le16_to_cpu(ex->ee_len)) {
++              lblock = le32_to_cpu(ex->ee_block)
++                       + le16_to_cpu(ex->ee_len);
++              len = ext3_ext_next_allocated_block(path);
++              ext_debug(inode, "cache gap(after): [%lu:%lu] %lu",
++                              (unsigned long) le32_to_cpu(ex->ee_block),
++                              (unsigned long) le16_to_cpu(ex->ee_len),
++                              (unsigned long) block);
++              BUG_ON(len == lblock);
++              len = len - lblock;
++      } else {
++              lblock = len = 0;
++              BUG();
++      }
++
++      ext_debug(inode, " -> %lu:%lu\n", (unsigned long) lblock, len);
++      ext3_ext_put_in_cache(inode, lblock, len, 0, EXT3_EXT_CACHE_GAP);
++}
++
++static inline int
++ext3_ext_in_cache(struct inode *inode, unsigned long block,
++                      struct ext3_extent *ex)
++{
++      struct ext3_ext_cache *cex;
++
++      cex = &EXT3_I(inode)->i_cached_extent;
++
++      /* does the cache hold valid data? */
++      if (cex->ec_type == EXT3_EXT_CACHE_NO)
++              return EXT3_EXT_CACHE_NO;
++
++      BUG_ON(cex->ec_type != EXT3_EXT_CACHE_GAP &&
++                      cex->ec_type != EXT3_EXT_CACHE_EXTENT);
++      if (block >= cex->ec_block && block < cex->ec_block + cex->ec_len) {
++              ex->ee_block = cpu_to_le32(cex->ec_block);
++              ex->ee_start = cpu_to_le32(cex->ec_start);
++              ex->ee_start_hi = 0;
++              ex->ee_len = cpu_to_le16(cex->ec_len);
++              ext_debug(inode, "%lu cached by %lu:%lu:%lu\n",
++                              (unsigned long) block,
++                              (unsigned long) cex->ec_block,
++                              (unsigned long) cex->ec_len,
++                              (unsigned long) cex->ec_start);
++              return cex->ec_type;
++      }
++
++      /* not in cache */
++      return EXT3_EXT_CACHE_NO;
++}
++
++/*
++ * routine removes index from the index block
++ * it's used in truncate case only. thus all requests are for
++ * last index in the block only
++ */
++int ext3_ext_rm_idx(handle_t *handle, struct inode *inode,
++                      struct ext3_ext_path *path)
++{
++      struct buffer_head *bh;
++      int err;
++      unsigned long leaf;
++
++      /* free index block */
++      path--;
++      leaf = le32_to_cpu(path->p_idx->ei_leaf);
++      BUG_ON(path->p_hdr->eh_entries == 0);
++      if ((err = ext3_ext_get_access(handle, inode, path)))
++              return err;
++      path->p_hdr->eh_entries = cpu_to_le16(le16_to_cpu(path->p_hdr->eh_entries)-1);
++      if ((err = ext3_ext_dirty(handle, inode, path)))
++              return err;
++      ext_debug(inode, "index is empty, remove it, free block %lu\n", leaf);
++      bh = sb_find_get_block(inode->i_sb, leaf);
++      ext3_forget(handle, 1, inode, bh, leaf);
++      ext3_free_blocks(handle, inode, leaf, 1);
++      return err;
++}
++
++/*
++ * This routine returns the max. credits the extent tree can consume.
++ * It should be OK for low-performance paths like ->writepage().
++ * To allow many writing processes to fit into a single transaction,
++ * the caller should calculate credits under truncate_mutex and
++ * pass the actual path.
++ */
++int inline ext3_ext_calc_credits_for_insert(struct inode *inode,
++                                              struct ext3_ext_path *path)
++{
++      int depth, needed;
++
++      if (path) {
++              /* probably there is space in leaf? */
++              depth = ext_depth(inode);
++              if (le16_to_cpu(path[depth].p_hdr->eh_entries)
++                              < le16_to_cpu(path[depth].p_hdr->eh_max))
++                      return 1;
++      }
++
++      /*
++       * given 32bit logical block (4294967296 blocks), max. tree
++       * can be 4 levels in depth -- 4 * 340^4 == 53453440000.
++       * let's also add one more level for imbalance.
++       */
++      depth = 5;
++
++      /* allocation of new data block(s) */
++      needed = 2;
++
++      /*
++       * tree can be full, so it'd need to grow in depth:
++       * we need one credit to modify old root, credits for
++       * new root will be added in split accounting
++       */
++      needed += 1;
++
++      /*
++       * Index split can happen, we'd need:
++       *    allocate intermediate indexes (bitmap + group)
++       *  + change two blocks at each level, but root (already included)
++       */
++      needed += (depth * 2) + (depth * 2);
++
++      /* any allocation modifies superblock */
++      needed += 1;
++
++      return needed;
++}
++
++static int ext3_remove_blocks(handle_t *handle, struct inode *inode,
++                              struct ext3_extent *ex,
++                              unsigned long from, unsigned long to)
++{
++      struct buffer_head *bh;
++      int i;
++
++#ifdef EXTENTS_STATS
++      {
++              struct ext3_sb_info *sbi = EXT3_SB(inode->i_sb);
++              unsigned short ee_len =  le16_to_cpu(ex->ee_len);
++              spin_lock(&sbi->s_ext_stats_lock);
++              sbi->s_ext_blocks += ee_len;
++              sbi->s_ext_extents++;
++              if (ee_len < sbi->s_ext_min)
++                      sbi->s_ext_min = ee_len;
++              if (ee_len > sbi->s_ext_max)
++                      sbi->s_ext_max = ee_len;
++              if (ext_depth(inode) > sbi->s_depth_max)
++                      sbi->s_depth_max = ext_depth(inode);
++              spin_unlock(&sbi->s_ext_stats_lock);
++      }
++#endif
++      if (from >= le32_to_cpu(ex->ee_block)
++          && to == le32_to_cpu(ex->ee_block) + le16_to_cpu(ex->ee_len) - 1) {
++              /* tail removal */
++              unsigned long num, start;
++              num = le32_to_cpu(ex->ee_block) + le16_to_cpu(ex->ee_len) - from;
++              start = le32_to_cpu(ex->ee_start) + le16_to_cpu(ex->ee_len) - num;
++              ext_debug(inode, "free last %lu blocks starting %lu\n", num, start);
++              for (i = 0; i < num; i++) {
++                      bh = sb_find_get_block(inode->i_sb, start + i);
++                      ext3_forget(handle, 0, inode, bh, start + i);
++              }
++              ext3_free_blocks(handle, inode, start, num);
++      } else if (from == le32_to_cpu(ex->ee_block)
++                 && to <= le32_to_cpu(ex->ee_block) + le16_to_cpu(ex->ee_len) - 1) {
++              printk("strange request: removal %lu-%lu from %u:%u\n",
++                     from, to, le32_to_cpu(ex->ee_block), le16_to_cpu(ex->ee_len));
++      } else {
++              printk("strange request: removal(2) %lu-%lu from %u:%u\n",
++                     from, to, le32_to_cpu(ex->ee_block), le16_to_cpu(ex->ee_len));
++      }
++      return 0;
++}
++
++static int
++ext3_ext_rm_leaf(handle_t *handle, struct inode *inode,
++              struct ext3_ext_path *path, unsigned long start)
++{
++      int err = 0, correct_index = 0;
++      int depth = ext_depth(inode), credits;
++      struct ext3_extent_header *eh;
++      unsigned a, b, block, num;
++      unsigned long ex_ee_block;
++      unsigned short ex_ee_len;
++      struct ext3_extent *ex;
++
++      /* the header must be checked already in ext3_ext_remove_space() */
++      ext_debug(inode, "truncate since %lu in leaf\n", start);
++      if (!path[depth].p_hdr)
++              path[depth].p_hdr = ext_block_hdr(path[depth].p_bh);
++      eh = path[depth].p_hdr;
++      BUG_ON(eh == NULL);
++
++      /* find where to start removing */
++      ex = EXT_LAST_EXTENT(eh);
++
++      ex_ee_block = le32_to_cpu(ex->ee_block);
++      ex_ee_len = le16_to_cpu(ex->ee_len);
++
++      while (ex >= EXT_FIRST_EXTENT(eh) &&
++                      ex_ee_block + ex_ee_len > start) {
++              ext_debug(inode, "remove ext %lu:%u\n", ex_ee_block, ex_ee_len);
++              path[depth].p_ext = ex;
++
++              a = ex_ee_block > start ? ex_ee_block : start;
++              b = ex_ee_block + ex_ee_len - 1 < EXT_MAX_BLOCK ?
++                      ex_ee_block + ex_ee_len - 1 : EXT_MAX_BLOCK;
++
++              ext_debug(inode, "  border %u:%u\n", a, b);
++
++              if (a != ex_ee_block && b != ex_ee_block + ex_ee_len - 1) {
++                      block = 0;
++                      num = 0;
++                      BUG();
++              } else if (a != ex_ee_block) {
++                      /* remove tail of the extent */
++                      block = ex_ee_block;
++                      num = a - block;
++              } else if (b != ex_ee_block + ex_ee_len - 1) {
++                      /* remove head of the extent */
++                      block = a;
++                      num = b - a;
++                      /* there is no "make a hole" API yet */
++                      BUG();
++              } else {
++                      /* remove whole extent: excellent! */
++                      block = ex_ee_block;
++                      num = 0;
++                      BUG_ON(a != ex_ee_block);
++                      BUG_ON(b != ex_ee_block + ex_ee_len - 1);
++              }
++
++              /* at present, extent can't cross block group */
++              /* leaf + bitmap + group desc + sb + inode */
++              credits = 5;
++              if (ex == EXT_FIRST_EXTENT(eh)) {
++                      correct_index = 1;
++                      credits += (ext_depth(inode)) + 1;
++              }
++#ifdef CONFIG_QUOTA
++              credits += 2 * EXT3_QUOTA_TRANS_BLOCKS(inode->i_sb);
++#endif
++
++              handle = ext3_ext_journal_restart(handle, credits);
++              if (IS_ERR(handle)) {
++                      err = PTR_ERR(handle);
++                      goto out;
++              }
++
++              err = ext3_ext_get_access(handle, inode, path + depth);
++              if (err)
++                      goto out;
++
++              err = ext3_remove_blocks(handle, inode, ex, a, b);
++              if (err)
++                      goto out;
++
++              if (num == 0) {
++                      /* this extent is removed entirely; mark the slot unused */
++                      ex->ee_start = ex->ee_start_hi = 0;
++                      eh->eh_entries = cpu_to_le16(le16_to_cpu(eh->eh_entries)-1);
++              }
++
++              ex->ee_block = cpu_to_le32(block);
++              ex->ee_len = cpu_to_le16(num);
++
++              err = ext3_ext_dirty(handle, inode, path + depth);
++              if (err)
++                      goto out;
++
++              ext_debug(inode, "new extent: %u:%u:%u\n", block, num,
++                              le32_to_cpu(ex->ee_start));
++              ex--;
++              ex_ee_block = le32_to_cpu(ex->ee_block);
++              ex_ee_len = le16_to_cpu(ex->ee_len);
++      }
++
++      if (correct_index && eh->eh_entries)
++              err = ext3_ext_correct_indexes(handle, inode, path);
++
++      /* if this leaf is free, then we should
++       * remove it from index block above */
++      if (err == 0 && eh->eh_entries == 0 && path[depth].p_bh != NULL)
++              err = ext3_ext_rm_idx(handle, inode, path + depth);
++
++out:
++      return err;
++}
++
++/*
++ * returns 1 if the current index has to be freed (even if partial)
++ */
++static int inline
++ext3_ext_more_to_rm(struct ext3_ext_path *path)
++{
++      BUG_ON(path->p_idx == NULL);
++
++      if (path->p_idx < EXT_FIRST_INDEX(path->p_hdr))
++              return 0;
++
++      /*
++       * if truncate on a deeper level happened, it wasn't partial,
++       * so we have to consider the current index for truncation
++       */
++      if (le16_to_cpu(path->p_hdr->eh_entries) == path->p_block)
++              return 0;
++      return 1;
++}
++
++int ext3_ext_remove_space(struct inode *inode, unsigned long start)
++{
++      struct super_block *sb = inode->i_sb;
++      int depth = ext_depth(inode);
++      struct ext3_ext_path *path;
++      handle_t *handle;
++      int i = 0, err = 0;
++
++      ext_debug(inode, "truncate since %lu\n", start);
++
++      /* probably first extent we're gonna free will be last in block */
++      handle = ext3_journal_start(inode, depth + 1);
++      if (IS_ERR(handle))
++              return PTR_ERR(handle);
++
++      ext3_ext_invalidate_cache(inode);
++
++      /*
++       * we start scanning from the right side, freeing all the blocks
++       * after i_size and walking down into the tree
++       */
++      path = kmalloc(sizeof(struct ext3_ext_path) * (depth + 1), GFP_KERNEL);
++      if (path == NULL) {
++              ext3_journal_stop(handle);
++              return -ENOMEM;
++      }
++      memset(path, 0, sizeof(struct ext3_ext_path) * (depth + 1));
++      path[0].p_hdr = ext_inode_hdr(inode);
++      if (ext3_ext_check_header(inode, path[0].p_hdr, depth)) {
++              err = -EIO;
++              goto out;
++      }
++      path[0].p_depth = depth;
++
++      while (i >= 0 && err == 0) {
++              if (i == depth) {
++                      /* this is leaf block */
++                      err = ext3_ext_rm_leaf(handle, inode, path, start);
++                      /* root level has p_bh == NULL, brelse() eats this */
++                      brelse(path[i].p_bh);
++                      path[i].p_bh = NULL;
++                      i--;
++                      continue;
++              }
++
++              /* this is index block */
++              if (!path[i].p_hdr) {
++                      ext_debug(inode, "initialize header\n");
++                      path[i].p_hdr = ext_block_hdr(path[i].p_bh);
++              }
++
++              if (!path[i].p_idx) {
++                      /* this level hasn't been touched yet */
++                      path[i].p_idx = EXT_LAST_INDEX(path[i].p_hdr);
++                      path[i].p_block = le16_to_cpu(path[i].p_hdr->eh_entries)+1;
++                      ext_debug(inode, "init index ptr: hdr 0x%p, num %d\n",
++                                path[i].p_hdr,
++                                le16_to_cpu(path[i].p_hdr->eh_entries));
++              } else {
++                      /* we've already been here, look at the next index */
++                      path[i].p_idx--;
++              }
++
++              ext_debug(inode, "level %d - index, first 0x%p, cur 0x%p\n",
++                              i, EXT_FIRST_INDEX(path[i].p_hdr),
++                              path[i].p_idx);
++              if (ext3_ext_more_to_rm(path + i)) {
++                      struct buffer_head *bh;
++                      /* go to the next level */
++                      ext_debug(inode, "move to level %d (block %d)\n",
++                                i + 1, le32_to_cpu(path[i].p_idx->ei_leaf));
++                      memset(path + i + 1, 0, sizeof(*path));
++                      bh = sb_bread(sb, le32_to_cpu(path[i].p_idx->ei_leaf));
++                      if (!bh) {
++                              /* should we reset i_size? */
++                              err = -EIO;
++                              break;
++                      }
++                      BUG_ON(i + 1 > depth);
++                      if (ext3_ext_check_header(inode, ext_block_hdr(bh),
++                                                      depth - i - 1)) {
++                              err = -EIO;
++                              break;
++                      }
++                      path[i+1].p_bh = bh;
++
++                      /* record the actual number of indexes so we can
++                       * tell whether it changed at the next iteration */
++                      path[i].p_block = le16_to_cpu(path[i].p_hdr->eh_entries);
++                      i++;
++              } else {
++                      /* we finish processing this index, go up */
++                      if (path[i].p_hdr->eh_entries == 0 && i > 0) {
++                              /* index is empty, remove it
++                               * handle must be already prepared by the
++                               * truncatei_leaf() */
++                              err = ext3_ext_rm_idx(handle, inode, path + i);
++                      }
++                      /* root level has p_bh == NULL, brelse() eats this */
++                      brelse(path[i].p_bh);
++                      path[i].p_bh = NULL;
++                      i--;
++                      ext_debug(inode, "return to level %d\n", i);
++              }
++      }
++
++      /* TODO: flexible tree reduction should be here */
++      if (path->p_hdr->eh_entries == 0) {
++              /*
++               * truncate to zero freed the whole tree,
++               * so we need to correct eh_depth
++               */
++              err = ext3_ext_get_access(handle, inode, path);
++              if (err == 0) {
++                      ext_inode_hdr(inode)->eh_depth = 0;
++                      ext_inode_hdr(inode)->eh_max =
++                              cpu_to_le16(ext3_ext_space_root(inode));
++                      err = ext3_ext_dirty(handle, inode, path);
++              }
++      }
++out:
++      ext3_ext_tree_changed(inode);
++      ext3_ext_drop_refs(path);
++      kfree(path);
++      ext3_journal_stop(handle);
++
++      return err;
++}
++
++/*
++ * called at mount time
++ */
++void ext3_ext_init(struct super_block *sb)
++{
++      /*
++       * possible initialization would be here
++       */
++
++      if (test_opt(sb, EXTENTS)) {
++              printk("EXT3-fs: file extents enabled");
++#ifdef AGRESSIVE_TEST
++              printk(", aggressive tests");
++#endif
++#ifdef CHECK_BINSEARCH
++              printk(", check binsearch");
++#endif
++#ifdef EXTENTS_STATS
++              printk(", stats");
++#endif
++              printk("\n");
++#ifdef EXTENTS_STATS
++              spin_lock_init(&EXT3_SB(sb)->s_ext_stats_lock);
++              EXT3_SB(sb)->s_ext_min = 1 << 30;
++              EXT3_SB(sb)->s_ext_max = 0;
++#endif
++      }
++}
++
++/*
++ * called at umount time
++ */
++void ext3_ext_release(struct super_block *sb)
++{
++      if (!test_opt(sb, EXTENTS))
++              return;
++
++#ifdef EXTENTS_STATS
++      if (EXT3_SB(sb)->s_ext_blocks && EXT3_SB(sb)->s_ext_extents) {
++              struct ext3_sb_info *sbi = EXT3_SB(sb);
++              printk(KERN_ERR "EXT3-fs: %lu blocks in %lu extents (%lu ave)\n",
++                      sbi->s_ext_blocks, sbi->s_ext_extents,
++                      sbi->s_ext_blocks / sbi->s_ext_extents);
++              printk(KERN_ERR "EXT3-fs: extents: %lu min, %lu max, max depth %lu\n",
++                      sbi->s_ext_min, sbi->s_ext_max, sbi->s_depth_max);
++      }
++#endif
++}
++
++int ext3_ext_get_blocks(handle_t *handle, struct inode *inode, sector_t iblock,
++                      unsigned long max_blocks, struct buffer_head *bh_result,
++                      int create, int extend_disksize)
++{
++      struct ext3_ext_path *path = NULL;
++      struct ext3_extent newex, *ex;
++      int goal, newblock, err = 0, depth;
++      unsigned long allocated = 0;
++      unsigned long next;
++
++      __clear_bit(BH_New, &bh_result->b_state);
++      ext_debug(inode, "blocks %d/%lu requested for inode %u\n", (int) iblock,
++                      max_blocks, (unsigned) inode->i_ino);
++      mutex_lock(&EXT3_I(inode)->truncate_mutex);
++
++      /* check in cache */
++      if ((goal = ext3_ext_in_cache(inode, iblock, &newex))) {
++              if (goal == EXT3_EXT_CACHE_GAP) {
++                      if (!create) {
++                              /* block isn't allocated yet and
++                               * the user doesn't want to allocate it */
++                              goto out2;
++                      }
++                      /* we should allocate requested block */
++              } else if (goal == EXT3_EXT_CACHE_EXTENT) {
++                      /* block is already allocated */
++                      newblock = iblock
++                                 - le32_to_cpu(newex.ee_block)
++                                 + le32_to_cpu(newex.ee_start);
++                      /* number of remaining blocks in the extent */
++                      BUG_ON(iblock < le32_to_cpu(newex.ee_block));
++                      allocated = le16_to_cpu(newex.ee_len) -
++                                      (iblock - le32_to_cpu(newex.ee_block));
++                      goto out;
++              } else {
++                      BUG();
++              }
++      }
++
++      /* find extent for this block */
++      path = ext3_ext_find_extent(inode, iblock, NULL);
++      if (IS_ERR(path)) {
++              err = PTR_ERR(path);
++              path = NULL;
++              goto out2;
++      }
++
++      depth = ext_depth(inode);
++
++      /*
++       * a consistent leaf must not be empty;
++       * this situation is possible, though, _during_ tree modification;
++       * this is why the assert can't be put in ext3_ext_find_extent()
++       */
++      BUG_ON(path[depth].p_ext == NULL && depth != 0);
++
++      if ((ex = path[depth].p_ext)) {
++              unsigned long ee_block = le32_to_cpu(ex->ee_block);
++              unsigned long ee_start = le32_to_cpu(ex->ee_start);
++              unsigned short ee_len  = le16_to_cpu(ex->ee_len);
++              /* if the found extent covers the block, simply return it */
++              if (iblock >= ee_block && iblock < ee_block + ee_len) {
++                      newblock = iblock - ee_block + ee_start;
++                      /* number of remaining blocks in the extent */
++                      allocated = ee_len - (iblock - ee_block);
++                      ext_debug(inode, "%d fit into %lu:%d -> %d\n", (int) iblock,
++                                      ee_block, ee_len, newblock);
++                      ext3_ext_put_in_cache(inode, ee_block, ee_len,
++                                              ee_start, EXT3_EXT_CACHE_EXTENT);
++                      goto out;
++              }
++      }
++
++      /*
++       * requested block isn't allocated yet;
++       * we must not try to create a block if the create flag is zero
++       */
++      if (!create) {
++              /* put the just-found gap into cache to speed up subsequent reqs */
++              ext3_ext_put_gap_in_cache(inode, path, iblock);
++              goto out2;
++      }
++
++      /*
++       * Okay, we need to do block allocation.  Lazily initialize the block
++       * allocation info here if necessary
++       */
++      if (S_ISREG(inode->i_mode) && (!EXT3_I(inode)->i_block_alloc_info))
++              ext3_init_block_alloc_info(inode);
++
++      /* find next allocated block so that we know how many
++       * blocks we can allocate without overlapping the next extent */
++      BUG_ON(iblock < le32_to_cpu(ex->ee_block) + le16_to_cpu(ex->ee_len));
++      next = ext3_ext_next_allocated_block(path);
++      BUG_ON(next <= iblock);
++      allocated = next - iblock;
++      if (allocated > max_blocks)
++              allocated = max_blocks;
++
++      /* allocate new block */
++      goal = ext3_ext_find_goal(inode, path, iblock);
++      newblock = ext3_new_blocks(handle, inode, goal, &allocated, &err);
++      if (!newblock)
++              goto out2;
++      ext_debug(inode, "allocate new block: goal %d, found %d/%lu\n",
++                      goal, newblock, allocated);
++
++      /* try to insert new extent into found leaf and return */
++      newex.ee_block = cpu_to_le32(iblock);
++      newex.ee_start = cpu_to_le32(newblock);
++      newex.ee_start_hi = 0;
++      newex.ee_len = cpu_to_le16(allocated);
++      err = ext3_ext_insert_extent(handle, inode, path, &newex);
++      if (err) {
++              /* free data blocks we just allocated */
++              ext3_free_blocks(handle, inode, le32_to_cpu(newex.ee_start),
++                              le16_to_cpu(newex.ee_len));
++              goto out2;
++      }
++
++      if (extend_disksize && inode->i_size > EXT3_I(inode)->i_disksize)
++              EXT3_I(inode)->i_disksize = inode->i_size;
++
++      /* the previous routine could have used the block we allocated */
++      newblock = le32_to_cpu(newex.ee_start);
++      __set_bit(BH_New, &bh_result->b_state);
++
++      ext3_ext_put_in_cache(inode, iblock, allocated, newblock,
++                              EXT3_EXT_CACHE_EXTENT);
++out:
++      if (allocated > max_blocks)
++              allocated = max_blocks;
++      ext3_ext_show_leaf(inode, path);
++      __set_bit(BH_Mapped, &bh_result->b_state);
++      bh_result->b_bdev = inode->i_sb->s_bdev;
++      bh_result->b_blocknr = newblock;
++      bh_result->b_size = (allocated << inode->i_blkbits);
++out2:
++      if (path) {
++              ext3_ext_drop_refs(path);
++              kfree(path);
++      }
++      mutex_unlock(&EXT3_I(inode)->truncate_mutex);
++
++      return err ? err : allocated;
++}
++
++void ext3_ext_truncate(struct inode * inode, struct page *page)
++{
++      struct address_space *mapping = inode->i_mapping;
++      struct super_block *sb = inode->i_sb;
++      unsigned long last_block;
++      handle_t *handle;
++      int err = 0;
++
++      /*
++       * probably first extent we're gonna free will be last in block
++       */
++      err = ext3_writepage_trans_blocks(inode) + 3;
++      handle = ext3_journal_start(inode, err);
++      if (IS_ERR(handle)) {
++              if (page) {
++                      clear_highpage(page);
++                      flush_dcache_page(page);
++                      unlock_page(page);
++                      page_cache_release(page);
++              }
++              return;
++      }
++
++      if (page)
++              ext3_block_truncate_page(handle, page, mapping, inode->i_size);
++
++      mutex_lock(&EXT3_I(inode)->truncate_mutex);
++      ext3_ext_invalidate_cache(inode);
++
++      /*
++       * TODO: optimization is possible here
++       * probably we don't need any scanning at all,
++       * because page truncation is enough
++       */
++      if (ext3_orphan_add(handle, inode))
++              goto out_stop;
++
++      /* we have to know where to truncate from in crash case */
++      EXT3_I(inode)->i_disksize = inode->i_size;
++      ext3_mark_inode_dirty(handle, inode);
++
++      last_block = (inode->i_size + sb->s_blocksize - 1)
++                      >> EXT3_BLOCK_SIZE_BITS(sb);
++      err = ext3_ext_remove_space(inode, last_block);
++
++      /* In a multi-transaction truncate, we only make the final
++       * transaction synchronous */
++      if (IS_SYNC(inode))
++              handle->h_sync = 1;
++
++out_stop:
++      /*
++       * If this was a simple ftruncate(), and the file will remain alive
++       * then we need to clear up the orphan record which we created above.
++       * However, if this was a real unlink then we were called by
++       * ext3_delete_inode(), and we allow that function to clean up the
++       * orphan info for us.
++       */
++      if (inode->i_nlink)
++              ext3_orphan_del(handle, inode);
++
++      mutex_unlock(&EXT3_I(inode)->truncate_mutex);
++      ext3_journal_stop(handle);
++}
++
++/*
++ * this routine calculates the max number of blocks we could modify
++ * in order to allocate a new block for an inode
++ */
++int ext3_ext_writepage_trans_blocks(struct inode *inode, int num)
++{
++      int needed;
++
++      needed = ext3_ext_calc_credits_for_insert(inode, NULL);
++
++      /* caller wants to allocate num blocks; note this includes the sb */
++      needed = needed * num - (num - 1);
++
++#ifdef CONFIG_QUOTA
++      needed += 2 * EXT3_QUOTA_TRANS_BLOCKS(inode->i_sb);
++#endif
++
++      return needed;
++}
++
++EXPORT_SYMBOL(ext3_mark_inode_dirty);
++EXPORT_SYMBOL(ext3_ext_invalidate_cache);
++EXPORT_SYMBOL(ext3_ext_insert_extent);
++EXPORT_SYMBOL(ext3_ext_walk_space);
++EXPORT_SYMBOL(ext3_ext_find_goal);
++EXPORT_SYMBOL(ext3_ext_calc_credits_for_insert);
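
A note on driving ext3_ext_walk_space() as exported above: the walker hands the callback one ext3_ext_cache entry per allocated extent or gap, keeps going on EXT_CONTINUE, redoes the lookup for the same range on EXT_REPEAT, and stops early on EXT_BREAK. Below is a minimal sketch of a caller, using this patch's callback signature (the fiemap patch further down later adds a struct ext3_extent * argument); walk_stats, count_cb and ext3_count_extents are names invented for the illustration and are not part of the patch.

    /* hypothetical accumulator for the sketch */
    struct walk_stats {
            unsigned long extents;  /* allocated extents seen */
            unsigned long blocks;   /* blocks they cover */
    };

    /* one call per extent or gap reported by ext3_ext_walk_space() */
    static int count_cb(struct inode *inode, struct ext3_ext_path *path,
                        struct ext3_ext_cache *cbex, void *cbdata)
    {
            struct walk_stats *ws = cbdata;

            if (cbex->ec_type == EXT3_EXT_CACHE_EXTENT) {
                    ws->extents++;
                    ws->blocks += cbex->ec_len;
            }
            /* EXT3_EXT_CACHE_GAP entries describe holes; keep walking */
            return EXT_CONTINUE;
    }

    /* walk the whole logical range of an extents-mapped inode */
    static int ext3_count_extents(struct inode *inode, struct walk_stats *ws)
    {
            ws->extents = ws->blocks = 0;
            return ext3_ext_walk_space(inode, 0, EXT_MAX_BLOCK, count_cb, ws);
    }
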
+Index: linux-2.6.18.8/fs/ext3/ialloc.c
+===================================================================
+--- linux-2.6.18.8.orig/fs/ext3/ialloc.c       2007-07-17 09:18:09.000000000 +0200
++++ linux-2.6.18.8/fs/ext3/ialloc.c    2007-07-17 11:08:09.000000000 +0200
+@@ -652,6 +652,17 @@ got:
+               ext3_std_error(sb, err);
+               goto fail_free_drop;
+       }
++      if (test_opt(sb, EXTENTS) && S_ISREG(inode->i_mode)) {
++              EXT3_I(inode)->i_flags |= EXT3_EXTENTS_FL;
++              ext3_ext_tree_init(handle, inode);
++              if (!EXT3_HAS_INCOMPAT_FEATURE(sb, EXT3_FEATURE_INCOMPAT_EXTENTS)) {
++                      err = ext3_journal_get_write_access(handle, EXT3_SB(sb)->s_sbh);
++                      if (err) goto fail;
++                      EXT3_SET_INCOMPAT_FEATURE(sb, EXT3_FEATURE_INCOMPAT_EXTENTS);
++                      BUFFER_TRACE(EXT3_SB(sb)->s_sbh, "call ext3_journal_dirty_metadata");
++                      err = ext3_journal_dirty_metadata(handle, EXT3_SB(sb)->s_sbh);
++              }
++      }
+       ext3_debug("allocating inode %lu\n", inode->i_ino);
+       goto really_out;
+Index: linux-2.6.18.8/fs/ext3/inode.c
+===================================================================
+--- linux-2.6.18.8.orig/fs/ext3/inode.c        2007-07-17 09:18:12.000000000 +0200
++++ linux-2.6.18.8/fs/ext3/inode.c     2007-07-17 11:08:11.000000000 +0200
+@@ -40,8 +40,6 @@
+ #include "iopen.h"
+ #include "acl.h"
+-static int ext3_writepage_trans_blocks(struct inode *inode);
+-
+ /*
+  * Test whether an inode is a fast symlink.
+  */
+@@ -804,6 +802,7 @@ int ext3_get_blocks_handle(handle_t *han
+       ext3_fsblk_t first_block = 0;
++      J_ASSERT(!(EXT3_I(inode)->i_flags & EXT3_EXTENTS_FL));
+       J_ASSERT(handle != NULL || create == 0);
+       depth = ext3_block_to_path(inode,iblock,offsets,&blocks_to_boundary);
+@@ -984,12 +983,10 @@ static int ext3_get_block(struct inode *
+ get_block:
+       if (ret == 0) {
+-              ret = ext3_get_blocks_handle(handle, inode, iblock,
++              ret = ext3_get_blocks_wrap(handle, inode, iblock,
+                                       max_blocks, bh_result, create, 0);
+-              if (ret > 0) {
+-                      bh_result->b_size = (ret << inode->i_blkbits);
++              if (ret > 0)
+                       ret = 0;
+-              }
+       }
+       return ret;
+ }
+@@ -1008,7 +1005,7 @@ struct buffer_head *ext3_getblk(handle_t
+       dummy.b_state = 0;
+       dummy.b_blocknr = -1000;
+       buffer_trace_init(&dummy.b_history);
+-      err = ext3_get_blocks_handle(handle, inode, block, 1,
++      err = ext3_get_blocks_wrap(handle, inode, block, 1,
+                                       &dummy, create, 1);
+       /*
+        * ext3_get_blocks_handle() returns number of blocks
+@@ -1759,7 +1756,7 @@ void ext3_set_aops(struct inode *inode)
+  * This required during truncate. We need to physically zero the tail end
+  * of that block so it doesn't yield old data if the file is later grown.
+  */
+-static int ext3_block_truncate_page(handle_t *handle, struct page *page,
++int ext3_block_truncate_page(handle_t *handle, struct page *page,
+               struct address_space *mapping, loff_t from)
+ {
+       ext3_fsblk_t index = from >> PAGE_CACHE_SHIFT;
+@@ -2263,6 +2260,9 @@ void ext3_truncate(struct inode *inode)
+                       return;
+       }
++      if (EXT3_I(inode)->i_flags & EXT3_EXTENTS_FL)
++              return ext3_ext_truncate(inode, page);
++
+       handle = start_transaction(inode);
+       if (IS_ERR(handle)) {
+               if (page) {
+@@ -3008,12 +3008,15 @@ err_out:
+  * block and work out the exact number of indirects which are touched.  Pah.
+  */
+-static int ext3_writepage_trans_blocks(struct inode *inode)
++int ext3_writepage_trans_blocks(struct inode *inode)
+ {
+       int bpp = ext3_journal_blocks_per_page(inode);
+       int indirects = (EXT3_NDIR_BLOCKS % bpp) ? 5 : 3;
+       int ret;
++      if (EXT3_I(inode)->i_flags & EXT3_EXTENTS_FL)
++              return ext3_ext_writepage_trans_blocks(inode, bpp);
++
+       if (ext3_should_journal_data(inode))
+               ret = 3 * (bpp + indirects) + 2;
+       else
+@@ -3260,7 +3263,7 @@ int ext3_map_inode_page(struct inode *in
+               if (blocks[i] != 0)
+                       continue;
+-              rc = ext3_get_blocks_handle(handle, inode, iblock, 1, &dummy, 1, 1);
++              rc = ext3_get_blocks_wrap(handle, inode, iblock, 1, &dummy, 1, 1);
+               if (rc < 0) {
+                       printk(KERN_INFO "ext3_map_inode_page: error reading "
+                                       "block %ld\n", iblock);
+Index: linux-2.6.18.8/fs/ext3/Makefile
+===================================================================
+--- linux-2.6.18.8.orig/fs/ext3/Makefile       2007-07-17 09:18:11.000000000 +0200
++++ linux-2.6.18.8/fs/ext3/Makefile    2007-07-17 11:08:11.000000000 +0200
+@@ -5,7 +5,8 @@
+ obj-$(CONFIG_EXT3_FS) += ext3.o
+ ext3-y        := balloc.o bitmap.o dir.o file.o fsync.o ialloc.o inode.o iopen.o \
+-         ioctl.o namei.o super.o symlink.o hash.o resize.o ext3_jbd.o
++         ioctl.o namei.o super.o symlink.o hash.o resize.o ext3_jbd.o \
++         extents.o
+ ext3-$(CONFIG_EXT3_FS_XATTR)   += xattr.o xattr_user.o xattr_trusted.o
+ ext3-$(CONFIG_EXT3_FS_POSIX_ACL) += acl.o
+Index: linux-2.6.18.8/fs/ext3/super.c
+===================================================================
+--- linux-2.6.18.8.orig/fs/ext3/super.c        2007-07-17 09:18:12.000000000 +0200
++++ linux-2.6.18.8/fs/ext3/super.c     2007-07-17 11:08:12.000000000 +0200
+@@ -391,6 +391,7 @@ static void ext3_put_super (struct super
+       struct ext3_super_block *es = sbi->s_es;
+       int i;
++      ext3_ext_release(sb);
+       ext3_xattr_put_super(sb);
+       journal_destroy(sbi->s_journal);
+       if (!(sb->s_flags & MS_RDONLY)) {
+@@ -455,6 +456,8 @@ static struct inode *ext3_alloc_inode(st
+ #endif
+       ei->i_block_alloc_info = NULL;
+       ei->vfs_inode.i_version = 1;
++
++      memset(&ei->i_cached_extent, 0, sizeof(ei->i_cached_extent));
+       return &ei->vfs_inode;
+ }
+@@ -680,7 +683,8 @@ enum {
+       Opt_jqfmt_vfsold, Opt_jqfmt_vfsv0, Opt_quota, Opt_noquota,
+       Opt_ignore, Opt_barrier, Opt_err, Opt_resize, Opt_usrquota,
+       Opt_iopen, Opt_noiopen, Opt_iopen_nopriv,
+-      Opt_grpquota
++      Opt_grpquota,
++      Opt_extents, Opt_noextents, Opt_extdebug,
+ };
+ static match_table_t tokens = {
+@@ -733,6 +737,9 @@ static match_table_t tokens = {
+       {Opt_noiopen, "noiopen"},
+       {Opt_iopen_nopriv, "iopen_nopriv"},
+       {Opt_barrier, "barrier=%u"},
++      {Opt_extents, "extents"},
++      {Opt_noextents, "noextents"},
++      {Opt_extdebug, "extdebug"},
+       {Opt_err, NULL},
+       {Opt_resize, "resize"},
+ };
+@@ -1077,6 +1084,15 @@ clear_qf_name:
+               case Opt_bh:
+                       clear_opt(sbi->s_mount_opt, NOBH);
+                       break;
++              case Opt_extents:
++                      set_opt (sbi->s_mount_opt, EXTENTS);
++                      break;
++              case Opt_noextents:
++                      clear_opt (sbi->s_mount_opt, EXTENTS);
++                      break;
++              case Opt_extdebug:
++                      set_opt (sbi->s_mount_opt, EXTDEBUG);
++                      break;
+               default:
+                       printk (KERN_ERR
+                               "EXT3-fs: Unrecognized mount option \"%s\" "
+@@ -1806,6 +1822,8 @@ static int ext3_fill_super (struct super
+               test_opt(sb,DATA_FLAGS) == EXT3_MOUNT_ORDERED_DATA ? "ordered":
+               "writeback");
++      ext3_ext_init(sb);
++
+       lock_kernel();
+       return 0;
+Index: linux-2.6.18.8/include/linux/ext3_extents.h
+===================================================================
+--- /dev/null  1970-01-01 00:00:00.000000000 +0000
++++ linux-2.6.18.8/include/linux/ext3_extents.h        2007-07-17 09:18:14.000000000 +0200
+@@ -0,0 +1,231 @@
++/*
++ * Copyright (c) 2003-2006, Cluster File Systems, Inc, info@clusterfs.com
++ * Written by Alex Tomas <alex@clusterfs.com>
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License version 2 as
++ * published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software
++ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307 USA
++ */
++
++#ifndef _LINUX_EXT3_EXTENTS
++#define _LINUX_EXT3_EXTENTS
++
++#include <linux/ext3_fs.h>
++
++/*
++ * with AGRESSIVE_TEST defined, the capacity of index/leaf blocks
++ * becomes very small, so index splits, in-depth growing and
++ * other hard changes happen much more often;
++ * this is for debug purposes only
++ */
++#define AGRESSIVE_TEST_
++
++/*
++ * with EXTENTS_STATS defined, the number of blocks and extents
++ * is collected in the truncate path; they'll be shown at
++ * umount time
++ */
++#define EXTENTS_STATS__
++
++/*
++ * if CHECK_BINSEARCH defined, then results of binary search
++ * will be checked by linear search
++ */
++#define CHECK_BINSEARCH__
++
++/*
++ * if EXT_DEBUG is defined you can use 'extdebug' mount option
++ * to get lots of info about what's going on
++ */
++#define EXT_DEBUG_
++#ifdef EXT_DEBUG
++#define ext_debug(inode,fmt,a...)                       \
++do {                                                    \
++       if (test_opt(inode->i_sb, EXTDEBUG))             \
++               printk(fmt, ##a);                        \
++} while (0);
++#else
++#define ext_debug(inode,fmt,a...)
++#endif
++
++
++/*
++ * if EXT_STATS is defined then stats numbers are collected;
++ * these numbers will be displayed at umount time
++ */
++#define EXT_STATS_
++
++/*
++ * define EXT3_ALLOC_NEEDED to 0 since block bitmap, group desc. and sb
++ * are now accounted in ext3_ext_calc_credits_for_insert()
++ */
++#define EXT3_ALLOC_NEEDED 0
++
++/*
++ * ext3_inode has i_block array (60 bytes total)
++ * the first 12 bytes store the ext3_extent_header;
++ * the remainder stores an array of ext3_extent
++ */
++
++/*
++ * this is extent on-disk structure
++ * it's used at the bottom of the tree
++ */
++struct ext3_extent {
++      __le32  ee_block;       /* first logical block extent covers */
++      __le16  ee_len;         /* number of blocks covered by extent */
++      __le16  ee_start_hi;    /* high 16 bits of physical block */
++      __le32  ee_start;       /* low 32 bits of physical block */
++};
++
++/*
++ * this is index on-disk structure
++ * it's used at all the levels, but the bottom
++ */
++struct ext3_extent_idx {
++      __le32  ei_block;       /* index covers logical blocks from 'block' */
++      __le32  ei_leaf;        /* pointer to the physical block of the next *
++                               * level. leaf or next index could be here */
++      __le16  ei_leaf_hi;     /* high 16 bits of physical block */
++      __u16   ei_unused;
++};
++
++/*
++ * each block (leaves and indexes), even the inode-stored one, has a header
++ */
++struct ext3_extent_header {
++      __le16  eh_magic;       /* probably will support different formats */
++      __le16  eh_entries;     /* number of valid entries */
++      __le16  eh_max;         /* capacity of store in entries */
++      __le16  eh_depth;       /* has tree real underlying blocks? */
++      __le32  eh_generation;  /* flags(8 bits) | generation of the tree */
++};
++
++#define EXT3_EXT_MAGIC                0xf30a
++
++/*
++ * array of ext3_ext_path contains path to some extent
++ * creation/lookup routines use it for traversal/splitting/etc
++ * truncate uses it to simulate recursive walking
++ */
++struct ext3_ext_path {
++      __u32                           p_block;
++      __u16                           p_depth;
++      struct ext3_extent              *p_ext;
++      struct ext3_extent_idx          *p_idx;
++      struct ext3_extent_header       *p_hdr;
++      struct buffer_head              *p_bh;
++};
++
++/*
++ * structure for external API
++ */
++
++#define EXT3_EXT_CACHE_NO     0
++#define EXT3_EXT_CACHE_GAP    1
++#define EXT3_EXT_CACHE_EXTENT 2
++#define EXT3_EXT_HAS_NO_TREE  /* ext3_extents_tree struct is not used*/
++
++/*
++ * to be called by ext3_ext_walk_space()
++ * negative retcode - error
++ * positive retcode - signal for ext3_ext_walk_space(), see below
++ * callback must return valid extent (passed or newly created)
++ */
++typedef int (*ext_prepare_callback)(struct inode *, struct ext3_ext_path *,
++                                      struct ext3_ext_cache *,
++                                      void *);
++
++#define EXT_CONTINUE  0
++#define EXT_BREAK     1
++#define EXT_REPEAT    2
++
++
++#define EXT_MAX_BLOCK 0xffffffff
++
++#define EXT_FLAGS_CLR_UNKNOWN 0x7  /* Flags cleared on modification */
++#define EXT_HDR_GEN_BITS      24
++#define EXT_HDR_GEN_MASK      ((1 << EXT_HDR_GEN_BITS) - 1)
++
++#define EXT_FIRST_EXTENT(__hdr__) \
++      ((struct ext3_extent *) (((char *) (__hdr__)) +         \
++                               sizeof(struct ext3_extent_header)))
++#define EXT_FIRST_INDEX(__hdr__) \
++      ((struct ext3_extent_idx *) (((char *) (__hdr__)) +     \
++                                   sizeof(struct ext3_extent_header)))
++#define EXT_HAS_FREE_INDEX(__path__) \
++        (le16_to_cpu((__path__)->p_hdr->eh_entries) \
++                                   < le16_to_cpu((__path__)->p_hdr->eh_max))
++#define EXT_LAST_EXTENT(__hdr__) \
++      (EXT_FIRST_EXTENT((__hdr__)) + le16_to_cpu((__hdr__)->eh_entries) - 1)
++#define EXT_LAST_INDEX(__hdr__) \
++      (EXT_FIRST_INDEX((__hdr__)) + le16_to_cpu((__hdr__)->eh_entries) - 1)
++#define EXT_MAX_EXTENT(__hdr__) \
++      (EXT_FIRST_EXTENT((__hdr__)) + le16_to_cpu((__hdr__)->eh_max) - 1)
++#define EXT_MAX_INDEX(__hdr__) \
++      (EXT_FIRST_INDEX((__hdr__)) + le16_to_cpu((__hdr__)->eh_max) - 1)
++
++
++static inline struct ext3_extent_header *ext_inode_hdr(struct inode *inode)
++{
++      return (struct ext3_extent_header *) EXT3_I(inode)->i_data;
++}
++
++static inline struct ext3_extent_header *ext_block_hdr(struct buffer_head *bh)
++{
++      return (struct ext3_extent_header *) bh->b_data;
++}
++
++static inline unsigned short ext_depth(struct inode *inode)
++{
++      return le16_to_cpu(ext_inode_hdr(inode)->eh_depth);
++}
++
++static inline unsigned short ext_flags(struct ext3_extent_header *neh)
++{
++      return le16_to_cpu(neh->eh_generation) >> EXT_HDR_GEN_BITS;
++}
++
++static inline unsigned short ext_hdr_gen(struct ext3_extent_header *neh)
++{
++      return le16_to_cpu(neh->eh_generation) & EXT_HDR_GEN_MASK;
++}
++
++static inline unsigned short ext_generation(struct inode *inode)
++{
++      return ext_hdr_gen(ext_inode_hdr(inode));
++}
++
++static inline void ext3_ext_tree_changed(struct inode *inode)
++{
++      struct ext3_extent_header *neh = ext_inode_hdr(inode);
++      neh->eh_generation = cpu_to_le32(
++              ((ext_flags(neh) & ~EXT_FLAGS_CLR_UNKNOWN) << EXT_HDR_GEN_BITS)
++                  | ((ext_hdr_gen(neh) + 1) & EXT_HDR_GEN_MASK));
++}
++
++static inline void
++ext3_ext_invalidate_cache(struct inode *inode)
++{
++      EXT3_I(inode)->i_cached_extent.ec_type = EXT3_EXT_CACHE_NO;
++}
++
++extern int ext3_ext_search_left(struct inode *, struct ext3_ext_path *, unsigned long *, unsigned long *);
++extern int ext3_ext_search_right(struct inode *, struct ext3_ext_path *, unsigned long *, unsigned long *);
++extern int ext3_extent_tree_init(handle_t *, struct inode *);
++extern int ext3_ext_calc_credits_for_insert(struct inode *, struct ext3_ext_path *);
++extern int ext3_ext_insert_extent(handle_t *, struct inode *, struct ext3_ext_path *, struct ext3_extent *);
++extern int ext3_ext_walk_space(struct inode *, unsigned long, unsigned long, ext_prepare_callback, void *);
++extern struct ext3_ext_path * ext3_ext_find_extent(struct inode *, int, struct ext3_ext_path *);
++
++#endif /* _LINUX_EXT3_EXTENTS */
++
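
A quick size check on the on-disk structures above (illustrative only, not part of the patch): struct ext3_extent, struct ext3_extent_idx and struct ext3_extent_header are 12 bytes each, so the inode's 60-byte i_block area holds the header plus (60 - 12) / 12 = 4 entries; a depth-0 tree therefore stores at most 4 extents (or 4 indexes) directly in the inode.

    /* illustrative compile-time check, not in the patch */
    BUILD_BUG_ON(sizeof(struct ext3_extent) != 12);
    BUILD_BUG_ON(sizeof(struct ext3_extent_idx) != 12);
    BUILD_BUG_ON(sizeof(struct ext3_extent_header) != 12);
    /* 60-byte i_block = 12-byte header + 4 * 12-byte entries */
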
+Index: linux-2.6.18.8/include/linux/ext3_fs.h
+===================================================================
+--- linux-2.6.18.8.orig/include/linux/ext3_fs.h        2007-07-17 09:18:13.000000000 +0200
++++ linux-2.6.18.8/include/linux/ext3_fs.h     2007-07-17 11:08:12.000000000 +0200
+@@ -182,8 +182,10 @@ struct ext3_group_desc
+ #define EXT3_DIRSYNC_FL                       0x00010000 /* dirsync behaviour (directories only) */
+ #define EXT3_TOPDIR_FL                        0x00020000 /* Top of directory hierarchies*/
+ #define EXT3_RESERVED_FL              0x80000000 /* reserved for ext3 lib */
++#define EXT3_EXTENTS_FL                       0x00080000 /* Inode uses extents */
++#define EXT3_SUPER_MAGIC      0xEF53
+-#define EXT3_FL_USER_VISIBLE          0x0003DFFF /* User visible flags */
++#define EXT3_FL_USER_VISIBLE          0x000BDFFF /* User visible flags */
+ #define EXT3_FL_USER_MODIFIABLE               0x000380FF /* User modifiable flags */
+ /*
+@@ -373,6 +374,8 @@ struct ext3_inode {
+ #define EXT3_MOUNT_GRPQUOTA           0x200000 /* "old" group quota */
+ #define EXT3_MOUNT_IOPEN              0x400000        /* Allow access via iopen */
+ #define EXT3_MOUNT_IOPEN_NOPRIV               0x800000/* Make iopen world-readable */
++#define EXT3_MOUNT_EXTENTS            0x2000000/* Extents support */
++#define EXT3_MOUNT_EXTDEBUG             0x4000000/* Extents debug */
+ /* Compatibility, for having both ext2_fs.h and ext3_fs.h included at once */
+ #ifndef clear_opt
+@@ -572,11 +575,13 @@ static inline int ext3_valid_inum(struct
+ #define EXT3_FEATURE_INCOMPAT_RECOVER         0x0004 /* Needs recovery */
+ #define EXT3_FEATURE_INCOMPAT_JOURNAL_DEV     0x0008 /* Journal device */
+ #define EXT3_FEATURE_INCOMPAT_META_BG         0x0010
++#define EXT3_FEATURE_INCOMPAT_EXTENTS         0x0040 /* extents support */
+ #define EXT3_FEATURE_COMPAT_SUPP      EXT2_FEATURE_COMPAT_EXT_ATTR
+ #define EXT3_FEATURE_INCOMPAT_SUPP    (EXT3_FEATURE_INCOMPAT_FILETYPE| \
+                                        EXT3_FEATURE_INCOMPAT_RECOVER| \
+-                                       EXT3_FEATURE_INCOMPAT_META_BG)
++                                       EXT3_FEATURE_INCOMPAT_META_BG| \
++                                       EXT3_FEATURE_INCOMPAT_EXTENTS)
+ #define EXT3_FEATURE_RO_COMPAT_SUPP   (EXT3_FEATURE_RO_COMPAT_SPARSE_SUPER| \
+                                        EXT3_FEATURE_RO_COMPAT_LARGE_FILE| \
+                                        EXT3_FEATURE_RO_COMPAT_BTREE_DIR)
+@@ -816,6 +821,9 @@ extern int ext3_get_inode_loc(struct ino
+ extern void ext3_truncate (struct inode *);
+ extern void ext3_set_inode_flags(struct inode *);
+ extern void ext3_set_aops(struct inode *inode);
++extern int ext3_writepage_trans_blocks(struct inode *);
++extern int ext3_block_truncate_page(handle_t *handle, struct page *page,
++              struct address_space *mapping, loff_t from);
+ /* ioctl.c */
+ extern int ext3_ioctl (struct inode *, struct file *, unsigned int,
+@@ -869,6 +877,30 @@ extern struct inode_operations ext3_spec
+ extern struct inode_operations ext3_symlink_inode_operations;
+ extern struct inode_operations ext3_fast_symlink_inode_operations;
++/* extents.c */
++extern int ext3_ext_tree_init(handle_t *handle, struct inode *);
++extern int ext3_ext_writepage_trans_blocks(struct inode *, int);
++extern int ext3_ext_get_blocks(handle_t *, struct inode *, sector_t,
++                              unsigned long, struct buffer_head *, int, int);
++extern void ext3_ext_truncate(struct inode *, struct page *);
++extern void ext3_ext_init(struct super_block *);
++extern void ext3_ext_release(struct super_block *);
++static inline int
++ext3_get_blocks_wrap(handle_t *handle, struct inode *inode, sector_t block,
++                      unsigned long max_blocks, struct buffer_head *bh,
++                      int create, int extend_disksize)
++{
++      int ret;
++      if (EXT3_I(inode)->i_flags & EXT3_EXTENTS_FL)
++              return ext3_ext_get_blocks(handle, inode, block, max_blocks,
++                                      bh, create, extend_disksize);
++      ret = ext3_get_blocks_handle(handle, inode, block, max_blocks, bh, create,
++                                   extend_disksize);
++      if (ret > 0)
++              bh->b_size = (ret << inode->i_blkbits);
++      return ret;
++}
++
+ #endif        /* __KERNEL__ */
+Index: linux-2.6.18.8/include/linux/ext3_fs_i.h
+===================================================================
+--- linux-2.6.18.8.orig/include/linux/ext3_fs_i.h      2007-02-24 00:52:30.000000000 +0100
++++ linux-2.6.18.8/include/linux/ext3_fs_i.h   2007-07-17 11:08:11.000000000 +0200
+@@ -65,6 +65,16 @@ struct ext3_block_alloc_info {
+ #define rsv_end rsv_window._rsv_end
+ /*
++ * storage for cached extent
++ */
++struct ext3_ext_cache {
++      __u32   ec_start;
++      __u32   ec_block;
++      __u32   ec_len; /* must be 32bit to return holes */
++      __u32   ec_type;
++};
++
++/*
+  * third extended file system inode data in memory
+  */
+ struct ext3_inode_info {
+@@ -142,6 +152,8 @@ struct ext3_inode_info {
+        */
+       struct mutex truncate_mutex;
+       struct inode vfs_inode;
++
++      struct ext3_ext_cache i_cached_extent;
+ };
+ #endif        /* _LINUX_EXT3_FS_I */
+Index: linux-2.6.18.8/include/linux/ext3_fs_sb.h
+===================================================================
+--- linux-2.6.18.8.orig/include/linux/ext3_fs_sb.h     2007-02-24 00:52:30.000000000 +0100
++++ linux-2.6.18.8/include/linux/ext3_fs_sb.h  2007-07-17 11:08:12.000000000 +0200
+@@ -78,6 +78,16 @@ struct ext3_sb_info {
+       char *s_qf_names[MAXQUOTAS];            /* Names of quota files with journalled quota */
+       int s_jquota_fmt;                       /* Format of quota to use */
+ #endif
++
++#ifdef EXTENTS_STATS
++      /* ext3 extents stats */
++      unsigned long s_ext_min;
++      unsigned long s_ext_max;
++      unsigned long s_depth_max;
++      spinlock_t s_ext_stats_lock;
++      unsigned long s_ext_blocks;
++      unsigned long s_ext_extents;
++#endif
+ };
+ #endif        /* _LINUX_EXT3_FS_SB */
+Index: linux-2.6.18.8/include/linux/ext3_jbd.h
+===================================================================
+--- linux-2.6.18.8.orig/include/linux/ext3_jbd.h       2007-02-24 00:52:30.000000000 +0100
++++ linux-2.6.18.8/include/linux/ext3_jbd.h    2007-07-17 09:18:14.000000000 +0200
+@@ -23,9 +23,17 @@
+  *
+  * We may have to touch one inode, one bitmap buffer, up to three
+  * indirection blocks, the group and superblock summaries, and the data
+- * block to complete the transaction.  */
++ * block to complete the transaction.
++ *
++ * For an extents-enabled fs we may have to allocate and modify up to
++ * 5 levels of tree + root which is stored in inode. */
++
++#define EXT3_SINGLEDATA_TRANS_BLOCKS(sb)                              \
++      (EXT3_HAS_INCOMPAT_FEATURE(sb, EXT3_FEATURE_INCOMPAT_EXTENTS)   \
++              || test_opt(sb, EXTENTS) ? 27U : 8U)
+-#define EXT3_SINGLEDATA_TRANS_BLOCKS  8U
++/* Indicate that EXT3_SINGLEDATA_TRANS_BLOCKS takes the sb as argument */
++#define EXT3_SINGLEDATA_TRANS_BLOCKS_HAS_SB
+ /* Extended attribute operations touch at most two data buffers,
+  * two bitmap buffers, and two group summaries, in addition to the inode
+@@ -42,7 +50,7 @@
+  * superblock only gets updated once, of course, so don't bother
+  * counting that again for the quota updates. */
+-#define EXT3_DATA_TRANS_BLOCKS(sb)    (EXT3_SINGLEDATA_TRANS_BLOCKS + \
++#define EXT3_DATA_TRANS_BLOCKS(sb)    (EXT3_SINGLEDATA_TRANS_BLOCKS(sb) + \
+                                        EXT3_XATTR_TRANS_BLOCKS - 2 + \
+                                        2*EXT3_QUOTA_TRANS_BLOCKS(sb))
+@@ -78,9 +86,9 @@
+ /* Amount of blocks needed for quota insert/delete - we do some block writes
+  * but inode, sb and group updates are done only once */
+ #define EXT3_QUOTA_INIT_BLOCKS(sb) (test_opt(sb, QUOTA) ? (DQUOT_INIT_ALLOC*\
+-              (EXT3_SINGLEDATA_TRANS_BLOCKS-3)+3+DQUOT_INIT_REWRITE) : 0)
++              (EXT3_SINGLEDATA_TRANS_BLOCKS(sb)-3)+3+DQUOT_INIT_REWRITE) : 0)
+ #define EXT3_QUOTA_DEL_BLOCKS(sb) (test_opt(sb, QUOTA) ? (DQUOT_DEL_ALLOC*\
+-              (EXT3_SINGLEDATA_TRANS_BLOCKS-3)+3+DQUOT_DEL_REWRITE) : 0)
++              (EXT3_SINGLEDATA_TRANS_BLOCKS(sb)-3)+3+DQUOT_DEL_REWRITE) : 0)
+ #else
+ #define EXT3_QUOTA_TRANS_BLOCKS(sb) 0
+ #define EXT3_QUOTA_INIT_BLOCKS(sb) 0
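
Worked out, the credit estimate from ext3_ext_calc_credits_for_insert() in the extents patch above, for a NULL path, comes to:

    needed = 2 (data blocks) + 1 (root modification)
             + (5*2 + 5*2)   (index splits over the assumed 5 levels)
             + 1 (superblock)
           = 24 credits (plus quota blocks under CONFIG_QUOTA)

which is presumably the headroom behind the 27U that the EXT3_SINGLEDATA_TRANS_BLOCKS(sb) change above reserves for extent-enabled filesystems (versus 8U otherwise); the exact figure of 27 is not derived in the patch itself.
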
diff --git a/ldiskfs/kernel_patches/patches/ext3-fiemap-2.6.22-vanilla.patch b/ldiskfs/kernel_patches/patches/ext3-fiemap-2.6.22-vanilla.patch
new file mode 100644 (file)
index 0000000..ed13d2a
--- /dev/null
@@ -0,0 +1,340 @@
+Index: linux-2.6.18/fs/ext3/ioctl.c
+===================================================================
+--- linux-2.6.18.orig/fs/ext3/ioctl.c
++++ linux-2.6.18/fs/ext3/ioctl.c
+@@ -15,6 +15,7 @@
+ #include <linux/smp_lock.h>
+ #include <asm/uaccess.h>
+ #include <linux/namei.h>
++#include "fiemap.h"
+ int ext3_ioctl (struct inode * inode, struct file * filp, unsigned int cmd,
+               unsigned long arg)
+@@ -272,6 +272,9 @@ flags_err:
+               return err;
+       }
++      case EXT3_IOC_FIEMAP: {
++              return ext3_fiemap(inode, filp, cmd, arg);
++      }
+       default:
+Index: linux-2.6.18/include/linux/ext3_fs.h
+===================================================================
+--- linux-2.6.18.orig/include/linux/ext3_fs.h
++++ linux-2.6.18/include/linux/ext3_fs.h
+@@ -249,7 +249,6 @@ struct ext3_new_group_data {
+       __u32 free_blocks_count;
+ };
+-
+ /*
+  * ioctl commands
+  */
+@@ -257,15 +256,16 @@ struct ext3_new_group_data {
+ #define       EXT3_IOC_SETFLAGS               FS_IOC_SETFLAGS
+ #define       EXT3_IOC_GETVERSION             _IOR('f', 3, long)
+ #define       EXT3_IOC_SETVERSION             _IOW('f', 4, long)
+-#define EXT3_IOC_GROUP_EXTEND         _IOW('f', 7, unsigned long)
++#define       EXT3_IOC_GETRSVSZ               _IOR('f', 5, long)
++#define       EXT3_IOC_SETRSVSZ               _IOW('f', 6, long)
++#define       EXT3_IOC_GROUP_EXTEND           _IOW('f', 7, unsigned long)
+ #define EXT3_IOC_GROUP_ADD            _IOW('f', 8,struct ext3_new_group_input)
++#define       EXT3_IOC_FIEMAP                 _IOWR('f', 10, struct fiemap)
+ #define       EXT3_IOC_GETVERSION_OLD         FS_IOC_GETVERSION
+ #define       EXT3_IOC_SETVERSION_OLD         FS_IOC_SETVERSION
+ #ifdef CONFIG_JBD_DEBUG
+ #define EXT3_IOC_WAIT_FOR_READONLY    _IOR('f', 99, long)
+ #endif
+-#define EXT3_IOC_GETRSVSZ             _IOR('f', 5, long)
+-#define EXT3_IOC_SETRSVSZ             _IOW('f', 6, long)
+ /*
+  * ioctl commands in 32 bit emulation
+@@ -1117,6 +1117,8 @@ ext3_get_blocks_wrap(handle_t *handle, s
+               bh->b_size = (ret << inode->i_blkbits);
+       return ret;
+ }
++extern int ext3_fiemap(struct inode *, struct file *, unsigned int,
++                     unsigned long);
+ #endif        /* __KERNEL__ */
+Index: linux-2.6.18/include/linux/ext3_extents.h
+===================================================================
+--- linux-2.6.18.orig/include/linux/ext3_extents.h
++++ linux-2.6.18/include/linux/ext3_extents.h
+@@ -142,8 +142,9 @@ struct ext3_ext_path {
+  * callback must return valid extent (passed or newly created)
+  */
+ typedef int (*ext_prepare_callback)(struct inode *, struct ext3_ext_path *,
+-                                      struct ext3_ext_cache *,
+-                                      void *);
++                                  struct ext3_ext_cache *,
++                                  struct ext3_extent *, void *);
++#define HAVE_EXT_PREPARE_CB_EXTENT
+ #define EXT_CONTINUE  0
+ #define EXT_BREAK     1
+@@ -152,6 +152,26 @@ typedef int (*ext_prepare_callback)(stru
+ #define EXT_MAX_BLOCK 0xffffffff
++/*
++ * EXT_INIT_MAX_LEN is the maximum number of blocks we can have in an
++ * initialized extent. This is 2^15 and not (2^16 - 1), since we use the
++ * MSB of the ee_len field in the extent data structure to signify if this
++ * particular extent is an initialized extent or an uninitialized (i.e.,
++ * preallocated) one.
++ * EXT_UNINIT_MAX_LEN is the maximum number of blocks we can have in an
++ * uninitialized extent.
++ * If ee_len is <= 0x8000, it is an initialized extent. Otherwise, it is an
++ * uninitialized one. In other words, if the MSB of ee_len is set, it is an
++ * uninitialized extent, with only one special scenario when ee_len = 0x8000.
++ * In this case we cannot have an uninitialized extent of zero length and
++ * thus we treat it as a special case of an initialized extent of length 0x8000.
++ * This way we get better extent-to-group alignment for initialized extents.
++ * Hence, the maximum number of blocks we can have in an *initialized*
++ * extent is 2^15 (32768) and in an *uninitialized* extent is 2^15-1 (32767).
++ */
++#define EXT_INIT_MAX_LEN      (1UL << 15)
++#define EXT_UNINIT_MAX_LEN    (EXT_INIT_MAX_LEN - 1)
++
+ #define EXT_FLAGS_CLR_UNKNOWN 0x7  /* Flags cleared on modification */
+ #define EXT_HDR_GEN_BITS      24
+ #define EXT_HDR_GEN_MASK      ((1 << EXT_HDR_GEN_BITS) - 1)
+@@ -219,6 +239,13 @@ ext3_ext_invalidate_cache(struct inode *
+       EXT3_I(inode)->i_cached_extent.ec_type = EXT3_EXT_CACHE_NO;
+ }
++static inline int ext3_ext_is_uninitialized(struct ext3_extent *ext)
++{
++      /* Extent with ee_len of 0x8000 is treated as an initialized extent */
++      return (le16_to_cpu(ext->ee_len) > EXT_INIT_MAX_LEN);
++}
++
++
+ extern int ext3_ext_search_left(struct inode *, struct ext3_ext_path *, unsigned long *, unsigned long *);
+ extern int ext3_ext_search_right(struct inode *, struct ext3_ext_path *, unsigned long *, unsigned long *);
+ extern int ext3_extent_tree_init(handle_t *, struct inode *);
+Index: linux-2.6.18/fs/ext3/extents.c
+===================================================================
+--- linux-2.6.18.orig/fs/ext3/extents.c
++++ linux-2.6.18/fs/ext3/extents.c
+@@ -42,7 +42,7 @@
+ #include <linux/slab.h>
+ #include <linux/ext3_extents.h>
+ #include <asm/uaccess.h>
+-
++#include "fiemap.h"
+ static handle_t *ext3_ext_journal_restart(handle_t *handle, int needed)
+ {
+@@ -1477,7 +1477,7 @@ int ext3_ext_walk_space(struct inode *in
+               }
+               BUG_ON(cbex.ec_len == 0);
+-              err = func(inode, path, &cbex, cbdata);
++              err = func(inode, path, &cbex, ex, cbdata);
+               ext3_ext_drop_refs(path);
+               if (err < 0)
+@@ -2289,6 +2289,151 @@ int ext3_ext_writepage_trans_blocks(stru
+       return needed;
+ }
++struct fiemap_internal {
++      struct fiemap           *fiemap_s;
++      struct fiemap_extent    fm_extent;
++      size_t                  tot_mapping_len;
++      char                    *cur_ext_ptr;
++      int                     current_extent;
++      int                     err;
++};
++
++/*
++ * Callback function called for each extent to gather fiemap information.
++ */
++int ext3_ext_fiemap_cb(struct inode *inode, struct ext3_ext_path *path,
++                     struct ext3_ext_cache *newex, struct ext3_extent *ex,
++                     void *data)
++{
++      struct fiemap_internal *fiemap_i = data;
++      struct fiemap *fiemap_s = fiemap_i->fiemap_s;
++      struct fiemap_extent *fm_extent = &fiemap_i->fm_extent;
++      int current_extent = fiemap_i->current_extent;
++      unsigned long blksize_bits = inode->i_sb->s_blocksize_bits;
++
++      /*
++       * ext3_ext_walk_space returns a hole for extents that have not been
++       * allocated yet.
++       */
++      if (((u64)(newex->ec_block + newex->ec_len) << blksize_bits >=
++           inode->i_size) && !ext3_ext_is_uninitialized(ex) &&
++          newex->ec_type == EXT3_EXT_CACHE_GAP)
++              return EXT_BREAK;
++
++      /*
++       * We only need to return number of extents.
++       */
++      if (fiemap_s->fm_flags & FIEMAP_FLAG_NUM_EXTENTS)
++              goto count_extents;
++
++      if (current_extent >= fiemap_s->fm_extent_count)
++              return EXT_BREAK;
++
++      memset(fm_extent, 0, sizeof(*fm_extent));
++      fm_extent->fe_offset = (__u64)newex->ec_start << blksize_bits;
++      fm_extent->fe_length = (__u64)newex->ec_len << blksize_bits;
++      fiemap_i->tot_mapping_len += fm_extent->fe_length;
++
++      if (newex->ec_type == EXT3_EXT_CACHE_GAP)
++              fm_extent->fe_flags |= FIEMAP_EXTENT_HOLE;
++
++      if (ext3_ext_is_uninitialized(ex))
++              fm_extent->fe_flags |= (FIEMAP_EXTENT_DELALLOC |
++                                      FIEMAP_EXTENT_UNMAPPED);
++
++      /*
++       * Mark this fiemap_extent as FIEMAP_EXTENT_EOF if it's past the end
++       * of file.
++       */
++      if ((u64)(newex->ec_block + newex->ec_len) << blksize_bits >=
++                                                              inode->i_size)
++              fm_extent->fe_flags |= FIEMAP_EXTENT_EOF;
++
++      if (!copy_to_user(fiemap_i->cur_ext_ptr, fm_extent,
++                        sizeof(struct fiemap_extent))) {
++              fiemap_i->cur_ext_ptr += sizeof(struct fiemap_extent);
++      } else {
++              fiemap_i->err = -EFAULT;
++              return EXT_BREAK;
++      }
++
++count_extents:
++      fiemap_i->current_extent++;
++
++      /*
++       * Stop if we are beyond the requested mapping size, but return the
++       * complete last extent.
++       */
++      if ((u64)(newex->ec_block + newex->ec_len) << blksize_bits >=
++          fiemap_s->fm_length)
++              return EXT_BREAK;
++
++      return EXT_CONTINUE;
++}
++
++int ext3_fiemap(struct inode *inode, struct file *filp, unsigned int cmd,
++              unsigned long arg)
++{
++      struct fiemap *fiemap_s;
++      struct fiemap_internal fiemap_i;
++      struct fiemap_extent *last_extent;
++      ext3_fsblk_t start_blk;
++      int err = 0;
++
++      if (!(EXT3_I(inode)->i_flags & EXT3_EXTENTS_FL))
++              return -EOPNOTSUPP;
++
++      fiemap_s = kmalloc(sizeof(*fiemap_s), GFP_KERNEL);
++      if (fiemap_s == NULL)
++              return -ENOMEM;
++      if (copy_from_user(fiemap_s, (struct fiemap __user *)arg,
++                         sizeof(*fiemap_s))) {
++              err = -EFAULT;
++              goto out;
++      }
++
++      if (fiemap_s->fm_flags & FIEMAP_FLAG_INCOMPAT) {
++              err = -EOPNOTSUPP;
++              goto out;
++      }
++
++      if (fiemap_s->fm_flags & FIEMAP_FLAG_SYNC)
++              ext3_sync_file(filp, filp->f_dentry, 1);
++
++      start_blk = (fiemap_s->fm_start + inode->i_sb->s_blocksize - 1) >>
++                                              inode->i_sb->s_blocksize_bits;
++      fiemap_i.fiemap_s = fiemap_s;
++      fiemap_i.tot_mapping_len = 0;
++      fiemap_i.cur_ext_ptr = (char *)(arg + sizeof(*fiemap_s));
++      fiemap_i.current_extent = 0;
++      fiemap_i.err = 0;
++
++      /*
++       * Walk the extent tree gathering extent information
++       */
++      mutex_lock(&EXT3_I(inode)->truncate_mutex);
++      err = ext3_ext_walk_space(inode, start_blk, EXT_MAX_BLOCK - start_blk,
++                                ext3_ext_fiemap_cb, &fiemap_i);
++      mutex_unlock(&EXT3_I(inode)->truncate_mutex);
++      if (err == 0)
++              err = fiemap_i.err;
++      if (err)
++              goto out;
++
++      fiemap_s->fm_extent_count = fiemap_i.current_extent;
++      fiemap_s->fm_length = fiemap_i.tot_mapping_len;
++      if (fiemap_i.current_extent != 0 &&
++          !(fiemap_s->fm_flags & FIEMAP_FLAG_NUM_EXTENTS)) {
++              last_extent = &fiemap_i.fm_extent;
++              last_extent->fe_flags |= FIEMAP_EXTENT_LAST;
++      }
++      if (copy_to_user((void __user *)arg, fiemap_s, sizeof(*fiemap_s)))
++              err = -EFAULT;
++out:
++      kfree(fiemap_s);
++      return err;
++}
++
+ EXPORT_SYMBOL(ext3_mark_inode_dirty);
+ EXPORT_SYMBOL(ext3_ext_invalidate_cache);
+ EXPORT_SYMBOL(ext3_ext_insert_extent);
+Index: linux-2.6.18/fs/ext3/fiemap.h
+===================================================================
+--- /dev/null
++++ linux-2.6.18/fs/ext3/fiemap.h
+@@ -0,0 +1,49 @@
++/*
++ * linux/fs/ext3/fiemap.h
++ *
++ * Copyright (C) 2007 Cluster File Systems, Inc
++ *
++ * Author: Kalpak Shah <kalpak@clusterfs.com>
++ */
++
++#ifndef _LINUX_EXT3_FIEMAP_H
++#define _LINUX_EXT3_FIEMAP_H
++
++struct fiemap_extent {
++      __u64   fe_offset; /* offset in bytes for the start of the extent */
++      __u64   fe_length; /* length in bytes for the extent */
++      __u32   fe_flags;  /* returned FIEMAP_EXTENT_* flags for the extent */
++      __u32   fe_lun;    /* logical device number for extent (starting at 0)*/
++};
++
++/*
++ * fiemap is not ext3-specific and should be moved into fs.h eventually.
++ */
++
++struct fiemap {
++      __u64   fm_start;        /* logical starting byte offset (in/out) */
++      __u64   fm_length;       /* logical length of map (in/out) */
++      __u32   fm_flags;        /* FIEMAP_FLAG_* flags for request (in/out) */
++      __u32   fm_extent_count; /* number of extents in fm_extents (in/out) */
++      __u64   fm_unused;
++      struct fiemap_extent    fm_extents[0];
++};
++
++#define       FIEMAP_FLAG_SYNC        0x00000001 /* sync file data before map */
++#define       FIEMAP_FLAG_HSM_READ    0x00000002 /* get data from HSM before map */
++#define       FIEMAP_FLAG_NUM_EXTENTS 0x00000004 /* return only number of extents */
++#define       FIEMAP_FLAG_INCOMPAT    0xff000000 /* error for unknown flags in here */
++
++#define       FIEMAP_EXTENT_HOLE      0x00000001 /* has no data or space allocation */
++#define       FIEMAP_EXTENT_UNWRITTEN 0x00000002 /* space allocated, but no data */
++#define       FIEMAP_EXTENT_UNMAPPED  0x00000004 /* has data but no space allocation*/
++#define       FIEMAP_EXTENT_ERROR     0x00000008 /* mapping error, errno in fe_start*/
++#define       FIEMAP_EXTENT_NO_DIRECT 0x00000010 /* cannot access data directly */
++#define       FIEMAP_EXTENT_LAST      0x00000020 /* last extent in the file */
++#define       FIEMAP_EXTENT_DELALLOC  0x00000040 /* has data but not yet written,
++                                          * must have EXTENT_UNKNOWN set */
++#define       FIEMAP_EXTENT_SECONDARY 0x00000080 /* data (also) in secondary storage,
++                                          * not in primary if EXTENT_UNKNOWN*/
++#define       FIEMAP_EXTENT_EOF       0x00000100 /* if fm_start+fm_len is beyond EOF*/
++
++#endif /* _LINUX_EXT3_FIEMAP_H */
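
For context, here is a hedged sketch of how a user-space program might call the ioctl this patch adds. It assumes a kernel carrying this patch and a local copy of the struct fiemap definitions from the fiemap.h above; note that this pre-standard ABI (fe_offset/fe_length, FIEMAP_FLAG_NUM_EXTENTS) is not the FIEMAP interface later merged upstream. The two-pass pattern follows the callback's handling of FIEMAP_FLAG_NUM_EXTENTS.

    /* Hedged usage sketch only -- assumes this patch's pre-standard ABI and
     * a local copy of the fiemap.h definitions shown above. */
    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>
    #include <fcntl.h>
    #include <unistd.h>
    #include <sys/ioctl.h>
    #include <linux/types.h>
    #include "fiemap.h"                /* local copy of the header above */

    #define EXT3_IOC_FIEMAP _IOWR('f', 10, struct fiemap)

    int main(int argc, char **argv)
    {
            struct fiemap probe, *fm;
            int fd, i;

            if (argc != 2 || (fd = open(argv[1], O_RDONLY)) < 0)
                    return 1;

            /* pass 1: ask only for the number of extents */
            memset(&probe, 0, sizeof(probe));
            probe.fm_length = ~0ULL;
            probe.fm_flags = FIEMAP_FLAG_NUM_EXTENTS;
            if (ioctl(fd, EXT3_IOC_FIEMAP, &probe) < 0)
                    return 1;

            /* pass 2: fetch that many fiemap_extent records, laid out
             * directly after struct fiemap as the kernel side expects */
            fm = calloc(1, sizeof(*fm) +
                        probe.fm_extent_count * sizeof(struct fiemap_extent));
            if (fm == NULL)
                    return 1;
            fm->fm_length = ~0ULL;
            fm->fm_extent_count = probe.fm_extent_count;
            if (ioctl(fd, EXT3_IOC_FIEMAP, fm) < 0)
                    return 1;

            for (i = 0; i < fm->fm_extent_count; i++)
                    printf("extent %d: offset %llu len %llu flags 0x%x\n", i,
                           (unsigned long long)fm->fm_extents[i].fe_offset,
                           (unsigned long long)fm->fm_extents[i].fe_length,
                           fm->fm_extents[i].fe_flags);
            free(fm);
            close(fd);
            return 0;
    }
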
diff --git a/ldiskfs/kernel_patches/patches/ext3-ialloc-2.6.22-vanilla.patch b/ldiskfs/kernel_patches/patches/ext3-ialloc-2.6.22-vanilla.patch
new file mode 100644 (file)
index 0000000..6ea00f3
--- /dev/null
@@ -0,0 +1,128 @@
+Index: linux-2.6.5-SLES9_SP1_BRANCH_2004111114454891/fs/ext3/ialloc.c
+===================================================================
+--- linux-2.6.5-SLES9_SP1_BRANCH_2004111114454891.orig/fs/ext3/ialloc.c        2005-05-16 14:10:54.000000000 -0600
++++ linux-2.6.5-SLES9_SP1_BRANCH_2004111114454891/fs/ext3/ialloc.c     2005-05-16 14:18:29.000000000 -0600
+@@ -352,13 +352,17 @@
+       return -1;
+ }
+-static int find_group_other(struct super_block *sb, struct inode *parent)
++static int find_group_other(struct super_block *sb, struct inode *parent,
++                          int mode)
+ {
+       int parent_group = EXT3_I(parent)->i_block_group;
++      struct ext3_sb_info *sbi = EXT3_SB(sb);
+       int ngroups = EXT3_SB(sb)->s_groups_count;
+       struct ext3_group_desc *desc;
+       struct buffer_head *bh;
+       int group, i;
++      int best_group = -1;
++      int avefreeb, freeb, best_group_freeb = 0;
+       /*
+        * Try to place the inode in its parent directory
+@@ -366,9 +370,9 @@
+       group = parent_group;
+       desc = ext3_get_group_desc (sb, group, &bh);
+       if (desc && le16_to_cpu(desc->bg_free_inodes_count) &&
+-                      le16_to_cpu(desc->bg_free_blocks_count))
++          (!S_ISREG(mode) || le16_to_cpu(desc->bg_free_blocks_count)))
+               return group;
+-
++      avefreeb = le32_to_cpu(sbi->s_es->s_free_blocks_count) / ngroups;
+       /*
+        * We're going to place this inode in a different blockgroup from its
+        * parent.  We want to cause files in a common directory to all land in
+@@ -381,33 +385,47 @@
+       group = (group + parent->i_ino) % ngroups;
+       /*
+-       * Use a quadratic hash to find a group with a free inode and some free
+-       * blocks.
++       * Use a quadratic hash to find a group with a free inode and
++       * at least the average number of free blocks.
+        */
+       for (i = 1; i < ngroups; i <<= 1) {
+               group += i;
+               if (group >= ngroups)
+                       group -= ngroups;
+               desc = ext3_get_group_desc (sb, group, &bh);
+-              if (desc && le16_to_cpu(desc->bg_free_inodes_count) &&
+-                              le16_to_cpu(desc->bg_free_blocks_count))
++              if (!desc || !desc->bg_free_inodes_count)
++                      continue;
++              if (!S_ISREG(mode))
++                      return group;
++              if (le16_to_cpu(desc->bg_free_blocks_count) >= avefreeb)
+                       return group;
+       }
+       /*
+-       * That failed: try linear search for a free inode, even if that group
+-       * has no free blocks.
++       * That failed: start from the last group used to allocate an
++       * inode, and try a linear search for a free inode and preferably
++       * free blocks.
+        */
+-      group = parent_group;
++      group = sbi->s_last_alloc_group;
++      if (group == -1)
++              group = parent_group;
++
+       for (i = 0; i < ngroups; i++) {
+               if (++group >= ngroups)
+                       group = 0;
+               desc = ext3_get_group_desc (sb, group, &bh);
+-              if (desc && le16_to_cpu(desc->bg_free_inodes_count))
+-                      return group;
++              if (!desc || !desc->bg_free_inodes_count)
++                      continue;
++              freeb = le16_to_cpu(desc->bg_free_blocks_count);
++              if (freeb > best_group_freeb) {
++                      best_group_freeb = freeb;
++                      best_group = group;
++                      if (freeb >= avefreeb || !S_ISREG(mode))
++                              break;
++              }
+       }
+-
+-      return -1;
++      sbi->s_last_alloc_group = best_group;
++      return best_group;
+ }
+ /*
+@@ -454,7 +472,7 @@
+               else
+                       group = find_group_orlov(sb, dir);
+       } else
+-              group = find_group_other(sb, dir);
++              group = find_group_other(sb, dir, mode);
+       err = -ENOSPC;
+       if (group == -1)
+Index: linux-2.6.5-SLES9_SP1_BRANCH_2004111114454891/fs/ext3/super.c
+===================================================================
+--- linux-2.6.5-SLES9_SP1_BRANCH_2004111114454891.orig/fs/ext3/super.c 2005-05-16 14:10:54.000000000 -0600
++++ linux-2.6.5-SLES9_SP1_BRANCH_2004111114454891/fs/ext3/super.c      2005-05-16 14:17:14.000000000 -0600
+@@ -1297,6 +1297,7 @@
+       percpu_counter_init(&sbi->s_dirs_counter);
+       bgl_lock_init(&sbi->s_blockgroup_lock);
++      sbi->s_last_alloc_group = -1;
+       for (i = 0; i < db_count; i++) {
+               block = descriptor_loc(sb, logic_sb_block, i);
+               sbi->s_group_desc[i] = sb_bread(sb, block);
+Index: linux-2.6.5-SLES9_SP1_BRANCH_2004111114454891/include/linux/ext3_fs_sb.h
+===================================================================
+--- linux-2.6.5-SLES9_SP1_BRANCH_2004111114454891.orig/include/linux/ext3_fs_sb.h      2005-05-16 14:10:54.000000000 -0600
++++ linux-2.6.5-SLES9_SP1_BRANCH_2004111114454891/include/linux/ext3_fs_sb.h   2005-05-16 14:17:14.000000000 -0600
+@@ -59,6 +59,8 @@
+       struct percpu_counter s_freeinodes_counter;
+       struct percpu_counter s_dirs_counter;
+       struct blockgroup_lock s_blockgroup_lock;
++      /* Last group used to allocate inode */
++      int s_last_alloc_group;
+       /* root of the per fs reservation window tree */
+       spinlock_t s_rsv_window_lock;
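
The ialloc patch above changes find_group_other() to probe the parent's group first, then a quadratic hash requiring at least average free blocks, and finally a linear sweep that remembers the group with the most free blocks. To visualize the probe order, here is a hedged standalone model in plain C; ngroups, the parent group, and the inode number are invented, and the final linear sweep is omitted.

    /* Standalone model of the quadratic-hash phase of the patched
     * find_group_other(). Illustrative values only; the linear
     * best-free-blocks sweep that follows in the real code is omitted. */
    #include <stdio.h>

    int main(void)
    {
            int ngroups = 16;       /* assumed number of block groups */
            int parent_group = 5;   /* assumed parent directory's group */
            unsigned long ino = 42; /* assumed parent->i_ino */
            int group, i;

            group = (int)((parent_group + ino) % ngroups);
            printf("quadratic probes:");
            for (i = 1; i < ngroups; i <<= 1) {
                    group += i;
                    if (group >= ngroups)
                            group -= ngroups;
                    printf(" %d", group);
            }
            printf("\n");   /* prints: quadratic probes: 0 2 6 14 */
            return 0;
    }
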
diff --git a/ldiskfs/kernel_patches/patches/ext3-mballoc3-2.6.22.patch b/ldiskfs/kernel_patches/patches/ext3-mballoc3-2.6.22.patch
new file mode 100644 (file)
index 0000000..82a9855
--- /dev/null
@@ -0,0 +1,603 @@
+Index: linux-2.6.18.8/include/linux/ext3_fs_i.h
+===================================================================
+--- linux-2.6.18.8.orig/include/linux/ext3_fs_i.h      2007-07-17 09:18:14.000000000 +0200
++++ linux-2.6.18.8/include/linux/ext3_fs_i.h   2007-07-17 09:18:53.000000000 +0200
+@@ -154,6 +154,10 @@ struct ext3_inode_info {
+       struct inode vfs_inode;
+       struct ext3_ext_cache i_cached_extent;
++
++      /* mballoc */
++      struct list_head i_prealloc_list;
++      spinlock_t i_prealloc_lock;
+ };
+ #endif        /* _LINUX_EXT3_FS_I */
+Index: linux-2.6.18.8/include/linux/ext3_fs_sb.h
+===================================================================
+--- linux-2.6.18.8.orig/include/linux/ext3_fs_sb.h     2007-07-17 09:18:14.000000000 +0200
++++ linux-2.6.18.8/include/linux/ext3_fs_sb.h  2007-07-17 09:18:53.000000000 +0200
+@@ -21,8 +21,15 @@
+ #include <linux/wait.h>
+ #include <linux/blockgroup_lock.h>
+ #include <linux/percpu_counter.h>
++#include <linux/list.h>
+ #endif
+ #include <linux/rbtree.h>
++#include <linux/proc_fs.h>
++
++struct ext3_buddy_group_blocks;
++struct ext3_locality_group;
++struct ext3_mb_history;
++#define EXT3_BB_MAX_BLOCKS
+ /*
+  * third extended-fs super-block data in memory
+Index: linux-2.6.18.8/include/linux/ext3_fs.h
+===================================================================
+--- linux-2.6.18.8.orig/include/linux/ext3_fs.h        2007-07-17 09:18:14.000000000 +0200
++++ linux-2.6.18.8/include/linux/ext3_fs.h     2007-07-17 09:18:53.000000000 +0200
+@@ -17,6 +17,7 @@
+ #define _LINUX_EXT3_FS_H
+ #include <linux/types.h>
++#include <linux/ext3_fs_i.h>
+ #include <linux/magic.h>
+ /*
+@@ -67,12 +68,12 @@
+ struct ext3_allocation_request {
+       struct inode *inode;    /* target inode for block we're allocating */
+-      unsigned long logical;  /* logical block in target inode */
+-      unsigned long goal;     /* phys. target (a hint) */
+-      unsigned long lleft;    /* the closest logical allocated block to the left */
+-      unsigned long pleft;    /* phys. block for ^^^ */
+-      unsigned long lright;   /* the closest logical allocated block to the right */
+-      unsigned long pright;   /* phys. block for ^^^ */
++      ext3_fsblk_t logical;   /* logical block in target inode */
++      ext3_fsblk_t goal;      /* phys. target (a hint) */
++      ext3_fsblk_t lleft;     /* the closest logical allocated block to the left */
++      ext3_fsblk_t pleft;     /* phys. block for ^^^ */
++      ext3_fsblk_t lright;    /* the closest logical allocated block to the right */
++      ext3_fsblk_t pright;    /* phys. block for ^^^ */
+       unsigned long len;      /* how many blocks we want to allocate */
+       unsigned long flags;    /* flags. see above EXT3_MB_HINT_* */
+ };
+@@ -400,6 +401,7 @@ struct ext3_inode {
+ #define EXT3_MOUNT_IOPEN_NOPRIV               0x800000/* Make iopen world-readable */
+ #define EXT3_MOUNT_EXTENTS            0x2000000/* Extents support */
+ #define EXT3_MOUNT_EXTDEBUG             0x4000000/* Extents debug */
++#define EXT3_MOUNT_MBALLOC            0x8000000/* Buddy allocation support */
+ /* Compatibility, for having both ext2_fs.h and ext3_fs.h included at once */
+ #ifndef clear_opt
+@@ -787,12 +789,12 @@ ext3_group_first_block_no(struct super_b
+ /* balloc.c */
+ extern int ext3_bg_has_super(struct super_block *sb, int group);
+ extern unsigned long ext3_bg_num_gdb(struct super_block *sb, int group);
+-extern ext3_fsblk_t ext3_new_block (handle_t *handle, struct inode *inode,
++extern ext3_fsblk_t ext3_new_block_old (handle_t *handle, struct inode *inode,
+                       ext3_fsblk_t goal, int *errp);
+-extern ext3_fsblk_t ext3_new_blocks (handle_t *handle, struct inode *inode,
++extern ext3_fsblk_t ext3_new_blocks_old (handle_t *handle, struct inode *inode,
+                       ext3_fsblk_t goal, unsigned long *count, int *errp);
+ extern void ext3_free_blocks (handle_t *handle, struct inode *inode,
+-                      ext3_fsblk_t block, unsigned long count);
++                      ext3_fsblk_t block, unsigned long count, int metadata);
+ extern void ext3_free_blocks_sb (handle_t *handle, struct super_block *sb,
+                                ext3_fsblk_t block, unsigned long count,
+                               unsigned long *pdquot_freed_blocks);
+@@ -836,15 +838,45 @@ extern long ext3_mb_stats;
+ extern long ext3_mb_max_to_scan;
+ extern int ext3_mb_init(struct super_block *, int);
+ extern int ext3_mb_release(struct super_block *);
+-extern unsigned long ext3_mb_new_blocks(handle_t *, struct ext3_allocation_request *, int *);
++extern ext3_fsblk_t ext3_new_block(handle_t *handle, struct inode *inode,
++                                 ext3_fsblk_t goal, int *errp);
++extern ext3_fsblk_t ext3_mb_new_blocks(handle_t *,
++                              struct ext3_allocation_request *, int *);
+ extern int ext3_mb_reserve_blocks(struct super_block *, int);
+ extern void ext3_mb_release_blocks(struct super_block *, int);
+ extern void ext3_mb_release_blocks(struct super_block *, int);
+ extern void ext3_mb_discard_inode_preallocations(struct inode *);
+ extern int __init init_ext3_proc(void);
+ extern void exit_ext3_proc(void);
+-extern void ext3_mb_free_blocks(handle_t *, struct inode *, unsigned long, unsigned long, int, int *);
++extern void ext3_mb_free_blocks(handle_t *, struct inode *, unsigned long,
++                              unsigned long, int, unsigned long *);
++
++static inline ext3_fsblk_t ext3_new_blocks(handle_t *handle,
++                                         struct inode *inode,
++                                         ext3_fsblk_t goal,
++                                         unsigned long *count, int *errp)
++{
++      struct ext3_allocation_request ar;
++      ext3_fsblk_t ret;
++      if (!test_opt(inode->i_sb, MBALLOC)) {
++              ret = ext3_new_blocks_old(handle, inode, goal, count, errp);
++              return ret;
++      }
++
++      ar.inode = inode;
++      ar.goal = goal;
++      ar.len = *count;
++      ar.logical = 0;
++      ar.lleft = 0;
++      ar.pleft = 0;
++      ar.lright = 0;
++      ar.pright = 0;
++      ar.flags = 0;
++      ret = ext3_mb_new_blocks(handle, &ar, errp);
++      *count = ar.len;
++      return ret;
++}
+ /* inode.c */
+ int ext3_forget(handle_t *handle, int is_metadata, struct inode *inode,
+Index: linux-2.6.18.8/fs/ext3/super.c
+===================================================================
+--- linux-2.6.18.8.orig/fs/ext3/super.c        2007-07-17 09:18:14.000000000 +0200
++++ linux-2.6.18.8/fs/ext3/super.c     2007-07-17 09:18:53.000000000 +0200
+@@ -688,6 +688,7 @@ enum {
+       Opt_iopen, Opt_noiopen, Opt_iopen_nopriv,
+       Opt_grpquota,
+       Opt_extents, Opt_noextents, Opt_extdebug,
++      Opt_mballoc, Opt_nomballoc, Opt_stripe,
+ };
+ static match_table_t tokens = {
+@@ -743,6 +744,9 @@ static match_table_t tokens = {
+       {Opt_extents, "extents"},
+       {Opt_noextents, "noextents"},
+       {Opt_extdebug, "extdebug"},
++      {Opt_mballoc, "mballoc"},
++      {Opt_nomballoc, "nomballoc"},
++      {Opt_stripe, "stripe=%u"},
+       {Opt_err, NULL},
+       {Opt_resize, "resize"},
+ };
+@@ -1096,6 +1100,19 @@ clear_qf_name:
+               case Opt_extdebug:
+                       set_opt (sbi->s_mount_opt, EXTDEBUG);
+                       break;
++              case Opt_mballoc:
++                      set_opt(sbi->s_mount_opt, MBALLOC);
++                      break;
++              case Opt_nomballoc:
++                      clear_opt(sbi->s_mount_opt, MBALLOC);
++                      break;
++              case Opt_stripe:
++                      if (match_int(&args[0], &option))
++                              return 0;
++                      if (option < 0)
++                              return 0;
++                      sbi->s_stripe = option;
++                      break;
+               default:
+                       printk (KERN_ERR
+                               "EXT3-fs: Unrecognized mount option \"%s\" "
+@@ -1826,6 +1843,7 @@ static int ext3_fill_super (struct super
+               "writeback");
+       ext3_ext_init(sb);
++      ext3_mb_init(sb, needs_recovery);
+       lock_kernel();
+       return 0;
+Index: linux-2.6.18.8/fs/ext3/extents.c
+===================================================================
+--- linux-2.6.18.8.orig/fs/ext3/extents.c      2007-07-17 09:18:14.000000000 +0200
++++ linux-2.6.18.8/fs/ext3/extents.c   2007-07-17 09:18:53.000000000 +0200
+@@ -795,7 +795,7 @@ cleanup:
+               for (i = 0; i < depth; i++) {
+                       if (!ablocks[i])
+                               continue;
+-                      ext3_free_blocks(handle, inode, ablocks[i], 1);
++                      ext3_free_blocks(handle, inode, ablocks[i], 1, 1);
+               }
+       }
+       kfree(ablocks);
+@@ -1613,7 +1613,7 @@ int ext3_ext_rm_idx(handle_t *handle, st
+       ext_debug(inode, "index is empty, remove it, free block %lu\n", leaf);
+       bh = sb_find_get_block(inode->i_sb, leaf);
+       ext3_forget(handle, 1, inode, bh, leaf);
+-      ext3_free_blocks(handle, inode, leaf, 1);
++      ext3_free_blocks(handle, inode, leaf, 1, 1);
+       return err;
+ }
+@@ -1672,7 +1672,7 @@ static int ext3_remove_blocks(handle_t *
+                               unsigned long from, unsigned long to)
+ {
+       struct buffer_head *bh;
+-      int i;
++      int i, metadata = 0;
+ #ifdef EXTENTS_STATS
+       {
+@@ -1690,6 +1690,8 @@ static int ext3_remove_blocks(handle_t *
+               spin_unlock(&sbi->s_ext_stats_lock);
+       }
+ #endif
++      if (S_ISDIR(inode->i_mode) || S_ISLNK(inode->i_mode))
++              metadata = 1;
+       if (from >= le32_to_cpu(ex->ee_block)
+           && to == le32_to_cpu(ex->ee_block) + le16_to_cpu(ex->ee_len) - 1) {
+               /* tail removal */
+@@ -1701,7 +1703,7 @@ static int ext3_remove_blocks(handle_t *
+                       bh = sb_find_get_block(inode->i_sb, start + i);
+                       ext3_forget(handle, 0, inode, bh, start + i);
+               }
+-              ext3_free_blocks(handle, inode, start, num);
++              ext3_free_blocks(handle, inode, start, num, metadata);
+       } else if (from == le32_to_cpu(ex->ee_block)
+                  && to <= le32_to_cpu(ex->ee_block) + le16_to_cpu(ex->ee_len) - 1) {
+               printk("strange request: removal %lu-%lu from %u:%u\n",
+@@ -2034,7 +2036,7 @@ int ext3_ext_get_blocks(handle_t *handle
+       struct ext3_extent newex, *ex;
+       int goal, newblock, err = 0, depth;
+       unsigned long allocated = 0;
+-      unsigned long next;
++      struct ext3_allocation_request ar;
+       __clear_bit(BH_New, &bh_result->b_state);
+       ext_debug(inode, "blocks %d/%lu requested for inode %u\n", (int) iblock,
+@@ -2116,18 +2118,33 @@ int ext3_ext_get_blocks(handle_t *handle
+       if (S_ISREG(inode->i_mode) && (!EXT3_I(inode)->i_block_alloc_info))
+               ext3_init_block_alloc_info(inode);
++      /* find neighbour allocated blocks */
++      ar.lleft = iblock;
++      err = ext3_ext_search_left(inode, path, &ar.lleft, &ar.pleft);
++      if (err)
++              goto out2;
++      ar.lright = iblock;
++      err = ext3_ext_search_right(inode, path, &ar.lright, &ar.pright);
++      if (err)
++              goto out2;
++
+       /* find next allocated block so that we know how many
+        * blocks we can allocate without overlapping the next extent */
+-      BUG_ON(iblock < le32_to_cpu(ex->ee_block) + le16_to_cpu(ex->ee_len));
+-      next = ext3_ext_next_allocated_block(path);
+-      BUG_ON(next <= iblock);
+-      allocated = next - iblock;
++      BUG_ON(ar.pright != 0 && ar.lright <= iblock);
++      if (ar.pright == 0)
++              allocated = EXT_MAX_BLOCK - iblock;
++      else
++              allocated = ar.lright - iblock;
+       if (allocated > max_blocks)
+               allocated = max_blocks;
+       /* allocate new block */
+-      goal = ext3_ext_find_goal(inode, path, iblock);
+-      newblock = ext3_new_blocks(handle, inode, goal, &allocated, &err);
++      ar.inode = inode;
++      ar.goal = ext3_ext_find_goal(inode, path, iblock);
++      ar.logical = iblock;
++      ar.len = allocated;
++      ar.flags = EXT3_MB_HINT_DATA;
++      newblock = ext3_mb_new_blocks(handle, &ar, &err);
+       if (!newblock)
+               goto out2;
+       ext_debug(inode, "allocate new block: goal %d, found %d/%lu\n",
+@@ -2137,12 +2154,16 @@ int ext3_ext_get_blocks(handle_t *handle
+       newex.ee_block = cpu_to_le32(iblock);
+       newex.ee_start = cpu_to_le32(newblock);
+       newex.ee_start_hi = 0;
+-      newex.ee_len = cpu_to_le16(allocated);
++      newex.ee_len = cpu_to_le16(ar.len);
+       err = ext3_ext_insert_extent(handle, inode, path, &newex);
+       if (err) {
+               /* free data blocks we just allocated */
+-              ext3_free_blocks(handle, inode, le32_to_cpu(newex.ee_start),
+-                              le16_to_cpu(newex.ee_len));
++              /* not a good idea to call discard here directly,
++               * but otherwise we'd need to call it on every free() */
++              ext3_mb_discard_inode_preallocations(inode);
++              ext3_free_blocks(handle, inode, newex.ee_start,
++                               newex.ee_len, 0);
++
+               goto out2;
+       }
+@@ -2151,6 +2172,7 @@ int ext3_ext_get_blocks(handle_t *handle
+       /* previous routine could use block we allocated */
+       newblock = le32_to_cpu(newex.ee_start);
++      allocated = le16_to_cpu(newex.ee_len);
+       __set_bit(BH_New, &bh_result->b_state);
+       ext3_ext_put_in_cache(inode, iblock, allocated, newblock,
+@@ -2202,6 +2224,9 @@ void ext3_ext_truncate(struct inode * in
+       mutex_lock(&EXT3_I(inode)->truncate_mutex);
+       ext3_ext_invalidate_cache(inode);
++      /* it's important to discard preallocations under truncate_mutex */
++      ext3_mb_discard_inode_preallocations(inode);
++
+       /*
+        * TODO: optimization is possible here
+        * probably we don't need scanning at all,
+Index: linux-2.6.18.8/fs/ext3/Makefile
+===================================================================
+--- linux-2.6.18.8.orig/fs/ext3/Makefile       2007-07-17 09:18:14.000000000 +0200
++++ linux-2.6.18.8/fs/ext3/Makefile    2007-07-17 09:18:53.000000000 +0200
+@@ -5,7 +5,7 @@
+ ext3-y        := balloc.o bitmap.o dir.o file.o fsync.o ialloc.o inode.o iopen.o \
+          ioctl.o namei.o super.o symlink.o hash.o resize.o ext3_jbd.o \
+-         extents.o
++         extents.o mballoc.o
+ ext3-$(CONFIG_EXT3_FS_XATTR)   += xattr.o xattr_user.o xattr_trusted.o
+ ext3-$(CONFIG_EXT3_FS_POSIX_ACL) += acl.o
+Index: linux-2.6.18.8/fs/ext3/xattr.c
+===================================================================
+--- linux-2.6.18.8.orig/fs/ext3/xattr.c        2007-02-24 00:52:30.000000000 +0100
++++ linux-2.6.18.8/fs/ext3/xattr.c     2007-07-17 09:18:53.000000000 +0200
+@@ -484,7 +484,7 @@ ext3_xattr_release_block(handle_t *handl
+               ea_bdebug(bh, "refcount now=0; freeing");
+               if (ce)
+                       mb_cache_entry_free(ce);
+-              ext3_free_blocks(handle, inode, bh->b_blocknr, 1);
++              ext3_free_blocks(handle, inode, bh->b_blocknr, 1, 1);
+               get_bh(bh);
+               ext3_forget(handle, 1, inode, bh, bh->b_blocknr);
+       } else {
+@@ -805,7 +805,7 @@ inserted:
+                       new_bh = sb_getblk(sb, block);
+                       if (!new_bh) {
+ getblk_failed:
+-                              ext3_free_blocks(handle, inode, block, 1);
++                              ext3_free_blocks(handle, inode, block, 1, 1);
+                               error = -EIO;
+                               goto cleanup;
+                       }
+Index: linux-2.6.18.8/fs/ext3/balloc.c
+===================================================================
+--- linux-2.6.18.8.orig/fs/ext3/balloc.c       2007-02-24 00:52:30.000000000 +0100
++++ linux-2.6.18.8/fs/ext3/balloc.c    2007-07-17 09:18:53.000000000 +0200
+@@ -79,7 +79,7 @@ struct ext3_group_desc * ext3_get_group_
+  *
+  * Return buffer_head on success or NULL in case of failure.
+  */
+-static struct buffer_head *
++struct buffer_head *
+ read_block_bitmap(struct super_block *sb, unsigned int block_group)
+ {
+       struct ext3_group_desc * desc;
+@@ -294,6 +294,8 @@ void ext3_discard_reservation(struct ino
+       struct ext3_reserve_window_node *rsv;
+       spinlock_t *rsv_lock = &EXT3_SB(inode->i_sb)->s_rsv_window_lock;
++      ext3_mb_discard_inode_preallocations(inode);
++
+       if (!block_i)
+               return;
+@@ -490,19 +492,24 @@
+  * @count:            number of blocks to count
+  */
+ void ext3_free_blocks(handle_t *handle, struct inode *inode,
+-                      ext3_fsblk_t block, unsigned long count)
++                      ext3_fsblk_t block, unsigned long count, int metadata)
+ {
+-      struct super_block * sb;
+-      unsigned long dquot_freed_blocks;
++      struct super_block *sb;
++      unsigned long freed;
++
++      /* this isn't the right place to decide whether a block is metadata;
++       * inode.c/extents.c know better, but for safety ... */
++      if (S_ISDIR(inode->i_mode) || S_ISLNK(inode->i_mode) ||
++                      ext3_should_journal_data(inode))
++              metadata = 1;
+       sb = inode->i_sb;
+-      if (!sb) {
+-              printk ("ext3_free_blocks: nonexistent device");
+-              return;
+-      }
+-      ext3_free_blocks_sb(handle, sb, block, count, &dquot_freed_blocks);
+-      if (dquot_freed_blocks)
+-              DQUOT_FREE_BLOCK(inode, dquot_freed_blocks);
++      if (!test_opt(sb, MBALLOC) || !EXT3_SB(sb)->s_group_info)
++              ext3_free_blocks_sb(handle, sb, block, count, &freed);
++      else
++              ext3_mb_free_blocks(handle, inode, block, count, metadata, &freed);
++      if (freed)
++              DQUOT_FREE_BLOCK(inode, freed);
+       return;
+ }
+@@ -1199,7 +1205,7 @@ int ext3_should_retry_alloc(struct super
+  * bitmap, and then for any free bit if that fails.
+  * This function also updates quota and i_blocks field.
+  */
+-ext3_fsblk_t ext3_new_blocks(handle_t *handle, struct inode *inode,
++ext3_fsblk_t ext3_new_blocks_old(handle_t *handle, struct inode *inode,
+                       ext3_fsblk_t goal, unsigned long *count, int *errp)
+ {
+       struct buffer_head *bitmap_bh = NULL;
+@@ -1463,7 +1469,7 @@ out:
+       return 0;
+ }
+-ext3_fsblk_t ext3_new_block(handle_t *handle, struct inode *inode,
++ext3_fsblk_t ext3_new_block_old(handle_t *handle, struct inode *inode,
+                       ext3_fsblk_t goal, int *errp)
+ {
+       unsigned long count = 1;
+Index: linux-2.6.18.8/fs/ext3/inode.c
+===================================================================
+--- linux-2.6.18.8.orig/fs/ext3/inode.c        2007-07-17 09:18:14.000000000 +0200
++++ linux-2.6.18.8/fs/ext3/inode.c     2007-07-17 09:18:53.000000000 +0200
+@@ -560,7 +560,7 @@ static int ext3_alloc_blocks(handle_t *h
+       return ret;
+ failed_out:
+       for (i = 0; i <index; i++)
+-              ext3_free_blocks(handle, inode, new_blocks[i], 1);
++              ext3_free_blocks(handle, inode, new_blocks[i], 1, 0);
+       return ret;
+ }
+@@ -659,9 +659,9 @@ failed:
+               ext3_journal_forget(handle, branch[i].bh);
+       }
+       for (i = 0; i <indirect_blks; i++)
+-              ext3_free_blocks(handle, inode, new_blocks[i], 1);
++              ext3_free_blocks(handle, inode, new_blocks[i], 1, 0);
+-      ext3_free_blocks(handle, inode, new_blocks[i], num);
++      ext3_free_blocks(handle, inode, new_blocks[i], num, 0);
+       return err;
+ }
+@@ -758,9 +758,10 @@ err_out:
+       for (i = 1; i <= num; i++) {
+               BUFFER_TRACE(where[i].bh, "call journal_forget");
+               ext3_journal_forget(handle, where[i].bh);
+-              ext3_free_blocks(handle,inode,le32_to_cpu(where[i-1].key),1);
++              ext3_free_blocks(handle,inode,le32_to_cpu(where[i-1].key), 1,
++                               0);
+       }
+-      ext3_free_blocks(handle, inode, le32_to_cpu(where[num].key), blks);
++      ext3_free_blocks(handle, inode, le32_to_cpu(where[num].key), blks, 0);
+       return err;
+ }
+@@ -1996,7 +1997,7 @@ static void ext3_clear_blocks(handle_t *
+               }
+       }
+-      ext3_free_blocks(handle, inode, block_to_free, count);
++      ext3_free_blocks(handle, inode, block_to_free, count, 0);
+ }
+ /**
+@@ -2169,7 +2170,7 @@ static void ext3_free_branches(handle_t 
+                               ext3_journal_test_restart(handle, inode);
+                       }
+-                      ext3_free_blocks(handle, inode, nr, 1);
++                      ext3_free_blocks(handle, inode, nr, 1, 1);
+                       if (parent_bh) {
+                               /*
+Index: linux-2.6.18.8/fs/ext3/mballoc.c
+===================================================================
+--- linux-2.6.18.8.orig/fs/ext3/mballoc.c      2007-07-17 09:18:14.000000000 +0200
++++ linux-2.6.18.8/fs/ext3/mballoc.c   2007-07-17 09:23:56.000000000 +0200
+@@ -350,8 +350,8 @@ struct ext3_prealloc_space {
+       spinlock_t              pa_lock;
+       atomic_t                pa_count;
+       unsigned                pa_deleted;
+-      unsigned long           pa_pstart;      /* phys. block */
+-      unsigned long           pa_lstart;      /* log. block */
++      ext3_fsblk_t            pa_pstart;      /* phys. block */
++      ext3_fsblk_t            pa_lstart;      /* log. block */
+       unsigned short          pa_len;         /* len of preallocated chunk */
+       unsigned short          pa_free;        /* how many blocks are free */
+       unsigned short          pa_linear;      /* consumed in one direction
+@@ -460,8 +460,8 @@ static struct proc_dir_entry *proc_root_
+ int ext3_create (struct inode *, struct dentry *, int, struct nameidata *);
+ struct buffer_head * read_block_bitmap(struct super_block *, unsigned int);
+-unsigned long ext3_new_blocks_old(handle_t *handle, struct inode *inode,
+-                      unsigned long goal, unsigned long *count, int *errp);
++ext3_fsblk_t ext3_new_blocks_old(handle_t *handle, struct inode *inode,
++                      ext3_fsblk_t goal, unsigned long *count, int *errp);
+ void ext3_mb_release_blocks(struct super_block *, int);
+ void ext3_mb_poll_new_transaction(struct super_block *, handle_t *);
+ void ext3_mb_free_committed_blocks(struct super_block *);
+@@ -479,7 +479,7 @@ int ext3_mb_destroy_per_dev_proc(struct 
+  * Calculate the block group number and offset, given a block number
+  */
+ static void ext3_get_group_no_and_offset(struct super_block *sb,
+-                                      unsigned long blocknr,
++                                      ext3_fsblk_t blocknr,
+                                       unsigned long *blockgrpp,
+                                       unsigned long *offsetp)
+ {
+@@ -517,12 +517,12 @@ ext3_is_group_locked(struct super_block 
+                                       &EXT3_GROUP_INFO(sb, group)->bb_state);
+ }
+-unsigned long ext3_grp_offs_to_block(struct super_block *sb,
++ext3_fsblk_t ext3_grp_offs_to_block(struct super_block *sb,
+                                       struct ext3_free_extent *fex)
+ {
+-      unsigned long block;
++      ext3_fsblk_t block;
+-      block = (unsigned long) fex->fe_group * EXT3_BLOCKS_PER_GROUP(sb)
++      block = (ext3_fsblk_t) fex->fe_group * EXT3_BLOCKS_PER_GROUP(sb)
+                       + fex->fe_start
+                       + le32_to_cpu(EXT3_SB(sb)->s_es->s_first_data_block);
+       return block;
+@@ -3174,7 +3174,7 @@ void ext3_mb_collect_stats(struct ext3_a
+ void ext3_mb_use_inode_pa(struct ext3_allocation_context *ac,
+                               struct ext3_prealloc_space *pa)
+ {
+-      unsigned long start, len;
++      ext3_fsblk_t start, len;
+       /* found preallocated blocks, use them */
+       start = pa->pa_pstart + (ac->ac_o_ex.fe_logical - pa->pa_lstart);
+@@ -4027,13 +4027,13 @@ int ext3_mb_discard_preallocations(struc
+  * it tries to use preallocation first, then falls back
+  * to usual allocation
+  */
+-unsigned long ext3_mb_new_blocks(handle_t *handle,
++ext3_fsblk_t ext3_mb_new_blocks(handle_t *handle,
+                                struct ext3_allocation_request *ar, int *errp)
+ {
+       struct ext3_allocation_context ac;
+       struct ext3_sb_info *sbi;
+       struct super_block *sb;
+-      unsigned long block = 0;
++      ext3_fsblk_t block = 0;
+       int freed, inquota;
+       sb = ar->inode->i_sb;
+@@ -4044,8 +4044,8 @@ unsigned long ext3_mb_new_blocks(handle_
+               if (ext3_mballoc_warning++ == 0)
+                       printk(KERN_ERR "EXT3-fs: multiblock request with "
+                                       "mballoc disabled!\n");
+-              ar->len = 1;
+-              block = ext3_new_block_old(handle, ar->inode, ar->goal, errp);
++              block = ext3_new_blocks_old(handle, ar->inode, ar->goal,
++                                          &ar->len, errp);
+               return block;
+       }
+@@ -4109,11 +4109,11 @@ out:
+ }
+ EXPORT_SYMBOL(ext3_mb_new_blocks);
+-int ext3_new_block(handle_t *handle, struct inode *inode,
+-                 unsigned long goal, int *errp)
++ext3_fsblk_t ext3_new_block(handle_t *handle, struct inode *inode,
++                 ext3_fsblk_t goal, int *errp)
+ {
+       struct ext3_allocation_request ar;
+-      unsigned long ret;
++      ext3_fsblk_t ret;
+       if (!test_opt(inode->i_sb, MBALLOC)) {
+               ret = ext3_new_block_old(handle, inode, goal, errp);
+@@ -4228,8 +4228,8 @@ int ext3_mb_free_metadata(handle_t *hand
+  * Main entry point into mballoc to free blocks
+  */
+ void ext3_mb_free_blocks(handle_t *handle, struct inode *inode,
+-                      unsigned long block, unsigned long count,
+-                      int metadata, int *freed)
++                      ext3_fsblk_t block, unsigned long count,
++                      int metadata, unsigned long *freed)
+ {
+       struct buffer_head *bitmap_bh = NULL;
+       struct super_block *sb = inode->i_sb;
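
One detail in the hunks above deserves a worked example: ext3_grp_offs_to_block() now casts the group number to ext3_fsblk_t before multiplying, so the product is computed in the block-number type rather than int. A hedged arithmetic sketch with invented values:

    /* Illustration of the (group, offset) -> physical block mapping in
     * ext3_grp_offs_to_block() above. All values are assumptions. */
    #include <stdio.h>

    int main(void)
    {
            unsigned long long blocks_per_group = 32768; /* assumed: 4 KB blocks */
            unsigned long long first_data_block = 0;     /* assumed s_first_data_block */
            unsigned long long group = 3, start = 117;   /* assumed free extent */

            /* the widened cast in the patch keeps this multiply from
             * overflowing int for groups past 2^31 blocks */
            unsigned long long block = group * blocks_per_group + start +
                                       first_data_block;
            printf("phys block = %llu\n", block); /* 3*32768 + 117 = 98421 */
            return 0;
    }
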
diff --git a/ldiskfs/kernel_patches/patches/ext3-mballoc3-core-2.6.22-vanilla.patch b/ldiskfs/kernel_patches/patches/ext3-mballoc3-core-2.6.22-vanilla.patch
new file mode 100644 (file)
index 0000000..7c4ec5a
--- /dev/null
@@ -0,0 +1,4590 @@
+Index: linux-2.6.9-full/include/linux/ext3_fs.h
+===================================================================
+--- linux-2.6.9-full.orig/include/linux/ext3_fs.h      2007-06-08 23:44:08.000000000 +0400
++++ linux-2.6.9-full/include/linux/ext3_fs.h   2007-10-17 22:25:01.000000000 +0400
+@@ -57,6 +57,30 @@ struct statfs;
+ #define ext3_debug(f, a...)   do {} while (0)
+ #endif
++#define EXT3_MULTIBLOCK_ALLOCATOR     1
++
++#define EXT3_MB_HINT_MERGE            1       /* prefer goal again. length */
++#define EXT3_MB_HINT_RESERVED         2       /* blocks already reserved */
++#define EXT3_MB_HINT_METADATA         4       /* metadata is being allocated */
++#define EXT3_MB_HINT_FIRST            8       /* first blocks in the file */
++#define EXT3_MB_HINT_BEST             16      /* search for the best chunk */
++#define EXT3_MB_HINT_DATA             32      /* data is being allocated */
++#define EXT3_MB_HINT_NOPREALLOC               64      /* don't preallocate (for tails) */
++#define EXT3_MB_HINT_GROUP_ALLOC      128     /* allocate for locality group */
++#define EXT3_MB_HINT_GOAL_ONLY                256     /* allocate goal blocks or none */
++
++struct ext3_allocation_request {
++      struct inode *inode;    /* target inode for block we're allocating */
++      unsigned long logical;  /* logical block in target inode */
++      unsigned long goal;     /* phys. target (a hint) */
++      unsigned long lleft;    /* the closest logical allocated block to the left */
++      unsigned long pleft;    /* phys. block for ^^^ */
++      unsigned long lright;   /* the closest logical allocated block to the right */
++      unsigned long pright;   /* phys. block for ^^^ */
++      unsigned long len;      /* how many blocks we want to allocate */
++      unsigned long flags;    /* flags. see above EXT3_MB_HINT_* */
++};
++
+ /*
+  * Special inodes numbers
+  */
+@@ -387,6 +411,14 @@ struct ext3_inode {
+ #define ext3_find_first_zero_bit      ext2_find_first_zero_bit
+ #define ext3_find_next_zero_bit               ext2_find_next_zero_bit
++#ifndef ext2_find_next_le_bit
++#ifdef __LITTLE_ENDIAN
++#define ext2_find_next_le_bit(addr, size, off) find_next_bit((addr), (size), (off))
++#else
++#error "mballoc needs a patch for big-endian systems - CFS bug 10634"
++#endif        /* __LITTLE_ENDIAN */
++#endif        /* !ext2_find_next_le_bit */
++
+ /*
+  * Maximal mount counts between two filesystem checks
+  */
+@@ -763,6 +795,20 @@ extern unsigned long ext3_count_dirs (st
+ extern void ext3_check_inodes_bitmap (struct super_block *);
+ extern unsigned long ext3_count_free (struct buffer_head *, unsigned);
++/* mballoc.c */
++extern long ext3_mb_stats;
++extern long ext3_mb_max_to_scan;
++extern int ext3_mb_init(struct super_block *, int);
++extern int ext3_mb_release(struct super_block *);
++extern unsigned long ext3_mb_new_blocks(handle_t *, struct ext3_allocation_request *, int *);
++extern int ext3_mb_reserve_blocks(struct super_block *, int);
++extern void ext3_mb_release_blocks(struct super_block *, int);
++extern void ext3_mb_release_blocks(struct super_block *, int);
++extern void ext3_mb_discard_inode_preallocations(struct inode *);
++extern int __init init_ext3_proc(void);
++extern void exit_ext3_proc(void);
++extern void ext3_mb_free_blocks(handle_t *, struct inode *, unsigned long, unsigned long, int, int *);
++
+ /* inode.c */
+ extern int ext3_block_truncate_page(handle_t *, struct page *,
+Index: linux-2.6.9-full/include/linux/ext3_fs_sb.h
+===================================================================
+--- linux-2.6.9-full.orig/include/linux/ext3_fs_sb.h   2007-06-08 23:44:07.000000000 +0400
++++ linux-2.6.9-full/include/linux/ext3_fs_sb.h        2007-10-17 22:25:01.000000000 +0400
+@@ -81,6 +81,61 @@ struct ext3_sb_info {
+       char *s_qf_names[MAXQUOTAS];            /* Names of quota files with journalled quota */
+       int s_jquota_fmt;                       /* Format of quota to use */
+ #endif
++
++      /* for buddy allocator */
++      struct ext3_group_info ***s_group_info;
++      struct inode *s_buddy_cache;
++      long s_blocks_reserved;
++      spinlock_t s_reserve_lock;
++      struct list_head s_active_transaction;
++      struct list_head s_closed_transaction;
++      struct list_head s_committed_transaction;
++      spinlock_t s_md_lock;
++      tid_t s_last_transaction;
++      unsigned short *s_mb_offsets, *s_mb_maxs;
++
++      /* tunables */
++      unsigned long s_mb_factor;
++      unsigned long s_stripe;
++      unsigned long s_mb_stream_request;
++      unsigned long s_mb_max_to_scan;
++      unsigned long s_mb_min_to_scan;
++      unsigned long s_mb_max_groups_to_scan;
++      unsigned long s_mb_stats;
++      unsigned long s_mb_order2_reqs;
++
++      /* history to debug policy */
++      struct ext3_mb_history *s_mb_history;
++      int s_mb_history_cur;
++      int s_mb_history_max;
++      int s_mb_history_num;
++      struct proc_dir_entry *s_mb_proc;
++      spinlock_t s_mb_history_lock;
++      int s_mb_history_filter;
++
++      /* stats for buddy allocator */
++      spinlock_t s_mb_pa_lock;
++      atomic_t s_bal_reqs;    /* number of reqs with len > 1 */
++      atomic_t s_bal_success; /* we found long enough chunks */
++      atomic_t s_bal_allocated;       /* in blocks */
++      atomic_t s_bal_ex_scanned;      /* total extents scanned */
++      atomic_t s_bal_goals;   /* goal hits */
++      atomic_t s_bal_breaks;  /* too long searches */
++      atomic_t s_bal_2orders; /* 2^order hits */
++      spinlock_t s_bal_lock;
++      unsigned long s_mb_buddies_generated;
++      unsigned long long s_mb_generation_time;
++      atomic_t s_mb_lost_chunks;
++      atomic_t s_mb_preallocated;
++      atomic_t s_mb_discarded;
++
++      /* locality groups */
++      struct ext3_locality_group *s_locality_groups;
++
+ };
++#define EXT3_GROUP_INFO(sb, group)                                       \
++      EXT3_SB(sb)->s_group_info[(group) >> EXT3_DESC_PER_BLOCK_BITS(sb)] \
++                               [(group) & (EXT3_DESC_PER_BLOCK(sb) - 1)]
++
+ #endif        /* _LINUX_EXT3_FS_SB */
+Index: linux-2.6.9-full/fs/ext3/super.c
+===================================================================
+--- linux-2.6.9-full.orig/fs/ext3/super.c      2007-06-08 23:44:08.000000000 +0400
++++ linux-2.6.9-full/fs/ext3/super.c   2007-10-17 22:26:27.000000000 +0400
+@@ -394,6 +394,7 @@ void ext3_put_super (struct super_block 
+       struct ext3_super_block *es = sbi->s_es;
+       int i;
++      ext3_mb_release(sb);
+       ext3_ext_release(sb);
+       ext3_xattr_put_super(sb);
+       journal_destroy(sbi->s_journal);
+@@ -463,6 +464,8 @@ static struct inode *ext3_alloc_inode(st
+       ei->vfs_inode.i_version = 1;
+       memset(&ei->i_cached_extent, 0, sizeof(ei->i_cached_extent));
++      INIT_LIST_HEAD(&ei->i_prealloc_list);
++      spin_lock_init(&ei->i_prealloc_lock);
+       return &ei->vfs_inode;
+ }
+@@ -2576,7 +2579,13 @@ static struct file_system_type ext3_fs_t
+ static int __init init_ext3_fs(void)
+ {
+-      int err = init_ext3_xattr();
++      int err;
++
++      err = init_ext3_proc();
++      if (err)
++              return err;
++
++      err = init_ext3_xattr();
+       if (err)
+               return err;
+       err = init_inodecache();
+@@ -2598,6 +2607,7 @@ static void __exit exit_ext3_fs(void)
+       unregister_filesystem(&ext3_fs_type);
+       destroy_inodecache();
+       exit_ext3_xattr();
++      exit_ext3_proc();
+ }
+ int ext3_prep_san_write(struct inode *inode, long *blocks,
+Index: linux-2.6.9-full/fs/ext3/mballoc.c
+===================================================================
+--- linux-2.6.9-full.orig/fs/ext3/mballoc.c    2007-10-17 21:59:51.072534980 +0400
++++ linux-2.6.9-full/fs/ext3/mballoc.c 2007-10-17 23:09:22.000000000 +0400
+@@ -0,0 +1,4404 @@
++/*
++ * Copyright (c) 2003-2006, Cluster File Systems, Inc, info@clusterfs.com
++ * Written by Alex Tomas <alex@clusterfs.com>
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License version 2 as
++ * published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software
++ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307, USA.
++ */
++
++
++/*
++ * mballoc.c contains the multiblocks allocation routines
++ */
++
++#include <linux/time.h>
++#include <linux/fs.h>
++#include <linux/namei.h>
++#include <linux/ext3_jbd.h>
++#include <linux/jbd.h>
++#include <linux/ext3_fs.h>
++#include <linux/quotaops.h>
++#include <linux/buffer_head.h>
++#include <linux/module.h>
++#include <linux/swap.h>
++#include <linux/proc_fs.h>
++#include <linux/pagemap.h>
++#include <linux/seq_file.h>
++#include <linux/version.h>
++
++/*
++ * MUSTDO:
++ *   - test ext3_ext_search_left() and ext3_ext_search_right()
++ *   - search for metadata in a few groups
++ *
++ * TODO v4:
++ *   - normalization should take into account whether file is still open
++ *   - discard preallocations if no free space left (policy?)
++ *   - don't normalize tails
++ *   - quota
++ *   - reservation for superuser
++ *
++ * TODO v3:
++ *   - bitmap read-ahead (proposed by Oleg Drokin aka green)
++ *   - track min/max extents in each group for better group selection
++ *   - mb_mark_used() may allocate chunk right after splitting buddy
++ *   - tree of groups sorted by number of free blocks
++ *   - error handling
++ */
++
++/*
++ * mballoc operates on the following data:
++ *  - on-disk bitmap
++ *  - in-core buddy (actually includes buddy and bitmap)
++ *  - preallocation descriptors (PAs)
++ *
++ * there are two types of preallocations:
++ *  - inode
++ *    assigned to a specific inode and can be used for this inode only.
++ *    it describes part of the inode's space preallocated to specific
++ *    physical blocks. any block from that preallocation can be used
++ *    independently. the descriptor just tracks the number of blocks left
++ *    unused. so, before taking some block from the descriptor, one must
++ *    make sure the corresponding logical block isn't allocated yet. this
++ *    also means that freeing any block within the descriptor's range
++ *    must discard all preallocated blocks.
++ *  - locality group
++ *    assigned to a specific locality group which does not translate to
++ *    a permanent set of inodes: an inode can join and leave the group. space
++ *    from this type of preallocation can be used for any inode. thus
++ *    it's consumed from the beginning to the end.
++ *
++ * relation between them can be expressed as:
++ *    in-core buddy = on-disk bitmap + preallocation descriptors
++ *
++ * this means the blocks mballoc considers used are:
++ *  - allocated blocks (persistent)
++ *  - preallocated blocks (non-persistent)
++ *
++ * consistency in mballoc world means that at any time a block is either
++ * free or used in ALL structures. notice: "any time" should not be read
++ * literally -- time is discrete and delimited by locks.
++ *
++ *  to keep it simple, we don't use block numbers; instead we count numbers of
++ *  blocks: how many blocks are marked used/free in the on-disk bitmap, buddy and PA.
++ *
++ * all operations can be expressed as:
++ *  - init buddy:                     buddy = on-disk + PAs
++ *  - new PA:                         buddy += N; PA = N
++ *  - use inode PA:                   on-disk += N; PA -= N
++ *  - discard inode PA                        buddy -= on-disk - PA; PA = 0
++ *  - use locality group PA           on-disk += N; PA -= N
++ *  - discard locality group PA               buddy -= PA; PA = 0
++ *  note: 'buddy -= on-disk - PA' is used to show that on-disk bitmap
++ *        is used in real operation because we can't know actual used
++ *        bits from PA, only from on-disk bitmap
++ *
++ * if we follow this strict logic, then all operations above should be atomic.
++ * given some of them can block, we'd have to use something like semaphores
++ * killing performance on high-end SMP hardware. let's try to relax it using
++ * the following knowledge:
++ *  1) if buddy is referenced, it's already initialized
++ *  2) while block is used in buddy and the buddy is referenced,
++ *     nobody can re-allocate that block
++ *  3) we work on bitmaps and '+' actually means 'set bits'. if on-disk has
++ *     a bit set and a PA claims the same block, it's OK. IOW, one can set a
++ *     bit in the on-disk bitmap if the buddy has the same bit set and/or a
++ *     PA covers the corresponding block
++ *
++ * so, now we're building a concurrency table:
++ *  - init buddy vs.
++ *    - new PA
++ *      blocks for PA are allocated in the buddy, buddy must be referenced
++ *      until PA is linked to allocation group to avoid concurrent buddy init
++ *    - use inode PA
++ *      we need to make sure that either the on-disk bitmap or the PA
++ *      has up-to-date data. given (3) we only care that the PA -= N
++ *      operation doesn't interfere with init
++ *    - discard inode PA
++ *      the simplest way would be to have buddy initialized by the discard
++ *    - use locality group PA
++ *      again PA-=N must be serialized with init
++ *    - discard locality group PA
++ *      the simplest way would be to have buddy initialized by the discard
++ *  - new PA vs.
++ *    - use inode PA
++ *      i_truncate_mutex serializes them
++ *    - discard inode PA
++ *      discard process must wait until PA isn't used by another process
++ *    - use locality group PA
++ *      some mutex should serialize them
++ *    - discard locality group PA
++ *      discard process must wait until PA isn't used by another process
++ *  - use inode PA
++ *    - use inode PA
++ *      i_truncate_mutex or another mutex should serialize them
++ *    - discard inode PA
++ *      discard process must wait until PA isn't used by another process
++ *    - use locality group PA
++ *      nothing wrong here -- they're different PAs covering different blocks
++ *    - discard locality group PA
++ *      discard process must wait until PA isn't used by another process
++ *
++ * now we're ready to draw a few consequences:
++ *  - while a PA is referenced, no discard of it is possible
++ *  - a PA stays referenced until its blocks are marked in the on-disk
++ *    bitmap
++ *  - a PA changes only after the on-disk bitmap does
++ *  - discard must not compete with init. either init is done before
++ *    any discard or they're serialized somehow
++ *  - buddy init as sum of on-disk bitmap and PAs is done atomically
++ *
++ * a special case is when we've consumed a PA to emptiness. no need to
++ * modify the buddy in this case, but we still have to care about
++ * concurrent init
++ *
++ */
++
++/*
++ * Logic in a few words:
++ *
++ *  - allocation:
++ *    load group
++ *    find blocks
++ *    mark bits in on-disk bitmap
++ *    release group
++ *
++ *  - use preallocation:
++ *    find proper PA (per-inode or group)
++ *    load group
++ *    mark bits in on-disk bitmap
++ *    release group
++ *    release PA
++ *
++ *  - free:
++ *    load group
++ *    mark bits in on-disk bitmap
++ *    release group
++ *
++ *  - discard preallocations in group:
++ *    mark PAs deleted
++ *    move them onto local list
++ *    load on-disk bitmap
++ *    load group
++ *    remove PA from object (inode or locality group)
++ *    mark free blocks in-core
++ *
++ *  - discard inode's preallocations:
++ */
++
++/*
++ * Locking rules
++ *
++ * Locks:
++ *  - bitlock on a group      (group)
++ *  - object (inode/locality) (object)
++ *  - per-pa lock             (pa)
++ *
++ * Paths:
++ *  - new pa
++ *    object
++ *    group
++ *
++ *  - find and use pa:
++ *    pa
++ *
++ *  - release consumed pa:
++ *    pa
++ *    group
++ *    object
++ *
++ *  - generate in-core bitmap:
++ *    group
++ *        pa
++ *
++ *  - discard all for given object (inode, locality group):
++ *    object
++ *        pa
++ *    group
++ *
++ *  - discard all for given group:
++ *    group
++ *        pa
++ *    group
++ *        object
++ *
++ */
++
++/*
++ * with AGGRESSIVE_CHECK allocator runs consistency checks over
++ * structures. these checks slow things down a lot
++ */
++#define AGGRESSIVE_CHECK__
++
++/*
++ * with DOUBLE_CHECK defined mballoc creates persistent in-core
++ * bitmaps, maintains and uses them to check for double allocations
++ */
++#define DOUBLE_CHECK__
++
++/*
++ * with MB_DEBUG mballoc prints verbose debugging via mb_debug()
++ */
++#define MB_DEBUG__
++#ifdef MB_DEBUG
++#define mb_debug(fmt,a...)    printk(fmt, ##a)
++#else
++#define mb_debug(fmt,a...)
++#endif
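++/* usage sketch: with MB_DEBUG defined above,
++ *   mb_debug("load group %u\n", group);
++ * expands to printk(); with the default MB_DEBUG__ spelling it
++ * compiles away to nothing */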
++
++/*
++ * with EXT3_MB_HISTORY mballoc stores last N allocations in memory
++ * and you can monitor it in /proc/fs/ext3/<dev>/mb_history
++ */
++#define EXT3_MB_HISTORY
++#define EXT3_MB_HISTORY_ALLOC         1       /* allocation */
++#define EXT3_MB_HISTORY_PREALLOC      2       /* preallocated blocks used */
++#define EXT3_MB_HISTORY_DISCARD               4       /* preallocation discarded */
++#define EXT3_MB_HISTORY_FREE          8       /* free */
++
++#define EXT3_MB_HISTORY_DEFAULT               (EXT3_MB_HISTORY_ALLOC | \
++                                       EXT3_MB_HISTORY_PREALLOC | \
++                                       EXT3_MB_HISTORY_DISCARD | \
++                                       EXT3_MB_HISTORY_FREE)
++
++/*
++ * How long mballoc can look for a best extent (in found extents)
++ */
++#define MB_DEFAULT_MAX_TO_SCAN                200
++
++/*
++ * How long mballoc must look for a best extent
++ */
++#define MB_DEFAULT_MIN_TO_SCAN                10
++
++/*
++ * How many groups mballoc will scan looking for the best chunk
++ */
++#define MB_DEFAULT_MAX_GROUPS_TO_SCAN 5
++
++/*
++ * with 'ext3_mb_stats' allocator will collect stats that will be
++ * shown at umount. The collecting costs though!
++ */
++#define MB_DEFAULT_STATS              1
++
++/*
++ * files smaller than MB_DEFAULT_STREAM_THRESHOLD are served
++ * by the stream allocator, whose purpose is to pack requests
++ * as close to each other as possible to produce smooth I/O traffic
++ */
++#define MB_DEFAULT_STREAM_THRESHOLD   16      /* 64K */
++
++/*
++ * for which requests use 2^N search using buddies
++ */
++#define MB_DEFAULT_ORDER2_REQS                8
++
++/*
++ * default stripe size = 1MB
++ */
++#define MB_DEFAULT_STRIPE             256
++
++static struct kmem_cache *ext3_pspace_cachep = NULL;
++
++#ifdef EXT3_BB_MAX_BLOCKS
++#undef EXT3_BB_MAX_BLOCKS
++#endif
++#define EXT3_BB_MAX_BLOCKS    30
++
++struct ext3_free_metadata {
++      unsigned short group;
++      unsigned short num;
++      unsigned short blocks[EXT3_BB_MAX_BLOCKS];
++      struct list_head list;
++};
++
++struct ext3_group_info {
++      unsigned long   bb_state;
++      unsigned long   bb_tid;
++      struct ext3_free_metadata *bb_md_cur;
++      unsigned short  bb_first_free;
++      unsigned short  bb_free;
++      unsigned short  bb_fragments;
++      struct          list_head bb_prealloc_list;
++#ifdef DOUBLE_CHECK
++      void            *bb_bitmap;
++#endif
++      unsigned short  bb_counters[];
++};
++
++#define EXT3_GROUP_INFO_NEED_INIT_BIT 0
++#define EXT3_GROUP_INFO_LOCKED_BIT    1
++
++#define EXT3_MB_GRP_NEED_INIT(grp)    \
++      (test_bit(EXT3_GROUP_INFO_NEED_INIT_BIT, &(grp)->bb_state))
++
++
++struct ext3_prealloc_space {
++      struct list_head        pa_inode_list;
++      struct list_head        pa_group_list;
++      union {
++              struct list_head pa_tmp_list;
++              struct rcu_head pa_rcu;
++      } u;
++      spinlock_t              pa_lock;
++      atomic_t                pa_count;
++      unsigned                pa_deleted;
++      unsigned long           pa_pstart;      /* phys. block */
++      unsigned long           pa_lstart;      /* log. block */
++      unsigned short          pa_len;         /* len of preallocated chunk */
++      unsigned short          pa_free;        /* how many blocks are free */
++      unsigned short          pa_linear;      /* consumed in one direction
++                                               * strictly, for group prealloc */
++      spinlock_t              *pa_obj_lock;
++      struct inode            *pa_inode;      /* hack, for history only */
++};
++
++
++struct ext3_free_extent {
++      unsigned long fe_logical;
++      unsigned long fe_start;
++      unsigned long fe_group;
++      unsigned long fe_len;
++};
++
++/*
++ * Locality group:
++ *   we try to group all related changes together
++ *   so that writeback can flush/allocate them together as well
++ */
++struct ext3_locality_group {
++      /* for allocator */
++      struct semaphore        lg_sem;         /* to serialize allocates */
++      struct list_head        lg_prealloc_list;/* list of preallocations */
++      spinlock_t              lg_prealloc_lock;
++};
++
++struct ext3_allocation_context {
++      struct inode *ac_inode;
++      struct super_block *ac_sb;
++
++      /* original request */
++      struct ext3_free_extent ac_o_ex;
++
++      /* goal request (after normalization) */
++      struct ext3_free_extent ac_g_ex;
++
++      /* the best found extent */
++      struct ext3_free_extent ac_b_ex;
++
++      /* copy of the best found extent taken before preallocation efforts */
++      struct ext3_free_extent ac_f_ex;
++
++      /* number of iterations done. we have to track to limit searching */
++      unsigned long ac_ex_scanned;
++      __u16 ac_groups_scanned;
++      __u16 ac_found;
++      __u16 ac_tail;
++      __u16 ac_buddy;
++      __u16 ac_flags;         /* allocation hints */
++      __u8 ac_status;
++      __u8 ac_criteria;
++      __u8 ac_repeats;
++      __u8 ac_2order;         /* if request is to allocate 2^N blocks and
++                               * N > 0, the field stores N, otherwise 0 */
++      __u8 ac_op;             /* operation, for history only */
++      struct page *ac_bitmap_page;
++      struct page *ac_buddy_page;
++      struct ext3_prealloc_space *ac_pa;
++      struct ext3_locality_group *ac_lg;
++};
++
++#define AC_STATUS_CONTINUE    1
++#define AC_STATUS_FOUND               2
++#define AC_STATUS_BREAK               3
++
++struct ext3_mb_history {
++      struct ext3_free_extent orig;   /* orig allocation */
++      struct ext3_free_extent goal;   /* goal allocation */
++      struct ext3_free_extent result; /* result allocation */
++      unsigned pid;
++      unsigned ino;
++      __u16 found;    /* how many extents have been found */
++      __u16 groups;   /* how many groups have been scanned */
++      __u16 tail;     /* what tail broke some buddy */
++      __u16 buddy;    /* buddy the tail ^^^ broke */
++      __u16 flags;
++      __u8 cr:3;      /* which phase the result extent was found at */
++      __u8 op:4;
++      __u8 merged:1;
++};
++
++struct ext3_buddy {
++      struct page *bd_buddy_page;
++      void *bd_buddy;
++      struct page *bd_bitmap_page;
++      void *bd_bitmap;
++      struct ext3_group_info *bd_info;
++      struct super_block *bd_sb;
++      __u16 bd_blkbits;
++      __u16 bd_group;
++};
++#define EXT3_MB_BITMAP(e3b)   ((e3b)->bd_bitmap)
++#define EXT3_MB_BUDDY(e3b)    ((e3b)->bd_buddy)
++
++#ifndef EXT3_MB_HISTORY
++#define ext3_mb_store_history(ac)
++#else
++static void ext3_mb_store_history(struct ext3_allocation_context *ac);
++#endif
++
++#define in_range(b, first, len)       ((b) >= (first) && (b) <= (first) + (len) - 1)
++
++static struct proc_dir_entry *proc_root_ext3;
++
++int ext3_create (struct inode *, struct dentry *, int, struct nameidata *);
++struct buffer_head * read_block_bitmap(struct super_block *, unsigned int);
++unsigned long ext3_new_blocks_old(handle_t *handle, struct inode *inode,
++                      unsigned long goal, unsigned long *count, int *errp);
++void ext3_mb_release_blocks(struct super_block *, int);
++void ext3_mb_poll_new_transaction(struct super_block *, handle_t *);
++void ext3_mb_free_committed_blocks(struct super_block *);
++void ext3_mb_generate_from_pa(struct super_block *sb, void *bitmap, int group);
++void ext3_mb_free_consumed_preallocations(struct ext3_allocation_context *ac);
++void ext3_mb_return_to_preallocation(struct inode *inode, struct ext3_buddy *e3b,
++                                      sector_t block, int count);
++void ext3_mb_show_ac(struct ext3_allocation_context *ac);
++void ext3_mb_check_with_pa(struct ext3_buddy *e3b, int first, int count);
++void ext3_mb_put_pa(struct ext3_allocation_context *, struct super_block *, struct ext3_prealloc_space *pa);
++int ext3_mb_init_per_dev_proc(struct super_block *sb);
++int ext3_mb_destroy_per_dev_proc(struct super_block *sb);
++
++/*
++ * Calculate the block group number and offset, given a block number
++ */
++static void ext3_get_group_no_and_offset(struct super_block *sb,
++                                      unsigned long blocknr,
++                                      unsigned long *blockgrpp,
++                                      unsigned long *offsetp)
++{
++      struct ext3_super_block *es = EXT3_SB(sb)->s_es;
++      unsigned long offset;
++
++      blocknr = blocknr - le32_to_cpu(es->s_first_data_block);
++      offset = blocknr % EXT3_BLOCKS_PER_GROUP(sb);
++      blocknr = blocknr / EXT3_BLOCKS_PER_GROUP(sb);
++      if (offsetp)
++              *offsetp = offset;
++      if (blockgrpp)
++              *blockgrpp = blocknr;
++
++}
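++/* e.g. (illustrative geometry) with s_first_data_block == 1 and
++ * EXT3_BLOCKS_PER_GROUP(sb) == 32768, blocknr 32770 gives
++ * (32770 - 1) % 32768 == 1 and / 32768 == 1: group 1, offset 1 */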
++
++static inline void
++ext3_lock_group(struct super_block *sb, int group)
++{
++      bit_spin_lock(EXT3_GROUP_INFO_LOCKED_BIT,
++                    &EXT3_GROUP_INFO(sb, group)->bb_state);
++}
++
++static inline void
++ext3_unlock_group(struct super_block *sb, int group)
++{
++      bit_spin_unlock(EXT3_GROUP_INFO_LOCKED_BIT,
++                      &EXT3_GROUP_INFO(sb, group)->bb_state);
++}
++
++static inline int
++ext3_is_group_locked(struct super_block *sb, int group)
++{
++      return bit_spin_is_locked(EXT3_GROUP_INFO_LOCKED_BIT,
++                                      &EXT3_GROUP_INFO(sb, group)->bb_state);
++}
++
++unsigned long ext3_grp_offs_to_block(struct super_block *sb,
++                                      struct ext3_free_extent *fex)
++{
++      unsigned long block;
++
++      block = (unsigned long) fex->fe_group * EXT3_BLOCKS_PER_GROUP(sb)
++                      + fex->fe_start
++                      + le32_to_cpu(EXT3_SB(sb)->s_es->s_first_data_block);
++      return block;
++}
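++/* and the inverse: group 1, offset 1 maps back to
++ * 1 * 32768 + 1 + 1 == 32770 under the same illustrative geometry */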
++
++#if BITS_PER_LONG == 64
++#define mb_correct_addr_and_bit(bit,addr)             \
++do {                                                  \
++      bit += ((unsigned long) addr & 7UL) << 3;       \
++      addr = (void *) ((unsigned long) addr & ~7UL);  \
++} while (0)
++#elif BITS_PER_LONG == 32
++#define mb_correct_addr_and_bit(bit,addr)             \
++do {                                                  \
++      bit += ((unsigned long) addr & 3UL) << 3;       \
++      addr = (void *) ((unsigned long) addr & ~3UL);  \
++} while (0)
++#else
++#error "unsupported BITS_PER_LONG"
++#endif
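++/* alignment sketch (64-bit case, illustrative): for addr == base + 3
++ * with base 8-byte aligned and bit == 5, the macro yields
++ * bit == 5 + 3 * 8 == 29 against base, so the ext2_*_bit() helpers
++ * below always operate on a long-aligned address */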
++
++static inline int mb_test_bit(int bit, void *addr)
++{
++      mb_correct_addr_and_bit(bit,addr);
++      return ext2_test_bit(bit, addr);
++}
++
++static inline void mb_set_bit(int bit, void *addr)
++{
++      mb_correct_addr_and_bit(bit,addr);
++      ext2_set_bit(bit, addr);
++}
++
++static inline void mb_set_bit_atomic(spinlock_t *lock, int bit, void *addr)
++{
++      mb_correct_addr_and_bit(bit,addr);
++      ext2_set_bit_atomic(lock, bit, addr);
++}
++
++static inline void mb_clear_bit(int bit, void *addr)
++{
++      mb_correct_addr_and_bit(bit,addr);
++      ext2_clear_bit(bit, addr);
++}
++
++static inline void mb_clear_bit_atomic(spinlock_t *lock, int bit, void *addr)
++{
++      mb_correct_addr_and_bit(bit,addr);
++      ext2_clear_bit_atomic(lock, bit, addr);
++}
++
++static inline int mb_find_next_zero_bit(void *addr, int max, int start)
++{
++      int fix;
++#if BITS_PER_LONG == 64
++      fix = ((unsigned long) addr & 7UL) << 3;
++      addr = (void *) ((unsigned long) addr & ~7UL);
++#elif BITS_PER_LONG == 32
++      fix = ((unsigned long) addr & 3UL) << 3;
++      addr = (void *) ((unsigned long) addr & ~3UL);
++#else
++#error "how many bits you are?!"
++#endif
++      max += fix;
++      start += fix;
++      return ext2_find_next_zero_bit(addr, max, start) - fix;
++}
++
++static inline int mb_find_next_bit(void *addr, int max, int start)
++{
++      int fix;
++#if BITS_PER_LONG == 64
++      fix = ((unsigned long) addr & 7UL) << 3;
++      addr = (void *) ((unsigned long) addr & ~7UL);
++#elif BITS_PER_LONG == 32
++      fix = ((unsigned long) addr & 3UL) << 3;
++      addr = (void *) ((unsigned long) addr & ~3UL);
++#else
++#error "how many bits you are?!"
++#endif
++      max += fix;
++      start += fix;
++
++#ifdef __BIG_ENDIAN
++      /* no little-endian bit walker here yet: fail the build rather
++       * than fall off the end of a non-void function */
++#error "mb_find_next_bit lacks a big-endian implementation"
++#else
++      return find_next_bit(addr, max, start) - fix;
++#endif
++}
++
++static inline void *mb_find_buddy(struct ext3_buddy *e3b, int order, int *max)
++{
++      char *bb;
++
++      BUG_ON(EXT3_MB_BITMAP(e3b) == EXT3_MB_BUDDY(e3b));
++      BUG_ON(max == NULL);
++
++      if (order > e3b->bd_blkbits + 1) {
++              *max = 0;
++              return NULL;
++      }
++
++      /* at order 0 we see each particular block */
++      *max = 1 << (e3b->bd_blkbits + 3);
++      if (order == 0)
++              return EXT3_MB_BITMAP(e3b);
++
++      bb = EXT3_MB_BUDDY(e3b) + EXT3_SB(e3b->bd_sb)->s_mb_offsets[order];
++      *max = EXT3_SB(e3b->bd_sb)->s_mb_maxs[order];
++
++      return bb;
++}
++
++#ifdef DOUBLE_CHECK
++void mb_free_blocks_double(struct inode *inode, struct ext3_buddy *e3b,
++                         int first, int count)
++{
++      int i;
++      struct super_block *sb = e3b->bd_sb;
++
++      if (unlikely(e3b->bd_info->bb_bitmap == NULL))
++              return;
++      BUG_ON(!ext3_is_group_locked(sb, e3b->bd_group));
++      for (i = 0; i < count; i++) {
++              if (!mb_test_bit(first + i, e3b->bd_info->bb_bitmap)) {
++                      unsigned long blocknr;
++                      blocknr = e3b->bd_group * EXT3_BLOCKS_PER_GROUP(sb);
++                      blocknr += first + i;
++                      blocknr +=
++                          le32_to_cpu(EXT3_SB(sb)->s_es->s_first_data_block);
++
++                      ext3_error(sb, __FUNCTION__, "double-free of inode"
++                                 " %lu's block %lu(bit %u in group %u)\n",
++                                 inode ? inode->i_ino : 0, blocknr,
++                                 first + i, e3b->bd_group);
++              }
++              mb_clear_bit(first + i, e3b->bd_info->bb_bitmap);
++      }
++}
++
++void mb_mark_used_double(struct ext3_buddy *e3b, int first, int count)
++{
++      int i;
++      if (unlikely(e3b->bd_info->bb_bitmap == NULL))
++              return;
++      BUG_ON(!ext3_is_group_locked(e3b->bd_sb, e3b->bd_group));
++      for (i = 0; i < count; i++) {
++              BUG_ON(mb_test_bit(first + i, e3b->bd_info->bb_bitmap));
++              mb_set_bit(first + i, e3b->bd_info->bb_bitmap);
++      }
++}
++
++void mb_cmp_bitmaps(struct ext3_buddy *e3b, void *bitmap)
++{
++      if (memcmp(e3b->bd_info->bb_bitmap, bitmap, e3b->bd_sb->s_blocksize)) {
++              unsigned char *b1, *b2;
++              int i;
++              b1 = (unsigned char *) e3b->bd_info->bb_bitmap;
++              b2 = (unsigned char *) bitmap;
++              for (i = 0; i < e3b->bd_sb->s_blocksize; i++) {
++                      if (b1[i] != b2[i]) {
++                              printk("corruption in group %u at byte %u(%u): "
++                                     "%x in copy != %x on disk/prealloc\n",
++                                      e3b->bd_group, i, i * 8, b1[i], b2[i]);
++                              BUG();
++                      }
++              }
++      }
++}
++
++#else
++#define mb_free_blocks_double(a,b,c,d)
++#define mb_mark_used_double(a,b,c)
++#define mb_cmp_bitmaps(a,b)
++#endif
++
++#ifdef AGGRESSIVE_CHECK
++
++#define MB_CHECK_ASSERT(assert)                                               \
++do {                                                                  \
++      if (!(assert)) {                                                \
++              printk (KERN_EMERG                                      \
++                      "Assertion failure in %s() at %s:%d: \"%s\"\n", \
++                      function, file, line, # assert);                \
++              BUG();                                                  \
++      }                                                               \
++} while (0)
++
++static int __mb_check_buddy(struct ext3_buddy *e3b, char *file,
++                              const char *function, int line)
++{
++      struct super_block *sb = e3b->bd_sb;
++      int order = e3b->bd_blkbits + 1;
++      int max, max2, i, j, k, count;
++      struct ext3_group_info *grp;
++      int fragments = 0, fstart;
++      struct list_head *cur;
++      void *buddy, *buddy2;
++
++      if (!test_opt(sb, MBALLOC))
++              return 0;
++
++      {
++              static int mb_check_counter = 0;
++              if (mb_check_counter++ % 100 != 0)
++                      return 0;
++      }
++
++      while (order > 1) {
++              buddy = mb_find_buddy(e3b, order, &max);
++              MB_CHECK_ASSERT(buddy);
++              buddy2 = mb_find_buddy(e3b, order - 1, &max2);
++              MB_CHECK_ASSERT(buddy2);
++              MB_CHECK_ASSERT(buddy != buddy2);
++              MB_CHECK_ASSERT(max * 2 == max2);
++
++              count = 0;
++              for (i = 0; i < max; i++) {
++
++                      if (mb_test_bit(i, buddy)) {
++                              /* only single bit in buddy2 may be 1 */
++                              if (!mb_test_bit(i << 1, buddy2))
++                                      MB_CHECK_ASSERT(mb_test_bit((i<<1)+1, buddy2));
++                              else if (!mb_test_bit((i << 1) + 1, buddy2))
++                                      MB_CHECK_ASSERT(mb_test_bit(i << 1, buddy2));
++                              continue;
++                      }
++
++                      /* both bits in buddy2 must be 0 */
++                      MB_CHECK_ASSERT(mb_test_bit(i << 1, buddy2));
++                      MB_CHECK_ASSERT(mb_test_bit((i << 1) + 1, buddy2));
++
++                      for (j = 0; j < (1 << order); j++) {
++                              k = (i * (1 << order)) + j;
++                              MB_CHECK_ASSERT(!mb_test_bit(k, EXT3_MB_BITMAP(e3b)));
++                      }
++                      count++;
++              }
++              MB_CHECK_ASSERT(e3b->bd_info->bb_counters[order] == count);
++              order--;
++      }
++
++      fstart = -1;
++      buddy = mb_find_buddy(e3b, 0, &max);
++      for (i = 0; i < max; i++) {
++              if (!mb_test_bit(i, buddy)) {
++                      MB_CHECK_ASSERT(i >= e3b->bd_info->bb_first_free);
++                      if (fstart == -1) {
++                              fragments++;
++                              fstart = i;
++                      }
++                      continue;
++              }
++              fstart = -1;
++              /* check used bits only */
++              for (j = 0; j < e3b->bd_blkbits + 1; j++) {
++                      buddy2 = mb_find_buddy(e3b, j, &max2);
++                      k = i >> j;
++                      MB_CHECK_ASSERT(k < max2);
++                      MB_CHECK_ASSERT(mb_test_bit(k, buddy2));
++              }
++      }
++      MB_CHECK_ASSERT(!EXT3_MB_GRP_NEED_INIT(e3b->bd_info));
++      MB_CHECK_ASSERT(e3b->bd_info->bb_fragments == fragments);
++
++      grp = EXT3_GROUP_INFO(sb, e3b->bd_group);
++      buddy = mb_find_buddy(e3b, 0, &max);
++      list_for_each(cur, &grp->bb_prealloc_list) {
++              unsigned long groupnr, offset;
++              struct ext3_prealloc_space *pa;
++              pa = list_entry(cur, struct ext3_prealloc_space,
++                              pa_group_list);
++              ext3_get_group_no_and_offset(sb, pa->pa_pstart,
++                                           &groupnr, &offset);
++              MB_CHECK_ASSERT(groupnr == e3b->bd_group);
++              for (i = 0; i < pa->pa_len; i++)
++                      MB_CHECK_ASSERT(mb_test_bit(offset + i, buddy));
++      }
++      return 0;
++}
++#undef MB_CHECK_ASSERT
++#define mb_check_buddy(e3b) __mb_check_buddy(e3b,__FILE__,__FUNCTION__,__LINE__)
++#else
++#define mb_check_buddy(e3b)
++#endif
++
++/* find most significant bit */
++static inline int fmsb(unsigned short word)
++{
++      int order;
++
++      if (word > 255) {
++              order = 7;
++              word >>= 8;
++      } else {
++              order = -1;
++      }
++
++      do {
++              order++;
++              word >>= 1;
++      } while (word != 0);
++
++      return order;
++}
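++/* e.g. fmsb(320) == 8: 320 is 0b101000000, so the most significant
++ * set bit has index 8 (2^8 == 256 <= 320 < 512) */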
++
++static inline void
++ext3_mb_mark_free_simple(struct super_block *sb, void *buddy, unsigned first,
++                              int len, struct ext3_group_info *grp)
++{
++      struct ext3_sb_info *sbi = EXT3_SB(sb);
++      unsigned short min, max, chunk, border;
++
++      BUG_ON(len >= EXT3_BLOCKS_PER_GROUP(sb));
++
++      border = 2 << sb->s_blocksize_bits;
++
++      while (len > 0) {
++              /* find how many blocks can be covered since this position */
++              max = ffs(first | border) - 1;
++
++              /* find how many blocks of power 2 we need to mark */
++              min = fmsb(len);
++
++              if (max < min)
++                      min = max;
++              chunk = 1 << min;
++
++              /* mark multiblock chunks only */
++              grp->bb_counters[min]++;
++              if (min > 0)
++                      mb_clear_bit(first >> min, buddy + sbi->s_mb_offsets[min]);
++
++              len -= chunk;
++              first += chunk;
++      }
++}
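++/* worked example (illustrative): a free run with first == 6, len == 6
++ * is split on alignment/size boundaries into an order-1 chunk at 6 and
++ * an order-2 chunk at 8, bumping bb_counters[1] and bb_counters[2] and
++ * clearing one bit in each of the corresponding buddy bitmaps */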
++
++static void
++ext3_mb_generate_buddy(struct super_block *sb, void *buddy, void *bitmap,
++                      int group)
++{
++      struct ext3_group_info *grp = EXT3_GROUP_INFO(sb, group);
++      unsigned short max = EXT3_BLOCKS_PER_GROUP(sb);
++      unsigned short i = 0, first, len;
++      unsigned free = 0, fragments = 0;
++      unsigned long long period = get_cycles();
++
++      /* initialize buddy from bitmap which is aggregation
++       * of on-disk bitmap and preallocations */
++      i = mb_find_next_zero_bit(bitmap, max, 0);
++      grp->bb_first_free = i;
++      while (i < max) {
++              fragments++;
++              first = i;
++              i = ext2_find_next_le_bit(bitmap, max, i);
++              len = i - first;
++              free += len;
++              if (len > 1)
++                      ext3_mb_mark_free_simple(sb, buddy, first, len, grp);
++              else
++                      grp->bb_counters[0]++;
++              if (i < max)
++                      i = mb_find_next_zero_bit(bitmap, max, i);
++      }
++      grp->bb_fragments = fragments;
++
++      if (free != grp->bb_free) {
++              printk("EXT3-fs: group %u: %u blocks in bitmap, %u in gd\n",
++                      group, free, grp->bb_free);
++              grp->bb_free = free;
++      }
++
++      clear_bit(EXT3_GROUP_INFO_NEED_INIT_BIT, &grp->bb_state);
++
++      period = get_cycles() - period;
++      spin_lock(&EXT3_SB(sb)->s_bal_lock);
++      EXT3_SB(sb)->s_mb_buddies_generated++;
++      EXT3_SB(sb)->s_mb_generation_time += period;
++      spin_unlock(&EXT3_SB(sb)->s_bal_lock);
++}
++
++static int ext3_mb_init_cache(struct page *page, char *incore)
++{
++      int blocksize, blocks_per_page, groups_per_page;
++      int err = 0, i, first_group, first_block;
++      struct super_block *sb;
++      struct buffer_head *bhs;
++      struct buffer_head **bh;
++      struct inode *inode;
++      char *data, *bitmap;
++
++      mb_debug("init page %lu\n", page->index);
++
++      inode = page->mapping->host;
++      sb = inode->i_sb;
++      blocksize = 1 << inode->i_blkbits;
++      blocks_per_page = PAGE_CACHE_SIZE / blocksize;
++
++      groups_per_page = blocks_per_page >> 1;
++      if (groups_per_page == 0)
++              groups_per_page = 1;
++
++      /* allocate buffer_heads to read bitmaps */
++      if (groups_per_page > 1) {
++              err = -ENOMEM;
++              i = sizeof(struct buffer_head *) * groups_per_page;
++              bh = kmalloc(i, GFP_NOFS);
++              if (bh == NULL)
++                      goto out;
++              memset(bh, 0, i);
++      } else
++              bh = &bhs;
++
++      first_group = page->index * blocks_per_page / 2;
++
++      /* read all groups the page covers into the cache */
++      for (i = 0; i < groups_per_page; i++) {
++              struct ext3_group_desc * desc;
++
++              if (first_group + i >= EXT3_SB(sb)->s_groups_count)
++                      break;
++
++              err = -EIO;
++              desc = ext3_get_group_desc(sb, first_group + i, NULL);
++              if (desc == NULL)
++                      goto out;
++
++              err = -ENOMEM;
++              bh[i] = sb_getblk(sb, le32_to_cpu(desc->bg_block_bitmap));
++              if (bh[i] == NULL)
++                      goto out;
++
++              if (buffer_uptodate(bh[i]))
++                      continue;
++
++              lock_buffer(bh[i]);
++              if (buffer_uptodate(bh[i])) {
++                      unlock_buffer(bh[i]);
++                      continue;
++              }
++
++              get_bh(bh[i]);
++              bh[i]->b_end_io = end_buffer_read_sync;
++              submit_bh(READ, bh[i]);
++              mb_debug("read bitmap for group %u\n", first_group + i);
++      }
++
++      /* wait for I/O completion */
++      for (i = 0; i < groups_per_page && bh[i]; i++)
++              wait_on_buffer(bh[i]);
++
++      err = -EIO;
++      for (i = 0; i < groups_per_page && bh[i]; i++)
++              if (!buffer_uptodate(bh[i]))
++                      goto out;
++
++      first_block = page->index * blocks_per_page;
++      for (i = 0; i < blocks_per_page; i++) {
++              int group;
++
++              group = (first_block + i) >> 1;
++              if (group >= EXT3_SB(sb)->s_groups_count)
++                      break;
++
++              data = page_address(page) + (i * blocksize);
++              bitmap = bh[group - first_group]->b_data;
++
++              if ((first_block + i) & 1) {
++                      /* this is block of buddy */
++                      BUG_ON(incore == NULL);
++                      mb_debug("put buddy for group %u in page %lu/%x\n",
++                              group, page->index, i * blocksize);
++                      memset(data, 0xff, blocksize);
++                      EXT3_GROUP_INFO(sb, group)->bb_fragments = 0;
++                      memset(EXT3_GROUP_INFO(sb, group)->bb_counters, 0,
++                             sizeof(unsigned short)*(sb->s_blocksize_bits+2));
++                      ext3_mb_generate_buddy(sb, data, incore, group);
++                      incore = NULL;
++              } else {
++                      /* this is block of bitmap */
++                      BUG_ON(incore != NULL);
++                      mb_debug("put bitmap for group %u in page %lu/%x\n",
++                              group, page->index, i * blocksize);
++
++                      /* see comments in ext3_mb_put_pa() */
++                      ext3_lock_group(sb, group);
++                      memcpy(data, bitmap, blocksize);
++
++                      /* mark all preallocated blocks used in in-core bitmap */
++                      ext3_mb_generate_from_pa(sb, data, group);
++                      ext3_unlock_group(sb, group);
++
++                      incore = data;
++              }
++      }
++      SetPageUptodate(page);
++
++out:
++      if (bh) {
++              for (i = 0; i < groups_per_page && bh[i]; i++)
++                      brelse(bh[i]);
++              if (bh != &bhs)
++                      kfree(bh);
++      }
++      return err;
++}
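++/* cache layout sketch: each group owns two blocks in s_buddy_cache,
++ * block 2 * group for the bitmap and 2 * group + 1 for the buddy.
++ * with 4K blocks and 4K pages (one block per page, illustrative),
++ * page 2g holds group g's bitmap and page 2g + 1 its buddy */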
++
++static int ext3_mb_load_buddy(struct super_block *sb, int group,
++              struct ext3_buddy *e3b)
++{
++      struct ext3_sb_info *sbi = EXT3_SB(sb);
++      struct inode *inode = sbi->s_buddy_cache;
++      int blocks_per_page, block, pnum, poff;
++      struct page *page;
++
++      mb_debug("load group %u\n", group);
++
++      blocks_per_page = PAGE_CACHE_SIZE / sb->s_blocksize;
++
++      e3b->bd_blkbits = sb->s_blocksize_bits;
++      e3b->bd_info = EXT3_GROUP_INFO(sb, group);
++      e3b->bd_sb = sb;
++      e3b->bd_group = group;
++      e3b->bd_buddy_page = NULL;
++      e3b->bd_bitmap_page = NULL;
++
++      block = group * 2;
++      pnum = block / blocks_per_page;
++      poff = block % blocks_per_page;
++
++      /* we could use find_or_create_page(), but it locks the page,
++       * which we'd like to avoid in the fast path ... */
++      page = find_get_page(inode->i_mapping, pnum);
++      if (page == NULL || !PageUptodate(page)) {
++              if (page)
++                      page_cache_release(page);
++              page = find_or_create_page(inode->i_mapping, pnum, GFP_NOFS);
++              if (page) {
++                      BUG_ON(page->mapping != inode->i_mapping);
++                      if (!PageUptodate(page)) {
++                              ext3_mb_init_cache(page, NULL);
++                              mb_cmp_bitmaps(e3b, page_address(page) +
++                                             (poff * sb->s_blocksize));
++                      }
++                      unlock_page(page);
++              }
++      }
++      if (page == NULL || !PageUptodate(page))
++              goto err;
++      e3b->bd_bitmap_page = page;
++      e3b->bd_bitmap = page_address(page) + (poff * sb->s_blocksize);
++      mark_page_accessed(page);
++
++      block++;
++      pnum = block / blocks_per_page;
++      poff = block % blocks_per_page;
++
++      page = find_get_page(inode->i_mapping, pnum);
++      if (page == NULL || !PageUptodate(page)) {
++              if (page)
++                      page_cache_release(page);
++              page = find_or_create_page(inode->i_mapping, pnum, GFP_NOFS);
++              if (page) {
++                      BUG_ON(page->mapping != inode->i_mapping);
++                      if (!PageUptodate(page))
++                              ext3_mb_init_cache(page, e3b->bd_bitmap);
++
++                      unlock_page(page);
++              }
++      }
++      if (page == NULL || !PageUptodate(page))
++              goto err;
++      e3b->bd_buddy_page = page;
++      e3b->bd_buddy = page_address(page) + (poff * sb->s_blocksize);
++      mark_page_accessed(page);
++
++      BUG_ON(e3b->bd_bitmap_page == NULL);
++      BUG_ON(e3b->bd_buddy_page == NULL);
++
++      return 0;
++
++err:
++      if (e3b->bd_bitmap_page)
++              page_cache_release(e3b->bd_bitmap_page);
++      if (e3b->bd_buddy_page)
++              page_cache_release(e3b->bd_buddy_page);
++      e3b->bd_buddy = NULL;
++      e3b->bd_bitmap = NULL;
++      return -EIO;
++}
++
++static void ext3_mb_release_desc(struct ext3_buddy *e3b)
++{
++      if (e3b->bd_bitmap_page)
++              page_cache_release(e3b->bd_bitmap_page);
++      if (e3b->bd_buddy_page)
++              page_cache_release(e3b->bd_buddy_page);
++}
++
++
++static int mb_find_order_for_block(struct ext3_buddy *e3b, int block)
++{
++      int order = 1;
++      void *bb;
++
++      BUG_ON(EXT3_MB_BITMAP(e3b) == EXT3_MB_BUDDY(e3b));
++      BUG_ON(block >= (1 << (e3b->bd_blkbits + 3)));
++
++      bb = EXT3_MB_BUDDY(e3b);
++      while (order <= e3b->bd_blkbits + 1) {
++              block = block >> 1;
++              if (!mb_test_bit(block, bb)) {
++                      /* this block is part of buddy of order 'order' */
++                      return order;
++              }
++              bb += 1 << (e3b->bd_blkbits - order);
++              order++;
++      }
++      return 0;
++}
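++/* note: free chunks clear their bit only at their own order (lower
++ * orders stay set after coalescing), so e.g. for a block inside a free
++ * order-2 chunk the order-1 test fails and the walk returns 2 */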
++
++static inline void mb_clear_bits(spinlock_t *lock, void *bm, int cur, int len)
++{
++      __u32 *addr;
++
++      len = cur + len;
++      while (cur < len) {
++              if ((cur & 31) == 0 && (len - cur) >= 32) {
++                      /* fast path: clear whole word at once */
++                      addr = bm + (cur >> 3);
++                      *addr = 0;
++                      cur += 32;
++                      continue;
++              }
++              mb_clear_bit_atomic(lock, cur, bm);
++              cur++;
++      }
++}
++
++static inline void mb_set_bits(spinlock_t *lock, void *bm, int cur, int len)
++{
++      __u32 *addr;
++
++      len = cur + len;
++      while (cur < len) {
++              if ((cur & 31) == 0 && (len - cur) >= 32) {
++                      /* fast path: set whole word at once */
++                      addr = bm + (cur >> 3);
++                      *addr = 0xffffffff;
++                      cur += 32;
++                      continue;
++              }
++              mb_set_bit_atomic(lock, cur, bm);
++              cur++;
++      }
++}
++
++static int mb_free_blocks(struct inode *inode, struct ext3_buddy *e3b,
++                        int first, int count)
++{
++      int block = 0, max = 0, order;
++      void *buddy, *buddy2;
++      struct super_block *sb = e3b->bd_sb;
++
++      BUG_ON(first + count > (sb->s_blocksize << 3));
++      BUG_ON(!ext3_is_group_locked(sb, e3b->bd_group));
++      mb_check_buddy(e3b);
++      mb_free_blocks_double(inode, e3b, first, count);
++
++      e3b->bd_info->bb_free += count;
++      if (first < e3b->bd_info->bb_first_free)
++              e3b->bd_info->bb_first_free = first;
++
++      /* let's maintain fragments counter */
++      if (first != 0)
++              block = !mb_test_bit(first - 1, EXT3_MB_BITMAP(e3b));
++      if (first + count < EXT3_SB(sb)->s_mb_maxs[0])
++              max = !mb_test_bit(first + count, EXT3_MB_BITMAP(e3b));
++      if (block && max)
++              e3b->bd_info->bb_fragments--;
++      else if (!block && !max)
++              e3b->bd_info->bb_fragments++;
++
++      /* let's maintain buddy itself */
++      while (count-- > 0) {
++              block = first++;
++              order = 0;
++
++              if (!mb_test_bit(block, EXT3_MB_BITMAP(e3b))) {
++                      unsigned long blocknr;
++                      blocknr = e3b->bd_group * EXT3_BLOCKS_PER_GROUP(sb);
++                      blocknr += block;
++                      blocknr +=
++                          le32_to_cpu(EXT3_SB(sb)->s_es->s_first_data_block);
++
++                      ext3_error(sb, __FUNCTION__, "double-free of inode"
++                                 " %lu's block %lu(bit %u in group %u)\n",
++                                 inode ? inode->i_ino : 0, blocknr, block,
++                                 e3b->bd_group);
++              }
++              mb_clear_bit(block, EXT3_MB_BITMAP(e3b));
++              e3b->bd_info->bb_counters[order]++;
++
++              /* start of the buddy */
++              buddy = mb_find_buddy(e3b, order, &max);
++
++              do {
++                      block &= ~1UL;
++                      if (mb_test_bit(block, buddy) ||
++                                      mb_test_bit(block + 1, buddy))
++                              break;
++
++                      /* both the buddies are free, try to coalesce them */
++                      buddy2 = mb_find_buddy(e3b, order + 1, &max);
++
++                      if (!buddy2)
++                              break;
++
++                      if (order > 0) {
++                              /* for special purposes, we don't set
++                               * free bits in bitmap */
++                              mb_set_bit(block, buddy);
++                              mb_set_bit(block + 1, buddy);
++                      }
++                      e3b->bd_info->bb_counters[order]--;
++                      e3b->bd_info->bb_counters[order]--;
++
++                      block = block >> 1;
++                      order++;
++                      e3b->bd_info->bb_counters[order]++;
++
++                      mb_clear_bit(block, buddy2);
++                      buddy = buddy2;
++              } while (1);
++      }
++      mb_check_buddy(e3b);
++
++      return 0;
++}
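++/* coalescing sketch: freeing block 7 while 6 is already free merges
++ * the pair into an order-1 buddy at 6; if 4..5 is free as well, the
++ * loop repeats and 4..7 becomes a single order-2 chunk, with
++ * bb_counters[] adjusted at each step */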
++
++static int mb_find_extent(struct ext3_buddy *e3b, int order, int block,
++                              int needed, struct ext3_free_extent *ex)
++{
++      int next = block, max, ord;
++      void *buddy;
++
++      BUG_ON(!ext3_is_group_locked(e3b->bd_sb, e3b->bd_group));
++      BUG_ON(ex == NULL);
++
++      buddy = mb_find_buddy(e3b, order, &max);
++      BUG_ON(buddy == NULL);
++      BUG_ON(block >= max);
++      if (mb_test_bit(block, buddy)) {
++              ex->fe_len = 0;
++              ex->fe_start = 0;
++              ex->fe_group = 0;
++              return 0;
++      }
++
++      if (likely(order == 0)) {
++              /* find actual order */
++              order = mb_find_order_for_block(e3b, block);
++              block = block >> order;
++      }
++
++      ex->fe_len = 1 << order;
++      ex->fe_start = block << order;
++      ex->fe_group = e3b->bd_group;
++
++      /* calc difference from given start */
++      next = next - ex->fe_start;
++      ex->fe_len -= next;
++      ex->fe_start += next;
++
++      while (needed > ex->fe_len && (buddy = mb_find_buddy(e3b, order, &max))) {
++
++              if (block + 1 >= max)
++                      break;
++
++              next = (block + 1) * (1 << order);
++              if (mb_test_bit(next, EXT3_MB_BITMAP(e3b)))
++                      break;
++
++              ord = mb_find_order_for_block(e3b, next);
++
++              order = ord;
++              block = next >> order;
++              ex->fe_len += 1 << order;
++      }
++
++      BUG_ON(ex->fe_start + ex->fe_len > (1 << (e3b->bd_blkbits + 3)));
++      return ex->fe_len;
++}
++
++static int mb_mark_used(struct ext3_buddy *e3b, struct ext3_free_extent *ex)
++{
++      int ord, mlen = 0, max = 0, cur;
++      int start = ex->fe_start;
++      int len = ex->fe_len;
++      unsigned ret = 0;
++      int len0 = len;
++      void *buddy;
++
++      BUG_ON(start + len > (e3b->bd_sb->s_blocksize << 3));
++      BUG_ON(e3b->bd_group != ex->fe_group);
++      BUG_ON(!ext3_is_group_locked(e3b->bd_sb, e3b->bd_group));
++      mb_check_buddy(e3b);
++      mb_mark_used_double(e3b, start, len);
++
++      e3b->bd_info->bb_free -= len;
++      if (e3b->bd_info->bb_first_free == start)
++              e3b->bd_info->bb_first_free += len;
++
++      /* let's maintain fragments counter */
++      if (start != 0)
++              mlen = !mb_test_bit(start - 1, EXT3_MB_BITMAP(e3b));
++      if (start + len < EXT3_SB(e3b->bd_sb)->s_mb_maxs[0])
++              max = !mb_test_bit(start + len, EXT3_MB_BITMAP(e3b));
++      if (mlen && max)
++              e3b->bd_info->bb_fragments++;
++      else if (!mlen && !max)
++              e3b->bd_info->bb_fragments--;
++
++      /* let's maintain buddy itself */
++      while (len) {
++              ord = mb_find_order_for_block(e3b, start);
++
++              if (((start >> ord) << ord) == start && len >= (1 << ord)) {
++                      /* the whole chunk may be allocated at once! */
++                      mlen = 1 << ord;
++                      buddy = mb_find_buddy(e3b, ord, &max);
++                      BUG_ON((start >> ord) >= max);
++                      mb_set_bit(start >> ord, buddy);
++                      e3b->bd_info->bb_counters[ord]--;
++                      start += mlen;
++                      len -= mlen;
++                      BUG_ON(len < 0);
++                      continue;
++              }
++
++              /* store for history */
++              if (ret == 0)
++                      ret = len | (ord << 16);
++
++              /* we have to split large buddy */
++              BUG_ON(ord <= 0);
++              buddy = mb_find_buddy(e3b, ord, &max);
++              mb_set_bit(start >> ord, buddy);
++              e3b->bd_info->bb_counters[ord]--;
++
++              ord--;
++              cur = (start >> ord) & ~1U;
++              buddy = mb_find_buddy(e3b, ord, &max);
++              mb_clear_bit(cur, buddy);
++              mb_clear_bit(cur + 1, buddy);
++              e3b->bd_info->bb_counters[ord]++;
++              e3b->bd_info->bb_counters[ord]++;
++      }
++
++      mb_set_bits(sb_bgl_lock(EXT3_SB(e3b->bd_sb), ex->fe_group),
++                  EXT3_MB_BITMAP(e3b), ex->fe_start, len0);
++      mb_check_buddy(e3b);
++
++      return ret;
++}
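++/* the return value is packed for mb_history: low 16 bits hold the
++ * length left at the first buddy split, high bits the order that was
++ * split; ext3_mb_use_best_found() unpacks them into ac_tail/ac_buddy */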
++
++/*
++ * Must be called under group lock!
++ */
++static void ext3_mb_use_best_found(struct ext3_allocation_context *ac,
++                                      struct ext3_buddy *e3b)
++{
++      unsigned long ret;
++
++      BUG_ON(ac->ac_b_ex.fe_group != e3b->bd_group);
++      BUG_ON(ac->ac_status == AC_STATUS_FOUND);
++
++      ac->ac_b_ex.fe_len = min(ac->ac_b_ex.fe_len, ac->ac_g_ex.fe_len);
++      ac->ac_b_ex.fe_logical = ac->ac_g_ex.fe_logical;
++      ret = mb_mark_used(e3b, &ac->ac_b_ex);
++
++      /* preallocation can change ac_b_ex, thus we store actually
++       * allocated blocks for history */
++      ac->ac_f_ex = ac->ac_b_ex;
++
++      ac->ac_status = AC_STATUS_FOUND;
++      ac->ac_tail = ret & 0xffff;
++      ac->ac_buddy = ret >> 16;
++
++      /* XXXXXXX: SUCH A HORRIBLE **CK */
++      ac->ac_bitmap_page = e3b->bd_bitmap_page;
++      get_page(ac->ac_bitmap_page);
++      ac->ac_buddy_page = e3b->bd_buddy_page;
++      get_page(ac->ac_buddy_page);
++}
++
++/*
++ * regular allocator, for general purposes allocation
++ */
++
++void ext3_mb_check_limits(struct ext3_allocation_context *ac,
++                                      struct ext3_buddy *e3b,
++                                      int finish_group)
++{
++      struct ext3_sb_info *sbi = EXT3_SB(ac->ac_sb);
++      struct ext3_free_extent *bex = &ac->ac_b_ex;
++      struct ext3_free_extent *gex = &ac->ac_g_ex;
++      struct ext3_free_extent ex;
++      int max;
++
++      /*
++       * We don't want to scan for a whole year
++       */
++      if (ac->ac_found > sbi->s_mb_max_to_scan &&
++                      !(ac->ac_flags & EXT3_MB_HINT_FIRST)) {
++              ac->ac_status = AC_STATUS_BREAK;
++              return;
++      }
++
++      /*
++       * Haven't found good chunk so far, let's continue
++       */
++      if (bex->fe_len < gex->fe_len)
++              return;
++
++      if ((finish_group || ac->ac_found > sbi->s_mb_min_to_scan)
++                      && bex->fe_group == e3b->bd_group) {
++              /* recheck chunk's availability - we don't know
++               * when it was found (within this lock-unlock
++               * period or not) */
++              max = mb_find_extent(e3b, 0, bex->fe_start, gex->fe_len, &ex);
++              if (max >= gex->fe_len) {
++                      ext3_mb_use_best_found(ac, e3b);
++                      return;
++              }
++      }
++}
++
++/*
++ * The routine checks whether found extent is good enough. If it is,
++ * then the extent gets marked used and flag is set to the context
++ * to stop scanning. Otherwise, the extent is compared with the
++ * previous found extent and if new one is better, then it's stored
++ * in the context. Later, the best found extent will be used, if
++ * mballoc can't find good enough extent.
++ *
++ * FIXME: real allocation policy is to be designed yet!
++ */
++static void ext3_mb_measure_extent(struct ext3_allocation_context *ac,
++                                      struct ext3_free_extent *ex,
++                                      struct ext3_buddy *e3b)
++{
++      struct ext3_free_extent *bex = &ac->ac_b_ex;
++      struct ext3_free_extent *gex = &ac->ac_g_ex;
++
++      BUG_ON(ex->fe_len <= 0);
++      BUG_ON(ex->fe_len >= EXT3_BLOCKS_PER_GROUP(ac->ac_sb));
++      BUG_ON(ex->fe_start >= EXT3_BLOCKS_PER_GROUP(ac->ac_sb));
++      BUG_ON(ac->ac_status != AC_STATUS_CONTINUE);
++
++      ac->ac_found++;
++
++      /*
++       * The special case - take what you catch first
++       */
++      if (unlikely(ac->ac_flags & EXT3_MB_HINT_FIRST)) {
++              *bex = *ex;
++              ext3_mb_use_best_found(ac, e3b);
++              return;
++      }
++
++      /*
++       * Let's check whether the chunk is good enough
++       */
++      if (ex->fe_len == gex->fe_len) {
++              *bex = *ex;
++              ext3_mb_use_best_found(ac, e3b);
++              return;
++      }
++
++      /*
++       * If this is the first found extent, just store it in the context
++       */
++      if (bex->fe_len == 0) {
++              *bex = *ex;
++              return;
++      }
++
++      /*
++       * If the newly found extent is better, store it in the context
++       */
++      if (bex->fe_len < gex->fe_len) {
++              /* if the request isn't satisfied, any found extent
++               * larger than previous best one is better */
++              if (ex->fe_len > bex->fe_len)
++                      *bex = *ex;
++      } else if (ex->fe_len > gex->fe_len) {
++              /* if the request is satisfied, then we try to find
++               * an extent that still satisfies the request, but is
++               * smaller than the previous one */
++              *bex = *ex;
++      }
++
++      ext3_mb_check_limits(ac, e3b, 0);
++}
++
++static int ext3_mb_try_best_found(struct ext3_allocation_context *ac,
++                                      struct ext3_buddy *e3b)
++{
++      struct ext3_free_extent ex = ac->ac_b_ex;
++      int group = ex.fe_group, max, err;
++
++      BUG_ON(ex.fe_len <= 0);
++      err = ext3_mb_load_buddy(ac->ac_sb, group, e3b);
++      if (err)
++              return err;
++
++      ext3_lock_group(ac->ac_sb, group);
++      max = mb_find_extent(e3b, 0, ex.fe_start, ex.fe_len, &ex);
++
++      if (max > 0) {
++              ac->ac_b_ex = ex;
++              ext3_mb_use_best_found(ac, e3b);
++      }
++
++      ext3_unlock_group(ac->ac_sb, group);
++      ext3_mb_release_desc(e3b);
++
++      return 0;
++}
++
++static int ext3_mb_find_by_goal(struct ext3_allocation_context *ac,
++                              struct ext3_buddy *e3b)
++{
++      int group = ac->ac_g_ex.fe_group, max, err;
++      struct ext3_sb_info *sbi = EXT3_SB(ac->ac_sb);
++      struct ext3_super_block *es = sbi->s_es;
++      struct ext3_free_extent ex;
++
++      err = ext3_mb_load_buddy(ac->ac_sb, group, e3b);
++      if (err)
++              return err;
++
++      ext3_lock_group(ac->ac_sb, group);
++      max = mb_find_extent(e3b, 0, ac->ac_g_ex.fe_start,
++                           ac->ac_g_ex.fe_len, &ex);
++
++      if (max >= ac->ac_g_ex.fe_len && ac->ac_g_ex.fe_len == sbi->s_stripe) {
++              unsigned long start;
++              start = (e3b->bd_group * EXT3_BLOCKS_PER_GROUP(ac->ac_sb) +
++                      ex.fe_start + le32_to_cpu(es->s_first_data_block));
++              if (start % sbi->s_stripe == 0) {
++                      ac->ac_found++;
++                      ac->ac_b_ex = ex;
++                      ext3_mb_use_best_found(ac, e3b);
++              }
++      } else if (max >= ac->ac_g_ex.fe_len) {
++              BUG_ON(ex.fe_len <= 0);
++              BUG_ON(ex.fe_group != ac->ac_g_ex.fe_group);
++              BUG_ON(ex.fe_start != ac->ac_g_ex.fe_start);
++              ac->ac_found++;
++              ac->ac_b_ex = ex;
++              ext3_mb_use_best_found(ac, e3b);
++      } else if (max > 0 && (ac->ac_flags & EXT3_MB_HINT_MERGE)) {
++              /* Sometimes, the caller may want to merge even a small
++               * number of blocks into an existing extent */
++              BUG_ON(ex.fe_len <= 0);
++              BUG_ON(ex.fe_group != ac->ac_g_ex.fe_group);
++              BUG_ON(ex.fe_start != ac->ac_g_ex.fe_start);
++              ac->ac_found++;
++              ac->ac_b_ex = ex;
++              ext3_mb_use_best_found(ac, e3b);
++      }
++      ext3_unlock_group(ac->ac_sb, group);
++      ext3_mb_release_desc(e3b);
++
++      return 0;
++}
++
++/*
++ * The routine scans buddy structures (not the bitmap!) from the given
++ * order up to the max order and tries to find a big enough chunk to
++ * satisfy the request
++ */
++static void ext3_mb_simple_scan_group(struct ext3_allocation_context *ac,
++                                      struct ext3_buddy *e3b)
++{
++      struct super_block *sb = ac->ac_sb;
++      struct ext3_group_info *grp = e3b->bd_info;
++      void *buddy;
++      int i, k, max;
++
++      BUG_ON(ac->ac_2order <= 0);
++      for (i = ac->ac_2order; i <= sb->s_blocksize_bits + 1; i++) {
++              if (grp->bb_counters[i] == 0)
++                      continue;
++
++              buddy = mb_find_buddy(e3b, i, &max);
++              BUG_ON(buddy == NULL);
++
++              k = mb_find_next_zero_bit(buddy, max, 0);
++              BUG_ON(k >= max);
++
++              ac->ac_found++;
++
++              ac->ac_b_ex.fe_len = 1 << i;
++              ac->ac_b_ex.fe_start = k << i;
++              ac->ac_b_ex.fe_group = e3b->bd_group;
++
++              ext3_mb_use_best_found(ac, e3b);
++
++              BUG_ON(ac->ac_b_ex.fe_len != ac->ac_g_ex.fe_len);
++
++              if (EXT3_SB(sb)->s_mb_stats)
++                      atomic_inc(&EXT3_SB(sb)->s_bal_2orders);
++
++              break;
++      }
++}
++
++/*
++ * The routine scans the group and measures all found extents.
++ * In order to optimize scanning, the caller must pass the number of
++ * free blocks in the group, so the routine can know the upper limit.
++ */
++static void ext3_mb_complex_scan_group(struct ext3_allocation_context *ac,
++                                      struct ext3_buddy *e3b)
++{
++      struct super_block *sb = ac->ac_sb;
++      void *bitmap = EXT3_MB_BITMAP(e3b);
++      struct ext3_free_extent ex;
++      int i, free;
++
++      free = e3b->bd_info->bb_free;
++      BUG_ON(free <= 0);
++
++      i = e3b->bd_info->bb_first_free;
++
++      while (free && ac->ac_status == AC_STATUS_CONTINUE) {
++              i = mb_find_next_zero_bit(bitmap, EXT3_BLOCKS_PER_GROUP(sb), i);
++              if (i >= EXT3_BLOCKS_PER_GROUP(sb)) {
++                      BUG_ON(free != 0);
++                      break;
++              }
++
++              mb_find_extent(e3b, 0, i, ac->ac_g_ex.fe_len, &ex);
++              BUG_ON(ex.fe_len <= 0);
++              BUG_ON(free < ex.fe_len);
++
++              ext3_mb_measure_extent(ac, &ex, e3b);
++
++              i += ex.fe_len;
++              free -= ex.fe_len;
++      }
++
++      ext3_mb_check_limits(ac, e3b, 1);
++}
++
++/*
++ * This is a special case for storage like RAID5:
++ * we try to find stripe-aligned chunks for stripe-sized requests
++ */
++static void ext3_mb_scan_aligned(struct ext3_allocation_context *ac,
++                               struct ext3_buddy *e3b)
++{
++      struct super_block *sb = ac->ac_sb;
++      struct ext3_sb_info *sbi = EXT3_SB(sb);
++      void *bitmap = EXT3_MB_BITMAP(e3b);
++      struct ext3_free_extent ex;
++      unsigned long i, max;
++
++      BUG_ON(sbi->s_stripe == 0);
++
++      /* find first stripe-aligned block */
++      i = e3b->bd_group * EXT3_BLOCKS_PER_GROUP(sb)
++              + le32_to_cpu(sbi->s_es->s_first_data_block);
++      i = ((i + sbi->s_stripe - 1) / sbi->s_stripe) * sbi->s_stripe;
++      i = (i - le32_to_cpu(sbi->s_es->s_first_data_block))
++                      % EXT3_BLOCKS_PER_GROUP(sb);
++
++      while (i < EXT3_BLOCKS_PER_GROUP(sb)) {
++              if (!mb_test_bit(i, bitmap)) {
++                      max = mb_find_extent(e3b, 0, i, sbi->s_stripe, &ex);
++                      if (max >= sbi->s_stripe) {
++                              ac->ac_found++;
++                              ac->ac_b_ex = ex;
++                              ext3_mb_use_best_found(ac, e3b);
++                              break;
++                      }
++              }
++              i += sbi->s_stripe;
++      }
++}
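++/* e.g. (illustrative) with s_stripe == 256, s_first_data_block == 1
++ * and 32768 blocks per group, group 1 starts at block 32769; rounding
++ * up to a stripe boundary gives 33024, i.e. in-group offset 255, and
++ * the scan then probes every 256th block for a stripe-sized extent */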
++
++static int ext3_mb_good_group(struct ext3_allocation_context *ac,
++                              int group, int cr)
++{
++      struct ext3_group_info *grp = EXT3_GROUP_INFO(ac->ac_sb, group);
++      unsigned free, fragments, i, bits;
++
++      BUG_ON(cr < 0 || cr >= 4);
++      BUG_ON(EXT3_MB_GRP_NEED_INIT(grp));
++
++      free = grp->bb_free;
++      fragments = grp->bb_fragments;
++      if (free == 0)
++              return 0;
++      if (fragments == 0)
++              return 0;
++
++      switch (cr) {
++              case 0:
++                      BUG_ON(ac->ac_2order == 0);
++                      bits = ac->ac_sb->s_blocksize_bits + 1;
++                      for (i = ac->ac_2order; i <= bits; i++)
++                              if (grp->bb_counters[i] > 0)
++                                      return 1;
++                      break;
++              case 1:
++                      if ((free / fragments) >= ac->ac_g_ex.fe_len)
++                              return 1;
++                      break;
++              case 2:
++                      if (free >= ac->ac_g_ex.fe_len)
++                              return 1;
++                      break;
++              case 3:
++                      return 1;
++              default:
++                      BUG();
++      }
++
++      return 0;
++}
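++/* the criteria relax as cr grows: cr 0 demands a free buddy chunk of
++ * order >= ac_2order, cr 1 wants the average free fragment to fit the
++ * request, cr 2 just enough free blocks in total, and cr 3 accepts any
++ * group with free space */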
++
++int ext3_mb_regular_allocator(struct ext3_allocation_context *ac)
++{
++      int group, i, cr, err = 0;
++      struct ext3_sb_info *sbi;
++      struct super_block *sb;
++      struct ext3_buddy e3b;
++
++      sb = ac->ac_sb;
++      sbi = EXT3_SB(sb);
++      BUG_ON(ac->ac_status == AC_STATUS_FOUND);
++
++      /* first, try the goal */
++      err = ext3_mb_find_by_goal(ac, &e3b);
++      if (err || ac->ac_status == AC_STATUS_FOUND)
++              goto out;
++
++      if (unlikely(ac->ac_flags & EXT3_MB_HINT_GOAL_ONLY))
++              goto out;
++
++      i = ffs(ac->ac_g_ex.fe_len);
++      ac->ac_2order = 0;
++      if (i >= sbi->s_mb_order2_reqs) {
++              i--;
++              if ((ac->ac_g_ex.fe_len & (~(1 << i))) == 0)
++                      ac->ac_2order = i;
++      }
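++      /* ac_2order is set only when the request length is an exact
++       * power of two (and large enough per s_mb_order2_reqs); such
++       * requests can be satisfied directly from the buddy counters
++       * during the cr=0 pass */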
++
++      group = ac->ac_g_ex.fe_group;
++
++      /* Let's just scan the groups to find more or less suitable blocks */
++      cr = ac->ac_2order ? 0 : 1;
++repeat:
++      for (; cr < 4 && ac->ac_status == AC_STATUS_CONTINUE; cr++) {
++              ac->ac_criteria = cr;
++              for (i = 0; i < EXT3_SB(sb)->s_groups_count; group++, i++) {
++                      struct ext3_group_info *grp;
++
++                      if (group == EXT3_SB(sb)->s_groups_count)
++                              group = 0;
++
++                      /* quick check to skip empty groups */
++                      grp = EXT3_GROUP_INFO(ac->ac_sb, group);
++                      if (grp->bb_free == 0)
++                              continue;
++
++                      if (EXT3_MB_GRP_NEED_INIT(EXT3_GROUP_INFO(sb, group))) {
++                              /* we need full data about the group
++                               * to make a good selection */
++                              err = ext3_mb_load_buddy(sb, group, &e3b);
++                              if (err)
++                                      goto out;
++                              ext3_mb_release_desc(&e3b);
++                      }
++
++                      /* check whether the group is good for our criteria */
++                      if (!ext3_mb_good_group(ac, group, cr))
++                              continue;
++
++                      err = ext3_mb_load_buddy(sb, group, &e3b);
++                      if (err)
++                              goto out;
++
++                      ext3_lock_group(sb, group);
++                      if (!ext3_mb_good_group(ac, group, cr)) {
++                              /* someone did allocation from this group */
++                              ext3_unlock_group(sb, group);
++                              ext3_mb_release_desc(&e3b);
++                              continue;
++                      }
++
++                      ac->ac_groups_scanned++;
++                      if (cr == 0)
++                              ext3_mb_simple_scan_group(ac, &e3b);
++                      else if (cr == 1 && ac->ac_g_ex.fe_len == sbi->s_stripe)
++                              ext3_mb_scan_aligned(ac, &e3b);
++                      else
++                              ext3_mb_complex_scan_group(ac, &e3b);
++
++                      ext3_unlock_group(sb, group);
++                      ext3_mb_release_desc(&e3b);
++
++                      if (ac->ac_status != AC_STATUS_CONTINUE)
++                              break;
++              }
++      }
++
++      if (ac->ac_b_ex.fe_len > 0 && ac->ac_status != AC_STATUS_FOUND &&
++          !(ac->ac_flags & EXT3_MB_HINT_FIRST)) {
++              /*
++               * We've been searching too long. Let's try to allocate
++               * the best chunk we've found so far
++               */
++
++              ext3_mb_try_best_found(ac, &e3b);
++              if (ac->ac_status != AC_STATUS_FOUND) {
++                      /*
++                       * Someone more lucky has already allocated it.
++                       * The only thing we can do is just take first
++                       * found block(s)
++                      printk(KERN_DEBUG "EXT3-fs: someone won our chunk\n");
++                       */
++                      ac->ac_b_ex.fe_group = 0;
++                      ac->ac_b_ex.fe_start = 0;
++                      ac->ac_b_ex.fe_len = 0;
++                      ac->ac_status = AC_STATUS_CONTINUE;
++                      ac->ac_flags |= EXT3_MB_HINT_FIRST;
++                      cr = 3;
++                      atomic_inc(&sbi->s_mb_lost_chunks);
++                      goto repeat;
++              }
++      }
++out:
++      return err;
++}
++
++#ifdef EXT3_MB_HISTORY
++struct ext3_mb_proc_session {
++      struct ext3_mb_history *history;
++      struct super_block *sb;
++      int start;
++      int max;
++};
++
++static void *ext3_mb_history_skip_empty(struct ext3_mb_proc_session *s,
++                                      struct ext3_mb_history *hs,
++                                      int first)
++{
++      if (hs == s->history + s->max)
++              hs = s->history;
++      if (!first && hs == s->history + s->start)
++              return NULL;
++      while (hs->orig.fe_len == 0) {
++              hs++;
++              if (hs == s->history + s->max)
++                      hs = s->history;
++              if (hs == s->history + s->start)
++                      return NULL;
++      }
++      return hs;
++}
++
++static void *ext3_mb_seq_history_start(struct seq_file *seq, loff_t *pos)
++{
++      struct ext3_mb_proc_session *s = seq->private;
++      struct ext3_mb_history *hs;
++      int l = *pos;
++
++      if (l == 0)
++              return SEQ_START_TOKEN;
++      hs = ext3_mb_history_skip_empty(s, s->history + s->start, 1);
++      if (!hs)
++              return NULL;
++      while (--l && (hs = ext3_mb_history_skip_empty(s, ++hs, 0)) != NULL);
++      return hs;
++}
++
++static void *ext3_mb_seq_history_next(struct seq_file *seq, void *v, loff_t *pos)
++{
++      struct ext3_mb_proc_session *s = seq->private;
++      struct ext3_mb_history *hs = v;
++
++      ++*pos;
++      if (v == SEQ_START_TOKEN)
++              return ext3_mb_history_skip_empty(s, s->history + s->start, 1);
++      else
++              return ext3_mb_history_skip_empty(s, ++hs, 0);
++}
++
++static int ext3_mb_seq_history_show(struct seq_file *seq, void *v)
++{
++      char buf[25], buf2[25], buf3[25], *fmt;
++      struct ext3_mb_history *hs = v;
++
++      if (v == SEQ_START_TOKEN) {
++              seq_printf(seq, "%-5s %-8s %-23s %-23s %-23s %-5s "
++                              "%-5s %-2s %-5s %-5s %-5s %-6s\n",
++                         "pid", "inode", "original", "goal", "result","found",
++                         "grps", "cr", "flags", "merge", "tail", "broken");
++              return 0;
++      }
++
++      if (hs->op == EXT3_MB_HISTORY_ALLOC) {
++              fmt = "%-5u %-8u %-23s %-23s %-23s %-5u %-5u %-2u "
++                      "%-5u %-5s %-5u %-6u\n";
++              sprintf(buf2, "%lu/%lu/%lu@%lu", hs->result.fe_group,
++                      hs->result.fe_start, hs->result.fe_len,
++                      hs->result.fe_logical);
++              sprintf(buf, "%lu/%lu/%lu@%lu", hs->orig.fe_group,
++                      hs->orig.fe_start, hs->orig.fe_len,
++                      hs->orig.fe_logical);
++              sprintf(buf3, "%lu/%lu/%lu@%lu", hs->goal.fe_group,
++                      hs->goal.fe_start, hs->goal.fe_len,
++                      hs->goal.fe_logical);
++              seq_printf(seq, fmt, hs->pid, hs->ino, buf, buf3, buf2,
++                              hs->found, hs->groups, hs->cr, hs->flags,
++                              hs->merged ? "M" : "", hs->tail,
++                              hs->buddy ? 1 << hs->buddy : 0);
++      } else if (hs->op == EXT3_MB_HISTORY_PREALLOC) {
++              fmt = "%-5u %-8u %-23s %-23s %-23s\n";
++              sprintf(buf2, "%lu/%lu/%lu@%lu", hs->result.fe_group,
++                      hs->result.fe_start, hs->result.fe_len,
++                      hs->result.fe_logical);
++              sprintf(buf, "%lu/%lu/%lu@%lu", hs->orig.fe_group,
++                      hs->orig.fe_start, hs->orig.fe_len,
++                      hs->orig.fe_logical);
++              seq_printf(seq, fmt, hs->pid, hs->ino, buf, "", buf2);
++      } else if (hs->op == EXT3_MB_HISTORY_DISCARD) {
++              sprintf(buf2, "%lu/%lu/%lu", hs->result.fe_group,
++                      hs->result.fe_start, hs->result.fe_len);
++              seq_printf(seq, "%-5u %-8u %-23s discard\n",
++                              hs->pid, hs->ino, buf2);
++      } else if (hs->op == EXT3_MB_HISTORY_FREE) {
++              sprintf(buf2, "%lu/%lu/%lu", hs->result.fe_group,
++                      hs->result.fe_start, hs->result.fe_len);
++              seq_printf(seq, "%-5u %-8u %-23s free\n",
++                              hs->pid, hs->ino, buf2);
++      }
++      return 0;
++}
++
++static void ext3_mb_seq_history_stop(struct seq_file *seq, void *v)
++{
++}
++
++static struct seq_operations ext3_mb_seq_history_ops = {
++      .start  = ext3_mb_seq_history_start,
++      .next   = ext3_mb_seq_history_next,
++      .stop   = ext3_mb_seq_history_stop,
++      .show   = ext3_mb_seq_history_show,
++};
++
++static int ext3_mb_seq_history_open(struct inode *inode, struct file *file)
++{
++      struct super_block *sb = PDE(inode)->data;
++      struct ext3_sb_info *sbi = EXT3_SB(sb);
++      struct ext3_mb_proc_session *s;
++      int rc, size;
++
++      s = kmalloc(sizeof(*s), GFP_KERNEL);
++      if (s == NULL)
++              return -ENOMEM;
++      s->sb = sb;
++      size = sizeof(struct ext3_mb_history) * sbi->s_mb_history_max;
++      s->history = kmalloc(size, GFP_KERNEL);
++      if (s->history == NULL) {
++              kfree(s);
++              return -ENOMEM;
++      }
++
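++      /* snapshot the history ring buffer under the lock so the
++       * seq_file iteration works on a consistent copy */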
++      spin_lock(&sbi->s_mb_history_lock);
++      memcpy(s->history, sbi->s_mb_history, size);
++      s->max = sbi->s_mb_history_max;
++      s->start = sbi->s_mb_history_cur % s->max;
++      spin_unlock(&sbi->s_mb_history_lock);
++
++      rc = seq_open(file, &ext3_mb_seq_history_ops);
++      if (rc == 0) {
++              struct seq_file *m = (struct seq_file *)file->private_data;
++              m->private = s;
++      } else {
++              kfree(s->history);
++              kfree(s);
++      }
++      return rc;
++
++}
++
++static int ext3_mb_seq_history_release(struct inode *inode, struct file *file)
++{
++      struct seq_file *seq = (struct seq_file *)file->private_data;
++      struct ext3_mb_proc_session *s = seq->private;
++      kfree(s->history);
++      kfree(s);
++      return seq_release(inode, file);
++}
++
++static ssize_t ext3_mb_seq_history_write(struct file *file,
++                              const char __user *buffer,
++                              size_t count, loff_t *ppos)
++{
++      struct seq_file *seq = (struct seq_file *)file->private_data;
++      struct ext3_mb_proc_session *s = seq->private;
++      struct super_block *sb = s->sb;
++      char str[32];
++      int value;
++
++      if (count >= sizeof(str)) {
++              printk(KERN_ERR "EXT3-fs: %s string too long, max %u bytes\n",
++                              "mb_history", (int)sizeof(str));
++              return -EOVERFLOW;
++      }
++
++      if (copy_from_user(str, buffer, count))
++              return -EFAULT;
++
++      value = simple_strtol(str, NULL, 0);
++      if (value < 0)
++              return -ERANGE;
++      EXT3_SB(sb)->s_mb_history_filter = value;
++
++      return count;
++}
++
++static struct file_operations ext3_mb_seq_history_fops = {
++      .owner          = THIS_MODULE,
++      .open           = ext3_mb_seq_history_open,
++      .read           = seq_read,
++      .write          = ext3_mb_seq_history_write,
++      .llseek         = seq_lseek,
++      .release        = ext3_mb_seq_history_release,
++};
++
++static void *ext3_mb_seq_groups_start(struct seq_file *seq, loff_t *pos)
++{
++      struct super_block *sb = seq->private;
++      struct ext3_sb_info *sbi = EXT3_SB(sb);
++      long group;
++
++      if (*pos < 0 || *pos >= sbi->s_groups_count)
++              return NULL;
++
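++      /* seq_file treats NULL as end-of-sequence, so groups are
++       * encoded as (group + 1); ->show() undoes the shift */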
++      group = *pos + 1;
++      return (void *) group;
++}
++
++static void *ext3_mb_seq_groups_next(struct seq_file *seq, void *v, loff_t *pos)
++{
++      struct super_block *sb = seq->private;
++      struct ext3_sb_info *sbi = EXT3_SB(sb);
++      long group;
++
++      ++*pos;
++      if (*pos < 0 || *pos >= sbi->s_groups_count)
++              return NULL;
++      group = *pos + 1;
++      return (void *) group;
++}
++
++static int ext3_mb_seq_groups_show(struct seq_file *seq, void *v)
++{
++      struct super_block *sb = seq->private;
++      long group = (long) v;
++      int i, err;
++      struct ext3_buddy e3b;
++      struct sg {
++              struct ext3_group_info info;
++              unsigned short counters[16];
++      } sg;
++
++      group--;
++      if (group == 0)
++              seq_printf(seq, "#%-5s: %-5s %-5s %-5s "
++                              "[ %-5s %-5s %-5s %-5s %-5s %-5s %-5s "
++                                "%-5s %-5s %-5s %-5s %-5s %-5s %-5s ]\n",
++                         "group", "free", "frags", "first",
++                         "2^0", "2^1", "2^2", "2^3", "2^4", "2^5","2^6",
++                         "2^7", "2^8", "2^9", "2^10", "2^11", "2^12", "2^13");
++
++      i = (sb->s_blocksize_bits + 2) * sizeof(sg.info.bb_counters[0]) +
++              sizeof(struct ext3_group_info);
++      err = ext3_mb_load_buddy(sb, group, &e3b);
++      if (err) {
++              seq_printf(seq, "#%-5lu: I/O error\n", group);
++              return 0;
++      }
++      ext3_lock_group(sb, group);
++      memcpy(&sg, EXT3_GROUP_INFO(sb, group), i);
++      ext3_unlock_group(sb, group);
++      ext3_mb_release_desc(&e3b);
++
++      seq_printf(seq, "#%-5lu: %-5u %-5u %-5u [", group, sg.info.bb_free,
++                      sg.info.bb_fragments, sg.info.bb_first_free);
++      for (i = 0; i <= 13; i++)
++              seq_printf(seq, " %-5u", i <= sb->s_blocksize_bits + 1 ?
++                              sg.info.bb_counters[i] : 0);
++      seq_printf(seq, " ]\n");
++
++      return 0;
++}
++
++static void ext3_mb_seq_groups_stop(struct seq_file *seq, void *v)
++{
++}
++
++static struct seq_operations ext3_mb_seq_groups_ops = {
++      .start  = ext3_mb_seq_groups_start,
++      .next   = ext3_mb_seq_groups_next,
++      .stop   = ext3_mb_seq_groups_stop,
++      .show   = ext3_mb_seq_groups_show,
++};
++
++static int ext3_mb_seq_groups_open(struct inode *inode, struct file *file)
++{
++      struct super_block *sb = PDE(inode)->data;
++      int rc;
++
++      rc = seq_open(file, &ext3_mb_seq_groups_ops);
++      if (rc == 0) {
++              struct seq_file *m = (struct seq_file *)file->private_data;
++              m->private = sb;
++      }
++      return rc;
++
++}
++
++static struct file_operations ext3_mb_seq_groups_fops = {
++      .owner          = THIS_MODULE,
++      .open           = ext3_mb_seq_groups_open,
++      .read           = seq_read,
++      .llseek         = seq_lseek,
++      .release        = seq_release,
++};
++
++static void ext3_mb_history_release(struct super_block *sb)
++{
++      struct ext3_sb_info *sbi = EXT3_SB(sb);
++
++      remove_proc_entry("mb_groups", sbi->s_mb_proc);
++      remove_proc_entry("mb_history", sbi->s_mb_proc);
++
++      if (sbi->s_mb_history)
++              kfree(sbi->s_mb_history);
++}
++
++static void ext3_mb_history_init(struct super_block *sb)
++{
++      struct ext3_sb_info *sbi = EXT3_SB(sb);
++      int i;
++
++      if (sbi->s_mb_proc != NULL) {
++              struct proc_dir_entry *p;
++              p = create_proc_entry("mb_history", S_IRUGO, sbi->s_mb_proc);
++              if (p) {
++                      p->proc_fops = &ext3_mb_seq_history_fops;
++                      p->data = sb;
++              }
++              p = create_proc_entry("mb_groups", S_IRUGO, sbi->s_mb_proc);
++              if (p) {
++                      p->proc_fops = &ext3_mb_seq_groups_fops;
++                      p->data = sb;
++              }
++      }
++
++      sbi->s_mb_history_max = 1000;
++      sbi->s_mb_history_cur = 0;
++      spin_lock_init(&sbi->s_mb_history_lock);
++      i = sbi->s_mb_history_max * sizeof(struct ext3_mb_history);
++      sbi->s_mb_history = kmalloc(i, GFP_KERNEL);
++      if (likely(sbi->s_mb_history != NULL))
++              memset(sbi->s_mb_history, 0, i);
++      /* if we can't allocate the history buffer, we simply won't use it */
++}
++
++static void
++ext3_mb_store_history(struct ext3_allocation_context *ac)
++{
++      struct ext3_sb_info *sbi = EXT3_SB(ac->ac_sb);
++      struct ext3_mb_history h;
++
++      if (unlikely(sbi->s_mb_history == NULL))
++              return;
++
++      if (!(ac->ac_op & sbi->s_mb_history_filter))
++              return;
++
++      h.op = ac->ac_op;
++      h.pid = current->pid;
++      h.ino = ac->ac_inode ? ac->ac_inode->i_ino : 0;
++      h.orig = ac->ac_o_ex;
++      h.result = ac->ac_b_ex;
++      h.flags = ac->ac_flags;
++      h.merged = 0;
++      if (ac->ac_op == EXT3_MB_HISTORY_ALLOC) {
++              if (ac->ac_g_ex.fe_start == ac->ac_b_ex.fe_start &&
++                              ac->ac_g_ex.fe_group == ac->ac_b_ex.fe_group)
++                      h.merged = 1;
++              h.goal = ac->ac_g_ex;
++              h.result = ac->ac_f_ex;
++      }
++
++      spin_lock(&sbi->s_mb_history_lock);
++      memcpy(sbi->s_mb_history + sbi->s_mb_history_cur, &h, sizeof(h));
++      if (++sbi->s_mb_history_cur >= sbi->s_mb_history_max)
++              sbi->s_mb_history_cur = 0;
++      spin_unlock(&sbi->s_mb_history_lock);
++}
++
++#else
++#define ext3_mb_history_release(sb)
++#define ext3_mb_history_init(sb)
++#endif
++
++int ext3_mb_init_backend(struct super_block *sb)
++{
++      struct ext3_sb_info *sbi = EXT3_SB(sb);
++      int i, j, len, metalen;
++      int num_meta_group_infos =
++              (sbi->s_groups_count + EXT3_DESC_PER_BLOCK(sb) - 1) >>
++                      EXT3_DESC_PER_BLOCK_BITS(sb);
++      struct ext3_group_info **meta_group_info;
++
++      /* An 8TB filesystem with 64-bit pointers requires a 4096 byte
++       * kmalloc. A 128kb malloc should suffice for a 256TB filesystem.
++       * So a two level scheme suffices for now. */
++      sbi->s_group_info = kmalloc(sizeof(*sbi->s_group_info) *
++                                  num_meta_group_infos, GFP_KERNEL);
++      if (sbi->s_group_info == NULL) {
++              printk(KERN_ERR "EXT3-fs: can't allocate buddy meta group\n");
++              return -ENOMEM;
++      }
++      sbi->s_buddy_cache = new_inode(sb);
++      if (sbi->s_buddy_cache == NULL) {
++              printk(KERN_ERR "EXT3-fs: can't get new inode\n");
++              goto err_freesgi;
++      }
++      EXT3_I(sbi->s_buddy_cache)->i_disksize = 0;
++
++      metalen = sizeof(*meta_group_info) << EXT3_DESC_PER_BLOCK_BITS(sb);
++      for (i = 0; i < num_meta_group_infos; i++) {
++              if ((i + 1) == num_meta_group_infos)
++                      metalen = sizeof(*meta_group_info) *
++                              (sbi->s_groups_count -
++                                      (i << EXT3_DESC_PER_BLOCK_BITS(sb)));
++              meta_group_info = kmalloc(metalen, GFP_KERNEL);
++              if (meta_group_info == NULL) {
++                      printk(KERN_ERR "EXT3-fs: can't allocate mem for a "
++                             "buddy group\n");
++                      goto err_freemeta;
++              }
++              sbi->s_group_info[i] = meta_group_info;
++      }
++
++      /*
++       * calculate the needed size. if the bb_counters size changes,
++       * don't forget to update ext3_mb_generate_buddy()
++       */
++      len = sizeof(struct ext3_group_info);
++      len += sizeof(unsigned short) * (sb->s_blocksize_bits + 2);
++      for (i = 0; i < sbi->s_groups_count; i++) {
++              struct ext3_group_desc * desc;
++
++              meta_group_info =
++                      sbi->s_group_info[i >> EXT3_DESC_PER_BLOCK_BITS(sb)];
++              j = i & (EXT3_DESC_PER_BLOCK(sb) - 1);
++
++              meta_group_info[j] = kmalloc(len, GFP_KERNEL);
++              if (meta_group_info[j] == NULL) {
++                      printk(KERN_ERR "EXT3-fs: can't allocate buddy mem\n");
++                      i--;
++                      goto err_freebuddy;
++              }
++              desc = ext3_get_group_desc(sb, i, NULL);
++              if (desc == NULL) {
++                      printk(KERN_ERR "EXT3-fs: can't read descriptor %u\n", i);
++                      goto err_freebuddy;
++              }
++              memset(meta_group_info[j], 0, len);
++              set_bit(EXT3_GROUP_INFO_NEED_INIT_BIT,
++                      &meta_group_info[j]->bb_state);
++
++              /* initialize bb_free to be able to skip
++               * empty groups without initialization */
++              meta_group_info[j]->bb_free =
++                      le16_to_cpu(desc->bg_free_blocks_count);
++
++              INIT_LIST_HEAD(&meta_group_info[j]->bb_prealloc_list);
++
++#ifdef DOUBLE_CHECK
++              {
++                      struct buffer_head *bh;
++                      meta_group_info[j]->bb_bitmap =
++                              kmalloc(sb->s_blocksize, GFP_KERNEL);
++                      BUG_ON(meta_group_info[j]->bb_bitmap == NULL);
++                      bh = read_block_bitmap(sb, i);
++                      BUG_ON(bh == NULL);
++                      memcpy(meta_group_info[j]->bb_bitmap, bh->b_data,
++                                      sb->s_blocksize);
++                      brelse(bh);
++              }
++#endif
++
++      }
++
++      return 0;
++
++err_freebuddy:
++      while (i >= 0) {
++              kfree(EXT3_GROUP_INFO(sb, i));
++              i--;
++      }
++      i = num_meta_group_infos;
++err_freemeta:
++      while (--i >= 0)
++              kfree(sbi->s_group_info[i]);
++      iput(sbi->s_buddy_cache);
++err_freesgi:
++      kfree(sbi->s_group_info);
++      return -ENOMEM;
++}
++
++int ext3_mb_init(struct super_block *sb, int needs_recovery)
++{
++      struct ext3_sb_info *sbi = EXT3_SB(sb);
++      unsigned i, offset, max;
++
++      if (!test_opt(sb, MBALLOC))
++              return 0;
++
++      i = (sb->s_blocksize_bits + 2) * sizeof(unsigned short);
++
++      sbi->s_mb_offsets = kmalloc(i, GFP_KERNEL);
++      if (sbi->s_mb_offsets == NULL) {
++              clear_opt(sbi->s_mount_opt, MBALLOC);
++              return -ENOMEM;
++      }
++      sbi->s_mb_maxs = kmalloc(i, GFP_KERNEL);
++      if (sbi->s_mb_maxs == NULL) {
++              clear_opt(sbi->s_mount_opt, MBALLOC);
++              kfree(sbi->s_mb_offsets);
++              return -ENOMEM;
++      }
++
++      /* order 0 is regular bitmap */
++      sbi->s_mb_maxs[0] = sb->s_blocksize << 3;
++      sbi->s_mb_offsets[0] = 0;
++
++      i = 1;
++      offset = 0;
++      max = sb->s_blocksize << 2;
++      do {
++              sbi->s_mb_offsets[i] = offset;
++              sbi->s_mb_maxs[i] = max;
++              offset += 1 << (sb->s_blocksize_bits - i);
++              max = max >> 1;
++              i++;
++      } while (i <= sb->s_blocksize_bits + 1);
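++      /* s_mb_offsets[i] is the byte offset of the order-i buddy
++       * bitmap within the buddy block and s_mb_maxs[i] the number
++       * of bits it holds; each higher order has half as many bits */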
++
++      /* init file for buddy data */
++      if ((i = ext3_mb_init_backend(sb))) {
++              clear_opt(sbi->s_mount_opt, MBALLOC);
++              kfree(sbi->s_mb_offsets);
++              kfree(sbi->s_mb_maxs);
++              return i;
++      }
++
++      spin_lock_init(&sbi->s_md_lock);
++      INIT_LIST_HEAD(&sbi->s_active_transaction);
++      INIT_LIST_HEAD(&sbi->s_closed_transaction);
++      INIT_LIST_HEAD(&sbi->s_committed_transaction);
++      spin_lock_init(&sbi->s_bal_lock);
++
++      sbi->s_mb_max_to_scan = MB_DEFAULT_MAX_TO_SCAN;
++      sbi->s_mb_min_to_scan = MB_DEFAULT_MIN_TO_SCAN;
++      sbi->s_mb_max_groups_to_scan = MB_DEFAULT_MAX_GROUPS_TO_SCAN;
++      sbi->s_mb_stats = MB_DEFAULT_STATS;
++      sbi->s_mb_stream_request = MB_DEFAULT_STREAM_THRESHOLD;
++      sbi->s_mb_order2_reqs = MB_DEFAULT_ORDER2_REQS;
++      sbi->s_mb_history_filter = EXT3_MB_HISTORY_DEFAULT;
++
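++      /* one locality group per possible CPU; each has its own
++       * semaphore and preallocation list, so group allocations on
++       * different CPUs don't contend on a single global list */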
++      i = sizeof(struct ext3_locality_group) * NR_CPUS;
++      sbi->s_locality_groups = kmalloc(i, GFP_NOFS);
++      if (sbi->s_locality_groups == NULL) {
++              clear_opt(sbi->s_mount_opt, MBALLOC);
++              kfree(sbi->s_mb_offsets);
++              kfree(sbi->s_mb_maxs);
++              return -ENOMEM;
++      }
++      for (i = 0; i < NR_CPUS; i++) {
++              struct ext3_locality_group *lg;
++              lg = &sbi->s_locality_groups[i];
++              sema_init(&lg->lg_sem, 1);
++              INIT_LIST_HEAD(&lg->lg_prealloc_list);
++              spin_lock_init(&lg->lg_prealloc_lock);
++      }
++
++      ext3_mb_init_per_dev_proc(sb);
++      ext3_mb_history_init(sb);
++
++      printk("EXT3-fs: mballoc enabled\n");
++      return 0;
++}
++
++void ext3_mb_cleanup_pa(struct ext3_group_info *grp)
++{
++      struct ext3_prealloc_space *pa;
++      struct list_head *cur, *tmp;
++      int count = 0;
++
++      list_for_each_safe(cur, tmp, &grp->bb_prealloc_list) {
++              pa = list_entry(cur, struct ext3_prealloc_space, pa_group_list);
++              list_del_rcu(&pa->pa_group_list);
++              count++;
++              kfree(pa);
++      }
++      if (count)
++              mb_debug("mballoc: %u PAs left\n", count);
++
++}
++
++int ext3_mb_release(struct super_block *sb)
++{
++      struct ext3_sb_info *sbi = EXT3_SB(sb);
++      int i, num_meta_group_infos;
++
++      if (!test_opt(sb, MBALLOC))
++              return 0;
++
++      /* release freed, non-committed blocks */
++      spin_lock(&sbi->s_md_lock);
++      list_splice_init(&sbi->s_closed_transaction,
++                      &sbi->s_committed_transaction);
++      list_splice_init(&sbi->s_active_transaction,
++                      &sbi->s_committed_transaction);
++      spin_unlock(&sbi->s_md_lock);
++      ext3_mb_free_committed_blocks(sb);
++
++      if (sbi->s_group_info) {
++              for (i = 0; i < sbi->s_groups_count; i++) {
++#ifdef DOUBLE_CHECK
++                      if (EXT3_GROUP_INFO(sb, i)->bb_bitmap)
++                              kfree(EXT3_GROUP_INFO(sb, i)->bb_bitmap);
++#endif
++                      ext3_mb_cleanup_pa(EXT3_GROUP_INFO(sb, i));
++                      kfree(EXT3_GROUP_INFO(sb, i));
++              }
++              num_meta_group_infos = (sbi->s_groups_count +
++                              EXT3_DESC_PER_BLOCK(sb) - 1) >>
++                      EXT3_DESC_PER_BLOCK_BITS(sb);
++              for (i = 0; i < num_meta_group_infos; i++)
++                      kfree(sbi->s_group_info[i]);
++              kfree(sbi->s_group_info);
++      }
++      if (sbi->s_mb_offsets)
++              kfree(sbi->s_mb_offsets);
++      if (sbi->s_mb_maxs)
++              kfree(sbi->s_mb_maxs);
++      if (sbi->s_buddy_cache)
++              iput(sbi->s_buddy_cache);
++      if (sbi->s_mb_stats) {
++              printk("EXT3-fs: mballoc: %u blocks %u reqs (%u success)\n",
++                              atomic_read(&sbi->s_bal_allocated),
++                              atomic_read(&sbi->s_bal_reqs),
++                              atomic_read(&sbi->s_bal_success));
++              printk("EXT3-fs: mballoc: %u extents scanned, %u goal hits, "
++                              "%u 2^N hits, %u breaks, %u lost\n",
++                              atomic_read(&sbi->s_bal_ex_scanned),
++                              atomic_read(&sbi->s_bal_goals),
++                              atomic_read(&sbi->s_bal_2orders),
++                              atomic_read(&sbi->s_bal_breaks),
++                              atomic_read(&sbi->s_mb_lost_chunks));
++              printk("EXT3-fs: mballoc: %lu generated and it took %Lu\n",
++                              sbi->s_mb_buddies_generated++,
++                              sbi->s_mb_generation_time);
++              printk("EXT3-fs: mballoc: %u preallocated, %u discarded\n",
++                              atomic_read(&sbi->s_mb_preallocated),
++                              atomic_read(&sbi->s_mb_discarded));
++      }
++
++      if (sbi->s_locality_groups)
++              kfree(sbi->s_locality_groups);
++
++      ext3_mb_history_release(sb);
++      ext3_mb_destroy_per_dev_proc(sb);
++
++      return 0;
++}
++
++void ext3_mb_free_committed_blocks(struct super_block *sb)
++{
++      struct ext3_sb_info *sbi = EXT3_SB(sb);
++      int err, i, count = 0, count2 = 0;
++      struct ext3_free_metadata *md;
++      struct ext3_buddy e3b;
++
++      if (list_empty(&sbi->s_committed_transaction))
++              return;
++
++      /* there are still committed blocks waiting to be freed */
++      do {
++              /* get next array of blocks */
++              md = NULL;
++              spin_lock(&sbi->s_md_lock);
++              if (!list_empty(&sbi->s_committed_transaction)) {
++                      md = list_entry(sbi->s_committed_transaction.next,
++                                      struct ext3_free_metadata, list);
++                      list_del(&md->list);
++              }
++              spin_unlock(&sbi->s_md_lock);
++
++              if (md == NULL)
++                      break;
++
++              mb_debug("gonna free %u blocks in group %u (0x%p):",
++                              md->num, md->group, md);
++
++              err = ext3_mb_load_buddy(sb, md->group, &e3b);
++              /* we expect to find existing buddy because it's pinned */
++              BUG_ON(err != 0);
++
++              /* there are blocks to put in buddy to make them really free */
++              count += md->num;
++              count2++;
++              ext3_lock_group(sb, md->group);
++              for (i = 0; i < md->num; i++) {
++                      mb_debug(" %u", md->blocks[i]);
++                      err = mb_free_blocks(NULL, &e3b, md->blocks[i], 1);
++                      BUG_ON(err != 0);
++              }
++              mb_debug("\n");
++              ext3_unlock_group(sb, md->group);
++
++              /* balance refcounts from ext3_mb_free_metadata() */
++              page_cache_release(e3b.bd_buddy_page);
++              page_cache_release(e3b.bd_bitmap_page);
++
++              kfree(md);
++              ext3_mb_release_desc(&e3b);
++
++      } while (md);
++
++      mb_debug("freed %u blocks in %u structures\n", count, count2);
++}
++
++#define EXT3_ROOT                     "ext3"
++#define EXT3_MB_STATS_NAME            "stats"
++#define EXT3_MB_MAX_TO_SCAN_NAME      "max_to_scan"
++#define EXT3_MB_MIN_TO_SCAN_NAME      "min_to_scan"
++#define EXT3_MB_ORDER2_REQ            "order2_req"
++#define EXT3_MB_STREAM_REQ            "stream_req"
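++
++/* the per-device tunables above are exported under /proc/fs/ext3/<bdev>/ */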
++
++static int ext3_mb_stats_read(char *page, char **start, off_t off,
++              int count, int *eof, void *data)
++{
++      struct ext3_sb_info *sbi = data;
++      int len;
++
++      *eof = 1;
++      if (off != 0)
++              return 0;
++
++      len = sprintf(page, "%ld\n", sbi->s_mb_stats);
++      *start = page;
++      return len;
++}
++
++static int ext3_mb_stats_write(struct file *file, const char *buffer,
++              unsigned long count, void *data)
++{
++      struct ext3_sb_info *sbi = data;
++      char str[32];
++
++      if (count >= sizeof(str)) {
++              printk(KERN_ERR "EXT3-fs: %s string too long, max %u bytes\n",
++                              EXT3_MB_STATS_NAME, (int)sizeof(str));
++              return -EOVERFLOW;
++      }
++
++      if (copy_from_user(str, buffer, count))
++              return -EFAULT;
++
++      /* normalize the input to a boolean: zero disables, non-zero enables */
++      sbi->s_mb_stats = (simple_strtol(str, NULL, 0) != 0);
++      return count;
++}
++
++static int ext3_mb_max_to_scan_read(char *page, char **start, off_t off,
++              int count, int *eof, void *data)
++{
++      struct ext3_sb_info *sbi = data;
++      int len;
++
++      *eof = 1;
++      if (off != 0)
++              return 0;
++
++      len = sprintf(page, "%ld\n", sbi->s_mb_max_to_scan);
++      *start = page;
++      return len;
++}
++
++static int ext3_mb_max_to_scan_write(struct file *file, const char *buffer,
++              unsigned long count, void *data)
++{
++      struct ext3_sb_info *sbi = data;
++      char str[32];
++      long value;
++
++      if (count >= sizeof(str)) {
++              printk(KERN_ERR "EXT3-fs: %s string too long, max %u bytes\n",
++                              EXT3_MB_MAX_TO_SCAN_NAME, (int)sizeof(str));
++              return -EOVERFLOW;
++      }
++
++      if (copy_from_user(str, buffer, count))
++              return -EFAULT;
++
++      /* any positive value is accepted */
++      value = simple_strtol(str, NULL, 0);
++      if (value <= 0)
++              return -ERANGE;
++
++      sbi->s_mb_max_to_scan = value;
++
++      return count;
++}
++
++static int ext3_mb_min_to_scan_read(char *page, char **start, off_t off,
++              int count, int *eof, void *data)
++{
++      struct ext3_sb_info *sbi = data;
++      int len;
++
++      *eof = 1;
++      if (off != 0)
++              return 0;
++
++      len = sprintf(page, "%ld\n", sbi->s_mb_min_to_scan);
++      *start = page;
++      return len;
++}
++
++static int ext3_mb_order2_req_write(struct file *file, const char *buffer,
++              unsigned long count, void *data)
++{
++      struct ext3_sb_info *sbi = data;
++      char str[32];
++      long value;
++
++      if (count >= sizeof(str)) {
++              printk(KERN_ERR "EXT3-fs: %s string too long, max %u bytes\n",
++                              EXT3_MB_ORDER2_REQ, (int)sizeof(str));
++              return -EOVERFLOW;
++      }
++
++      if (copy_from_user(str, buffer, count))
++              return -EFAULT;
++
++      /* any positive value is accepted */
++      value = simple_strtol(str, NULL, 0);
++      if (value <= 0)
++              return -ERANGE;
++
++      sbi->s_mb_order2_reqs = value;
++
++      return count;
++}
++
++static int ext3_mb_order2_req_read(char *page, char **start, off_t off,
++              int count, int *eof, void *data)
++{
++      struct ext3_sb_info *sbi = data;
++      int len;
++
++      *eof = 1;
++      if (off != 0)
++              return 0;
++
++      len = sprintf(page, "%ld\n", sbi->s_mb_order2_reqs);
++      *start = page;
++      return len;
++}
++
++static int ext3_mb_min_to_scan_write(struct file *file, const char *buffer,
++              unsigned long count, void *data)
++{
++      struct ext3_sb_info *sbi = data;
++      char str[32];
++      long value;
++
++      if (count >= sizeof(str)) {
++              printk(KERN_ERR "EXT3-fs: %s string too long, max %u bytes\n",
++                              EXT3_MB_MIN_TO_SCAN_NAME, (int)sizeof(str));
++              return -EOVERFLOW;
++      }
++
++      if (copy_from_user(str, buffer, count))
++              return -EFAULT;
++
++      /* any positive value is accepted */
++      value = simple_strtol(str, NULL, 0);
++      if (value <= 0)
++              return -ERANGE;
++
++      sbi->s_mb_min_to_scan = value;
++
++      return count;
++}
++
++static int ext3_mb_stream_req_read(char *page, char **start, off_t off,
++              int count, int *eof, void *data)
++{
++      struct ext3_sb_info *sbi = data;
++      int len;
++
++      *eof = 1;
++      if (off != 0)
++              return 0;
++
++      len = sprintf(page, "%ld\n", sbi->s_mb_stream_request);
++      *start = page;
++      return len;
++}
++
++static int ext3_mb_stream_req_write(struct file *file, const char *buffer,
++              unsigned long count, void *data)
++{
++      struct ext3_sb_info *sbi = data;
++      char str[32];
++      long value;
++
++      if (count >= sizeof(str)) {
++              printk(KERN_ERR "EXT3-fs: %s string too long, max %u bytes\n",
++                              EXT3_MB_STREAM_REQ, (int)sizeof(str));
++              return -EOVERFLOW;
++      }
++
++      if (copy_from_user(str, buffer, count))
++              return -EFAULT;
++
++      /* any positive value is accepted */
++      value = simple_strtol(str, NULL, 0);
++      if (value <= 0)
++              return -ERANGE;
++
++      sbi->s_mb_stream_request = value;
++
++      return count;
++}
++
++int ext3_mb_init_per_dev_proc(struct super_block *sb)
++{
++      mode_t mode = S_IFREG | S_IRUGO | S_IWUSR;
++      struct ext3_sb_info *sbi = EXT3_SB(sb);
++      struct proc_dir_entry *proc;
++      char devname[64], *name;
++
++      snprintf(devname, sizeof(devname) - 1, "%s",
++              bdevname(sb->s_bdev, devname));
++      sbi->s_mb_proc = proc_mkdir(devname, proc_root_ext3);
++
++      name = EXT3_MB_STATS_NAME;
++      proc = create_proc_entry(name, mode, sbi->s_mb_proc);
++      if (proc == NULL)
++              goto err_out;
++      proc->data = sbi;
++      proc->read_proc  = ext3_mb_stats_read;
++      proc->write_proc = ext3_mb_stats_write;
++
++      name = EXT3_MB_MAX_TO_SCAN_NAME;
++      proc = create_proc_entry(name, mode, sbi->s_mb_proc);
++      if (proc == NULL)
++              goto err_out;
++      proc->data = sbi;
++      proc->read_proc  = ext3_mb_max_to_scan_read;
++      proc->write_proc = ext3_mb_max_to_scan_write;
++
++      name = EXT3_MB_MIN_TO_SCAN_NAME;
++      proc = create_proc_entry(name, mode, sbi->s_mb_proc);
++      if (proc == NULL)
++              goto err_out;
++      proc->data = sbi;
++      proc->read_proc  = ext3_mb_min_to_scan_read;
++      proc->write_proc = ext3_mb_min_to_scan_write;
++
++      name = EXT3_MB_ORDER2_REQ;
++      proc = create_proc_entry(name, mode, sbi->s_mb_proc);
++      if (proc == NULL)
++              goto err_out;
++      proc->data = sbi;
++      proc->read_proc  = ext3_mb_order2_req_read;
++      proc->write_proc = ext3_mb_order2_req_write;
++
++      name = EXT3_MB_STREAM_REQ;
++      proc = create_proc_entry(name, mode, sbi->s_mb_proc);
++      if (proc == NULL)
++              goto err_out;
++      proc->data = sbi;
++      proc->read_proc  = ext3_mb_stream_req_read;
++      proc->write_proc = ext3_mb_stream_req_write;
++
++      return 0;
++
++err_out:
++      printk(KERN_ERR "EXT3-fs: Unable to create %s\n", name);
++      remove_proc_entry(EXT3_MB_STREAM_REQ, sbi->s_mb_proc);
++      remove_proc_entry(EXT3_MB_ORDER2_REQ, sbi->s_mb_proc);
++      remove_proc_entry(EXT3_MB_MIN_TO_SCAN_NAME, sbi->s_mb_proc);
++      remove_proc_entry(EXT3_MB_MAX_TO_SCAN_NAME, sbi->s_mb_proc);
++      remove_proc_entry(EXT3_MB_STATS_NAME, sbi->s_mb_proc);
++      remove_proc_entry(devname, proc_root_ext3);
++      sbi->s_mb_proc = NULL;
++
++      return -ENOMEM;
++}
++
++int ext3_mb_destroy_per_dev_proc(struct super_block *sb)
++{
++      struct ext3_sb_info *sbi = EXT3_SB(sb);
++      char devname[64];
++
++      if (sbi->s_mb_proc == NULL)
++              return -EINVAL;
++
++      snprintf(devname, sizeof(devname) - 1, "%s",
++              bdevname(sb->s_bdev, devname));
++      remove_proc_entry(EXT3_MB_STREAM_REQ, sbi->s_mb_proc);
++      remove_proc_entry(EXT3_MB_ORDER2_REQ, sbi->s_mb_proc);
++      remove_proc_entry(EXT3_MB_MIN_TO_SCAN_NAME, sbi->s_mb_proc);
++      remove_proc_entry(EXT3_MB_MAX_TO_SCAN_NAME, sbi->s_mb_proc);
++      remove_proc_entry(EXT3_MB_STATS_NAME, sbi->s_mb_proc);
++      remove_proc_entry(devname, proc_root_ext3);
++
++      return 0;
++}
++
++int __init init_ext3_proc(void)
++{
++      ext3_pspace_cachep =
++              kmem_cache_create("ext3_prealloc_space",
++                                   sizeof(struct ext3_prealloc_space),
++                                   0, SLAB_RECLAIM_ACCOUNT, NULL, NULL);
++      if (ext3_pspace_cachep == NULL)
++              return -ENOMEM;
++
++      proc_root_ext3 = proc_mkdir(EXT3_ROOT, proc_root_fs);
++      if (proc_root_ext3 == NULL)
++              printk(KERN_ERR "EXT3-fs: Unable to create %s\n", EXT3_ROOT);
++
++      return 0;
++}
++
++void exit_ext3_proc(void)
++{
++      /* XXX: synchronize_rcu(); */
++      kmem_cache_destroy(ext3_pspace_cachep);
++      remove_proc_entry(EXT3_ROOT, proc_root_fs);
++}
++
++
++/*
++ * Check quota and mark the chosen space (ac->ac_b_ex) non-free in the bitmaps
++ * Returns 0 on success or an error code
++ */
++int ext3_mb_mark_diskspace_used(struct ext3_allocation_context *ac, handle_t *handle)
++{
++      struct buffer_head *bitmap_bh = NULL;
++      struct ext3_super_block *es;
++      struct ext3_group_desc *gdp;
++      struct buffer_head *gdp_bh;
++      struct ext3_sb_info *sbi;
++      struct super_block *sb;
++      sector_t block;
++      int err;
++
++      BUG_ON(ac->ac_status != AC_STATUS_FOUND);
++      BUG_ON(ac->ac_b_ex.fe_len <= 0);
++
++      sb = ac->ac_sb;
++      sbi = EXT3_SB(sb);
++      es = sbi->s_es;
++
++      ext3_debug("using block group %lu(%lu)\n", ac->ac_b_ex.fe_group,
++                      ac->ac_b_ex.fe_len);
++
++      err = -EIO;
++      bitmap_bh = read_block_bitmap(sb, ac->ac_b_ex.fe_group);
++      if (!bitmap_bh)
++              goto out_err;
++
++      err = ext3_journal_get_write_access(handle, bitmap_bh);
++      if (err)
++              goto out_err;
++
++      err = -EIO;
++      gdp = ext3_get_group_desc(sb, ac->ac_b_ex.fe_group, &gdp_bh);
++      if (!gdp)
++              goto out_err;
++
++      err = ext3_journal_get_write_access(handle, gdp_bh);
++      if (err)
++              goto out_err;
++
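++      /* translate the best-found (group, start) extent into an
++       * absolute filesystem block number */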
++      block = ac->ac_b_ex.fe_group * EXT3_BLOCKS_PER_GROUP(sb)
++              + ac->ac_b_ex.fe_start
++              + le32_to_cpu(es->s_first_data_block);
++
++      if (block == le32_to_cpu(gdp->bg_block_bitmap) ||
++                      block == le32_to_cpu(gdp->bg_inode_bitmap) ||
++                      in_range(block, le32_to_cpu(gdp->bg_inode_table),
++                              EXT3_SB(sb)->s_itb_per_group))
++              ext3_error(sb, __FUNCTION__,
++                         "Allocating block in system zone - block = %lu",
++                         (unsigned long) block);
++#ifdef AGGRESSIVE_CHECK
++      {
++              int i;
++              for (i = 0; i < ac->ac_b_ex.fe_len; i++) {
++                      BUG_ON(mb_test_bit(ac->ac_b_ex.fe_start + i,
++                                              bitmap_bh->b_data));
++              }
++      }
++#endif
++      mb_set_bits(sb_bgl_lock(sbi, ac->ac_b_ex.fe_group), bitmap_bh->b_data,
++                  ac->ac_b_ex.fe_start, ac->ac_b_ex.fe_len);
++
++      spin_lock(sb_bgl_lock(sbi, ac->ac_b_ex.fe_group));
++      gdp->bg_free_blocks_count =
++              cpu_to_le16(le16_to_cpu(gdp->bg_free_blocks_count)
++                              - ac->ac_b_ex.fe_len);
++      spin_unlock(sb_bgl_lock(sbi, ac->ac_b_ex.fe_group));
++      percpu_counter_mod(&sbi->s_freeblocks_counter, - ac->ac_b_ex.fe_len);
++
++      err = ext3_journal_dirty_metadata(handle, bitmap_bh);
++      if (err)
++              goto out_err;
++      err = ext3_journal_dirty_metadata(handle, gdp_bh);
++
++out_err:
++      sb->s_dirt = 1;
++      brelse(bitmap_bh);
++      return err;
++}
++
++/*
++ * here we normalize request for locality group
++ * XXX: should we try to preallocate more than the group has now?
++ */
++void ext3_mb_normalize_group_request(struct ext3_allocation_context *ac)
++{
++      struct super_block *sb = ac->ac_sb;
++      struct ext3_locality_group *lg = ac->ac_lg;
++
++      BUG_ON(lg == NULL);
++      if (EXT3_SB(sb)->s_stripe)
++              ac->ac_g_ex.fe_len = EXT3_SB(sb)->s_stripe;
++      else
++              ac->ac_g_ex.fe_len = (1024 * 1024) >> sb->s_blocksize_bits;
++
++      mb_debug("#%u: goal %u blocks for locality group\n",
++              current->pid, ac->ac_g_ex.fe_len);
++}
++
++/*
++ * Normalization means making request better in terms of
++ * size and alignment
++ */
++void ext3_mb_normalize_request(struct ext3_allocation_context *ac,
++                              struct ext3_allocation_request *ar)
++{
++      struct ext3_inode_info *ei = EXT3_I(ac->ac_inode);
++      loff_t start, end, size, orig_size, orig_start;
++      struct list_head *cur;
++      int bsbits, max;
++
++      /* only data requests are normalized; metadata requests
++         do not need preallocation */
++      if (!(ac->ac_flags & EXT3_MB_HINT_DATA))
++              return;
++
++      /* sometimes the caller wants exactly these blocks */
++      if (unlikely(ac->ac_flags & EXT3_MB_HINT_GOAL_ONLY))
++              return;
++
++      /* caller may indicate that preallocation isn't
++       * required (it's a tail, for example) */
++      if (ac->ac_flags & EXT3_MB_HINT_NOPREALLOC)
++              return;
++
++      if (ac->ac_flags & EXT3_MB_HINT_GROUP_ALLOC)
++              return ext3_mb_normalize_group_request(ac);
++
++      bsbits = ac->ac_sb->s_blocksize_bits;
++
++      /* first, let's learn the actual file size
++       * assuming the current request is allocated */
++      size = ac->ac_o_ex.fe_logical + ac->ac_o_ex.fe_len;
++      size = size << bsbits;
++      if (size < i_size_read(ac->ac_inode))
++              size = i_size_read(ac->ac_inode);
++
++      /* max available blocks in a free group */
++      max = EXT3_BLOCKS_PER_GROUP(ac->ac_sb) - 1 - 1
++              - EXT3_SB(ac->ac_sb)->s_itb_per_group;
++
++#define NRL_CHECK_SIZE(req,size,max,bits)     \
++              (req <= (size) || max <= ((size) >> bits))
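++      /* a size bucket is chosen if the request already fits in it,
++       * or if no group could hold anything larger anyway */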
++
++      /* first, try to predict filesize */
++      /* XXX: should this table be tunable? */
++      start = 0;
++      if (size <= 16 * 1024) {
++              size = 16 * 1024;
++      } else if (size <= 32 * 1024) {
++              size = 32 * 1024;
++      } else if (size <= 64 * 1024) {
++              size = 64 * 1024;
++      } else if (size <= 128 * 1024) {
++              size = 128 * 1024;
++      } else if (size <= 256 * 1024) {
++              size = 256 * 1024;
++      } else if (size <= 512 * 1024) {
++              size = 512 * 1024;
++      } else if (size <= 1024 * 1024) {
++              size = 1024 * 1024;
++      } else if (NRL_CHECK_SIZE(size, 4 * 1024 * 1024, max, bsbits)) {
++              start = ac->ac_o_ex.fe_logical << bsbits;
++              start = (start / (1024 * 1024)) * (1024 * 1024);
++              size = 1024 * 1024;
++      } else if (NRL_CHECK_SIZE(size, 8 * 1024 * 1024, max, bsbits)) {
++              start = ac->ac_o_ex.fe_logical << bsbits;
++              start = (start / (4 * (1024 * 1024))) * 4 * (1024 * 1024);
++              size = 4 * 1024 * 1024;
++      } else if (NRL_CHECK_SIZE(ac->ac_o_ex.fe_len, (8<<20)>>bsbits, max, bsbits)) {
++              start = ac->ac_o_ex.fe_logical;
++              start = start << bsbits;
++              start = (start / (8 * (1024 * 1024))) * 8 * (1024 * 1024);
++              size = 8 * 1024 * 1024;
++      } else {
++              start = ac->ac_o_ex.fe_logical;
++              start = start << bsbits;
++              size = ac->ac_o_ex.fe_len << bsbits;
++      }
++      orig_size = size = size >> bsbits;
++      orig_start = start = start >> bsbits;
++
++      /* don't cover already allocated blocks in selected range */
++      if (ar->pleft && start <= ar->lleft) {
++              size -= ar->lleft + 1 - start;
++              start = ar->lleft + 1;
++      }
++      if (ar->pright && start + size - 1 >= ar->lright)
++              size -= start + size - ar->lright;
++
++      end = start + size;
++
++      /* check we don't cross already preallocated blocks */
++      rcu_read_lock();
++      list_for_each_rcu(cur, &ei->i_prealloc_list) {
++              struct ext3_prealloc_space *pa;
++              unsigned long pa_end;
++
++              pa = list_entry(cur, struct ext3_prealloc_space, pa_inode_list);
++
++              if (pa->pa_deleted)
++                      continue;
++              spin_lock(&pa->pa_lock);
++              if (pa->pa_deleted) {
++                      spin_unlock(&pa->pa_lock);
++                      continue;
++              }
++
++              pa_end = pa->pa_lstart + pa->pa_len;
++
++              /* PA must not overlap original request */
++              BUG_ON(!(ac->ac_o_ex.fe_logical >= pa_end ||
++                      ac->ac_o_ex.fe_logical < pa->pa_lstart));
++
++              /* skip PAs that the normalized request doesn't overlap with */
++              if (pa->pa_lstart >= end) {
++                      spin_unlock(&pa->pa_lock);
++                      continue;
++              }
++              if (pa_end <= start) {
++                      spin_unlock(&pa->pa_lock);
++                      continue;
++              }
++              BUG_ON(pa->pa_lstart <= start && pa_end >= end);
++
++              if (pa_end <= ac->ac_o_ex.fe_logical) {
++                      BUG_ON(pa_end < start);
++                      start = pa_end;
++              }
++
++              if (pa->pa_lstart > ac->ac_o_ex.fe_logical) {
++                      BUG_ON(pa->pa_lstart > end);
++                      end = pa->pa_lstart;
++              }
++              spin_unlock(&pa->pa_lock);
++      }
++      rcu_read_unlock();
++      size = end - start;
++
++      /* XXX: extra loop to check we really don't overlap preallocations */
++      rcu_read_lock();
++      list_for_each_rcu(cur, &ei->i_prealloc_list) {
++              struct ext3_prealloc_space *pa;
++              unsigned long pa_end;
++              pa = list_entry(cur, struct ext3_prealloc_space, pa_inode_list);
++              spin_lock(&pa->pa_lock);
++              if (pa->pa_deleted == 0) {
++                      pa_end = pa->pa_lstart + pa->pa_len;
++                      BUG_ON(!(start >= pa_end || end <= pa->pa_lstart));
++              }
++              spin_unlock(&pa->pa_lock);
++      }
++      rcu_read_unlock();
++
++      if (start + size <= ac->ac_o_ex.fe_logical &&
++                      start > ac->ac_o_ex.fe_logical) {
++              printk("start %lu, size %lu, fe_logical %lu\n",
++                      (unsigned long) start, (unsigned long) size,
++                      (unsigned long) ac->ac_o_ex.fe_logical);
++      }
++      BUG_ON(start + size <= ac->ac_o_ex.fe_logical &&
++                      start > ac->ac_o_ex.fe_logical);
++
++      /* now prepare goal request */
++      BUG_ON(size <= 0 || size >= EXT3_BLOCKS_PER_GROUP(ac->ac_sb));
++      if (size < ac->ac_o_ex.fe_len) {
++              /* XXX: don't normalize tails? */
++      }
++
++      /* XXX: is it better to align blocks WRT to logical placement
++       * or satisfy big request as is */
++      ac->ac_g_ex.fe_logical = start;
++      ac->ac_g_ex.fe_len = size;
++
++      mb_debug("goal: %u(was %u) blocks at %u\n", (unsigned) size,
++              (unsigned) orig_size, (unsigned) start);
++}
++
++void ext3_mb_collect_stats(struct ext3_allocation_context *ac)
++{
++      struct ext3_sb_info *sbi = EXT3_SB(ac->ac_sb);
++
++      if (sbi->s_mb_stats && ac->ac_g_ex.fe_len > 1) {
++              atomic_inc(&sbi->s_bal_reqs);
++              atomic_add(ac->ac_b_ex.fe_len, &sbi->s_bal_allocated);
++              if (ac->ac_o_ex.fe_len >= ac->ac_g_ex.fe_len)
++                      atomic_inc(&sbi->s_bal_success);
++              atomic_add(ac->ac_found, &sbi->s_bal_ex_scanned);
++              if (ac->ac_g_ex.fe_start == ac->ac_b_ex.fe_start &&
++                              ac->ac_g_ex.fe_group == ac->ac_b_ex.fe_group)
++                      atomic_inc(&sbi->s_bal_goals);
++              if (ac->ac_found > sbi->s_mb_max_to_scan)
++                      atomic_inc(&sbi->s_bal_breaks);
++      }
++
++      ext3_mb_store_history(ac);
++}
++
++/*
++ * use blocks preallocated to inode
++ */
++void ext3_mb_use_inode_pa(struct ext3_allocation_context *ac,
++                              struct ext3_prealloc_space *pa)
++{
++      unsigned long start, len;
++
++      /* found preallocated blocks, use them */
++      start = pa->pa_pstart + (ac->ac_o_ex.fe_logical - pa->pa_lstart);
++      len = min(pa->pa_pstart + pa->pa_len, start + ac->ac_o_ex.fe_len);
++      len = len - start;
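++      /* start/len now map the logical start of the request into the
++       * PA's physical range, clipped to what the PA can still cover */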
++      ext3_get_group_no_and_offset(ac->ac_sb, start, &ac->ac_b_ex.fe_group,
++                                      &ac->ac_b_ex.fe_start);
++      ac->ac_b_ex.fe_len = len;
++      ac->ac_status = AC_STATUS_FOUND;
++      ac->ac_pa = pa;
++
++      BUG_ON(start < pa->pa_pstart);
++      BUG_ON(start + len > pa->pa_pstart + pa->pa_len);
++      BUG_ON(pa->pa_free < len);
++      pa->pa_free -= len;
++
++      mb_debug("use %lu/%lu from inode pa %p\n", start, len, pa);
++}
++
++/*
++ * use blocks preallocated to locality group
++ */
++void ext3_mb_use_group_pa(struct ext3_allocation_context *ac,
++                              struct ext3_prealloc_space *pa)
++{
++      unsigned len = ac->ac_o_ex.fe_len;
++
++      ext3_get_group_no_and_offset(ac->ac_sb, pa->pa_pstart,
++                                      &ac->ac_b_ex.fe_group,
++                                      &ac->ac_b_ex.fe_start);
++      ac->ac_b_ex.fe_len = len;
++      ac->ac_status = AC_STATUS_FOUND;
++      ac->ac_pa = pa;
++
++      /* we don't correct pa_pstart or pa_len here to avoid a
++       * possible race when the group is being loaded concurrently;
++       * instead we correct the pa later, after blocks are marked
++       * in the on-disk bitmap -- see ext3_mb_release_context() */
++      mb_debug("use %lu/%lu from group pa %p\n", pa->pa_lstart-len, len, pa);
++}
++
++/*
++ * search goal blocks in preallocated space
++ */
++int ext3_mb_use_preallocated(struct ext3_allocation_context *ac)
++{
++      struct ext3_inode_info *ei = EXT3_I(ac->ac_inode);
++      struct ext3_locality_group *lg;
++      struct ext3_prealloc_space *pa;
++      struct list_head *cur;
++
++      /* only data can be preallocated */
++      if (!(ac->ac_flags & EXT3_MB_HINT_DATA))
++              return 0;
++
++      /* first, try per-file preallocation */
++      rcu_read_lock();
++      list_for_each_rcu(cur, &ei->i_prealloc_list) {
++              pa = list_entry(cur, struct ext3_prealloc_space, pa_inode_list);
++
++              /* all fields in this condition don't change,
++               * so we can skip locking for them */
++              if (ac->ac_o_ex.fe_logical < pa->pa_lstart ||
++                      ac->ac_o_ex.fe_logical >= pa->pa_lstart + pa->pa_len)
++                      continue;
++
++              /* found preallocated blocks, use them */
++              spin_lock(&pa->pa_lock);
++              if (pa->pa_deleted == 0 && pa->pa_free) {
++                      atomic_inc(&pa->pa_count);
++                      ext3_mb_use_inode_pa(ac, pa);
++                      spin_unlock(&pa->pa_lock);
++                      ac->ac_criteria = 10;
++                      rcu_read_unlock();
++                      return 1;
++              }
++              spin_unlock(&pa->pa_lock);
++      }
++      rcu_read_unlock();
++
++      /* can we use group allocation? */
++      if (!(ac->ac_flags & EXT3_MB_HINT_GROUP_ALLOC))
++              return 0;
++
++      /* inode may have no locality group for some reason */
++      lg = ac->ac_lg;
++      if (lg == NULL)
++              return 0;
++
++      rcu_read_lock();
++      list_for_each_rcu(cur, &lg->lg_prealloc_list) {
++              pa = list_entry(cur, struct ext3_prealloc_space, pa_inode_list);
++              spin_lock(&pa->pa_lock);
++              if (pa->pa_deleted == 0 && pa->pa_free >= ac->ac_o_ex.fe_len) {
++                      atomic_inc(&pa->pa_count);
++                      ext3_mb_use_group_pa(ac, pa);
++                      spin_unlock(&pa->pa_lock);
++                      ac->ac_criteria = 20;
++                      rcu_read_unlock();
++                      return 1;
++              }
++              spin_unlock(&pa->pa_lock);
++      }
++      rcu_read_unlock();
++
++      return 0;
++}
++
++/*
++ * the function goes through all preallocation in this group and marks them
++ * used in in-core bitmap. buddy must be generated from this bitmap
++ */
++void ext3_mb_generate_from_pa(struct super_block *sb, void *bitmap, int group)
++{
++      struct ext3_group_info *grp = EXT3_GROUP_INFO(sb, group);
++      struct ext3_prealloc_space *pa;
++      struct list_head *cur;
++      unsigned long groupnr;
++      unsigned long start;
++      int preallocated = 0, count = 0, len;
++
++      /* all forms of preallocation discard first load the group,
++       * so the only competing code is preallocation use.
++       * we don't need any locking here.
++       * notice that we do NOT ignore preallocations with pa_deleted set;
++       * otherwise we could leave used blocks available for
++       * allocation in the buddy when a concurrent ext3_mb_put_pa()
++       * is dropping the preallocation
++       */
++      list_for_each_rcu(cur, &grp->bb_prealloc_list) {
++              pa = list_entry(cur, struct ext3_prealloc_space, pa_group_list);
++              spin_lock(&pa->pa_lock);
++              ext3_get_group_no_and_offset(sb, pa->pa_pstart, &groupnr, &start);
++              len = pa->pa_len;
++              spin_unlock(&pa->pa_lock);
++              if (unlikely(len == 0))
++                      continue;
++              BUG_ON(groupnr != group && len != 0);
++              mb_set_bits(sb_bgl_lock(EXT3_SB(sb), group), bitmap, start, len);
++              preallocated += len;
++              count++;
++      }
++      mb_debug("prellocated %u for group %u\n", preallocated, group);
++}
++
++#if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,5)
++static void ext3_mb_pa_callback(struct rcu_head *head)
++{
++      struct ext3_prealloc_space *pa;
++      pa = container_of(head, struct ext3_prealloc_space, u.pa_rcu);
++      kmem_cache_free(ext3_pspace_cachep, pa);
++}
++#define mb_call_rcu(__pa)     call_rcu(&(__pa)->u.pa_rcu, ext3_mb_pa_callback)
++#else
++static void ext3_mb_pa_callback(void *pa)
++{
++      kmem_cache_free(ext3_pspace_cachep, pa);
++}
++#define mb_call_rcu(__pa)     call_rcu(&(__pa)->u.pa_rcu, ext3_mb_pa_callback, pa)
++#endif
++
++/*
++ * drops a reference to preallocated space descriptor
++ * if this was the last reference and the space is consumed
++ */
++void ext3_mb_put_pa(struct ext3_allocation_context *ac,
++                      struct super_block *sb, struct ext3_prealloc_space *pa)
++{
++      unsigned long grp;
++
++      if (!atomic_dec_and_test(&pa->pa_count) || pa->pa_free != 0)
++              return;
++
++      /* in this short window concurrent discard can set pa_deleted */
++      spin_lock(&pa->pa_lock);
++      if (pa->pa_deleted == 1) {
++              spin_unlock(&pa->pa_lock);
++              return;
++      }
++
++      pa->pa_deleted = 1;
++      spin_unlock(&pa->pa_lock);
++
++      /* -1 is to protect from crossing allocation group */
++      ext3_get_group_no_and_offset(sb, pa->pa_pstart - 1, &grp, NULL);
++
++      /*
++       * possible race:
++       *
++       *  P1 (buddy init)                     P2 (regular allocation)
++       *                                      find block B in PA
++       *  copy on-disk bitmap to buddy
++       *                                      mark B in on-disk bitmap
++       *                                      drop PA from group
++       *  mark all PAs in buddy
++       *
++       * thus, P1 initializes buddy with B available. to prevent this
++       * we make "copy" and "mark all PAs" atomic and serialize "drop PA"
++       * against that pair
++       */
++      ext3_lock_group(sb, grp);
++      list_del_rcu(&pa->pa_group_list);
++      ext3_unlock_group(sb, grp);
++
++      spin_lock(pa->pa_obj_lock);
++      list_del_rcu(&pa->pa_inode_list);
++      spin_unlock(pa->pa_obj_lock);
++
++      mb_call_rcu(pa);
++}
++
++/*
++ * creates new preallocated space for given inode
++ */
++int ext3_mb_new_inode_pa(struct ext3_allocation_context *ac)
++{
++      struct super_block *sb = ac->ac_sb;
++      struct ext3_prealloc_space *pa;
++      struct ext3_group_info *grp;
++      struct ext3_inode_info *ei;
++
++      /* preallocate only when the found space is larger than requested */
++      BUG_ON(ac->ac_o_ex.fe_len >= ac->ac_b_ex.fe_len);
++      BUG_ON(ac->ac_status != AC_STATUS_FOUND);
++      BUG_ON(!S_ISREG(ac->ac_inode->i_mode));
++
++      pa = kmem_cache_alloc(ext3_pspace_cachep, GFP_NOFS);
++      if (pa == NULL)
++              return -ENOMEM;
++
++      if (ac->ac_b_ex.fe_len < ac->ac_g_ex.fe_len) {
++              int winl, wins, win, offs;
++
++              /* we can't allocate as much as normalizer wants.
++               * so, found space must get proper lstart
++               * to cover original request */
++              BUG_ON(ac->ac_g_ex.fe_logical > ac->ac_o_ex.fe_logical);
++              BUG_ON(ac->ac_g_ex.fe_len < ac->ac_o_ex.fe_len);
++
++              /* we're limited by the original request in that
++               * its logical block must be covered anyway;
++               * winl is the window we can move our chunk within */
++              winl = ac->ac_o_ex.fe_logical - ac->ac_g_ex.fe_logical;
++
++              /* also, we should cover whole original request */
++              wins = ac->ac_b_ex.fe_len - ac->ac_o_ex.fe_len;
++
++              /* the smallest one defines real window */
++              win = min(winl, wins);
++
++              offs = ac->ac_o_ex.fe_logical % ac->ac_b_ex.fe_len;
++              if (offs && offs < win)
++                      win = offs;
++
++              ac->ac_b_ex.fe_logical = ac->ac_o_ex.fe_logical - win;
++              BUG_ON(ac->ac_o_ex.fe_logical < ac->ac_b_ex.fe_logical);
++              BUG_ON(ac->ac_o_ex.fe_len > ac->ac_b_ex.fe_len);
++      }
++
++      /* preallocation can change ac_b_ex, thus we store actually
++       * allocated blocks for history */
++      ac->ac_f_ex = ac->ac_b_ex;
++
++      pa->pa_lstart = ac->ac_b_ex.fe_logical;
++      pa->pa_pstart = ext3_grp_offs_to_block(sb, &ac->ac_b_ex);
++      pa->pa_len = ac->ac_b_ex.fe_len;
++      pa->pa_free = pa->pa_len;
++      atomic_set(&pa->pa_count, 1);
++      spin_lock_init(&pa->pa_lock);
++      pa->pa_deleted = 0;
++      pa->pa_linear = 0;
++
++      mb_debug("new inode pa %p: %lu/%lu for %lu\n", pa,
++                      pa->pa_pstart, pa->pa_len, pa->pa_lstart);
++
++      ext3_mb_use_inode_pa(ac, pa);
++      atomic_add(pa->pa_free, &EXT3_SB(sb)->s_mb_preallocated);
++
++      ei = EXT3_I(ac->ac_inode);
++      grp = EXT3_GROUP_INFO(sb, ac->ac_b_ex.fe_group);
++
++      pa->pa_obj_lock = &ei->i_prealloc_lock;
++      pa->pa_inode = ac->ac_inode;
++
++      ext3_lock_group(sb, ac->ac_b_ex.fe_group);
++      list_add_rcu(&pa->pa_group_list, &grp->bb_prealloc_list);
++      ext3_unlock_group(sb, ac->ac_b_ex.fe_group);
++
++      spin_lock(pa->pa_obj_lock);
++      list_add_rcu(&pa->pa_inode_list, &ei->i_prealloc_list);
++      spin_unlock(pa->pa_obj_lock);
++
++      return 0;
++}
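
The sliding-window placement above is easier to follow with concrete numbers. A minimal userspace sketch of the same arithmetic (all values hypothetical; this is not code from the patch):

    #include <assert.h>
    #include <stdio.h>

    /* Place a best-found chunk of b_len blocks so it still covers the
     * original request at o_logical, given the normalized goal start
     * g_logical, mirroring ext3_mb_new_inode_pa(). */
    static unsigned place_chunk(unsigned o_logical, unsigned o_len,
                                unsigned g_logical, unsigned g_len,
                                unsigned b_len)
    {
        unsigned winl, wins, win, offs;

        assert(g_logical <= o_logical && o_len <= g_len && b_len < g_len);

        winl = o_logical - g_logical;  /* room between goal and request */
        wins = b_len - o_len;          /* slide while still covering    */
        win = winl < wins ? winl : wins;

        offs = o_logical % b_len;      /* prefer a length-aligned start */
        if (offs && offs < win)
            win = offs;

        return o_logical - win;
    }

    int main(void)
    {
        /* request 8 blocks at logical 100, goal normalized to 96..159,
         * but the allocator found only 32 blocks */
        printf("lstart = %u\n", place_chunk(100, 8, 96, 64, 32)); /* 96 */
        return 0;
    }

The chunk lands at logical 96, so blocks 100..107 of the original request still fall inside it.
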
++
++/*
++ * creates new preallocated space for the locality group this inode belongs to
++ */
++int ext3_mb_new_group_pa(struct ext3_allocation_context *ac)
++{
++      struct super_block *sb = ac->ac_sb;
++      struct ext3_locality_group *lg;
++      struct ext3_prealloc_space *pa;
++      struct ext3_group_info *grp;
++
++      /* preallocate only when the found space is larger than requested */
++      BUG_ON(ac->ac_o_ex.fe_len >= ac->ac_b_ex.fe_len);
++      BUG_ON(ac->ac_status != AC_STATUS_FOUND);
++      BUG_ON(!S_ISREG(ac->ac_inode->i_mode));
++
++      BUG_ON(ext3_pspace_cachep == NULL);
++      pa = kmem_cache_alloc(ext3_pspace_cachep, GFP_NOFS);
++      if (pa == NULL)
++              return -ENOMEM;
++
++      /* preallocation can change ac_b_ex, thus we store actually
++       * allocated blocks for history */
++      ac->ac_f_ex = ac->ac_b_ex;
++
++      pa->pa_pstart = ext3_grp_offs_to_block(sb, &ac->ac_b_ex);
++      pa->pa_lstart = pa->pa_pstart;
++      pa->pa_len = ac->ac_b_ex.fe_len;
++      pa->pa_free = pa->pa_len;
++      atomic_set(&pa->pa_count, 1);
++      spin_lock_init(&pa->pa_lock);
++      pa->pa_deleted = 0;
++      pa->pa_linear = 1;
++
++      mb_debug("new group pa %p: %lu/%lu for %lu\n", pa,
++                      pa->pa_pstart, pa->pa_len, pa->pa_lstart);
++
++      ext3_mb_use_group_pa(ac, pa);
++      atomic_add(pa->pa_free, &EXT3_SB(sb)->s_mb_preallocated);
++
++      grp = EXT3_GROUP_INFO(sb, ac->ac_b_ex.fe_group);
++      lg = ac->ac_lg;
++      BUG_ON(lg == NULL);
++
++      pa->pa_obj_lock = &lg->lg_prealloc_lock;
++      pa->pa_inode = NULL;
++
++      ext3_lock_group(sb, ac->ac_b_ex.fe_group);
++      list_add_rcu(&pa->pa_group_list, &grp->bb_prealloc_list);
++      ext3_unlock_group(sb, ac->ac_b_ex.fe_group);
++
++      spin_lock(pa->pa_obj_lock);
++      list_add_tail_rcu(&pa->pa_inode_list, &lg->lg_prealloc_list);
++      spin_unlock(pa->pa_obj_lock);
++
++      return 0;
++}
++
++int ext3_mb_new_preallocation(struct ext3_allocation_context *ac)
++{
++      int err;
++
++      if (ac->ac_flags & EXT3_MB_HINT_GROUP_ALLOC)
++              err = ext3_mb_new_group_pa(ac);
++      else
++              err = ext3_mb_new_inode_pa(ac);
++      return err;
++}
++
++/*
++ * finds all unused blocks in the on-disk bitmap and frees them in
++ * the in-core bitmap and buddy.
++ * @pa must be unlinked from inode and group lists, so that
++ * nobody else can find/use it.
++ * the caller MUST hold group/inode locks.
++ * TODO: optimize the case when there are no in-core structures yet
++ */
++int ext3_mb_release_inode_pa(struct ext3_buddy *e3b,
++                              struct buffer_head *bitmap_bh,
++                              struct ext3_prealloc_space *pa)
++{
++      struct ext3_allocation_context ac;
++      struct super_block *sb = e3b->bd_sb;
++      struct ext3_sb_info *sbi = EXT3_SB(sb);
++      unsigned long bit, end, next, group;
++      sector_t start;
++      int err = 0, free = 0;
++
++      BUG_ON(pa->pa_deleted == 0);
++      ext3_get_group_no_and_offset(sb, pa->pa_pstart, &group, &bit);
++      BUG_ON(group != e3b->bd_group && pa->pa_len != 0);
++      end = bit + pa->pa_len;
++
++      ac.ac_sb = sb;
++      ac.ac_inode = pa->pa_inode;
++      ac.ac_op = EXT3_MB_HISTORY_DISCARD;
++
++      while (bit < end) {
++              bit = mb_find_next_zero_bit(bitmap_bh->b_data, end, bit);
++              if (bit >= end)
++                      break;
++              next = mb_find_next_bit(bitmap_bh->b_data, end, bit);
++              if (next > end)
++                      next = end;
++              start = group * EXT3_BLOCKS_PER_GROUP(sb) + bit +
++                              le32_to_cpu(sbi->s_es->s_first_data_block);
++              mb_debug("    free preallocated %u/%u in group %u\n",
++                              (unsigned) start, (unsigned) next - bit,
++                              (unsigned) group);
++              free += next - bit;
++
++              ac.ac_b_ex.fe_group = group;
++              ac.ac_b_ex.fe_start = bit;
++              ac.ac_b_ex.fe_len = next - bit;
++              ac.ac_b_ex.fe_logical = 0;
++              ext3_mb_store_history(&ac);
++
++              mb_free_blocks(pa->pa_inode, e3b, bit, next - bit);
++              bit = next + 1;
++      }
++      if (free != pa->pa_free) {
++              printk("pa %p: logic %lu, phys. %lu, len %lu\n",
++                      pa, (unsigned long) pa->pa_lstart,
++                      (unsigned long) pa->pa_pstart,
++                      (unsigned long) pa->pa_len);
++              printk("free %u, pa_free %u\n", free, pa->pa_free);
++      }
++      BUG_ON(free != pa->pa_free);
++      atomic_add(free, &sbi->s_mb_discarded);
++
++      return err;
++}
++
++int ext3_mb_release_group_pa(struct ext3_buddy *e3b,
++                              struct ext3_prealloc_space *pa)
++{
++      struct ext3_allocation_context ac;
++      struct super_block *sb = e3b->bd_sb;
++      unsigned long bit, group;
++
++      ac.ac_op = EXT3_MB_HISTORY_DISCARD;
++
++      BUG_ON(pa->pa_deleted == 0);
++      ext3_get_group_no_and_offset(sb, pa->pa_pstart, &group, &bit);
++      BUG_ON(group != e3b->bd_group && pa->pa_len != 0);
++      mb_free_blocks(pa->pa_inode, e3b, bit, pa->pa_len);
++      atomic_add(pa->pa_len, &EXT3_SB(sb)->s_mb_discarded);
++
++      ac.ac_sb = sb;
++      ac.ac_inode = NULL;
++      ac.ac_b_ex.fe_group = group;
++      ac.ac_b_ex.fe_start = bit;
++      ac.ac_b_ex.fe_len = pa->pa_len;
++      ac.ac_b_ex.fe_logical = 0;
++      ext3_mb_store_history(&ac);
++
++      return 0;
++}
++
++/*
++ * releases all preallocations in given group
++ *
++ * first, we need to decide discard policy:
++ * - when do we discard
++ *   1) ENOSPC
++ * - how many do we discard
++ *   1) how many requested
++ */
++int ext3_mb_discard_group_preallocations(struct super_block *sb,
++                                              int group, int needed)
++{
++      struct ext3_group_info *grp = EXT3_GROUP_INFO(sb, group);
++      struct buffer_head *bitmap_bh = NULL;
++      struct ext3_prealloc_space *pa, *tmp;
++      struct list_head list;
++      struct ext3_buddy e3b;
++      int err, busy, free = 0;
++
++      mb_debug("discard preallocation for group %lu\n", group);
++
++      if (list_empty(&grp->bb_prealloc_list))
++              return 0;
++
++      bitmap_bh = read_block_bitmap(sb, group);
++      if (bitmap_bh == NULL) {
++              /* error handling here */
++              ext3_mb_release_desc(&e3b);
++              BUG_ON(bitmap_bh == NULL);
++      }
++
++      err = ext3_mb_load_buddy(sb, group, &e3b);
++      BUG_ON(err != 0); /* error handling here */
++
++      if (needed == 0)
++              needed = EXT3_BLOCKS_PER_GROUP(sb) + 1;
++
++      grp = EXT3_GROUP_INFO(sb, group);
++      INIT_LIST_HEAD(&list);
++
++repeat:
++      busy = 0;
++      ext3_lock_group(sb, group);
++      list_for_each_entry_safe (pa, tmp, &grp->bb_prealloc_list, pa_group_list) {
++              spin_lock(&pa->pa_lock);
++              if (atomic_read(&pa->pa_count)) {
++                      spin_unlock(&pa->pa_lock);
++                      printk("uh! busy PA\n");
++                      dump_stack();
++                      busy = 1;
++                      continue;
++              }
++              if (pa->pa_deleted) {
++                      spin_unlock(&pa->pa_lock);
++                      continue;
++              }
++
++              /* seems this one can be freed ... */
++              pa->pa_deleted = 1;
++
++              /* we can trust pa_free ... */
++              free += pa->pa_free;
++
++              spin_unlock(&pa->pa_lock);
++
++              list_del_rcu(&pa->pa_group_list);
++              list_add(&pa->u.pa_tmp_list, &list);
++      }
++
++      /* if we still need more blocks and some PAs were used, try again */
++      if (free < needed && busy) {
++              ext3_unlock_group(sb, group);
++              goto repeat;
++      }
++
++      /* found anything to free? */
++      if (list_empty(&list)) {
++              BUG_ON(free != 0);
++              goto out;
++      }
++
++      /* now free all selected PAs */
++      list_for_each_entry_safe(pa, tmp, &list, u.pa_tmp_list) {
++
++              /* remove from object (inode or locality group) */
++              spin_lock(pa->pa_obj_lock);
++              list_del_rcu(&pa->pa_inode_list);
++              spin_unlock(pa->pa_obj_lock);
++
++              if (pa->pa_linear)
++                      ext3_mb_release_group_pa(&e3b, pa);
++              else
++                      ext3_mb_release_inode_pa(&e3b, bitmap_bh, pa);
++
++              list_del(&pa->u.pa_tmp_list);
++              mb_call_rcu(pa);
++      }
++
++out:
++      ext3_unlock_group(sb, group);
++      ext3_mb_release_desc(&e3b);
++      brelse(bitmap_bh);
++      return free;
++}
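
The loop above is a two-phase discard: under the group lock, idle PAs are claimed (pa_deleted = 1) and moved onto a private list, and the actual release happens after the lock is dropped, with busy PAs triggering a retry. A simplified userspace sketch of the pattern (types and names invented; the group lock itself is elided):

    #include <stdlib.h>

    struct pa {
        struct pa *next;
        int refcount;                    /* stands in for pa_count   */
        int deleted;                     /* stands in for pa_deleted */
    };

    /* Phase 1: claim every idle entry and move it to a private list;
     * report whether busy entries had to be skipped. */
    static int collect_idle(struct pa **group, struct pa **victims)
    {
        struct pa **pp = group, *p;
        int busy = 0;

        while ((p = *pp) != NULL) {
            if (p->refcount > 0) {       /* in use: skip, caller may retry */
                busy++;
                pp = &p->next;
                continue;
            }
            p->deleted = 1;              /* claimed */
            *pp = p->next;               /* unlink from the group list */
            p->next = *victims;
            *victims = p;                /* stash for phase 2 */
        }
        return busy;
    }

    int main(void)
    {
        struct pa *group = NULL, *victims = NULL, *p;
        int i;

        for (i = 0; i < 4; i++) {        /* fake group list, one busy PA */
            p = calloc(1, sizeof(*p));
            if (p == NULL)
                return 1;
            p->refcount = (i == 2);
            p->next = group;
            group = p;
        }
        collect_idle(&group, &victims);
        while ((p = victims) != NULL) {  /* phase 2: free outside the lock */
            victims = p->next;
            free(p);
        }
        free(group);  /* the busy entry stayed on the group list */
        return 0;
    }

Keeping phase 2 outside the lock matters because releasing a PA touches the bitmap and buddy pages, far too much work to do under a spinlock.
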
++
++/*
++ * releases all unused preallocated blocks for the given inode
++ */
++void ext3_mb_discard_inode_preallocations(struct inode *inode)
++{
++      struct ext3_inode_info *ei = EXT3_I(inode);
++      struct super_block *sb = inode->i_sb;
++      struct buffer_head *bitmap_bh = NULL;
++      struct ext3_prealloc_space *pa, *tmp;
++      unsigned long group = 0;
++      struct list_head list;
++      struct ext3_buddy e3b;
++      int err;
++
++      if (!test_opt(sb, MBALLOC) || !S_ISREG(inode->i_mode)) {
++              /*BUG_ON(!list_empty(&ei->i_prealloc_list));*/
++              return;
++      }
++
++      mb_debug("discard preallocation for inode %lu\n", inode->i_ino);
++
++      INIT_LIST_HEAD(&list);
++
++repeat:
++      /* first, collect all pa's in the inode */
++      spin_lock(&ei->i_prealloc_lock);
++      while (!list_empty(&ei->i_prealloc_list)) {
++              pa = list_entry(ei->i_prealloc_list.next,
++                              struct ext3_prealloc_space, pa_inode_list);
++              BUG_ON(pa->pa_obj_lock != &ei->i_prealloc_lock);
++              spin_lock(&pa->pa_lock);
++              if (atomic_read(&pa->pa_count)) {
++                      /* this shouldn't happen often - nobody should
++                       * use preallocation while we're discarding it */
++                      spin_unlock(&pa->pa_lock);
++                      spin_unlock(&ei->i_prealloc_lock);
++                      printk("uh-oh! used pa while discarding\n");
++                      dump_stack();
++                      schedule_timeout_uninterruptible(HZ);
++                      goto repeat;
++
++              }
++              if (pa->pa_deleted == 0) {
++                      pa->pa_deleted = 1;
++                      spin_unlock(&pa->pa_lock);
++                      list_del_rcu(&pa->pa_inode_list);
++                      list_add(&pa->u.pa_tmp_list, &list);
++                      continue;
++              }
++
++              /* someone is deleting pa right now */
++              spin_unlock(&pa->pa_lock);
++              spin_unlock(&ei->i_prealloc_lock);
++
++              /* we have to wait here because pa_deleted
++               * doesn't mean the pa is already unlinked from
++               * the list. as we might be called from
++               * ->clear_inode(), the inode will get freed,
++               * and a concurrent thread that is unlinking the
++               * pa from the inode's list may access already
++               * freed memory, bad-bad-bad */
++
++              /* XXX: if this happens too often, we can
++               * add a flag to force wait only in case
++               * of ->clear_inode(), but not in case of
++               * regular truncate */
++              printk("uh-oh! some one just deleted it\n");
++              dump_stack();
++              schedule_timeout_uninterruptible(HZ);
++              goto repeat;
++      }
++      spin_unlock(&ei->i_prealloc_lock);
++
++      list_for_each_entry_safe(pa, tmp, &list, u.pa_tmp_list) {
++              BUG_ON(pa->pa_linear != 0);
++              ext3_get_group_no_and_offset(sb, pa->pa_pstart, &group, NULL);
++
++              err = ext3_mb_load_buddy(sb, group, &e3b);
++              BUG_ON(err != 0); /* error handling here */
++
++              bitmap_bh = read_block_bitmap(sb, group);
++              if (bitmap_bh == NULL) {
++                      /* error handling here */
++                      ext3_mb_release_desc(&e3b);
++                      BUG_ON(bitmap_bh == NULL);
++              }
++
++              ext3_lock_group(sb, group);
++              list_del_rcu(&pa->pa_group_list);
++              ext3_mb_release_inode_pa(&e3b, bitmap_bh, pa);
++              ext3_unlock_group(sb, group);
++
++              ext3_mb_release_desc(&e3b);
++              brelse(bitmap_bh);
++
++              list_del(&pa->u.pa_tmp_list);
++              mb_call_rcu(pa);
++      }
++}
++
++/*
++ * finds all preallocated spaces and return blocks being freed to them
++ * if preallocated space becomes full (no block is used from the space)
++ * then the function frees space in buddy
++ * XXX: at the moment, truncate (which is the only way to free blocks)
++ * discards all preallocations
++ */
++void ext3_mb_return_to_preallocation(struct inode *inode, struct ext3_buddy *e3b,
++                                      sector_t block, int count)
++{
++      BUG_ON(!list_empty(&EXT3_I(inode)->i_prealloc_list));
++}
++
++void ext3_mb_show_ac(struct ext3_allocation_context *ac)
++{
++#if 0
++      struct super_block *sb = ac->ac_sb;
++      int i;
++
++      printk(KERN_ERR "EXT3-fs: can't allocate: status %d flags %d\n",
++                      ac->ac_status, ac->ac_flags);
++      printk(KERN_ERR "EXT3-fs: orig %lu/%lu/%lu@%lu, goal %lu/%lu/%lu@%lu, "
++                      "best %lu/%lu/%lu@%lu cr %d\n",
++                      ac->ac_o_ex.fe_group, ac->ac_o_ex.fe_start,
++                      ac->ac_o_ex.fe_len, ac->ac_o_ex.fe_logical,
++                      ac->ac_g_ex.fe_group, ac->ac_g_ex.fe_start,
++                      ac->ac_g_ex.fe_len, ac->ac_g_ex.fe_logical,
++                      ac->ac_b_ex.fe_group, ac->ac_b_ex.fe_start,
++                      ac->ac_b_ex.fe_len, ac->ac_b_ex.fe_logical,
++                      ac->ac_criteria);
++      printk(KERN_ERR "EXT3-fs: %lu scanned, %d found\n", ac->ac_ex_scanned,
++              ac->ac_found);
++      printk("EXT3-fs: groups: ");
++      for (i = 0; i < EXT3_SB(sb)->s_groups_count; i++) {
++              struct ext3_group_info *grp = EXT3_GROUP_INFO(sb, i);
++              struct ext3_prealloc_space *pa;
++              unsigned long start;
++              struct list_head *cur;
++              list_for_each_rcu(cur, &grp->bb_prealloc_list) {
++                      pa = list_entry(cur, struct ext3_prealloc_space,
++                                      pa_group_list);
++                      spin_lock(&pa->pa_lock);
++                      ext3_get_group_no_and_offset(sb, pa->pa_pstart, NULL, &start);
++                      spin_unlock(&pa->pa_lock);
++                      printk("PA:%u:%lu:%u ", i, start, pa->pa_len);
++              }
++
++              if (grp->bb_free == 0)
++                      continue;
++              printk("%d: %d/%d ", i, grp->bb_free, grp->bb_fragments);
++      }
++      printk("\n");
++      //dump_stack();
++#endif
++}
++
++void ext3_mb_group_or_file(struct ext3_allocation_context *ac)
++{
++      struct ext3_sb_info *sbi = EXT3_SB(ac->ac_sb);
++      int bsbits = ac->ac_sb->s_blocksize_bits;
++      loff_t size, isize;
++
++      if (!(ac->ac_flags & EXT3_MB_HINT_DATA))
++              return;
++
++      size = ac->ac_o_ex.fe_logical + ac->ac_o_ex.fe_len;
++      isize = i_size_read(ac->ac_inode) >> bsbits;
++      if (size < isize)
++              size = isize;
++
++      /* don't use group allocation for large files */
++      if (size >= sbi->s_mb_stream_request)
++              return;
++
++      if (unlikely(ac->ac_flags & EXT3_MB_HINT_GOAL_ONLY))
++              return;
++
++      BUG_ON(ac->ac_lg != NULL);
++      ac->ac_lg = &sbi->s_locality_groups[smp_processor_id()];
++
++      /* we're going to use group allocation */
++      ac->ac_flags |= EXT3_MB_HINT_GROUP_ALLOC;
++
++      /* serialize all allocations in the group */
++      down(&ac->ac_lg->lg_sem);
++}
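
The policy above is a plain size threshold: files whose relevant size stays below s_mb_stream_request go through the per-CPU locality group, everything else keeps per-inode preallocation. A tiny restatement of the decision (the threshold is a mount-time tunable; the name and value here are only examples):

    #include <stdbool.h>

    /* Compare max(end of request, current file size), both in blocks,
     * against the stream threshold, as ext3_mb_group_or_file() does. */
    static bool use_group_allocation(unsigned long long req_end_blk,
                                     unsigned long long isize_blk,
                                     unsigned long stream_request)
    {
        unsigned long long size =
                req_end_blk > isize_blk ? req_end_blk : isize_blk;

        return size < stream_request;
    }

So with stream_request = 16, appending one block to a 4-block file goes through the locality group, while a file already hundreds of blocks long keeps its own inode preallocation.
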
++
++int ext3_mb_initialize_context(struct ext3_allocation_context *ac,
++                              struct ext3_allocation_request *ar)
++{
++      struct super_block *sb = ar->inode->i_sb;
++      struct ext3_sb_info *sbi = EXT3_SB(sb);
++      struct ext3_super_block *es = sbi->s_es;
++      unsigned long group, len, goal;
++      unsigned long block;
++
++      /* we can't allocate > group size */
++      len = ar->len;
++      if (len >= EXT3_BLOCKS_PER_GROUP(sb) - 10)
++              len = EXT3_BLOCKS_PER_GROUP(sb) - 10;
++
++      /* start searching from the goal */
++      goal = ar->goal;
++      if (goal < le32_to_cpu(es->s_first_data_block) ||
++                      goal >= le32_to_cpu(es->s_blocks_count))
++              goal = le32_to_cpu(es->s_first_data_block);
++      ext3_get_group_no_and_offset(sb, goal, &group, &block);
++
++      /* set up allocation goals */
++      ac->ac_b_ex.fe_logical = ar->logical;
++      ac->ac_b_ex.fe_group = 0;
++      ac->ac_b_ex.fe_start = 0;
++      ac->ac_b_ex.fe_len = 0;
++      ac->ac_status = AC_STATUS_CONTINUE;
++      ac->ac_groups_scanned = 0;
++      ac->ac_ex_scanned = 0;
++      ac->ac_found = 0;
++      ac->ac_sb = sb;
++      ac->ac_inode = ar->inode;
++      ac->ac_o_ex.fe_logical = ar->logical;
++      ac->ac_o_ex.fe_group = group;
++      ac->ac_o_ex.fe_start = block;
++      ac->ac_o_ex.fe_len = len;
++      ac->ac_g_ex.fe_logical = ar->logical;
++      ac->ac_g_ex.fe_group = group;
++      ac->ac_g_ex.fe_start = block;
++      ac->ac_g_ex.fe_len = len;
++      ac->ac_f_ex.fe_len = 0;
++      ac->ac_flags = ar->flags;
++      ac->ac_2order = 0;
++      ac->ac_criteria = 0;
++      ac->ac_pa = NULL;
++      ac->ac_bitmap_page = NULL;
++      ac->ac_buddy_page = NULL;
++      ac->ac_lg = NULL;
++
++      /* we have to define the context: will we work with a file or
++       * a locality group? this is a policy, actually */
++      ext3_mb_group_or_file(ac);
++
++      mb_debug("init ac: %u blocks @ %llu, goal %llu, flags %x, 2^%d, "
++                      "left: %llu/%llu, right %llu/%llu to %swritable\n",
++                      (unsigned) ar->len, (unsigned) ar->logical,
++                      (unsigned) ar->goal, ac->ac_flags, ac->ac_2order,
++                      (unsigned) ar->lleft, (unsigned) ar->pleft,
++                      (unsigned) ar->lright, (unsigned) ar->pright,
++                      atomic_read(&ar->inode->i_writecount) ? "" : "non-");
++      return 0;
++
++}
++
++/*
++ * release all resource we used in allocation
++ */
++int ext3_mb_release_context(struct ext3_allocation_context *ac)
++{
++      if (ac->ac_pa) {
++              if (ac->ac_pa->pa_linear) {
++                      /* see comment in ext3_mb_use_group_pa() */
++                      spin_lock(&ac->ac_pa->pa_lock);
++                      ac->ac_pa->pa_pstart += ac->ac_b_ex.fe_len;
++                      ac->ac_pa->pa_lstart += ac->ac_b_ex.fe_len;
++                      ac->ac_pa->pa_free -= ac->ac_b_ex.fe_len;
++                      ac->ac_pa->pa_len -= ac->ac_b_ex.fe_len;
++                      spin_unlock(&ac->ac_pa->pa_lock);
++              }
++              ext3_mb_put_pa(ac, ac->ac_sb, ac->ac_pa);
++      }
++      if (ac->ac_bitmap_page)
++              page_cache_release(ac->ac_bitmap_page);
++      if (ac->ac_buddy_page)
++              page_cache_release(ac->ac_buddy_page);
++      if (ac->ac_flags & EXT3_MB_HINT_GROUP_ALLOC)
++              up(&ac->ac_lg->lg_sem);
++      ext3_mb_collect_stats(ac);
++      return 0;
++}
++
++int ext3_mb_discard_preallocations(struct super_block *sb, int needed)
++{
++      int i, ret, freed = 0;
++
++      for (i = 0; i < EXT3_SB(sb)->s_groups_count && needed > 0; i++) {
++              ret = ext3_mb_discard_group_preallocations(sb, i, needed);
++              freed += ret;
++              needed -= ret;
++      }
++
++      return freed;
++}
++
++/*
++ * Main entry point into mballoc to allocate blocks
++ * it tries to use preallocation first, then falls back
++ * to usual allocation
++ */
++unsigned long ext3_mb_new_blocks(handle_t *handle,
++                               struct ext3_allocation_request *ar, int *errp)
++{
++      struct ext3_allocation_context ac;
++      struct ext3_sb_info *sbi;
++      struct super_block *sb;
++      unsigned long block = 0;
++      int freed, inquota;
++
++      sb = ar->inode->i_sb;
++      sbi = EXT3_SB(sb);
++
++      if (!test_opt(sb, MBALLOC)) {
++              static int ext3_mballoc_warning = 0;
++              if (ext3_mballoc_warning++ == 0)
++                      printk(KERN_ERR "EXT3-fs: multiblock request with "
++                                      "mballoc disabled!\n");
++              ar->len = 1;
++              block = ext3_new_block_old(handle, ar->inode, ar->goal, errp);
++              return block;
++      }
++
++      while (ar->len && DQUOT_ALLOC_BLOCK(ar->inode, ar->len)) {
++              ar->flags |= EXT3_MB_HINT_NOPREALLOC;
++              ar->len--;
++      }
++      if (ar->len == 0) {
++              *errp = -EDQUOT;
++              return 0;
++      }
++      inquota = ar->len;
++
++      ext3_mb_poll_new_transaction(sb, handle);
++
++      if ((*errp = ext3_mb_initialize_context(&ac, ar))) {
++              ar->len = 0;
++              goto out;
++      }
++
++      ac.ac_op = EXT3_MB_HISTORY_PREALLOC;
++      if (!ext3_mb_use_preallocated(&ac)) {
++
++              ac.ac_op = EXT3_MB_HISTORY_ALLOC;
++              ext3_mb_normalize_request(&ac, ar);
++
++repeat:
++              /* allocate space in core */
++              ext3_mb_regular_allocator(&ac);
++
++              /* as we've just preallocated more space than
++               * the user originally requested, we store the allocated
++               * space in a special descriptor */
++              if (ac.ac_status == AC_STATUS_FOUND &&
++                              ac.ac_o_ex.fe_len < ac.ac_b_ex.fe_len)
++                      ext3_mb_new_preallocation(&ac);
++      }
++
++      if (likely(ac.ac_status == AC_STATUS_FOUND)) {
++              ext3_mb_mark_diskspace_used(&ac, handle);
++              *errp = 0;
++              block = ext3_grp_offs_to_block(sb, &ac.ac_b_ex);
++              ar->len = ac.ac_b_ex.fe_len;
++      } else {
++              freed  = ext3_mb_discard_preallocations(sb, ac.ac_o_ex.fe_len);
++              if (freed)
++                      goto repeat;
++              *errp = -ENOSPC;
++              ac.ac_b_ex.fe_len = 0;
++              ar->len = 0;
++              ext3_mb_show_ac(&ac);
++      }
++
++      ext3_mb_release_context(&ac);
++
++out:
++      if (ar->len < inquota)
++              DQUOT_FREE_BLOCK(ar->inode, inquota - ar->len);
++
++      return block;
++}
++EXPORT_SYMBOL(ext3_mb_new_blocks);
++
++int ext3_new_block(handle_t *handle, struct inode *inode,
++                 unsigned long goal, int *errp)
++{
++      struct ext3_allocation_request ar;
++      unsigned long ret;
++
++      if (!test_opt(inode->i_sb, MBALLOC)) {
++              ret = ext3_new_block_old(handle, inode, goal, errp);
++              return ret;
++      }
++
++      ar.inode = inode;
++      ar.goal = goal;
++      ar.len = 1;
++      ar.logical = 0;
++      ar.lleft = 0;
++      ar.pleft = 0;
++      ar.lright = 0;
++      ar.pright = 0;
++      ar.flags = 0;
++      ret = ext3_mb_new_blocks(handle, &ar, errp);
++      return ret;
++}
++
++void ext3_mb_poll_new_transaction(struct super_block *sb, handle_t *handle)
++{
++      struct ext3_sb_info *sbi = EXT3_SB(sb);
++
++      if (sbi->s_last_transaction == handle->h_transaction->t_tid)
++              return;
++
++      /* new transaction! time to close the last one and free blocks for
++       * the committed transaction. we know that only one transaction can
++       * be active, so the previous transaction may still be getting
++       * logged, and the transaction before the previous one is known to
++       * be already logged. this means that now we may free blocks freed
++       * in all transactions before the previous one. hope I'm clear
++       * enough ... */
++
++      spin_lock(&sbi->s_md_lock);
++      if (sbi->s_last_transaction != handle->h_transaction->t_tid) {
++              mb_debug("new transaction %lu, old %lu\n",
++                              (unsigned long) handle->h_transaction->t_tid,
++                              (unsigned long) sbi->s_last_transaction);
++              list_splice_init(&sbi->s_closed_transaction,
++                              &sbi->s_committed_transaction);
++              list_splice_init(&sbi->s_active_transaction,
++                              &sbi->s_closed_transaction);
++              sbi->s_last_transaction = handle->h_transaction->t_tid;
++      }
++      spin_unlock(&sbi->s_md_lock);
++
++      ext3_mb_free_committed_blocks(sb);
++}
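
The pair of list_splice_init() calls above implements a three-stage pipeline: blocks freed in the running transaction sit on s_active_transaction, move to s_closed_transaction at the next transaction switch, and one switch later become freeable. A standalone sketch of that aging step (minimal singly-linked lists instead of the kernel's list.h):

    struct md { struct md *next; };

    struct aging {
        struct md *active;     /* freed in the running transaction  */
        struct md *closed;     /* freed in the previous transaction */
        struct md *committed;  /* safe to return to the buddy       */
    };

    /* one transaction switch, mirroring the splice block above */
    static void age_one_transaction(struct aging *a)
    {
        struct md *m, *next;

        for (m = a->closed; m != NULL; m = next) {  /* committed += closed */
            next = m->next;
            m->next = a->committed;
            a->committed = m;
        }
        a->closed = a->active;                      /* closed = active */
        a->active = NULL;
    }

A freed metadata block therefore becomes reusable only after two transaction switches, which is exactly the guarantee the comment above argues for.
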
++
++int ext3_mb_free_metadata(handle_t *handle, struct ext3_buddy *e3b,
++                        int group, int block, int count)
++{
++      struct ext3_group_info *db = e3b->bd_info;
++      struct super_block *sb = e3b->bd_sb;
++      struct ext3_sb_info *sbi = EXT3_SB(sb);
++      struct ext3_free_metadata *md;
++      int i;
++
++      BUG_ON(e3b->bd_bitmap_page == NULL);
++      BUG_ON(e3b->bd_buddy_page == NULL);
++
++      ext3_lock_group(sb, group);
++      for (i = 0; i < count; i++) {
++              md = db->bb_md_cur;
++              if (md && db->bb_tid != handle->h_transaction->t_tid) {
++                      db->bb_md_cur = NULL;
++                      md = NULL;
++              }
++
++              if (md == NULL) {
++                      ext3_unlock_group(sb, group);
++                      md = kmalloc(sizeof(*md), GFP_KERNEL);
++                      if (md == NULL)
++                              return -ENOMEM;
++                      md->num = 0;
++                      md->group = group;
++
++                      ext3_lock_group(sb, group);
++                      if (db->bb_md_cur == NULL) {
++                              spin_lock(&sbi->s_md_lock);
++                              list_add(&md->list, &sbi->s_active_transaction);
++                              spin_unlock(&sbi->s_md_lock);
++                              /* protect buddy cache from being freed,
++                               * otherwise we'll refresh it from
++                               * on-disk bitmap and lose not-yet-available
++                               * blocks */
++                              page_cache_get(e3b->bd_buddy_page);
++                              page_cache_get(e3b->bd_bitmap_page);
++                              db->bb_md_cur = md;
++                              db->bb_tid = handle->h_transaction->t_tid;
++                              mb_debug("new md 0x%p for group %u\n",
++                                              md, md->group);
++                      } else {
++                              kfree(md);
++                              md = db->bb_md_cur;
++                      }
++              }
++
++              BUG_ON(md->num >= EXT3_BB_MAX_BLOCKS);
++              md->blocks[md->num] = block + i;
++              md->num++;
++              if (md->num == EXT3_BB_MAX_BLOCKS) {
++                      /* no more space, put full container on a sb's list */
++                      db->bb_md_cur = NULL;
++              }
++      }
++      ext3_unlock_group(sb, group);
++      return 0;
++}
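
The container logic above batches freed metadata blocks per group: at most one container is open (bb_md_cur), and a full one is left on the superblock's list while a fresh container gets allocated on the next free. A compact sketch of just that rule (capacity and names are illustrative):

    #include <stdlib.h>

    #define MAX_BLOCKS 16                /* stands in for EXT3_BB_MAX_BLOCKS */

    struct md {
        int num;
        unsigned long blocks[MAX_BLOCKS];
    };

    /* Append one block, opening a fresh container when none is current
     * and detaching the current one when it fills, as in
     * ext3_mb_free_metadata(). */
    static int record_freed_block(struct md **cur, unsigned long block)
    {
        if (*cur == NULL) {
            *cur = calloc(1, sizeof(**cur));
            if (*cur == NULL)
                return -1;               /* -ENOMEM in the patch */
        }
        (*cur)->blocks[(*cur)->num++] = block;
        if ((*cur)->num == MAX_BLOCKS)
            *cur = NULL;                 /* full: stays on the sb list */
        return 0;
    }

The real function additionally tags the container with the transaction id and pins the bitmap and buddy pages, so blocks that are not yet reusable survive a buddy-cache reload.
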
++
++/*
++ * Main entry point into mballoc to free blocks
++ */
++void ext3_mb_free_blocks(handle_t *handle, struct inode *inode,
++                      unsigned long block, unsigned long count,
++                      int metadata, int *freed)
++{
++      struct buffer_head *bitmap_bh = NULL;
++      struct super_block *sb = inode->i_sb;
++      struct ext3_allocation_context ac;
++      struct ext3_group_desc *gdp;
++      struct ext3_super_block *es;
++      unsigned long bit, overflow;
++      struct buffer_head *gd_bh;
++      unsigned long block_group;
++      struct ext3_sb_info *sbi;
++      struct ext3_buddy e3b;
++      int err = 0, ret;
++
++      *freed = 0;
++
++      ext3_mb_poll_new_transaction(sb, handle);
++
++      sbi = EXT3_SB(sb);
++      es = EXT3_SB(sb)->s_es;
++      if (block < le32_to_cpu(es->s_first_data_block) ||
++          block + count < block ||
++          block + count > le32_to_cpu(es->s_blocks_count)) {
++              ext3_error (sb, __FUNCTION__,
++                          "Freeing blocks not in datazone - "
++                          "block = %lu, count = %lu", block, count);
++              goto error_return;
++      }
++
++      ext3_debug("freeing block %lu\n", block);
++
++      ac.ac_op = EXT3_MB_HISTORY_FREE;
++      ac.ac_inode = inode;
++      ac.ac_sb = sb;
++
++do_more:
++      overflow = 0;
++      ext3_get_group_no_and_offset(sb, block, &block_group, &bit);
++
++      /*
++       * Check to see if we are freeing blocks across a group
++       * boundary.
++       */
++      if (bit + count > EXT3_BLOCKS_PER_GROUP(sb)) {
++              overflow = bit + count - EXT3_BLOCKS_PER_GROUP(sb);
++              count -= overflow;
++      }
++      brelse(bitmap_bh);
++      bitmap_bh = read_block_bitmap(sb, block_group);
++      if (!bitmap_bh)
++              goto error_return;
++      gdp = ext3_get_group_desc (sb, block_group, &gd_bh);
++      if (!gdp)
++              goto error_return;
++
++      if (in_range (le32_to_cpu(gdp->bg_block_bitmap), block, count) ||
++          in_range (le32_to_cpu(gdp->bg_inode_bitmap), block, count) ||
++          in_range (block, le32_to_cpu(gdp->bg_inode_table),
++                    EXT3_SB(sb)->s_itb_per_group) ||
++          in_range (block + count - 1, le32_to_cpu(gdp->bg_inode_table),
++                    EXT3_SB(sb)->s_itb_per_group))
++              ext3_error(sb, __FUNCTION__,
++                         "Freeing blocks in system zone - "
++                         "Block = %lu, count = %lu", block, count);
++
++      BUFFER_TRACE(bitmap_bh, "getting write access");
++      err = ext3_journal_get_write_access(handle, bitmap_bh);
++      if (err)
++              goto error_return;
++
++      /*
++       * We are about to modify some metadata.  Call the journal APIs
++       * to unshare ->b_data if a currently-committing transaction is
++       * using it
++       */
++      BUFFER_TRACE(gd_bh, "get_write_access");
++      err = ext3_journal_get_write_access(handle, gd_bh);
++      if (err)
++              goto error_return;
++
++      err = ext3_mb_load_buddy(sb, block_group, &e3b);
++      if (err)
++              goto error_return;
++
++#ifdef AGGRESSIVE_CHECK
++      {
++              int i;
++              for (i = 0; i < count; i++)
++                      BUG_ON(!mb_test_bit(bit + i, bitmap_bh->b_data));
++      }
++#endif
++      mb_clear_bits(sb_bgl_lock(sbi, block_group), bitmap_bh->b_data, bit,
++                      count);
++
++      /* We dirtied the bitmap block */
++      BUFFER_TRACE(bitmap_bh, "dirtied bitmap block");
++      err = ext3_journal_dirty_metadata(handle, bitmap_bh);
++
++      ac.ac_b_ex.fe_group = block_group;
++      ac.ac_b_ex.fe_start = bit;
++      ac.ac_b_ex.fe_len = count;
++      ext3_mb_store_history(&ac);
++
++      if (metadata) {
++              /* blocks being freed are metadata. these blocks shouldn't
++               * be used until this transaction is committed */
++              ext3_mb_free_metadata(handle, &e3b, block_group, bit, count);
++      } else {
++              ext3_lock_group(sb, block_group);
++              err = mb_free_blocks(inode, &e3b, bit, count);
++              ext3_mb_return_to_preallocation(inode, &e3b, block, count);
++              ext3_unlock_group(sb, block_group);
++              BUG_ON(err != 0);
++      }
++
++      spin_lock(sb_bgl_lock(sbi, block_group));
++      gdp->bg_free_blocks_count =
++              cpu_to_le16(le16_to_cpu(gdp->bg_free_blocks_count) + count);
++      spin_unlock(sb_bgl_lock(sbi, block_group));
++      percpu_counter_mod(&sbi->s_freeblocks_counter, count);
++
++      ext3_mb_release_desc(&e3b);
++
++      *freed += count;
++
++      /* And the group descriptor block */
++      BUFFER_TRACE(gd_bh, "dirtied group descriptor block");
++      ret = ext3_journal_dirty_metadata(handle, gd_bh);
++      if (!err) err = ret;
++
++      if (overflow && !err) {
++              block += count;
++              count = overflow;
++              goto do_more;
++      }
++      sb->s_dirt = 1;
++error_return:
++      brelse(bitmap_bh);
++      ext3_std_error(sb, err);
++      return;
++}
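
Throughout this patch, ext3_get_group_no_and_offset() and ext3_grp_offs_to_block() convert between an absolute block number and a (group, offset) pair. The mapping is plain arithmetic around s_first_data_block; a minimal sketch with hypothetical geometry (not code from the patch):

    #include <stdio.h>

    /* 1 KB blocks: 8192 blocks per group, first data block 1 (example) */
    #define BLOCKS_PER_GROUP 8192UL
    #define FIRST_DATA_BLOCK 1UL

    static void block_to_group_offset(unsigned long block,
                                      unsigned long *group,
                                      unsigned long *offset)
    {
        unsigned long rel = block - FIRST_DATA_BLOCK;

        *group = rel / BLOCKS_PER_GROUP;
        *offset = rel % BLOCKS_PER_GROUP;
    }

    static unsigned long group_offset_to_block(unsigned long group,
                                               unsigned long offset)
    {
        return group * BLOCKS_PER_GROUP + offset + FIRST_DATA_BLOCK;
    }

    int main(void)
    {
        unsigned long g, off;

        block_to_group_offset(20000, &g, &off);
        printf("block 20000 -> group %lu, offset %lu\n", g, off); /* 2, 3615 */
        printf("back: %lu\n", group_offset_to_block(g, off));     /* 20000  */
        return 0;
    }

The forward formula appears inline in ext3_mb_release_inode_pa() above, where the freed range's start block is reconstructed from the group and bit.
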
diff --git a/ldiskfs/kernel_patches/patches/ext3-mmp-2.6.22-vanilla.patch b/ldiskfs/kernel_patches/patches/ext3-mmp-2.6.22-vanilla.patch
new file mode 100644 (file)
index 0000000..afb34e1
--- /dev/null
@@ -0,0 +1,463 @@
+Index: linux-2.6.18/fs/ext3/super.c
+===================================================================
+--- linux-2.6.18.orig/fs/ext3/super.c
++++ linux-2.6.18/fs/ext3/super.c
+@@ -35,6 +35,8 @@
+ #include <linux/namei.h>
+ #include <linux/quotaops.h>
+ #include <linux/seq_file.h>
++#include <linux/kthread.h>
++#include <linux/utsname.h>
+ #include <asm/uaccess.h>
+@@ -435,6 +437,9 @@ static void ext3_put_super (struct super
+               invalidate_bdev(sbi->journal_bdev, 0);
+               ext3_blkdev_remove(sbi);
+       }
++      if (sbi->s_mmp_tsk)
++              kthread_stop(sbi->s_mmp_tsk);
++
+       sb->s_fs_info = NULL;
+       kfree(sbi);
+       return;
+@@ -1528,6 +1533,313 @@ static ext3_fsblk_t descriptor_loc(struc
+       return (has_super + ext3_group_first_block_no(sb, bg));
+ }
++/*
++ * Write the MMP block using WRITE_SYNC to try to get the block on-disk
++ * faster.
++ */
++static int write_mmp_block(struct buffer_head *bh)
++{
++      mark_buffer_dirty(bh);
++      lock_buffer(bh);
++      bh->b_end_io = end_buffer_write_sync;
++      get_bh(bh);
++      submit_bh(WRITE_SYNC, bh);
++      wait_on_buffer(bh);
++      if (unlikely(!buffer_uptodate(bh)))
++              return 1;
++
++      return 0;
++}
++
++/*
++ * Read the MMP block. It _must_ be read from disk and hence we clear the
++ * uptodate flag on the buffer.
++ */
++static int read_mmp_block(struct super_block *sb, struct buffer_head **bh,
++                        unsigned long mmp_block)
++{
++      struct mmp_struct *mmp;
++
++      if (*bh)
++              clear_buffer_uptodate(*bh);
++
++      brelse(*bh);
++
++      *bh = sb_bread(sb, mmp_block);
++      if (!*bh) {
++              ext3_warning(sb, __FUNCTION__,
++                           "Error while reading MMP block %lu", mmp_block);
++              return -EIO;
++      }
++
++      mmp = (struct mmp_struct *)((*bh)->b_data);
++      if (le32_to_cpu(mmp->mmp_magic) != EXT3_MMP_MAGIC)
++              return -EINVAL;
++
++      return 0;
++}
++
++/*
++ * Dump as much information as possible to help the admin.
++ */
++static void dump_mmp_msg(struct super_block *sb, struct mmp_struct *mmp,
++                       const char *function, const char *msg)
++{
++      ext3_warning(sb, function, msg);
++      ext3_warning(sb, function, "MMP failure info: last update time: %llu, "
++                   "last update node: %s, last update device: %s\n",
++                   le64_to_cpu(mmp->mmp_time), mmp->mmp_nodename,
++                   mmp->mmp_bdevname);
++}
++
++/*
++ * kmmpd will update the MMP sequence every s_mmp_update_interval seconds
++ */
++static int kmmpd(void *data)
++{
++      struct super_block *sb = (struct super_block *) data;
++      struct ext3_super_block *es = EXT3_SB(sb)->s_es;
++      struct buffer_head *bh = NULL;
++      struct mmp_struct *mmp;
++      unsigned long mmp_block;
++      u32 seq = 0;
++      unsigned long failed_writes = 0;
++      int mmp_update_interval = le16_to_cpu(es->s_mmp_update_interval);
++      unsigned mmp_check_interval;
++      unsigned long last_update_time;
++      unsigned long diff;
++      int retval;
++
++      mmp_block = le64_to_cpu(es->s_mmp_block);
++      retval = read_mmp_block(sb, &bh, mmp_block);
++      if (retval)
++              goto failed;
++
++      mmp = (struct mmp_struct *)(bh->b_data);
++      mmp->mmp_time = cpu_to_le64(get_seconds());
++      /*
++       * Start with the higher mmp_check_interval and reduce it if
++       * the MMP block is being updated on time.
++       */
++      mmp_check_interval = max(5 * mmp_update_interval,
++                               EXT3_MMP_MIN_CHECK_INTERVAL);
++      mmp->mmp_check_interval = cpu_to_le16(mmp_check_interval);
++      bdevname(bh->b_bdev, mmp->mmp_bdevname);
++
++      down_read(&uts_sem);
++      memcpy(mmp->mmp_nodename, utsname()->nodename,
++             sizeof(mmp->mmp_nodename));
++      up_read(&uts_sem);
++
++      while (!kthread_should_stop()) {
++              if (++seq > EXT3_MMP_SEQ_MAX)
++                      seq = 1;
++
++              mmp->mmp_seq = cpu_to_le32(seq);
++              mmp->mmp_time = cpu_to_le64(get_seconds());
++              last_update_time = jiffies;
++
++              retval = write_mmp_block(bh);
++              /*
++               * Don't spew too many error messages. Print one every
++               * (s_mmp_update_interval * 60) seconds.
++               */
++              if (retval && (failed_writes++ % 60) == 0)
++                      ext3_error(sb, __FUNCTION__,
++                                 "Error writing to MMP block");
++
++              if (!(le32_to_cpu(es->s_feature_incompat) &
++                  EXT3_FEATURE_INCOMPAT_MMP)) {
++                      ext3_warning(sb, __FUNCTION__, "kmmpd being stopped "
++                                   "since MMP feature has been disabled.");
++                      EXT3_SB(sb)->s_mmp_tsk = NULL;
++                      goto failed;
++              }
++
++              if (sb->s_flags & MS_RDONLY) {
++                      ext3_warning(sb, __FUNCTION__, "kmmpd being stopped "
++                                   "since filesystem has been remounted as "
++                                   "readonly.");
++                      EXT3_SB(sb)->s_mmp_tsk = NULL;
++                      goto failed;
++              }
++
++              diff = jiffies - last_update_time;
++              if (diff < mmp_update_interval * HZ)
++                      schedule_timeout_interruptible(mmp_update_interval *
++                                                     HZ - diff);
++
++              /*
++               * We need to make sure that more than mmp_check_interval
++               * seconds have not passed since writing. If that has happened
++               * we need to check if the MMP block is as we left it.
++               */
++              diff = jiffies - last_update_time;
++              if (diff > mmp_check_interval * HZ) {
++                      struct buffer_head *bh_check = NULL;
++                      struct mmp_struct *mmp_check;
++
++                      retval = read_mmp_block(sb, &bh_check, mmp_block);
++                      if (retval) {
++                              EXT3_SB(sb)->s_mmp_tsk = NULL;
++                              goto failed;
++                      }
++
++                      mmp_check = (struct mmp_struct *)(bh_check->b_data);
++                      if (mmp->mmp_time != mmp_check->mmp_time ||
++                          memcmp(mmp->mmp_nodename, mmp_check->mmp_nodename,
++                                 sizeof(mmp->mmp_nodename)))
++                              dump_mmp_msg(sb, mmp_check, __FUNCTION__,
++                                           "Error while updating MMP info. "
++                                           "The filesystem seems to have "
++                                           "been multiply mounted.");
++
++                      put_bh(bh_check);
++              }
++
++              /*
++               * Adjust the mmp_check_interval depending on how much time
++               * it took for the MMP block to be written.
++               */
++              mmp_check_interval = max(5 * diff / HZ,
++                               (unsigned long) EXT3_MMP_MIN_CHECK_INTERVAL);
++              mmp->mmp_check_interval = cpu_to_le16(mmp_check_interval);
++      }
++
++      /*
++       * Unmount seems to be clean.
++       */
++      mmp->mmp_seq = cpu_to_le32(EXT3_MMP_SEQ_CLEAN);
++      mmp->mmp_time = cpu_to_le64(get_seconds());
++
++      retval = write_mmp_block(bh);
++
++failed:
++      brelse(bh);
++      return retval;
++}
++
++/*
++ * Get a random new sequence number but make sure it is not greater than
++ * EXT3_MMP_SEQ_MAX.
++ */
++static unsigned int mmp_new_seq(void)
++{
++      u32 new_seq;
++
++      do {
++              get_random_bytes(&new_seq, sizeof(u32));
++      } while (new_seq > EXT3_MMP_SEQ_MAX);
++
++      return new_seq;
++}
++
++/*
++ * Protect the filesystem from being mounted more than once.
++ */
++static int ext3_multi_mount_protect(struct super_block *sb,
++                                  unsigned long mmp_block)
++{
++      struct ext3_super_block *es = EXT3_SB(sb)->s_es;
++      struct buffer_head *bh = NULL;
++      struct mmp_struct *mmp = NULL;
++      u32 seq;
++      unsigned int mmp_check_interval = le16_to_cpu(es->s_mmp_update_interval);
++      int retval;
++
++      if (mmp_block < le32_to_cpu(es->s_first_data_block) ||
++          mmp_block >= le32_to_cpu(es->s_blocks_count)) {
++              ext3_warning(sb, __FUNCTION__,
++                           "Invalid MMP block in superblock");
++              goto failed;
++      }
++
++      retval = read_mmp_block(sb, &bh, mmp_block);
++      if (retval)
++              goto failed;
++
++      mmp = (struct mmp_struct *)(bh->b_data);
++
++      if (mmp_check_interval < EXT3_MMP_MIN_CHECK_INTERVAL)
++              mmp_check_interval = EXT3_MMP_MIN_CHECK_INTERVAL;
++
++      /*
++       * If check_interval in MMP block is larger, use that instead of
++       * update_interval from the superblock.
++       */
++      if (le16_to_cpu(mmp->mmp_check_interval) > mmp_check_interval)
++              mmp_check_interval = le16_to_cpu(mmp->mmp_check_interval);
++
++      seq = le32_to_cpu(mmp->mmp_seq);
++      if (seq == EXT3_MMP_SEQ_CLEAN)
++              goto skip;
++
++      if (seq == EXT3_MMP_SEQ_FSCK) {
++              dump_mmp_msg(sb, mmp, __FUNCTION__,
++                           "fsck is running on the filesystem");
++              goto failed;
++      }
++
++      schedule_timeout_uninterruptible(HZ * (2 * mmp_check_interval + 1));
++
++      retval = read_mmp_block(sb, &bh, mmp_block);
++      if (retval)
++              goto failed;
++      mmp = (struct mmp_struct *)(bh->b_data);
++      if (seq != le32_to_cpu(mmp->mmp_seq)) {
++              dump_mmp_msg(sb, mmp, __FUNCTION__,
++                           "Device is already active on another node.");
++              goto failed;
++      }
++
++skip:
++      /*
++       * write a new random sequence number.
++       */
++      seq = mmp_new_seq();
++      mmp->mmp_seq = cpu_to_le32(seq);
++
++      retval = write_mmp_block(bh);
++      if (retval)
++              goto failed;
++
++      /*
++       * wait for MMP interval and check mmp_seq.
++       */
++      schedule_timeout_uninterruptible(HZ * (2 * mmp_check_interval + 1));
++
++      retval = read_mmp_block(sb, &bh, mmp_block);
++      if (retval)
++              goto failed;
++      mmp = (struct mmp_struct *)(bh->b_data);
++      if (seq != le32_to_cpu(mmp->mmp_seq)) {
++              dump_mmp_msg(sb, mmp, __FUNCTION__,
++                           "Device is already active on another node.");
++              goto failed;
++      }
++
++      /*
++       * Start a kernel thread to update the MMP block periodically.
++       */
++      EXT3_SB(sb)->s_mmp_tsk = kthread_run(kmmpd, sb, "kmmpd-%02x:%02x",
++                                           MAJOR(sb->s_dev),
++                                           MINOR(sb->s_dev));
++      if (IS_ERR(EXT3_SB(sb)->s_mmp_tsk)) {
++              EXT3_SB(sb)->s_mmp_tsk = NULL;
++              ext3_warning(sb, __FUNCTION__, "Unable to create kmmpd thread "
++                           "for %s.", sb->s_id);
++              goto failed;
++      }
++
++      brelse(bh);
++      return 0;
++
++failed:
++      brelse(bh);
++      return 1;
++}
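
The mount-time protocol above has a fixed shape: read the sequence, bail out on the CLEAN or FSCK markers, otherwise wait roughly twice the check interval, re-read, and refuse to mount if the sequence moved; then publish a fresh random sequence and verify once more. A userspace simulation of that handshake against an in-memory "MMP block" (all names and values hypothetical; this is a sketch, not the patch's code):

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>
    #include <stdlib.h>
    #include <unistd.h>

    #define SEQ_CLEAN 0xFF4D4D50U
    #define SEQ_FSCK  0xE24D4D50U

    static uint32_t mmp_seq;                  /* stands in for the on-disk block */

    static uint32_t read_seq(void)    { return mmp_seq; }
    static void write_seq(uint32_t s) { mmp_seq = s; }

    /* returns true if it is safe to "mount"; check_interval in seconds */
    static bool multi_mount_protect(unsigned check_interval)
    {
        uint32_t seq = read_seq();

        if (seq == SEQ_FSCK)
            return false;                     /* fsck owns the device */
        if (seq != SEQ_CLEAN) {
            sleep(2 * check_interval + 1);    /* let a live kmmpd advance it */
            if (read_seq() != seq)
                return false;                 /* someone else is updating it */
        }

        write_seq(1 + rand() % 0xE24D4D4FU);  /* new random seq <= SEQ_MAX */
        seq = read_seq();
        sleep(2 * check_interval + 1);        /* would a racing mounter win? */
        return read_seq() == seq;
    }

    int main(void)
    {
        write_seq(SEQ_CLEAN);
        printf("mount %s\n", multi_mount_protect(1) ? "allowed" : "denied");
        return 0;
    }

After this handshake succeeds, the real function hands the block over to kmmpd, which keeps bumping the sequence for as long as the filesystem stays mounted read-write.
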
++
+ static int ext3_fill_super (struct super_block *sb, void *data, int silent)
+ {
+@@ -1844,6 +2156,11 @@ static int ext3_fill_super (struct super
+                         EXT3_HAS_INCOMPAT_FEATURE(sb,
+                                   EXT3_FEATURE_INCOMPAT_RECOVER));
++      if (EXT3_HAS_INCOMPAT_FEATURE(sb, EXT3_FEATURE_INCOMPAT_MMP) &&
++          !(sb->s_flags & MS_RDONLY))
++              if (ext3_multi_mount_protect(sb, le64_to_cpu(es->s_mmp_block)))
++                      goto failed_mount3;
++
+       /*
+        * The first inode we look at is the journal inode.  Don't try
+        * root first: it may be modified in the journal!
+@@ -2446,7 +2763,7 @@ static int ext3_remount (struct super_bl
+       ext3_fsblk_t n_blocks_count = 0;
+       unsigned long old_sb_flags;
+       struct ext3_mount_options old_opts;
+-      int err;
++      int err = 0;
+ #ifdef CONFIG_QUOTA
+       int i;
+ #endif
+@@ -2530,6 +2847,11 @@ static int ext3_remount (struct super_bl
+                       }
+                       if (!ext3_setup_super (sb, es, 0))
+                               sb->s_flags &= ~MS_RDONLY;
++                      if (EXT3_HAS_INCOMPAT_FEATURE(sb,
++                                                  EXT3_FEATURE_INCOMPAT_MMP))
++                              if (ext3_multi_mount_protect(sb,
++                                              le64_to_cpu(es->s_mmp_block)))
++                                      goto restore_opts;
+               }
+       }
+ #ifdef CONFIG_QUOTA
+Index: linux-2.6.18/include/linux/ext3_fs.h
+===================================================================
+--- linux-2.6.18.orig/include/linux/ext3_fs.h
++++ linux-2.6.18/include/linux/ext3_fs.h
+@@ -593,13 +593,17 @@ struct ext3_super_block {
+       __le32  s_first_meta_bg;        /* First metablock block group */
+       __le32  s_mkfs_time;            /* When the filesystem was created */
+       __le32  s_jnl_blocks[17];       /* Backup of the journal inode */
+-      __le32  s_blocks_count_hi;      /* Blocks count high 32 bits */
++/*150*/       __le32  s_blocks_count_hi;      /* Blocks count high 32 bits */
+       __le32  s_r_blocks_count_hi;    /* Reserved blocks count high 32 bits*/
+       __le32  s_free_blocks_count_hi; /* Free blocks count high 32 bits */
+       __le16  s_min_extra_isize;      /* All inodes have at least # bytes */
+       __le16  s_want_extra_isize;     /* New inodes should reserve # bytes */
+-      __le32  s_flags;                /* Miscellaneous flags */
+-      __u32   s_reserved[167];        /* Padding to the end of the block */
++/*160*/       __le32  s_flags;                /* Miscellaneous flags */
++      __le16  s_raid_stride;          /* RAID stride */
++      __le16  s_mmp_update_interval;  /* # seconds to wait in MMP checking */
++      __le64  s_mmp_block;            /* Block for multi-mount protection */
++/*170*/       __le32  s_raid_stripe_width;    /* blocks on all data disks (N*stride)*/
++      __le32  s_reserved[163];        /* Padding to the end of the block */
+ };
+ #ifdef __KERNEL__
+@@ -702,12 +706,14 @@ static inline int ext3_valid_inum(struct
+ #define EXT3_FEATURE_INCOMPAT_JOURNAL_DEV     0x0008 /* Journal device */
+ #define EXT3_FEATURE_INCOMPAT_META_BG         0x0010
+ #define EXT3_FEATURE_INCOMPAT_EXTENTS         0x0040 /* extents support */
++#define EXT3_FEATURE_INCOMPAT_MMP             0x0100
+ #define EXT3_FEATURE_COMPAT_SUPP      EXT2_FEATURE_COMPAT_EXT_ATTR
+ #define EXT3_FEATURE_INCOMPAT_SUPP    (EXT3_FEATURE_INCOMPAT_FILETYPE| \
+                                        EXT3_FEATURE_INCOMPAT_RECOVER| \
+                                        EXT3_FEATURE_INCOMPAT_META_BG| \
+-                                       EXT3_FEATURE_INCOMPAT_EXTENTS)
++                                       EXT3_FEATURE_INCOMPAT_EXTENTS| \
++                                       EXT3_FEATURE_INCOMPAT_MMP)
+ #define EXT3_FEATURE_RO_COMPAT_SUPP   (EXT3_FEATURE_RO_COMPAT_SPARSE_SUPER| \
+                                        EXT3_FEATURE_RO_COMPAT_LARGE_FILE| \
+                                        EXT4_FEATURE_RO_COMPAT_GDT_CSUM| \
+@@ -870,6 +876,39 @@ ext3_group_first_block_no(struct super_b
+ #define ERR_BAD_DX_DIR        -75000
+ /*
++ * This structure will be used for multiple mount protection. It will be
++ * written into the block number saved in the s_mmp_block field in the
++ * superblock. Programs that check MMP should assume that if
++ * SEQ_FSCK (or any unknown code above SEQ_MAX) is present then it is NOT safe
++ * to use the filesystem, regardless of how old the timestamp is.
++ */
++#define EXT3_MMP_MAGIC     0x004D4D50U /* ASCII for MMP */
++#define EXT3_MMP_SEQ_CLEAN 0xFF4D4D50U /* mmp_seq value for clean unmount */
++#define EXT3_MMP_SEQ_FSCK  0xE24D4D50U /* mmp_seq value when being fscked */
++#define EXT3_MMP_SEQ_MAX   0xE24D4D4FU /* maximum valid mmp_seq value */
++
++struct mmp_struct {
++      __le32  mmp_magic;
++      __le32  mmp_seq;
++      __le64  mmp_time;
++      char    mmp_nodename[64];
++      char    mmp_bdevname[32];
++      __le16  mmp_check_interval;
++      __le16  mmp_pad1;
++      __le32  mmp_pad2[227];
++};
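
The pad array sizes the structure to exactly 1024 bytes (4 + 4 + 8 + 64 + 32 + 2 + 2 + 227*4), so one MMP block holds a single whole structure. A compile-time check in the pre-static_assert style of this era (assuming the declaration above with no implicit padding):

    /* fails to compile if the layout drifts from 1024 bytes */
    typedef char mmp_struct_is_1024_bytes[
            sizeof(struct mmp_struct) == 1024 ? 1 : -1];
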
++
++/*
++ * Default interval in seconds to update the MMP sequence number.
++ */
++#define EXT3_MMP_UPDATE_INTERVAL   1
++
++/*
++ * Minimum interval for MMP checking in seconds.
++ */
++#define EXT3_MMP_MIN_CHECK_INTERVAL   5
++
++/*
+  * Function prototypes
+  */
+Index: linux-2.6.18/include/linux/ext3_fs_sb.h
+===================================================================
+--- linux-2.6.18.orig/include/linux/ext3_fs_sb.h
++++ linux-2.6.18/include/linux/ext3_fs_sb.h
+@@ -151,6 +151,7 @@ struct ext3_sb_info {
+       /* locality groups */
+       struct ext3_locality_group *s_locality_groups;
++      struct task_struct *s_mmp_tsk;  /* Kernel thread for multiple mount protection */
+ };
+ #define EXT3_GROUP_INFO(sb, group)                                       \
diff --git a/ldiskfs/kernel_patches/patches/ext3-nanosecond-2.6.22-vanilla.patch b/ldiskfs/kernel_patches/patches/ext3-nanosecond-2.6.22-vanilla.patch
new file mode 100644 (file)
index 0000000..1347a83
--- /dev/null
@@ -0,0 +1,407 @@
+Index: linux-2.6.18.8/fs/ext3/ialloc.c
+===================================================================
+--- linux-2.6.18.8.orig/fs/ext3/ialloc.c       2007-06-20 18:54:59.000000000 +0200
++++ linux-2.6.18.8/fs/ext3/ialloc.c    2007-06-20 18:54:59.000000000 +0200
+@@ -729,7 +729,8 @@ got:
+       /* This is the optimal IO size (for stat), not the fs block size */
+       inode->i_blksize = PAGE_SIZE;
+       inode->i_blocks = 0;
+-      inode->i_mtime = inode->i_atime = inode->i_ctime = CURRENT_TIME_SEC;
++      inode->i_mtime = inode->i_atime = inode->i_ctime = ei->i_crtime =
++                                                     ext3_current_time(inode);
+       memset(ei->i_data, 0, sizeof(ei->i_data));
+       ei->i_dir_start_lookup = 0;
+@@ -761,9 +762,8 @@ got:
+       spin_unlock(&sbi->s_next_gen_lock);
+       ei->i_state = EXT3_STATE_NEW;
+-      ei->i_extra_isize =
+-              (EXT3_INODE_SIZE(inode->i_sb) > EXT3_GOOD_OLD_INODE_SIZE) ?
+-              sizeof(struct ext3_inode) - EXT3_GOOD_OLD_INODE_SIZE : 0;
++
++      ei->i_extra_isize = EXT3_SB(sb)->s_want_extra_isize;
+       ret = inode;
+       if(DQUOT_ALLOC_INODE(inode)) {
+Index: linux-2.6.18.8/fs/ext3/inode.c
+===================================================================
+--- linux-2.6.18.8.orig/fs/ext3/inode.c        2007-06-20 18:54:52.000000000 +0200
++++ linux-2.6.18.8/fs/ext3/inode.c     2007-06-20 18:54:59.000000000 +0200
+@@ -727,7 +727,7 @@ static int ext3_splice_branch(handle_t *
+       /* We are done with atomic stuff, now do the rest of housekeeping */
+-      inode->i_ctime = CURRENT_TIME_SEC;
++      inode->i_ctime = ext3_current_time(inode);
+       ext3_mark_inode_dirty(handle, inode);
+       /* had we spliced it onto indirect block? */
+@@ -2375,7 +2375,7 @@ do_indirects:
+       ext3_discard_reservation(inode);
+       mutex_unlock(&ei->truncate_mutex);
+-      inode->i_mtime = inode->i_ctime = CURRENT_TIME_SEC;
++      inode->i_mtime = inode->i_ctime = ext3_current_time(inode);
+       ext3_mark_inode_dirty(handle, inode);
+       /*
+@@ -2611,10 +2611,6 @@ void ext3_read_inode(struct inode * inod
+       }
+       inode->i_nlink = le16_to_cpu(raw_inode->i_links_count);
+       inode->i_size = le32_to_cpu(raw_inode->i_size);
+-      inode->i_atime.tv_sec = (signed)le32_to_cpu(raw_inode->i_atime);
+-      inode->i_ctime.tv_sec = (signed)le32_to_cpu(raw_inode->i_ctime);
+-      inode->i_mtime.tv_sec = (signed)le32_to_cpu(raw_inode->i_mtime);
+-      inode->i_atime.tv_nsec = inode->i_ctime.tv_nsec = inode->i_mtime.tv_nsec = 0;
+       ei->i_state = 0;
+       ei->i_dir_start_lookup = 0;
+@@ -2689,6 +2685,11 @@ void ext3_read_inode(struct inode * inod
+       } else
+               ei->i_extra_isize = 0;
++      EXT3_INODE_GET_XTIME(i_ctime, inode, raw_inode);
++      EXT3_INODE_GET_XTIME(i_mtime, inode, raw_inode);
++      EXT3_INODE_GET_XTIME(i_atime, inode, raw_inode);
++      EXT3_EINODE_GET_XTIME(i_crtime, ei, raw_inode);
++
+       if (S_ISREG(inode->i_mode)) {
+               inode->i_op = &ext3_file_inode_operations;
+               inode->i_fop = &ext3_file_operations;
+@@ -2769,9 +2770,12 @@ static int ext3_do_update_inode(handle_t
+       }
+       raw_inode->i_links_count = cpu_to_le16(inode->i_nlink);
+       raw_inode->i_size = cpu_to_le32(ei->i_disksize);
+-      raw_inode->i_atime = cpu_to_le32(inode->i_atime.tv_sec);
+-      raw_inode->i_ctime = cpu_to_le32(inode->i_ctime.tv_sec);
+-      raw_inode->i_mtime = cpu_to_le32(inode->i_mtime.tv_sec);
++
++      EXT3_INODE_SET_XTIME(i_ctime, inode, raw_inode);
++      EXT3_INODE_SET_XTIME(i_mtime, inode, raw_inode);
++      EXT3_INODE_SET_XTIME(i_atime, inode, raw_inode);
++      EXT3_EINODE_SET_XTIME(i_crtime, ei, raw_inode);
++
+       raw_inode->i_blocks = cpu_to_le32(inode->i_blocks);
+       raw_inode->i_dtime = cpu_to_le32(ei->i_dtime);
+       raw_inode->i_flags = cpu_to_le32(ei->i_flags);
+Index: linux-2.6.18.8/fs/ext3/ioctl.c
+===================================================================
+--- linux-2.6.18.8.orig/fs/ext3/ioctl.c        2007-06-20 18:42:05.000000000 +0200
++++ linux-2.6.18.8/fs/ext3/ioctl.c     2007-06-20 18:54:59.000000000 +0200
+@@ -120,7 +120,7 @@ int ext3_ioctl (struct inode * inode, st
+               ei->i_flags = flags;
+               ext3_set_inode_flags(inode);
+-              inode->i_ctime = CURRENT_TIME_SEC;
++              inode->i_ctime = ext3_current_time(inode);
+               err = ext3_mark_iloc_dirty(handle, inode, &iloc);
+ flags_err:
+@@ -157,7 +157,7 @@ flags_err:
+                       return PTR_ERR(handle);
+               err = ext3_reserve_inode_write(handle, inode, &iloc);
+               if (err == 0) {
+-                      inode->i_ctime = CURRENT_TIME_SEC;
++                      inode->i_ctime = ext3_current_time(inode);
+                       inode->i_generation = generation;
+                       err = ext3_mark_iloc_dirty(handle, inode, &iloc);
+               }
+Index: linux-2.6.18.8/fs/ext3/namei.c
+===================================================================
+--- linux-2.6.18.8.orig/fs/ext3/namei.c        2007-06-20 18:54:53.000000000 +0200
++++ linux-2.6.18.8/fs/ext3/namei.c     2007-06-20 18:54:59.000000000 +0200
+@@ -1287,7 +1287,7 @@ static int add_dirent_to_buf(handle_t *h
+        * happen is that the times are slightly out of date
+        * and/or different from the directory change time.
+        */
+-      dir->i_mtime = dir->i_ctime = CURRENT_TIME_SEC;
++      dir->i_mtime = dir->i_ctime = ext3_current_time(dir);
+       ext3_update_dx_flag(dir);
+       dir->i_version++;
+       ext3_mark_inode_dirty(handle, dir);
+@@ -2079,7 +2079,7 @@ static int ext3_rmdir (struct inode * di
+       inode->i_version++;
+       inode->i_nlink = 0;
+       ext3_orphan_add(handle, inode);
+-      inode->i_ctime = dir->i_ctime = dir->i_mtime = CURRENT_TIME_SEC;
++      inode->i_ctime = dir->i_ctime = dir->i_mtime = ext3_current_time(inode);
+       ext3_mark_inode_dirty(handle, inode);
+       ext3_dec_count(handle, dir);
+       ext3_update_dx_flag(dir);
+@@ -2129,13 +2129,13 @@ static int ext3_unlink(struct inode * di
+       retval = ext3_delete_entry(handle, dir, de, bh);
+       if (retval)
+               goto end_unlink;
+-      dir->i_ctime = dir->i_mtime = CURRENT_TIME_SEC;
++      dir->i_ctime = dir->i_mtime = ext3_current_time(dir);
+       ext3_update_dx_flag(dir);
+       ext3_mark_inode_dirty(handle, dir);
+       ext3_dec_count(handle, inode);
+       if (!inode->i_nlink)
+               ext3_orphan_add(handle, inode);
+-      inode->i_ctime = dir->i_ctime;
++      inode->i_ctime = ext3_current_time(inode);
+       ext3_mark_inode_dirty(handle, inode);
+       retval = 0;
+@@ -2237,8 +2237,8 @@ retry:
+       if (IS_DIRSYNC(dir))
+               handle->h_sync = 1;
+-      inode->i_ctime = CURRENT_TIME_SEC;
++      inode->i_ctime = ext3_current_time(inode);
+-      inc_nlink(inode);
++      ext3_inc_count(handle, inode);
+       atomic_inc(&inode->i_count);
+       err = ext3_add_link(handle, dentry, inode);
+@@ -2340,7 +2340,7 @@ static int ext3_rename (struct inode * o
+        * Like most other Unix systems, set the ctime for inodes on a
+        * rename.
+        */
+-      old_inode->i_ctime = CURRENT_TIME_SEC;
++      old_inode->i_ctime = ext3_current_time(old_inode);
+       ext3_mark_inode_dirty(handle, old_inode);
+       /*
+@@ -2373,9 +2373,9 @@ static int ext3_rename (struct inode * o
+       if (new_inode) {
+               ext3_dec_count(handle, new_inode);
+-              new_inode->i_ctime = CURRENT_TIME_SEC;
++              new_inode->i_ctime = ext3_current_time(new_inode);
+       }
+-      old_dir->i_ctime = old_dir->i_mtime = CURRENT_TIME_SEC;
++      old_dir->i_ctime = old_dir->i_mtime = ext3_current_time(old_dir);
+       ext3_update_dx_flag(old_dir);
+       if (dir_bh) {
+               BUFFER_TRACE(dir_bh, "get_write_access");
+Index: linux-2.6.18.8/fs/ext3/super.c
+===================================================================
+--- linux-2.6.18.8.orig/fs/ext3/super.c        2007-06-20 18:54:59.000000000 +0200
++++ linux-2.6.18.8/fs/ext3/super.c     2007-06-20 18:54:59.000000000 +0200
+@@ -1713,6 +1713,8 @@ static int ext3_fill_super (struct super
+                               sbi->s_inode_size);
+                       goto failed_mount;
+               }
++              if (sbi->s_inode_size > EXT3_GOOD_OLD_INODE_SIZE)
++                      sb->s_time_gran = 1 << (EXT3_EPOCH_BITS - 2);
+       }
+       sbi->s_frag_size = EXT3_MIN_FRAG_SIZE <<
+                                  le32_to_cpu(es->s_log_frag_size);
+@@ -1917,6 +1919,32 @@ static int ext3_fill_super (struct super
+       }
+       ext3_setup_super (sb, es, sb->s_flags & MS_RDONLY);
++
++      /* determine the minimum size of new large inodes, if present */
++      if (sbi->s_inode_size > EXT3_GOOD_OLD_INODE_SIZE) {
++              sbi->s_want_extra_isize = sizeof(struct ext3_inode) -
++                                                   EXT3_GOOD_OLD_INODE_SIZE;
++              if (EXT3_HAS_RO_COMPAT_FEATURE(sb,
++                                     EXT4_FEATURE_RO_COMPAT_EXTRA_ISIZE)) {
++                      if (sbi->s_want_extra_isize <
++                          le16_to_cpu(es->s_want_extra_isize))
++                              sbi->s_want_extra_isize =
++                                      le16_to_cpu(es->s_want_extra_isize);
++                      if (sbi->s_want_extra_isize <
++                          le16_to_cpu(es->s_min_extra_isize))
++                              sbi->s_want_extra_isize =
++                                      le16_to_cpu(es->s_min_extra_isize);
++              }
++      }
++      /* Check if enough inode space is available */
++      if (EXT3_GOOD_OLD_INODE_SIZE + sbi->s_want_extra_isize >
++                                                      sbi->s_inode_size) {
++              sbi->s_want_extra_isize = sizeof(struct ext3_inode) -
++                                                     EXT3_GOOD_OLD_INODE_SIZE;
++              printk(KERN_INFO "EXT3-fs: required extra inode space not"
++                      "available.\n");
++      }
++
+       /*
+        * akpm: core read_super() calls in here with the superblock locked.
+        * That deadlocks, because orphan cleanup needs to lock the superblock
+Index: linux-2.6.18.8/fs/ext3/xattr.c
+===================================================================
+--- linux-2.6.18.8.orig/fs/ext3/xattr.c        2007-06-20 18:54:52.000000000 +0200
++++ linux-2.6.18.8/fs/ext3/xattr.c     2007-06-20 18:54:59.000000000 +0200
+@@ -1007,7 +1007,7 @@ ext3_xattr_set_handle(handle_t *handle, 
+       }
+       if (!error) {
+               ext3_xattr_update_super_block(handle, inode->i_sb);
+-              inode->i_ctime = CURRENT_TIME_SEC;
++              inode->i_ctime = ext3_current_time(inode);
+               error = ext3_mark_iloc_dirty(handle, inode, &is.iloc);
+               /*
+                * The bh is consumed by ext3_mark_iloc_dirty, even with
+Index: linux-2.6.18.8/include/linux/ext3_fs.h
+===================================================================
+--- linux-2.6.18.8.orig/include/linux/ext3_fs.h        2007-06-20 18:54:59.000000000 +0200
++++ linux-2.6.18.8/include/linux/ext3_fs.h     2007-06-20 18:54:59.000000000 +0200
+@@ -288,7 +288,7 @@ struct ext3_inode {
+       __le16  i_uid;          /* Low 16 bits of Owner Uid */
+       __le32  i_size;         /* Size in bytes */
+       __le32  i_atime;        /* Access time */
+-      __le32  i_ctime;        /* Creation time */
++      __le32  i_ctime;        /* Inode Change time */
+       __le32  i_mtime;        /* Modification time */
+       __le32  i_dtime;        /* Deletion Time */
+       __le16  i_gid;          /* Low 16 bits of Group Id */
+@@ -337,10 +337,73 @@ struct ext3_inode {
+       } osd2;                         /* OS dependent 2 */
+       __le16  i_extra_isize;
+       __le16  i_pad1;
++      __le32  i_ctime_extra;  /* extra Change time      (nsec << 2 | epoch) */
++      __le32  i_mtime_extra;  /* extra Modification time(nsec << 2 | epoch) */
++      __le32  i_atime_extra;  /* extra Access time      (nsec << 2 | epoch) */
++      __le32  i_crtime;       /* File Creation time */
++      __le32  i_crtime_extra; /* extra File Creation time (nsec << 2 | epoch) */
+ };
+ #define i_size_high   i_dir_acl
++#define EXT3_EPOCH_BITS 2
++#define EXT3_EPOCH_MASK ((1 << EXT3_EPOCH_BITS) - 1)
++#define EXT3_NSEC_MASK  (~0UL << EXT3_EPOCH_BITS)
++
++#define EXT3_FITS_IN_INODE(ext3_inode, einode, field) \
++      ((offsetof(typeof(*ext3_inode), field) +        \
++        sizeof((ext3_inode)->field))                  \
++      <= (EXT3_GOOD_OLD_INODE_SIZE +                  \
++          (einode)->i_extra_isize))                   \
++
++static inline __le32 ext3_encode_extra_time(struct timespec *time)
++{
++      return cpu_to_le32((sizeof(time->tv_sec) > 4 ?
++                          time->tv_sec >> 32 : 0) |
++                          ((time->tv_nsec << 2) & EXT3_NSEC_MASK));
++}
++
++static inline void ext3_decode_extra_time(struct timespec *time, __le32 extra) {
++      if (sizeof(time->tv_sec) > 4)
++              time->tv_sec |= (__u64)(le32_to_cpu(extra) & EXT3_EPOCH_MASK)
++                              << 32;
++      time->tv_nsec = (le32_to_cpu(extra) & EXT3_NSEC_MASK) >> 2;
++}
++
++#define EXT3_INODE_SET_XTIME(xtime, inode, raw_inode)                        \
++do {                                                                         \
++      (raw_inode)->xtime = cpu_to_le32((inode)->xtime.tv_sec);               \
++      if (EXT3_FITS_IN_INODE(raw_inode, EXT3_I(inode), xtime ## _extra))     \
++              (raw_inode)->xtime ## _extra =                                 \
++                              ext3_encode_extra_time(&(inode)->xtime);       \
++} while (0)
++
++#define EXT3_EINODE_SET_XTIME(xtime, einode, raw_inode)\
++do {                                                                         \
++      if (EXT3_FITS_IN_INODE(raw_inode, einode, xtime))                      \
++              (raw_inode)->xtime = cpu_to_le32((einode)->xtime.tv_sec);      \
++      if (EXT3_FITS_IN_INODE(raw_inode, einode, xtime ## _extra))            \
++              (raw_inode)->xtime ## _extra =                                 \
++                              ext3_encode_extra_time(&(einode)->xtime);      \
++} while (0)
++
++#define EXT3_INODE_GET_XTIME(xtime, inode, raw_inode)                        \
++do {                                                                         \
++      (inode)->xtime.tv_sec = (signed)le32_to_cpu((raw_inode)->xtime);               \
++      if (EXT3_FITS_IN_INODE(raw_inode, EXT3_I(inode), xtime ## _extra))     \
++              ext3_decode_extra_time(&(inode)->xtime,                        \
++                                     raw_inode->xtime ## _extra);            \
++} while (0)
++
++#define EXT3_EINODE_GET_XTIME(xtime, einode, raw_inode)                      \
++do {                                                                         \
++      if (EXT3_FITS_IN_INODE(raw_inode, einode, xtime))                      \
++              (einode)->xtime.tv_sec = (signed)le32_to_cpu((raw_inode)->xtime);      \
++      if (EXT3_FITS_IN_INODE(raw_inode, einode, xtime ## _extra))            \
++              ext3_decode_extra_time(&(einode)->xtime,                       \
++                                     raw_inode->xtime ## _extra);            \
++} while (0)
++
+ #if defined(__KERNEL__) || defined(__linux__)
+ #define i_reserved1   osd1.linux1.l_i_reserved1
+ #define i_frag                osd2.linux2.l_i_frag
+@@ -520,11 +583,19 @@ struct ext3_super_block {
+       __le32  s_last_orphan;          /* start of list of inodes to delete */
+       __le32  s_hash_seed[4];         /* HTREE hash seed */
+       __u8    s_def_hash_version;     /* Default hash version to use */
+-      __u8    s_reserved_char_pad;
+-      __u16   s_reserved_word_pad;
++      __u8    s_jnl_backup_type;      /* Default type of journal backup */
++      __le16  s_desc_size;            /* Group desc. size: INCOMPAT_64BIT */
+       __le32  s_default_mount_opts;
+-      __le32  s_first_meta_bg;        /* First metablock block group */
+-      __u32   s_reserved[190];        /* Padding to the end of the block */
++      __le32  s_first_meta_bg;        /* First metablock block group */
++      __le32  s_mkfs_time;            /* When the filesystem was created */
++      __le32  s_jnl_blocks[17];       /* Backup of the journal inode */
++      __le32  s_blocks_count_hi;      /* Blocks count high 32 bits */
++      __le32  s_r_blocks_count_hi;    /* Reserved blocks count high 32 bits*/
++      __le32  s_free_blocks_count_hi; /* Free blocks count high 32 bits */
++      __le16  s_min_extra_isize;      /* All inodes have at least # bytes */
++      __le16  s_want_extra_isize;     /* New inodes should reserve # bytes */
++      __le32  s_flags;                /* Miscellaneous flags */
++      __u32   s_reserved[167];        /* Padding to the end of the block */
+ };
+ #ifdef __KERNEL__
+@@ -539,6 +610,13 @@ static inline struct ext3_inode_info *EX
+       return container_of(inode, struct ext3_inode_info, vfs_inode);
+ }
++static inline struct timespec ext3_current_time(struct inode *inode)
++{
++      return (inode->i_sb->s_time_gran < NSEC_PER_SEC) ?
++              current_fs_time(inode->i_sb) : CURRENT_TIME_SEC;
++}
++
++
+ static inline int ext3_valid_inum(struct super_block *sb, unsigned long ino)
+ {
+       return ino == EXT3_ROOT_INO ||
+@@ -611,6 +689,8 @@ static inline int ext3_valid_inum(struct
+ #define EXT3_FEATURE_RO_COMPAT_BTREE_DIR      0x0004
+ #define EXT4_FEATURE_RO_COMPAT_GDT_CSUM               0x0010
+ #define EXT4_FEATURE_RO_COMPAT_DIR_NLINK      0x0020
++#define EXT4_FEATURE_RO_COMPAT_EXTRA_ISIZE    0x0040
++
+ #define EXT3_FEATURE_INCOMPAT_COMPRESSION     0x0001
+ #define EXT3_FEATURE_INCOMPAT_FILETYPE                0x0002
+@@ -628,6 +708,7 @@ static inline int ext3_valid_inum(struct
+                                        EXT3_FEATURE_RO_COMPAT_LARGE_FILE| \
+                                        EXT4_FEATURE_RO_COMPAT_GDT_CSUM| \
+                                        EXT4_FEATURE_RO_COMPAT_DIR_NLINK| \
++                                       EXT4_FEATURE_RO_COMPAT_EXTRA_ISIZE| \
+                                        EXT3_FEATURE_RO_COMPAT_BTREE_DIR)
+ /*
+Index: linux-2.6.18.8/include/linux/ext3_fs_sb.h
+===================================================================
+--- linux-2.6.18.8.orig/include/linux/ext3_fs_sb.h     2007-06-20 18:54:54.000000000 +0200
++++ linux-2.6.18.8/include/linux/ext3_fs_sb.h  2007-06-20 18:54:59.000000000 +0200
+@@ -68,6 +68,9 @@ struct ext3_sb_info {
+       /* Last group used to allocate inode */
+       int s_last_alloc_group;
++      /* New inodes should reserve # bytes */
++      unsigned int  s_want_extra_isize;
++
+       /* root of the per fs reservation window tree */
+       spinlock_t s_rsv_window_lock;
+       struct rb_root s_rsv_window_root;
+Index: linux-2.6.18.8/include/linux/ext3_fs_i.h
+===================================================================
+--- linux-2.6.18.8.orig/include/linux/ext3_fs_i.h      2007-06-20 18:54:57.000000000 +0200
++++ linux-2.6.18.8/include/linux/ext3_fs_i.h   2007-06-20 18:54:59.000000000 +0200
+@@ -140,6 +140,8 @@ struct ext3_inode_info {
+       /* on-disk additional length */
+       __u16 i_extra_isize;
++      struct timespec i_crtime;
++
+       /*
+        * truncate_mutex is for serialising ext3_truncate() against
+        * ext3_getblock().  In the 2.4 ext2 design, great chunks of inode's
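The *_extra encoding above squeezes a nanosecond count and two epoch bits into one 32-bit word. A standalone round-trip of the same arithmetic, with uint32_t standing in for __le32 (byte swapping omitted) and illustrative names:

#include <stdint.h>
#include <stdio.h>

#define EPOCH_BITS 2
#define EPOCH_MASK ((1u << EPOCH_BITS) - 1)
#define NSEC_MASK  (~0u << EPOCH_BITS)

/* Pack the high bits of a 34-bit second count plus nanoseconds. */
static uint32_t encode_extra(int64_t sec, uint32_t nsec)
{
	return (uint32_t)((sec >> 32) & EPOCH_MASK) |
	       ((nsec << EPOCH_BITS) & NSEC_MASK);
}

/* The decoder ORs the epoch bits into a tv_sec that already holds
 * the low 32 bits, exactly as EXT3_INODE_GET_XTIME does above. */
static void decode_extra(uint32_t extra, int64_t *sec, uint32_t *nsec)
{
	*sec |= (int64_t)(extra & EPOCH_MASK) << 32;
	*nsec = (extra & NSEC_MASK) >> EPOCH_BITS;
}

int main(void)
{
	int64_t sec = (1LL << 32) + 12345;      /* a post-2038 time */
	uint32_t extra = encode_extra(sec, 999999999);
	int64_t got_sec = (uint32_t)sec;        /* low 32 bits on disk */
	uint32_t got_nsec;

	decode_extra(extra, &got_sec, &got_nsec);
	printf("sec=%lld nsec=%u\n", (long long)got_sec, got_nsec);
	return 0;
}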
diff --git a/ldiskfs/kernel_patches/patches/ext3-nlinks-2.6.22-vanilla.patch b/ldiskfs/kernel_patches/patches/ext3-nlinks-2.6.22-vanilla.patch
new file mode 100644 (file)
index 0000000..761c72c
--- /dev/null
@@ -0,0 +1,180 @@
+Index: linux-2.6.12/fs/ext3/namei.c
+===================================================================
+--- linux-2.6.12.orig/fs/ext3/namei.c
++++ linux-2.6.12/fs/ext3/namei.c
+@@ -1600,6 +1600,22 @@
+       return -ENOENT;
+ }
++static inline void ext3_inc_count(handle_t * handle, struct inode *inode)
++{
++      inc_nlink(inode);
++      if (is_dx(inode) && inode->i_nlink > 1) {
++              /* limit is 16-bit i_links_count */
++              if (inode->i_nlink >= EXT3_LINK_MAX || inode->i_nlink == 2)
++                      inode->i_nlink = 1;
++      }
++}
++
++static inline void ext3_dec_count(handle_t * handle, struct inode *inode)
++{
++      if (!S_ISDIR(inode->i_mode) || inode->i_nlink > 2)
++              drop_nlink(inode);
++}
++
+ static int ext3_add_nondir(handle_t *handle,
+               struct dentry *dentry, struct inode *inode)
+ {
+@@ -1659,7 +1659,7 @@ static int ext3_add_nondir(handle_t *han
+               d_instantiate(dentry, inode);
+               return 0;
+       }
+-      drop_nlink(inode);
++      ext3_dec_count(handle, inode);
+       iput(inode);
+       return err;
+ }
+@@ -1703,7 +1709,7 @@ static int ext3_mkdir(struct inode * dir
+       struct ext3_dir_entry_2 * de;
+       int err, retries = 0;
+-      if (dir->i_nlink >= EXT3_LINK_MAX)
++      if (EXT3_DIR_LINK_MAX(dir))
+               return -EMLINK;
+ retry:
+@@ -1726,7 +1732,7 @@ retry:
+       inode->i_size = EXT3_I(inode)->i_disksize = inode->i_sb->s_blocksize;
+       dir_block = ext3_bread (handle, inode, 0, 1, &err);
+       if (!dir_block) {
+-              drop_nlink(inode); /* is this nlink == 0? */
++              ext3_dec_count(handle, inode); /* is this nlink == 0? */
+               ext3_mark_inode_dirty(handle, inode);
+               iput (inode);
+               goto out_stop;
+@@ -1758,7 +1764,7 @@ retry:
+               iput (inode);
+               goto out_stop;
+       }
+-      inc_nlink(dir);
++      ext3_inc_count(handle, dir);
+       ext3_update_dx_flag(dir);
+       ext3_mark_inode_dirty(handle, dir);
+       d_instantiate(dentry, inode);
+@@ -2023,10 +2029,10 @@ static int ext3_rmdir (struct inode * di
+       retval = ext3_delete_entry(handle, dir, de, bh);
+       if (retval)
+               goto end_rmdir;
+-      if (inode->i_nlink != 2)
+-              ext3_warning (inode->i_sb, "ext3_rmdir",
+-                            "empty directory has nlink!=2 (%d)",
+-                            inode->i_nlink);
++      if (!EXT3_DIR_LINK_EMPTY(inode))
++              ext3_warning(inode->i_sb, "ext3_rmdir",
++                           "empty directory has too many links (%d)",
++                           inode->i_nlink);
+       inode->i_version++;
+       inode->i_nlink = 0;
+       /* There's no need to set i_disksize: the fact that i_nlink is
+@@ -2036,7 +2042,7 @@ static int ext3_rmdir (struct inode * di
+       ext3_orphan_add(handle, inode);
+       inode->i_ctime = dir->i_ctime = dir->i_mtime = CURRENT_TIME_SEC;
+       ext3_mark_inode_dirty(handle, inode);
+-      drop_nlink(dir);
++      ext3_dec_count(handle, dir);
+       ext3_update_dx_flag(dir);
+       ext3_mark_inode_dirty(handle, dir);
+@@ -2087,7 +2093,7 @@ static int ext3_unlink(struct inode * di
+       dir->i_ctime = dir->i_mtime = CURRENT_TIME_SEC;
+       ext3_update_dx_flag(dir);
+       ext3_mark_inode_dirty(handle, dir);
+-      drop_nlink(inode);
++      ext3_dec_count(handle, inode);
+       if (!inode->i_nlink)
+               ext3_orphan_add(handle, inode);
+       inode->i_ctime = dir->i_ctime;
+@@ -2160,7 +2190,7 @@ retry:
+               err = __page_symlink(inode, symname, l,
+                               mapping_gfp_mask(inode->i_mapping) & ~__GFP_FS);
+               if (err) {
+-                      drop_nlink(inode);
++                      ext3_dec_count(handle, inode);
+                       ext3_mark_inode_dirty(handle, inode);
+                       iput (inode);
+                       goto out_stop;
+@@ -2162,7 +2168,7 @@ static int ext3_link (struct dentry * ol
+       struct inode *inode = old_dentry->d_inode;
+       int err, retries = 0;
+-      if (inode->i_nlink >= EXT3_LINK_MAX)
++      if (EXT3_DIR_LINK_MAX(inode))
+               return -EMLINK;
+ retry:
+@@ -2249,8 +2255,8 @@ static int ext3_rename (struct inode * o
+               if (le32_to_cpu(PARENT_INO(dir_bh->b_data)) != old_dir->i_ino)
+                       goto end_rename;
+               retval = -EMLINK;
+-              if (!new_inode && new_dir!=old_dir &&
+-                              new_dir->i_nlink >= EXT3_LINK_MAX)
++              if (!new_inode && new_dir != old_dir &&
++                  EXT3_DIR_LINK_MAX(new_dir))
+                       goto end_rename;
+       }
+       if (!new_bh) {
+@@ -2307,7 +2313,7 @@ static int ext3_rename (struct inode * o
+       }
+       if (new_inode) {
+-              drop_nlink(new_inode);
++              ext3_dec_count(handle, new_inode);
+               new_inode->i_ctime = CURRENT_TIME_SEC;
+       }
+       old_dir->i_ctime = old_dir->i_mtime = CURRENT_TIME_SEC;
+@@ -2318,11 +2324,13 @@ static int ext3_rename (struct inode * o
+               PARENT_INO(dir_bh->b_data) = cpu_to_le32(new_dir->i_ino);
+               BUFFER_TRACE(dir_bh, "call ext3_journal_dirty_metadata");
+               ext3_journal_dirty_metadata(handle, dir_bh);
+-              drop_nlink(old_dir);
++              ext3_dec_count(handle, old_dir);
+               if (new_inode) {
+-                      drop_nlink(new_inode);
++                      /* checked empty_dir above, can't have another parent,
++                       * ext3_dec_count() won't work for many-linked dirs */
++                      new_inode->i_nlink = 0;
+               } else {
+-                      inc_nlink(new_dir);
++                      ext3_inc_count(handle, new_dir);
+                       ext3_update_dx_flag(new_dir);
+                       ext3_mark_inode_dirty(handle, new_dir);
+               }
+Index: linux-2.6.12/include/linux/ext3_fs.h
+===================================================================
+--- linux-2.6.12.orig/include/linux/ext3_fs.h
++++ linux-2.6.12/include/linux/ext3_fs.h
+@@ -78,7 +78,7 @@ struct statfs;
+ /*
+  * Maximal count of links to a file
+  */
+-#define EXT3_LINK_MAX         32000
++#define EXT3_LINK_MAX         65000
+ /*
+  * Macro-instructions used to manage several block sizes
+@@ -539,6 +539,7 @@ static inline struct ext3_inode_info *EX
+ #define EXT3_FEATURE_RO_COMPAT_SPARSE_SUPER   0x0001
+ #define EXT3_FEATURE_RO_COMPAT_LARGE_FILE     0x0002
+ #define EXT3_FEATURE_RO_COMPAT_BTREE_DIR      0x0004
++#define EXT4_FEATURE_RO_COMPAT_DIR_NLINK      0x0020
+ #define EXT3_FEATURE_INCOMPAT_COMPRESSION     0x0001
+ #define EXT3_FEATURE_INCOMPAT_FILETYPE                0x0002
+@@ -552,6 +553,7 @@ static inline struct ext3_inode_info *EX
+                                        EXT3_FEATURE_INCOMPAT_META_BG)
+ #define EXT3_FEATURE_RO_COMPAT_SUPP   (EXT3_FEATURE_RO_COMPAT_SPARSE_SUPER| \
+                                        EXT3_FEATURE_RO_COMPAT_LARGE_FILE| \
++                                       EXT4_FEATURE_RO_COMPAT_DIR_NLINK| \
+                                        EXT3_FEATURE_RO_COMPAT_BTREE_DIR)
+ /*
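One user-visible consequence of pinning i_nlink at 1 for many-linked directories: stat() on such a directory returns st_nlink == 1, the conventional "subdirectory count unknown" marker, so traversal tools must not derive the subdirectory count from the link count. A hypothetical userspace check (the path is made up):

#include <stdio.h>
#include <sys/stat.h>

int main(void)
{
	struct stat st;

	if (stat("/mnt/bigdir", &st) != 0) {
		perror("stat");
		return 1;
	}
	if (S_ISDIR(st.st_mode) && st.st_nlink == 1)
		printf("subdir count untracked; do not optimize on nlink\n");
	else
		printf("nlink=%lu\n", (unsigned long)st.st_nlink);
	return 0;
}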
diff --git a/ldiskfs/kernel_patches/patches/ext3-statfs-2.6.22.patch b/ldiskfs/kernel_patches/patches/ext3-statfs-2.6.22.patch
new file mode 100644 (file)
index 0000000..19afc51
--- /dev/null
@@ -0,0 +1,71 @@
+Index: linux-2.6.18.8/fs/ext3/super.c
+===================================================================
+--- linux-2.6.18.8.orig/fs/ext3/super.c        2007-07-20 16:51:14.000000000 +0200
++++ linux-2.6.18.8/fs/ext3/super.c     2007-07-20 16:54:17.000000000 +0200
+@@ -2572,19 +2572,19 @@ static int ext3_statfs (struct dentry * 
+       struct super_block *sb = dentry->d_sb;
+       struct ext3_sb_info *sbi = EXT3_SB(sb);
+       struct ext3_super_block *es = sbi->s_es;
+-      ext3_fsblk_t overhead;
+-      int i;
+       u64 fsid;
+-      if (test_opt (sb, MINIX_DF))
+-              overhead = 0;
+-      else {
+-              unsigned long ngroups;
+-              ngroups = EXT3_SB(sb)->s_groups_count;
++      if (test_opt(sb, MINIX_DF)) {
++              sbi->s_overhead_last = 0;
++      } else if (sbi->s_blocks_last != le32_to_cpu(es->s_blocks_count)) {
++              unsigned long ngroups = sbi->s_groups_count, i;
++              ext3_fsblk_t overhead = 0;
+               smp_rmb();
+               /*
+-               * Compute the overhead (FS structures)
++               * Compute the overhead (FS structures).  This is constant
++               * for a given filesystem unless the number of block groups
++               * changes so we cache the previous value until it does.
+                */
+               /*
+@@ -2605,18 +2605,23 @@ static int ext3_statfs (struct dentry * 
+                * Every block group has an inode bitmap, a block
+                * bitmap, and an inode table.
+                */
+-              overhead += (ngroups * (2 + EXT3_SB(sb)->s_itb_per_group));
++              overhead += ngroups * (2 + sbi->s_itb_per_group);
++              sbi->s_overhead_last = overhead;
++              smp_wmb();
++              sbi->s_blocks_last = le32_to_cpu(es->s_blocks_count);
+       }
+       buf->f_type = EXT3_SUPER_MAGIC;
+       buf->f_bsize = sb->s_blocksize;
+-      buf->f_blocks = le32_to_cpu(es->s_blocks_count) - overhead;
++      buf->f_blocks = le32_to_cpu(es->s_blocks_count) - sbi->s_overhead_last;
+       buf->f_bfree = percpu_counter_sum(&sbi->s_freeblocks_counter);
++      es->s_free_blocks_count = cpu_to_le32(buf->f_bfree);
+       buf->f_bavail = buf->f_bfree - le32_to_cpu(es->s_r_blocks_count);
+       if (buf->f_bfree < le32_to_cpu(es->s_r_blocks_count))
+               buf->f_bavail = 0;
+       buf->f_files = le32_to_cpu(es->s_inodes_count);
+       buf->f_ffree = percpu_counter_sum(&sbi->s_freeinodes_counter);
++      es->s_free_inodes_count = cpu_to_le32(buf->f_ffree);
+       buf->f_namelen = EXT3_NAME_LEN;
+       return 0;
+ }
+Index: linux-2.6.18.8/include/linux/ext3_fs_sb.h
+===================================================================
+--- linux-2.6.18.8.orig/include/linux/ext3_fs_sb.h     2007-07-20 16:51:23.000000000 +0200
++++ linux-2.6.18.8/include/linux/ext3_fs_sb.h  2007-07-20 16:51:43.000000000 +0200
+@@ -45,6 +45,8 @@ struct ext3_sb_info {
+       unsigned long s_gdb_count;      /* Number of group descriptor blocks */
+       unsigned long s_desc_per_block; /* Number of group descriptors per block */
+       unsigned long s_groups_count;   /* Number of groups in the fs */
++      unsigned long s_overhead_last;  /* Last calculated overhead */
++      unsigned long s_blocks_last;    /* Last seen block count */
+       struct buffer_head * s_sbh;     /* Buffer containing the super block */
+       struct ext3_super_block * s_es; /* Pointer to the super block in the buffer */
+       struct buffer_head ** s_group_desc;
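The s_overhead_last/s_blocks_last pair added above is a cache keyed on the one input that can change at runtime, the total block count (e.g. after an online resize). A toy model of the same invalidation pattern, with made-up numbers and names:

#include <stdio.h>

struct fsinfo {
	unsigned long blocks_count;   /* from the superblock */
	unsigned long groups_count;
	unsigned long itb_per_group;
	unsigned long overhead_last;  /* cached result */
	unsigned long blocks_last;    /* cache key */
};

/* Stand-in for the per-group walk in ext3_statfs(): two bitmaps and
 * an inode table per group; sb/gdt backups are ignored here. */
static unsigned long compute_overhead(const struct fsinfo *fs)
{
	return fs->groups_count * (2 + fs->itb_per_group);
}

static unsigned long cached_overhead(struct fsinfo *fs)
{
	if (fs->blocks_last != fs->blocks_count) {
		fs->overhead_last = compute_overhead(fs);
		fs->blocks_last = fs->blocks_count;
	}
	return fs->overhead_last;
}

int main(void)
{
	struct fsinfo fs = { 1048576, 32, 512, 0, 0 };

	printf("%lu\n", cached_overhead(&fs)); /* computed once */
	printf("%lu\n", cached_overhead(&fs)); /* served from cache */
	return 0;
}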
diff --git a/ldiskfs/kernel_patches/patches/ext3-uninit-2.6.22-vanilla.patch b/ldiskfs/kernel_patches/patches/ext3-uninit-2.6.22-vanilla.patch
new file mode 100644 (file)
index 0000000..fb63542
--- /dev/null
@@ -0,0 +1,664 @@
+Add support for the uninit_groups feature to the kernel.
+
+Keep a high water mark of used inodes for each group to improve e2fsck time.
+Block and inode bitmaps can be uninitialized on disk via a flag in the
+group descriptor to avoid reading or scanning them at e2fsck time.
+A checksum of each group descriptor is used to ensure that corruption in
+the group descriptor's bit flags does not cause incorrect operation.
+
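For reference, the checksum described here covers the filesystem UUID, the little-endian group number, and the descriptor bytes up to (but not including) bg_checksum. A standalone sketch of that computation follows; the bit-at-a-time CRC is equivalent to the table-driven crc16() this patch adds, and a little-endian host is assumed.

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

struct gdesc {                        /* patched 32-byte layout */
	uint32_t bg_block_bitmap;
	uint32_t bg_inode_bitmap;
	uint32_t bg_inode_table;
	uint16_t bg_free_blocks_count;
	uint16_t bg_free_inodes_count;
	uint16_t bg_used_dirs_count;
	uint16_t bg_flags;
	uint32_t bg_reserved[2];
	uint16_t bg_itable_unused;
	uint16_t bg_checksum;
};

/* Bit-at-a-time CRC-16, poly 0x8005 reflected (0xA001); equivalent
 * to the 256-entry crc16_table version added below. */
static uint16_t crc16_update(uint16_t crc, const uint8_t *p, size_t len)
{
	while (len--) {
		crc ^= *p++;
		for (int i = 0; i < 8; i++)
			crc = (crc & 1) ? (crc >> 1) ^ 0xA001 : crc >> 1;
	}
	return crc;
}

static uint16_t group_desc_csum(const uint8_t uuid[16], uint32_t group,
				const struct gdesc *g)
{
	uint32_t le_group = group;    /* assumes little-endian host */
	uint16_t crc = crc16_update(0xFFFF, uuid, 16);

	crc = crc16_update(crc, (const uint8_t *)&le_group, sizeof(le_group));
	return crc16_update(crc, (const uint8_t *)g,
			    offsetof(struct gdesc, bg_checksum));
}

int main(void)
{
	const uint8_t uuid[16] = { 0 };
	struct gdesc g = { 0 };

	printf("csum=0x%04x\n", group_desc_csum(uuid, 0, &g));
	return 0;
}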
+Index: linux-rhel5/include/linux/ext3_fs.h
+===================================================================
+--- linux-rhel5.orig/include/linux/ext3_fs.h   2007-07-18 17:32:04.000000000 +0200
++++ linux-rhel5/include/linux/ext3_fs.h        2007-07-18 17:32:15.000000000 +0200
+@@ -150,16 +150,22 @@ struct ext3_allocation_request {
+  */
+ struct ext3_group_desc
+ {
+-      __le32  bg_block_bitmap;                /* Blocks bitmap block */
+-      __le32  bg_inode_bitmap;                /* Inodes bitmap block */
++      __le32  bg_block_bitmap;        /* Blocks bitmap block */
++      __le32  bg_inode_bitmap;        /* Inodes bitmap block */
+       __le32  bg_inode_table;         /* Inodes table block */
+       __le16  bg_free_blocks_count;   /* Free blocks count */
+       __le16  bg_free_inodes_count;   /* Free inodes count */
+       __le16  bg_used_dirs_count;     /* Directories count */
+-      __u16   bg_pad;
+-      __le32  bg_reserved[3];
++      __le16  bg_flags;               /* EXT3_BG_flags (UNINIT, etc) */
++      __le32  bg_reserved[2];         /* Likely block/inode bitmap checksum */
++      __le16  bg_itable_unused;       /* Unused inodes count */
++      __le16  bg_checksum;            /* crc16(sb_uuid+group+desc) */
+ };
++#define EXT3_BG_INODE_UNINIT  0x0001 /* Inode table/bitmap not in use */
++#define EXT3_BG_BLOCK_UNINIT  0x0002 /* Block bitmap not in use */
++#define EXT3_BG_INODE_ZEROED  0x0004 /* On-disk itable initialized to zero */
++
+ /*
+  * Macro-instructions used to manage group descriptors
+  */
+@@ -603,6 +609,7 @@ static inline int ext3_valid_inum(struct
+ #define EXT3_FEATURE_RO_COMPAT_SPARSE_SUPER   0x0001
+ #define EXT3_FEATURE_RO_COMPAT_LARGE_FILE     0x0002
+ #define EXT3_FEATURE_RO_COMPAT_BTREE_DIR      0x0004
++#define EXT4_FEATURE_RO_COMPAT_GDT_CSUM               0x0010
+ #define EXT4_FEATURE_RO_COMPAT_DIR_NLINK      0x0020
+ #define EXT3_FEATURE_INCOMPAT_COMPRESSION     0x0001
+@@ -619,6 +626,7 @@ static inline int ext3_valid_inum(struct
+                                        EXT3_FEATURE_INCOMPAT_EXTENTS)
+ #define EXT3_FEATURE_RO_COMPAT_SUPP   (EXT3_FEATURE_RO_COMPAT_SPARSE_SUPER| \
+                                        EXT3_FEATURE_RO_COMPAT_LARGE_FILE| \
++                                       EXT4_FEATURE_RO_COMPAT_GDT_CSUM| \
+                                        EXT4_FEATURE_RO_COMPAT_DIR_NLINK| \
+                                        EXT3_FEATURE_RO_COMPAT_BTREE_DIR)
+Index: linux-rhel5/fs/ext3/resize.c
+===================================================================
+--- linux-rhel5.orig/fs/ext3/resize.c  2007-07-15 09:36:00.000000000 +0200
++++ linux-rhel5/fs/ext3/resize.c       2007-07-18 17:32:15.000000000 +0200
+@@ -18,6 +18,7 @@
+ #include <linux/errno.h>
+ #include <linux/slab.h>
++#include "group.h"
+ #define outside(b, first, last)       ((b) < (first) || (b) >= (last))
+ #define inside(b, first, last)        ((b) >= (first) && (b) < (last))
+@@ -834,6 +835,7 @@ int ext3_group_add(struct super_block *s
+       gdp->bg_inode_table = cpu_to_le32(input->inode_table);
+       gdp->bg_free_blocks_count = cpu_to_le16(input->free_blocks_count);
+       gdp->bg_free_inodes_count = cpu_to_le16(EXT3_INODES_PER_GROUP(sb));
++      gdp->bg_checksum = ext3_group_desc_csum(sbi, input->group, gdp);
+       /*
+        * Make the new blocks and inodes valid next.  We do this before
+Index: linux-rhel5/fs/ext3/super.c
+===================================================================
+--- linux-rhel5.orig/fs/ext3/super.c   2007-07-18 17:32:06.000000000 +0200
++++ linux-rhel5/fs/ext3/super.c        2007-07-18 17:35:03.000000000 +0200
+@@ -41,6 +41,7 @@
+ #include "xattr.h"
+ #include "acl.h"
+ #include "namei.h"
++#include "group.h"
+ static int ext3_load_journal(struct super_block *, struct ext3_super_block *,
+                            unsigned long journal_devnum);
+@@ -1225,6 +1226,91 @@ static int ext3_setup_super(struct super
+       return res;
+ }
++#if !defined(CONFIG_CRC16) && !defined(CONFIG_CRC16_MODULE)
++/** CRC table for the CRC-16. The poly is 0x8005 (x^16 + x^15 + x^2 + 1) */
++__u16 const crc16_table[256] = {
++      0x0000, 0xC0C1, 0xC181, 0x0140, 0xC301, 0x03C0, 0x0280, 0xC241,
++      0xC601, 0x06C0, 0x0780, 0xC741, 0x0500, 0xC5C1, 0xC481, 0x0440,
++      0xCC01, 0x0CC0, 0x0D80, 0xCD41, 0x0F00, 0xCFC1, 0xCE81, 0x0E40,
++      0x0A00, 0xCAC1, 0xCB81, 0x0B40, 0xC901, 0x09C0, 0x0880, 0xC841,
++      0xD801, 0x18C0, 0x1980, 0xD941, 0x1B00, 0xDBC1, 0xDA81, 0x1A40,
++      0x1E00, 0xDEC1, 0xDF81, 0x1F40, 0xDD01, 0x1DC0, 0x1C80, 0xDC41,
++      0x1400, 0xD4C1, 0xD581, 0x1540, 0xD701, 0x17C0, 0x1680, 0xD641,
++      0xD201, 0x12C0, 0x1380, 0xD341, 0x1100, 0xD1C1, 0xD081, 0x1040,
++      0xF001, 0x30C0, 0x3180, 0xF141, 0x3300, 0xF3C1, 0xF281, 0x3240,
++      0x3600, 0xF6C1, 0xF781, 0x3740, 0xF501, 0x35C0, 0x3480, 0xF441,
++      0x3C00, 0xFCC1, 0xFD81, 0x3D40, 0xFF01, 0x3FC0, 0x3E80, 0xFE41,
++      0xFA01, 0x3AC0, 0x3B80, 0xFB41, 0x3900, 0xF9C1, 0xF881, 0x3840,
++      0x2800, 0xE8C1, 0xE981, 0x2940, 0xEB01, 0x2BC0, 0x2A80, 0xEA41,
++      0xEE01, 0x2EC0, 0x2F80, 0xEF41, 0x2D00, 0xEDC1, 0xEC81, 0x2C40,
++      0xE401, 0x24C0, 0x2580, 0xE541, 0x2700, 0xE7C1, 0xE681, 0x2640,
++      0x2200, 0xE2C1, 0xE381, 0x2340, 0xE101, 0x21C0, 0x2080, 0xE041,
++      0xA001, 0x60C0, 0x6180, 0xA141, 0x6300, 0xA3C1, 0xA281, 0x6240,
++      0x6600, 0xA6C1, 0xA781, 0x6740, 0xA501, 0x65C0, 0x6480, 0xA441,
++      0x6C00, 0xACC1, 0xAD81, 0x6D40, 0xAF01, 0x6FC0, 0x6E80, 0xAE41,
++      0xAA01, 0x6AC0, 0x6B80, 0xAB41, 0x6900, 0xA9C1, 0xA881, 0x6840,
++      0x7800, 0xB8C1, 0xB981, 0x7940, 0xBB01, 0x7BC0, 0x7A80, 0xBA41,
++      0xBE01, 0x7EC0, 0x7F80, 0xBF41, 0x7D00, 0xBDC1, 0xBC81, 0x7C40,
++      0xB401, 0x74C0, 0x7580, 0xB541, 0x7700, 0xB7C1, 0xB681, 0x7640,
++      0x7200, 0xB2C1, 0xB381, 0x7340, 0xB101, 0x71C0, 0x7080, 0xB041,
++      0x5000, 0x90C1, 0x9181, 0x5140, 0x9301, 0x53C0, 0x5280, 0x9241,
++      0x9601, 0x56C0, 0x5780, 0x9741, 0x5500, 0x95C1, 0x9481, 0x5440,
++      0x9C01, 0x5CC0, 0x5D80, 0x9D41, 0x5F00, 0x9FC1, 0x9E81, 0x5E40,
++      0x5A00, 0x9AC1, 0x9B81, 0x5B40, 0x9901, 0x59C0, 0x5880, 0x9841,
++      0x8801, 0x48C0, 0x4980, 0x8941, 0x4B00, 0x8BC1, 0x8A81, 0x4A40,
++      0x4E00, 0x8EC1, 0x8F81, 0x4F40, 0x8D01, 0x4DC0, 0x4C80, 0x8C41,
++      0x4400, 0x84C1, 0x8581, 0x4540, 0x8701, 0x47C0, 0x4680, 0x8641,
++      0x8201, 0x42C0, 0x4380, 0x8341, 0x4100, 0x81C1, 0x8081, 0x4040
++};
++
++static inline __u16 crc16_byte(__u16 crc, const __u8 data)
++{
++      return (crc >> 8) ^ crc16_table[(crc ^ data) & 0xff];
++}
++
++__u16 crc16(__u16 crc, __u8 const *buffer, size_t len)
++{
++      while (len--)
++              crc = crc16_byte(crc, *buffer++);
++      return crc;
++}
++#endif
++
++__le16 ext3_group_desc_csum(struct ext3_sb_info *sbi, __u32 block_group,
++                          struct ext3_group_desc *gdp)
++{
++      __u16 crc = 0;
++
++      if (sbi->s_es->s_feature_ro_compat &
++          cpu_to_le32(EXT4_FEATURE_RO_COMPAT_GDT_CSUM)) {
++              int offset = offsetof(struct ext3_group_desc, bg_checksum);
++              __le32 le_group = cpu_to_le32(block_group);
++
++              crc = crc16(~0, sbi->s_es->s_uuid, sizeof(sbi->s_es->s_uuid));
++              crc = crc16(crc, (__u8 *)&le_group, sizeof(le_group));
++              crc = crc16(crc, (__u8 *)gdp, offset);
++              offset += sizeof(gdp->bg_checksum); /* skip checksum */
++              BUG_ON(offset != sizeof(*gdp)); /* XXX handle s_desc_size */
++              /* for checksum of struct ext4_group_desc do the rest...
++              if ((sbi->s_es->s_feature_incompat &
++                   cpu_to_le32(EXT4_FEATURE_INCOMPAT_64BIT)) &&
++                   offset < le16_to_cpu(sbi->s_es->s_desc_size)) {
++                      crc = crc16(crc, (__u8 *)gdp + offset,
++                                  le16_to_cpu(sbi->s_es->s_desc_size) -
++                                  offset);
++              */
++      }
++
++      return cpu_to_le16(crc);
++}
++
++int ext3_group_desc_csum_verify(struct ext3_sb_info *sbi, __u32 block_group,
++                              struct ext3_group_desc *gdp)
++{
++      return (gdp->bg_checksum ==
++                      ext3_group_desc_csum(sbi, block_group, gdp));
++}
++
+ /* Called at mount-time, super-block is locked */
+ static int ext3_check_descriptors (struct super_block * sb)
+ {
+@@ -1279,6 +1365,13 @@ static int ext3_check_descriptors (struc
+                                       le32_to_cpu(gdp->bg_inode_table));
+                       return 0;
+               }
++              if (!ext3_group_desc_csum_verify(sbi, i, gdp)) {
++                      ext3_error(sb, __FUNCTION__,
++                                 "Checksum for group %d failed (%u!=%u)\n", i,
++                                 le16_to_cpu(ext3_group_desc_csum(sbi,i,gdp)),
++                                 le16_to_cpu(gdp->bg_checksum));
++                      return 0;
++              }
+               first_block += EXT3_BLOCKS_PER_GROUP(sb);
+               gdp++;
+       }
+Index: linux-rhel5/fs/ext3/group.h
+===================================================================
+--- /dev/null  1970-01-01 00:00:00.000000000 +0000
++++ linux-rhel5/fs/ext3/group.h        2007-07-18 17:32:15.000000000 +0200
+@@ -0,0 +1,29 @@
++/*
++ *  linux/fs/ext3/group.h
++ *
++ * Copyright (C) 2007 Cluster File Systems, Inc
++ *
++ * Author: Andreas Dilger <adilger@clusterfs.com>
++ */
++
++#ifndef _LINUX_EXT3_GROUP_H
++#define _LINUX_EXT3_GROUP_H
++#if defined(CONFIG_CRC16) || defined(CONFIG_CRC16_MODULE)
++#include <linux/crc16.h>
++#endif
++
++extern __le16 ext3_group_desc_csum(struct ext3_sb_info *sbi, __u32 group,
++                                 struct ext3_group_desc *gdp);
++extern int ext3_group_desc_csum_verify(struct ext3_sb_info *sbi, __u32 group,
++                                     struct ext3_group_desc *gdp);
++struct buffer_head *read_block_bitmap(struct super_block *sb,
++                                    unsigned int block_group);
++extern unsigned ext3_init_block_bitmap(struct super_block *sb,
++                                     struct buffer_head *bh, int group,
++                                     struct ext3_group_desc *desc);
++#define ext3_free_blocks_after_init(sb, group, desc)                  \
++              ext3_init_block_bitmap(sb, NULL, group, desc)
++extern unsigned ext3_init_inode_bitmap(struct super_block *sb,
++                                     struct buffer_head *bh, int group,
++                                     struct ext3_group_desc *desc);
++#endif /* _LINUX_EXT3_GROUP_H */
+Index: linux-rhel5/fs/ext3/ialloc.c
+===================================================================
+--- linux-rhel5.orig/fs/ext3/ialloc.c  2007-07-18 17:32:05.000000000 +0200
++++ linux-rhel5/fs/ext3/ialloc.c       2007-07-18 17:32:15.000000000 +0200
+@@ -28,6 +28,7 @@
+ #include "xattr.h"
+ #include "acl.h"
++#include "group.h"
+ /*
+  * ialloc.c contains the inodes allocation and deallocation routines
+@@ -43,6 +44,52 @@
+  * the free blocks count in the block.
+  */
++/*
++ * To avoid calling the atomic setbit hundreds or thousands of times, we only
++ * need to use it within a single byte (to ensure we get endianness right).
++ * We can use memset for the rest of the bitmap as there are no other users.
++ */
++static void mark_bitmap_end(int start_bit, int end_bit, char *bitmap)
++{
++      int i;
++
++      if (start_bit >= end_bit)
++              return;
++
++      ext3_debug("mark end bits +%d through +%d used\n", start_bit, end_bit);
++      for (i = start_bit; i < ((start_bit + 7) & ~7UL); i++)
++              ext3_set_bit(i, bitmap);
++      if (i < end_bit)
++              memset(bitmap + (i >> 3), 0xff, (end_bit - i) >> 3);
++}
++
++/* Initializes an uninitialized inode bitmap */
++unsigned ext3_init_inode_bitmap(struct super_block *sb,
++                              struct buffer_head *bh, int block_group,
++                              struct ext3_group_desc *gdp)
++{
++      struct ext3_sb_info *sbi = EXT3_SB(sb);
++
++      J_ASSERT_BH(bh, buffer_locked(bh));
++
++      /* If checksum is bad mark all blocks and inodes used to prevent
++       * allocation, essentially implementing a per-group read-only flag. */
++      if (!ext3_group_desc_csum_verify(sbi, block_group, gdp)) {
++              ext3_error(sb, __FUNCTION__, "Checksum bad for group %u\n",
++                         block_group);
++              gdp->bg_free_blocks_count = 0;
++              gdp->bg_free_inodes_count = 0;
++              gdp->bg_itable_unused = 0;
++              memset(bh->b_data, 0xff, sb->s_blocksize);
++              return 0;
++      }
++
++      memset(bh->b_data, 0, (EXT3_INODES_PER_GROUP(sb) + 7) / 8);
++      mark_bitmap_end(EXT3_INODES_PER_GROUP(sb), EXT3_BLOCKS_PER_GROUP(sb),
++                      bh->b_data);
++
++      return EXT3_INODES_PER_GROUP(sb);
++}
+ /*
+  * Read the inode allocation bitmap for a given block_group, reading
+@@ -59,8 +106,19 @@ read_inode_bitmap(struct super_block * s
+       desc = ext3_get_group_desc(sb, block_group, NULL);
+       if (!desc)
+               goto error_out;
+-
+-      bh = sb_bread(sb, le32_to_cpu(desc->bg_inode_bitmap));
++      if (desc->bg_flags & cpu_to_le16(EXT3_BG_INODE_UNINIT)) {
++              bh = sb_getblk(sb, le32_to_cpu(desc->bg_inode_bitmap));
++              if (!buffer_uptodate(bh)) {
++                      lock_buffer(bh);
++                      if (!buffer_uptodate(bh)) {
++                              ext3_init_inode_bitmap(sb, bh,block_group,desc);
++                              set_buffer_uptodate(bh);
++                      }
++                      unlock_buffer(bh);
++              }
++      } else {
++              bh = sb_bread(sb, le32_to_cpu(desc->bg_inode_bitmap));
++      }
+       if (!bh)
+               ext3_error(sb, "read_inode_bitmap",
+                           "Cannot read inode bitmap - "
+@@ -169,6 +227,8 @@ void ext3_free_inode (handle_t *handle, 
+                       if (is_directory)
+                               gdp->bg_used_dirs_count = cpu_to_le16(
+                                 le16_to_cpu(gdp->bg_used_dirs_count) - 1);
++                      gdp->bg_checksum = ext3_group_desc_csum(sbi,block_group,
++                                                              gdp);
+                       spin_unlock(sb_bgl_lock(sbi, block_group));
+                       percpu_counter_inc(&sbi->s_freeinodes_counter);
+                       if (is_directory)
+@@ -454,7 +514,7 @@ struct inode *ext3_new_inode(handle_t *h
+       struct ext3_sb_info *sbi;
+       int err = 0;
+       struct inode *ret;
+-      int i;
++      int i, free = 0;
+       /* Cannot create files in a deleted directory */
+       if (!dir || !dir->i_nlink)
+@@ -571,11 +631,13 @@ repeat_in_this_group:
+       goto out;
+ got:
+-      ino += group * EXT3_INODES_PER_GROUP(sb) + 1;
+-      if (ino < EXT3_FIRST_INO(sb) || ino > le32_to_cpu(es->s_inodes_count)) {
+-              ext3_error (sb, "ext3_new_inode",
+-                          "reserved inode or inode > inodes count - "
+-                          "block_group = %d, inode=%lu", group, ino);
++      ino++;
++      if ((group == 0 && ino < EXT3_FIRST_INO(sb)) ||
++          ino > EXT3_INODES_PER_GROUP(sb)) {
++              ext3_error(sb, __FUNCTION__,
++                         "reserved inode or inode > inodes count - "
++                         "block_group = %d, inode=%lu", group,
++                         ino + group * EXT3_INODES_PER_GROUP(sb));
+               err = -EIO;
+               goto fail;
+       }
+@@ -583,13 +645,64 @@ got:
+       BUFFER_TRACE(bh2, "get_write_access");
+       err = ext3_journal_get_write_access(handle, bh2);
+       if (err) goto fail;
++
++      /* We may have to initialize the block bitmap if it isn't already */
++      if (EXT3_HAS_RO_COMPAT_FEATURE(sb, EXT4_FEATURE_RO_COMPAT_GDT_CSUM) &&
++          gdp->bg_flags & cpu_to_le16(EXT3_BG_BLOCK_UNINIT)) {
++              struct buffer_head *block_bh = read_block_bitmap(sb, group);
++
++              BUFFER_TRACE(block_bh, "get block bitmap access");
++              err = ext3_journal_get_write_access(handle, block_bh);
++              if (err) {
++                      brelse(block_bh);
++                      goto fail;
++              }
++
++              free = 0;
++              spin_lock(sb_bgl_lock(sbi, group));
++              /* recheck and clear flag under lock if we still need to */
++              if (gdp->bg_flags & cpu_to_le16(EXT3_BG_BLOCK_UNINIT)) {
++                      gdp->bg_flags &= cpu_to_le16(~EXT3_BG_BLOCK_UNINIT);
++                      free = ext3_free_blocks_after_init(sb, group, gdp);
++                      gdp->bg_free_blocks_count = cpu_to_le16(free);
++              }
++              spin_unlock(sb_bgl_lock(sbi, group));
++
++              /* Don't need to dirty bitmap block if we didn't change it */
++              if (free) {
++                      BUFFER_TRACE(block_bh, "dirty block bitmap");
++                      err = ext3_journal_dirty_metadata(handle, block_bh);
++              }
++
++              brelse(block_bh);
++              if (err)
++                      goto fail;
++      }
++
+       spin_lock(sb_bgl_lock(sbi, group));
++      /* If we didn't allocate from within the initialized part of the inode
++       * table then we need to initialize up to this inode. */
++      if (EXT3_HAS_RO_COMPAT_FEATURE(sb, EXT4_FEATURE_RO_COMPAT_GDT_CSUM)) {
++              if (gdp->bg_flags & cpu_to_le16(EXT3_BG_INODE_UNINIT)) {
++                      gdp->bg_flags &= cpu_to_le16(~EXT3_BG_INODE_UNINIT);
++                      free = 0;
++              } else {
++                      free = EXT3_INODES_PER_GROUP(sb) -
++                              le16_to_cpu(gdp->bg_itable_unused);
++              }
++
++              if (ino > free)
++                      gdp->bg_itable_unused =
++                              cpu_to_le16(EXT3_INODES_PER_GROUP(sb) - ino);
++      }
++
+       gdp->bg_free_inodes_count =
+               cpu_to_le16(le16_to_cpu(gdp->bg_free_inodes_count) - 1);
+       if (S_ISDIR(mode)) {
+               gdp->bg_used_dirs_count =
+                       cpu_to_le16(le16_to_cpu(gdp->bg_used_dirs_count) + 1);
+       }
++      gdp->bg_checksum = ext3_group_desc_csum(sbi, group, gdp);
+       spin_unlock(sb_bgl_lock(sbi, group));
+       BUFFER_TRACE(bh2, "call ext3_journal_dirty_metadata");
+       err = ext3_journal_dirty_metadata(handle, bh2);
+@@ -611,7 +724,7 @@ got:
+               inode->i_gid = current->fsgid;
+       inode->i_mode = mode;
+-      inode->i_ino = ino;
++      inode->i_ino = ino + group * EXT3_INODES_PER_GROUP(sb);
+       /* This is the optimal IO size (for stat), not the fs block size */
+       inode->i_blocks = 0;
+       inode->i_mtime = inode->i_atime = inode->i_ctime = CURRENT_TIME_SEC;
+Index: linux-rhel5/fs/ext3/mballoc.c
+===================================================================
+--- linux-rhel5.orig/fs/ext3/mballoc.c 2007-07-18 17:32:04.000000000 +0200
++++ linux-rhel5/fs/ext3/mballoc.c      2007-07-18 17:32:15.000000000 +0200
+@@ -36,6 +36,8 @@
+ #include <linux/seq_file.h>
+ #include <linux/version.h>
++#include "group.h"
++
+ /*
+  * MUSTDO:
+  *   - test ext3_ext_search_left() and ext3_ext_search_right()
+@@ -323,6 +325,7 @@ struct ext3_group_info {
+       unsigned long   bb_state;
+       unsigned long   bb_tid;
+       struct ext3_free_metadata *bb_md_cur;
++      struct ext3_group_desc *bb_gdp;
+       unsigned short  bb_first_free;
+       unsigned short  bb_free;
+       unsigned short  bb_fragments;
+@@ -943,10 +946,7 @@ static int ext3_mb_init_cache(struct pag
+               if (first_group + i >= EXT3_SB(sb)->s_groups_count)
+                       break;
+-              err = -EIO;
+-              desc = ext3_get_group_desc(sb, first_group + i, NULL);
+-              if (desc == NULL)
+-                      goto out;
++              desc = EXT3_GROUP_INFO(sb, first_group + i)->bb_gdp;
+               err = -ENOMEM;
+               bh[i] = sb_getblk(sb, le32_to_cpu(desc->bg_block_bitmap));
+@@ -961,7 +961,12 @@ static int ext3_mb_init_cache(struct pag
+                       unlock_buffer(bh[i]);
+                       continue;
+               }
+-
++              if (desc->bg_flags & cpu_to_le16(EXT3_BG_BLOCK_UNINIT)) {
++                      ext3_init_block_bitmap(sb, bh[i], first_group + i,desc);
++                      set_buffer_uptodate(bh[i]);
++                      unlock_buffer(bh[i]);
++                      continue;
++              }
+               get_bh(bh[i]);
+               bh[i]->b_end_io = end_buffer_read_sync;
+               submit_bh(READ, bh[i]);
+@@ -1732,6 +1737,10 @@ static int ext3_mb_good_group(struct ext
+       switch (cr) {
+               case 0:
+                       BUG_ON(ac->ac_2order == 0);
++                      /* If this group is uninitialized, skip it initially */
++                      if (grp->bb_gdp->bg_flags &
++                          cpu_to_le16(EXT3_BG_BLOCK_UNINIT))
++                              return 0;
+                       bits = ac->ac_sb->s_blocksize_bits + 1;
+                       for (i = ac->ac_2order; i <= bits; i++)
+                               if (grp->bb_counters[i] > 0)
+@@ -1825,7 +1834,9 @@ repeat:
+                       }
+                       ac->ac_groups_scanned++;
+-                      if (cr == 0)
++                      if (cr == 0 || (e3b.bd_info->bb_gdp->bg_flags &
++                                      cpu_to_le16(EXT3_BG_BLOCK_UNINIT) &&
++                                      ac->ac_2order != 0))
+                               ext3_mb_simple_scan_group(ac, &e3b);
+                       else if (cr == 1 && ac->ac_g_ex.fe_len == sbi->s_stripe)
+                               ext3_mb_scan_aligned(ac, &e3b);
+@@ -2304,12 +2315,13 @@ int ext3_mb_init_backend(struct super_bl
+                       i--;
+                       goto err_freebuddy;
+               }
++              memset(meta_group_info[j], 0, len);
+               desc = ext3_get_group_desc(sb, i, NULL);
++              meta_group_info[j]->bb_gdp = desc;
+               if (desc == NULL) {
+                       printk(KERN_ERR"EXT3-fs: can't read descriptor %u\n",i);
+                       goto err_freebuddy;
+               }
+-              memset(meta_group_info[j], 0, len);
+               set_bit(EXT3_GROUP_INFO_NEED_INIT_BIT,
+                       &meta_group_info[j]->bb_state);
+@@ -2958,9 +2970,17 @@ int ext3_mb_mark_diskspace_used(struct e
+       mb_set_bits(bitmap_bh->b_data, ac->ac_b_ex.fe_start, ac->ac_b_ex.fe_len);
+       spin_lock(sb_bgl_lock(sbi, ac->ac_b_ex.fe_group));
++      if (gdp->bg_flags & cpu_to_le16(EXT3_BG_BLOCK_UNINIT)) {
++              gdp->bg_flags &= cpu_to_le16(~EXT3_BG_BLOCK_UNINIT);
++              gdp->bg_free_blocks_count =
++                      cpu_to_le16(ext3_free_blocks_after_init(sb,
++                                                          ac->ac_b_ex.fe_group,
++                                                          gdp));
++      }
+       gdp->bg_free_blocks_count =
+               cpu_to_le16(le16_to_cpu(gdp->bg_free_blocks_count)
+                               - ac->ac_b_ex.fe_len);
++      gdp->bg_checksum = ext3_group_desc_csum(sbi, ac->ac_b_ex.fe_group, gdp);
+       spin_unlock(sb_bgl_lock(sbi, ac->ac_b_ex.fe_group));
+       percpu_counter_mod(&sbi->s_freeblocks_counter, - ac->ac_b_ex.fe_len);
+@@ -4346,6 +4366,7 @@ do_more:
+       spin_lock(sb_bgl_lock(sbi, block_group));
+       gdp->bg_free_blocks_count =
+               cpu_to_le16(le16_to_cpu(gdp->bg_free_blocks_count) + count);
++      gdp->bg_checksum = ext3_group_desc_csum(sbi, block_group, gdp);
+       spin_unlock(sb_bgl_lock(sbi, block_group));
+       percpu_counter_mod(&sbi->s_freeblocks_counter, count);
+Index: linux-rhel5/fs/ext3/balloc.c
+===================================================================
+--- linux-rhel5.orig/fs/ext3/balloc.c  2007-07-18 17:32:04.000000000 +0200
++++ linux-rhel5/fs/ext3/balloc.c       2007-07-18 17:32:15.000000000 +0200
+@@ -20,6 +20,7 @@
+ #include <linux/quotaops.h>
+ #include <linux/buffer_head.h>
++#include "group.h"
+ /*
+  * balloc.c contains the blocks allocation and deallocation routines
+  */
+@@ -73,6 +74,75 @@ struct ext3_group_desc * ext3_get_group_
+       return desc + offset;
+ }
++/* Initializes an uninitialized block bitmap, if a buffer head is
++ * supplied, and returns the number of free blocks in the group. */
++unsigned ext3_init_block_bitmap(struct super_block *sb, struct buffer_head *bh,
++                              int block_group, struct ext3_group_desc *gdp)
++{
++      unsigned long start;
++      int bit, bit_max;
++      unsigned free_blocks;
++      struct ext3_sb_info *sbi = EXT3_SB(sb);
++
++      if (bh) {
++              J_ASSERT_BH(bh, buffer_locked(bh));
++
++              /* If checksum is bad mark all blocks used to prevent allocation,
++               * essentially implementing a per-group read-only flag. */
++              if (!ext3_group_desc_csum_verify(sbi, block_group, gdp)) {
++                      ext3_error(sb, __FUNCTION__,
++                                 "Checksum bad for group %u\n", block_group);
++                      gdp->bg_free_blocks_count = 0;
++                      gdp->bg_free_inodes_count = 0;
++                      gdp->bg_itable_unused = 0;
++                      memset(bh->b_data, 0xff, sb->s_blocksize);
++                      return 0;
++              }
++              memset(bh->b_data, 0, sb->s_blocksize);
++      }
++
++      /* Check for superblock and gdt backups in this group */
++      bit_max = ext3_bg_has_super(sb, block_group);
++
++      if (!EXT3_HAS_INCOMPAT_FEATURE(sb, EXT3_FEATURE_INCOMPAT_META_BG) ||
++          block_group < le32_to_cpu(sbi->s_es->s_first_meta_bg) *
++                        sbi->s_desc_per_block) {
++              if (bit_max) {
++                      bit_max += ext3_bg_num_gdb(sb, block_group);
++                      bit_max +=le16_to_cpu(sbi->s_es->s_reserved_gdt_blocks);
++              }
++      } else { /* For META_BG_BLOCK_GROUPS */
++              int group_rel = (block_group -
++                               le32_to_cpu(sbi->s_es->s_first_meta_bg)) %
++                              EXT3_DESC_PER_BLOCK(sb);
++              if (group_rel == 0 || group_rel == 1 ||
++                  (group_rel == EXT3_DESC_PER_BLOCK(sb) - 1))
++                      bit_max += 1;
++      }
++
++      /* Last and first groups are always initialized */
++      free_blocks = EXT3_BLOCKS_PER_GROUP(sb) - bit_max;
++
++      if (bh) {
++              for (bit = 0; bit < bit_max; bit++)
++                      ext3_set_bit(bit, bh->b_data);
++
++              start = block_group * EXT3_BLOCKS_PER_GROUP(sb) +
++                      le32_to_cpu(sbi->s_es->s_first_data_block);
++
++              /* Set bits for block and inode bitmaps, and inode table */
++              ext3_set_bit(le32_to_cpu(gdp->bg_block_bitmap) - start,
++                           bh->b_data);
++              ext3_set_bit(le32_to_cpu(gdp->bg_inode_bitmap) - start,
++                           bh->b_data);
++              for (bit = le32_to_cpu(gdp->bg_inode_table) - start,
++                   bit_max = bit + sbi->s_itb_per_group; bit < bit_max; bit++)
++                      ext3_set_bit(bit, bh->b_data);
++      }
++
++      return free_blocks - sbi->s_itb_per_group - 2;
++}
++
+ /*
+  * Read the bitmap for a given block_group, reading into the specified 
+  * slot in the superblock's bitmap cache.
+@@ -88,7 +158,19 @@ read_block_bitmap(struct super_block *sb
+       desc = ext3_get_group_desc (sb, block_group, NULL);
+       if (!desc)
+               goto error_out;
+-      bh = sb_bread(sb, le32_to_cpu(desc->bg_block_bitmap));
++      if (desc->bg_flags & cpu_to_le16(EXT3_BG_BLOCK_UNINIT)) {
++              bh = sb_getblk(sb, le32_to_cpu(desc->bg_block_bitmap));
++              if (!buffer_uptodate(bh)) {
++                      lock_buffer(bh);
++                      if (!buffer_uptodate(bh)) {
++                              ext3_init_block_bitmap(sb, bh,block_group,desc);
++                              set_buffer_uptodate(bh);
++                      }
++                      unlock_buffer(bh);
++              }
++      } else {
++              bh = sb_bread(sb, le32_to_cpu(desc->bg_block_bitmap));
++      }
+       if (!bh)
+               ext3_error (sb, "read_block_bitmap",
+                           "Cannot read block bitmap - "
+@@ -467,6 +549,7 @@ do_more:
+       desc->bg_free_blocks_count =
+               cpu_to_le16(le16_to_cpu(desc->bg_free_blocks_count) +
+                       group_freed);
++      desc->bg_checksum = ext3_group_desc_csum(sbi, block_group, desc);
+       spin_unlock(sb_bgl_lock(sbi, block_group));
+       percpu_counter_mod(&sbi->s_freeblocks_counter, count);
+@@ -1434,8 +1517,11 @@ allocated:
+                       ret_block, goal_hits, goal_attempts);
+       spin_lock(sb_bgl_lock(sbi, group_no));
++      if (gdp->bg_flags & cpu_to_le16(EXT3_BG_BLOCK_UNINIT))
++              gdp->bg_flags &= cpu_to_le16(~EXT3_BG_BLOCK_UNINIT);
+       gdp->bg_free_blocks_count =
+                       cpu_to_le16(le16_to_cpu(gdp->bg_free_blocks_count)-num);
++      gdp->bg_checksum = ext3_group_desc_csum(sbi, group_no, gdp);
+       spin_unlock(sb_bgl_lock(sbi, group_no));
+       percpu_counter_mod(&sbi->s_freeblocks_counter, -num);
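The uninit-groups hunks above maintain one invariant: a group still flagged EXT3_BG_BLOCK_UNINIT has no initialized on-disk bitmap, so the first allocation from it must clear the flag and recompute the free-block count from the group's metadata layout before the normal decrement, and every group-descriptor update must be followed by a checksum refresh while sb_bgl_lock() is held. A minimal userspace sketch of that update sequence follows; desc_csum(), free_blocks_after_init(), and all constants are hypothetical stand-ins, not the patch's kernel functions:

/* Toy model of the uninit-groups bookkeeping: lazily "initialize" a
 * group on its first allocation, then keep the checksum current.
 * All names and constants are stand-ins, not kernel code. */
#include <stdint.h>
#include <stdio.h>

#define BG_BLOCK_UNINIT 0x0002  /* stand-in for EXT3_BG_BLOCK_UNINIT */

struct group_desc {
        uint16_t bg_flags;
        uint16_t bg_free_blocks_count;
        uint16_t bg_checksum;
};

/* Stand-in for ext3_group_desc_csum(): any stable function of the
 * group number and descriptor contents works for this sketch. */
static uint16_t desc_csum(unsigned group, const struct group_desc *gdp)
{
        return (uint16_t)(group * 31u + gdp->bg_flags +
                          gdp->bg_free_blocks_count);
}

/* Stand-in for ext3_free_blocks_after_init(): blocks per group minus
 * the group's metadata overhead (a fixed toy value here). */
static uint16_t free_blocks_after_init(unsigned blocks_per_group)
{
        return (uint16_t)(blocks_per_group - 50);
}

static void allocate_blocks(unsigned group, struct group_desc *gdp,
                            unsigned blocks_per_group, uint16_t len)
{
        /* In the kernel this whole sequence runs under sb_bgl_lock(). */
        if (gdp->bg_flags & BG_BLOCK_UNINIT) {
                gdp->bg_flags &= (uint16_t)~BG_BLOCK_UNINIT;
                gdp->bg_free_blocks_count =
                        free_blocks_after_init(blocks_per_group);
        }
        gdp->bg_free_blocks_count -= len;
        /* Refresh the checksum after every descriptor update, exactly
         * as the hunks above do on both allocation and freeing. */
        gdp->bg_checksum = desc_csum(group, gdp);
}

int main(void)
{
        struct group_desc gdp = { .bg_flags = BG_BLOCK_UNINIT };

        allocate_blocks(7, &gdp, 32768, 16);
        printf("flags=%#x free=%u csum=%#x\n", gdp.bg_flags,
               gdp.bg_free_blocks_count, gdp.bg_checksum);
        return 0;
}

The same pattern repeats wherever the hunks above touch a descriptor: ext3_free_blocks raises the count and refreshes the checksum, and ext3_mb_mark_diskspace_used lowers it and does the same.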
diff --git a/ldiskfs/kernel_patches/patches/iopen-2.6.22-vanilla.patch b/ldiskfs/kernel_patches/patches/iopen-2.6.22-vanilla.patch
new file mode 100644 (file)
index 0000000..a01a018
--- /dev/null
@@ -0,0 +1,445 @@
+Index: linux-2.6.16.27-0.9/fs/ext3/iopen.c
+===================================================================
+--- /dev/null  1970-01-01 00:00:00.000000000 +0000
++++ linux-2.6.16.27-0.9/fs/ext3/iopen.c        2007-06-29 08:33:12.000000000 +0200
+@@ -0,0 +1,256 @@
++/*
++ * linux/fs/ext3/iopen.c
++ *
++ * Special support for open by inode number
++ *
++ * Copyright (C) 2001 by Theodore Ts'o (tytso@alum.mit.edu).
++ *
++ * This file may be redistributed under the terms of the GNU General
++ * Public License.
++ *
++ *
++ * Invariants:
++ *   - there is only ever a single DCACHE_NFSD_DISCONNECTED dentry alias
++ *     for an inode at one time.
++ *   - there are never both connected and DCACHE_NFSD_DISCONNECTED dentry
++ *     aliases on an inode at the same time.
++ *
++ * If we have any connected dentry aliases for an inode, use one of those
++ * in iopen_lookup().  Otherwise, we instantiate a single NFSD_DISCONNECTED
++ * dentry for this inode, which thereafter will be found by the dcache
++ * when looking up this inode number in __iopen__, so we don't return here
++ * until it is gone.
++ *
++ * If we get an inode via a regular name lookup, then we "rename" the
++ * NFSD_DISCONNECTED dentry to the proper name and parent.  This ensures
++ * existing users of the disconnected dentry will continue to use the same
++ * dentry as the connected users, and there will never be both kinds of
++ * dentry aliases at one time.
++ */
++
++#include <linux/sched.h>
++#include <linux/fs.h>
++#include <linux/ext3_jbd.h>
++#include <linux/jbd.h>
++#include <linux/ext3_fs.h>
++#include <linux/smp_lock.h>
++#include <linux/dcache.h>
++#include <linux/security.h>
++#include "iopen.h"
++
++#ifndef assert
++#define assert(test) J_ASSERT(test)
++#endif
++
++#define IOPEN_NAME_LEN        32
++
++/*
++ * This implements looking up an inode by number.
++ */
++static struct dentry *iopen_lookup(struct inode * dir, struct dentry *dentry,
++                                 struct nameidata *nd)
++{
++      struct inode *inode;
++      unsigned long ino;
++      struct list_head *lp;
++      struct dentry *alternate;
++      char buf[IOPEN_NAME_LEN];
++
++      if (dentry->d_name.len >= IOPEN_NAME_LEN)
++              return ERR_PTR(-ENAMETOOLONG);
++
++      memcpy(buf, dentry->d_name.name, dentry->d_name.len);
++      buf[dentry->d_name.len] = 0;
++
++      if (strcmp(buf, ".") == 0)
++              ino = dir->i_ino;
++      else if (strcmp(buf, "..") == 0)
++              ino = EXT3_ROOT_INO;
++      else
++              ino = simple_strtoul(buf, 0, 0);
++
++      if ((ino != EXT3_ROOT_INO &&
++           //ino != EXT3_ACL_IDX_INO &&
++           //ino != EXT3_ACL_DATA_INO &&
++           ino < EXT3_FIRST_INO(dir->i_sb)) ||
++          ino > le32_to_cpu(EXT3_SB(dir->i_sb)->s_es->s_inodes_count))
++              return ERR_PTR(-ENOENT);
++
++      inode = iget(dir->i_sb, ino);
++      if (!inode)
++              return ERR_PTR(-EACCES);
++      if (is_bad_inode(inode)) {
++              iput(inode);
++              return ERR_PTR(-ENOENT);
++      }
++
++      assert(list_empty(&dentry->d_alias));           /* d_instantiate */
++      assert(d_unhashed(dentry));                     /* d_rehash */
++
++      /* preferably return a connected dentry */
++      spin_lock(&dcache_lock);
++      list_for_each(lp, &inode->i_dentry) {
++              alternate = list_entry(lp, struct dentry, d_alias);
++              assert(!(alternate->d_flags & DCACHE_DISCONNECTED));
++      }
++
++      if (!list_empty(&inode->i_dentry)) {
++              alternate = list_entry(inode->i_dentry.next,
++                                     struct dentry, d_alias);
++              dget_locked(alternate);
++              spin_lock(&alternate->d_lock);
++              alternate->d_flags |= DCACHE_REFERENCED;
++              spin_unlock(&alternate->d_lock);
++              iput(inode);
++              spin_unlock(&dcache_lock);
++              return alternate;
++      }
++      dentry->d_flags |= DCACHE_DISCONNECTED;
++
++      /* d_add(), but don't drop dcache_lock before adding dentry to inode */
++      list_add(&dentry->d_alias, &inode->i_dentry);   /* d_instantiate */
++      dentry->d_inode = inode;
++
++      d_rehash_cond(dentry, 0);       /* d_rehash */
++      spin_unlock(&dcache_lock);
++
++      return NULL;
++}
++
++/* This function is spliced into ext3_lookup and moves a disconnected
++ * dentry (if one exists) to a connected dentry.
++ */
++struct dentry *iopen_connect_dentry(struct dentry *dentry, struct inode *inode,
++                                  int rehash)
++{
++      struct dentry *tmp, *goal = NULL;
++      struct list_head *lp;
++
++      /* verify this dentry is really new */
++      assert(dentry->d_inode == NULL);
++      assert(list_empty(&dentry->d_alias));           /* d_instantiate */
++      if (rehash)
++              assert(d_unhashed(dentry));             /* d_rehash */
++      assert(list_empty(&dentry->d_subdirs));
++
++      spin_lock(&dcache_lock);
++      if (!inode)
++              goto do_rehash;
++
++      if (!test_opt(inode->i_sb, IOPEN))
++              goto do_instantiate;
++
++      /* preferably return a connected dentry */
++      list_for_each(lp, &inode->i_dentry) {
++              tmp = list_entry(lp, struct dentry, d_alias);
++              if (tmp->d_flags & DCACHE_DISCONNECTED) {
++                      assert(tmp->d_alias.next == &inode->i_dentry);
++                      assert(tmp->d_alias.prev == &inode->i_dentry);
++                      goal = tmp;
++                      dget_locked(goal);
++                      break;
++              }
++      }
++
++      if (!goal)
++              goto do_instantiate;
++
++      /* Move the goal to the dentry hash queue */
++      goal->d_flags &= ~DCACHE_DISCONNECTED;
++      security_d_instantiate(goal, inode);
++      __d_drop(dentry);
++      d_rehash_cond(dentry, 0);
++      __d_move(goal, dentry);
++      spin_unlock(&dcache_lock);
++      iput(inode);
++
++      return goal;
++
++      /* d_add(), but don't drop dcache_lock before adding dentry to inode */
++do_instantiate:
++      list_add(&dentry->d_alias, &inode->i_dentry);   /* d_instantiate */
++      dentry->d_inode = inode;
++do_rehash:
++      if (rehash)
++              d_rehash_cond(dentry, 0);       /* d_rehash */
++      spin_unlock(&dcache_lock);
++
++      return NULL;
++}
++
++/*
++ * These are the special structures for the iopen pseudo directory.
++ */
++
++static struct inode_operations iopen_inode_operations = {
++      lookup:         iopen_lookup,           /* BKL held */
++};
++
++static struct file_operations iopen_file_operations = {
++      read:           generic_read_dir,
++};
++
++static int match_dentry(struct dentry *dentry, const char *name)
++{
++      int     len;
++
++      len = strlen(name);
++      if (dentry->d_name.len != len)
++              return 0;
++      if (strncmp(dentry->d_name.name, name, len))
++              return 0;
++      return 1;
++}
++
++/*
++ * This function is spliced into ext3_lookup and returns 1 if the file
++ * name is __iopen__ and the dentry has been filled in appropriately.
++ */
++int ext3_check_for_iopen(struct inode *dir, struct dentry *dentry)
++{
++      struct inode *inode;
++
++      if (dir->i_ino != EXT3_ROOT_INO ||
++          !test_opt(dir->i_sb, IOPEN) ||
++          !match_dentry(dentry, "__iopen__"))
++              return 0;
++
++      inode = iget(dir->i_sb, EXT3_BAD_INO);
++
++      if (!inode)
++              return 0;
++      d_add(dentry, inode);
++      return 1;
++}
++
++/*
++ * This function is spliced into read_inode; it returns 1 if the inode
++ * number is the one for /__iopen__, in which case the inode is filled
++ * in appropriately.  Otherwise, this function returns 0.
++ */
++int ext3_iopen_get_inode(struct inode *inode)
++{
++      if (inode->i_ino != EXT3_BAD_INO)
++              return 0;
++
++      inode->i_mode = S_IFDIR | S_IRUSR | S_IXUSR;
++      if (test_opt(inode->i_sb, IOPEN_NOPRIV))
++              inode->i_mode |= 0777;
++      inode->i_uid = 0;
++      inode->i_gid = 0;
++      inode->i_nlink = 1;
++      inode->i_size = 4096;
++      inode->i_atime = CURRENT_TIME;
++      inode->i_ctime = CURRENT_TIME;
++      inode->i_mtime = CURRENT_TIME;
++      EXT3_I(inode)->i_dtime = 0;
++      inode->i_blocks = 0;
++      inode->i_version = 1;
++      inode->i_generation = 0;
++
++      inode->i_op = &iopen_inode_operations;
++      inode->i_fop = &iopen_file_operations;
++      inode->i_mapping->a_ops = 0;
++
++      return 1;
++}
+Index: linux-2.6.16.27-0.9/fs/ext3/iopen.h
+===================================================================
+--- /dev/null  1970-01-01 00:00:00.000000000 +0000
++++ linux-2.6.16.27-0.9/fs/ext3/iopen.h        2007-06-29 08:24:49.000000000 +0200
+@@ -0,0 +1,15 @@
++/*
++ * iopen.h
++ *
++ * Special support for opening files by inode number.
++ *
++ * Copyright (C) 2001 by Theodore Ts'o (tytso@alum.mit.edu).
++ *
++ * This file may be redistributed under the terms of the GNU General
++ * Public License.
++ */
++
++extern int ext3_check_for_iopen(struct inode *dir, struct dentry *dentry);
++extern int ext3_iopen_get_inode(struct inode *inode);
++extern struct dentry *iopen_connect_dentry(struct dentry *dentry,
++                                         struct inode *inode, int rehash);
+Index: linux-2.6.16.27-0.9/fs/ext3/inode.c
+===================================================================
+--- linux-2.6.16.27-0.9.orig/fs/ext3/inode.c   2007-06-29 08:24:48.000000000 +0200
++++ linux-2.6.16.27-0.9/fs/ext3/inode.c        2007-06-29 08:24:52.000000000 +0200
+@@ -37,6 +37,7 @@
+ #include <linux/uio.h>
+ #include <linux/bio.h>
+ #include "xattr.h"
++#include "iopen.h"
+ #include "acl.h"
+ static int ext3_writepage_trans_blocks(struct inode *inode);
+@@ -2448,6 +2449,8 @@ void ext3_read_inode(struct inode * inod
+       ei->i_default_acl = EXT3_ACL_NOT_CACHED;
+ #endif
+       ei->i_block_alloc_info = NULL;
++      if (ext3_iopen_get_inode(inode))
++              return;
+       if (__ext3_get_inode_loc(inode, &iloc, 0))
+               goto bad_inode;
+Index: linux-2.6.16.27-0.9/fs/ext3/super.c
+===================================================================
+--- linux-2.6.16.27-0.9.orig/fs/ext3/super.c   2007-06-29 08:24:48.000000000 +0200
++++ linux-2.6.16.27-0.9/fs/ext3/super.c        2007-06-29 08:24:52.000000000 +0200
+@@ -678,6 +678,7 @@ enum {
+       Opt_usrjquota, Opt_grpjquota, Opt_offusrjquota, Opt_offgrpjquota,
+       Opt_jqfmt_vfsold, Opt_jqfmt_vfsv0, Opt_quota, Opt_noquota,
+       Opt_ignore, Opt_barrier, Opt_err, Opt_resize, Opt_usrquota,
++      Opt_iopen, Opt_noiopen, Opt_iopen_nopriv,
+       Opt_grpquota
+ };
+@@ -726,6 +727,9 @@ static match_table_t tokens = {
+       {Opt_noquota, "noquota"},
+       {Opt_quota, "quota"},
+       {Opt_usrquota, "usrquota"},
++      {Opt_iopen, "iopen"},
++      {Opt_noiopen, "noiopen"},
++      {Opt_iopen_nopriv, "iopen_nopriv"},
+       {Opt_barrier, "barrier=%u"},
+       {Opt_err, NULL},
+       {Opt_resize, "resize"},
+@@ -1040,6 +1044,18 @@ clear_qf_name:
+                       else
+                               clear_opt(sbi->s_mount_opt, BARRIER);
+                       break;
++              case Opt_iopen:
++                      set_opt (sbi->s_mount_opt, IOPEN);
++                      clear_opt (sbi->s_mount_opt, IOPEN_NOPRIV);
++                      break;
++              case Opt_noiopen:
++                      clear_opt (sbi->s_mount_opt, IOPEN);
++                      clear_opt (sbi->s_mount_opt, IOPEN_NOPRIV);
++                      break;
++              case Opt_iopen_nopriv:
++                      set_opt (sbi->s_mount_opt, IOPEN);
++                      set_opt (sbi->s_mount_opt, IOPEN_NOPRIV);
++                      break;
+               case Opt_ignore:
+                       break;
+               case Opt_resize:
+Index: linux-2.6.16.27-0.9/fs/ext3/namei.c
+===================================================================
+--- linux-2.6.16.27-0.9.orig/fs/ext3/namei.c   2007-06-29 08:24:47.000000000 +0200
++++ linux-2.6.16.27-0.9/fs/ext3/namei.c        2007-06-29 08:24:49.000000000 +0200
+@@ -39,6 +39,7 @@
+ #include "namei.h"
+ #include "xattr.h"
++#include "iopen.h"
+ #include "acl.h"
+ /*
+@@ -1004,6 +1005,9 @@ static struct dentry *ext3_lookup(struct
+       if (dentry->d_name.len > EXT3_NAME_LEN)
+               return ERR_PTR(-ENAMETOOLONG);
++      if (ext3_check_for_iopen(dir, dentry))
++              return NULL;
++
+       bh = ext3_find_entry(dentry, &de);
+       inode = NULL;
+       if (bh) {
+@@ -1014,7 +1018,7 @@ static struct dentry *ext3_lookup(struct
+               if (!inode)
+                       return ERR_PTR(-EACCES);
+       }
+-      return d_splice_alias(inode, dentry);
++      return iopen_connect_dentry(dentry, inode, 1);
+ }
+@@ -2058,10 +2062,6 @@ static int ext3_rmdir (struct inode * di
+                             inode->i_nlink);
+       inode->i_version++;
+       clear_nlink(inode);
+-      /* There's no need to set i_disksize: the fact that i_nlink is
+-       * zero will ensure that the right thing happens during any
+-       * recovery. */
+-      inode->i_size = 0;
+       ext3_orphan_add(handle, inode);
+       inode->i_ctime = dir->i_ctime = dir->i_mtime = CURRENT_TIME_SEC;
+       ext3_mark_inode_dirty(handle, inode);
+@@ -2185,6 +2185,23 @@ out_stop:
+       return err;
+ }
++/* Like ext3_add_nondir() except for call to iopen_connect_dentry */
++static int ext3_add_link(handle_t *handle, struct dentry *dentry,
++                       struct inode *inode)
++{
++      int err = ext3_add_entry(handle, dentry, inode);
++      if (!err) {
++              err = ext3_mark_inode_dirty(handle, inode);
++              if (err == 0) {
++                      dput(iopen_connect_dentry(dentry, inode, 0));
++                      return 0;
++              }
++      }
++      ext3_dec_count(handle, inode);
++      iput(inode);
++      return err;
++}
++
+ static int ext3_link (struct dentry * old_dentry,
+               struct inode * dir, struct dentry *dentry)
+ {
+@@ -2208,7 +2225,8 @@ retry:
+       ext3_inc_count(handle, inode);
+       atomic_inc(&inode->i_count);
+-      err = ext3_add_nondir(handle, dentry, inode);
++      err = ext3_add_link(handle, dentry, inode);
++      ext3_orphan_del(handle, inode);
+       ext3_journal_stop(handle);
+       if (err == -ENOSPC && ext3_should_retry_alloc(dir->i_sb, &retries))
+               goto retry;
+Index: linux-2.6.16.27-0.9/fs/ext3/Makefile
+===================================================================
+--- linux-2.6.16.27-0.9.orig/fs/ext3/Makefile  2007-03-13 00:56:52.000000000 +0100
++++ linux-2.6.16.27-0.9/fs/ext3/Makefile       2007-06-29 08:24:49.000000000 +0200
+@@ -4,7 +4,7 @@
+ obj-$(CONFIG_EXT3_FS) += ext3.o
+-ext3-y        := balloc.o bitmap.o dir.o file.o fsync.o ialloc.o inode.o \
++ext3-y        := balloc.o bitmap.o dir.o file.o fsync.o ialloc.o inode.o iopen.o \
+          ioctl.o namei.o super.o symlink.o hash.o resize.o ext3_jbd.o
+ ext3-$(CONFIG_EXT3_FS_XATTR)   += xattr.o xattr_user.o xattr_trusted.o
+Index: linux-2.6.16.27-0.9/include/linux/ext3_fs.h
+===================================================================
+--- linux-2.6.16.27-0.9.orig/include/linux/ext3_fs.h   2007-06-29 08:24:47.000000000 +0200
++++ linux-2.6.16.27-0.9/include/linux/ext3_fs.h        2007-06-29 08:24:49.000000000 +0200
+@@ -375,6 +375,8 @@ struct ext3_inode {
+ #define EXT3_MOUNT_QUOTA              0x80000 /* Some quota option set */
+ #define EXT3_MOUNT_USRQUOTA           0x100000 /* "old" user quota */
+ #define EXT3_MOUNT_GRPQUOTA           0x200000 /* "old" group quota */
++#define EXT3_MOUNT_IOPEN              0x400000        /* Allow access via iopen */
++#define EXT3_MOUNT_IOPEN_NOPRIV               0x800000/* Make iopen world-readable */
+ /* Compatibility, for having both ext2_fs.h and ext3_fs.h included at once */
+ #ifndef _LINUX_EXT2_FS_H
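The iopen patch above adds the iopen and iopen_nopriv mount options and a root-level __iopen__ pseudo-directory in which every looked-up name is parsed as an inode number, so a file can be opened without knowing its path. A small userspace sketch of how a consumer would use it, assuming a hypothetical ext3 filesystem mounted at /mnt/ldiskfs with -o iopen (with plain iopen the directory is accessible to root only; iopen_nopriv makes it world-accessible):

/* Open a file by inode number through the __iopen__ directory.
 * The mount point and inode number are examples only. */
#include <fcntl.h>
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>

int main(int argc, char **argv)
{
        char path[64];
        unsigned long ino;
        int fd;

        if (argc != 2) {
                fprintf(stderr, "usage: %s <inode-number>\n", argv[0]);
                return 1;
        }
        ino = strtoul(argv[1], NULL, 0);

        /* Each name under __iopen__ is parsed as an inode number by
         * iopen_lookup() above; "." and ".." keep their usual meaning. */
        snprintf(path, sizeof(path), "/mnt/ldiskfs/__iopen__/%lu", ino);
        fd = open(path, O_RDONLY);
        if (fd < 0) {
                perror(path);
                return 1;
        }
        printf("opened inode %lu as fd %d\n", ino, fd);
        close(fd);
        return 0;
}

Built with any C compiler (for example, cc -o iopen_open iopen_open.c) and run as ./iopen_open 12345.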
diff --git a/ldiskfs/kernel_patches/series/ldiskfs-2.6.22-vanilla.series b/ldiskfs/kernel_patches/series/ldiskfs-2.6.22-vanilla.series
new file mode 100644 (file)
index 0000000..7849685
--- /dev/null
@@ -0,0 +1,19 @@
+ext3-wantedi-2.6-rhel4.patch
+iopen-2.6.22-vanilla.patch
+ext3-map_inode_page-2.6.18.patch
+export-ext3-2.6-rhel4.patch
+ext3-include-fixes-2.6-rhel4.patch
+ext3-extents-2.6.22-vanilla.patch
+ext3-mballoc3-core-2.6.22-vanilla.patch
+ext3-mballoc3-2.6.22.patch
+ext3-nlinks-2.6.22-vanilla.patch
+ext3-ialloc-2.6.22-vanilla.patch
+ext3-remove-cond_resched-calls-2.6.12.patch
+ext3-filterdata-sles10.patch
+ext3-uninit-2.6.22-vanilla.patch
+ext3-nanosecond-2.6.22-vanilla.patch
+ext3-inode-version-2.6.18-vanilla.patch
+ext3-mmp-2.6.22-vanilla.patch
+ext3-fiemap-2.6.22-vanilla.patch
+ext3-block-bitmap-validation-2.6-rhel5.patch
+ext3-statfs-2.6.22.patch