From a9c9228f508e7c0de77616d25097e0b4a572eff2 Mon Sep 17 00:00:00 2001
From: yangsheng
Date: Thu, 27 Mar 2008 07:20:06 +0000
Subject: [PATCH] Branch HEAD

b=13397
i=adilger
i=johann

Add ldiskfs kernel patches for vanilla-2.6.22.14.
---
 .../patches/ext3-dynlocks-2.6.22-vanilla.patch     |   45 +
 .../patches/ext3-export-journal-api.patch          |   24 +
 .../patches/ext3-extents-2.6.22-vanilla.patch      | 2896 ++++++++++++++++++++
 .../patches/ext3-fiemap-2.6.22-vanilla.patch       |  364 +++
 .../patches/ext3-ialloc-2.6.22-vanilla.patch       |  128 +
 .../patches/ext3-iam-2.6.22-vanilla.patch          | 2301 ++++++++++++++++
 .../patches/ext3-mballoc3-2.6.22.patch             |  612 +++++
 .../patches/ext3-mmp-2.6.22-vanilla.patch          |  463 ++++
 .../patches/ext3-nanosecond-2.6.22-vanilla.patch   |  407 +++
 .../patches/ext3-nlinks-2.6.22-vanilla.patch       |  171 ++
 .../patches/ext3-statfs-2.6.22.patch               |   71 +
 .../patches/ext3-uninit-2.6.22-vanilla.patch       |  664 +++++
 .../patches/iopen-2.6.22-vanilla.patch             |  450 +++
 .../series/ldiskfs-2.6.22-vanilla.series           |   25 +
 14 files changed, 8621 insertions(+)
 create mode 100644 ldiskfs/kernel_patches/patches/ext3-dynlocks-2.6.22-vanilla.patch
 create mode 100644 ldiskfs/kernel_patches/patches/ext3-export-journal-api.patch
 create mode 100644 ldiskfs/kernel_patches/patches/ext3-extents-2.6.22-vanilla.patch
 create mode 100644 ldiskfs/kernel_patches/patches/ext3-fiemap-2.6.22-vanilla.patch
 create mode 100644 ldiskfs/kernel_patches/patches/ext3-ialloc-2.6.22-vanilla.patch
 create mode 100644 ldiskfs/kernel_patches/patches/ext3-iam-2.6.22-vanilla.patch
 create mode 100644 ldiskfs/kernel_patches/patches/ext3-mballoc3-2.6.22.patch
 create mode 100644 ldiskfs/kernel_patches/patches/ext3-mmp-2.6.22-vanilla.patch
 create mode 100644 ldiskfs/kernel_patches/patches/ext3-nanosecond-2.6.22-vanilla.patch
 create mode 100644 ldiskfs/kernel_patches/patches/ext3-nlinks-2.6.22-vanilla.patch
 create mode 100644 ldiskfs/kernel_patches/patches/ext3-statfs-2.6.22.patch
 create mode 100644 ldiskfs/kernel_patches/patches/ext3-uninit-2.6.22-vanilla.patch
 create mode 100644 ldiskfs/kernel_patches/patches/iopen-2.6.22-vanilla.patch
 create mode 100644 ldiskfs/kernel_patches/series/ldiskfs-2.6.22-vanilla.series

diff --git a/ldiskfs/kernel_patches/patches/ext3-dynlocks-2.6.22-vanilla.patch b/ldiskfs/kernel_patches/patches/ext3-dynlocks-2.6.22-vanilla.patch
new file mode 100644
index 0000000..d496632
--- /dev/null
+++ b/ldiskfs/kernel_patches/patches/ext3-dynlocks-2.6.22-vanilla.patch
@@ -0,0 +1,45 @@
+Index: linux-stage/fs/ext3/super.c
+===================================================================
+--- linux-stage.orig/fs/ext3/super.c	2007-11-26 22:38:34.000000000 +0300
++++ linux-stage/fs/ext3/super.c	2007-11-26 22:47:17.000000000 +0300
+@@ -3230,6 +3230,7 @@ EXPORT_SYMBOL(d_genocide);
+ 	.fs_flags	= FS_REQUIRES_DEV,
+ };
+ 
++extern void dynlock_cache_init(void);
+ static int __init init_ext3_fs(void)
+ {
+ 	int err;
+@@ -3244,6 +3245,7 @@
+ 	err = init_inodecache();
+ 	if (err)
+ 		goto out1;
++	dynlock_cache_init();
+ 	err = register_filesystem(&ext3_fs_type);
+ 	if (err)
+ 		goto out;
+@@ -3256,9 +3257,11 @@ EXPORT_SYMBOL(d_genocide);
+ 	return err;
+ }
+ 
++extern void dynlock_cache_exit(void);
+ static void __exit exit_ext3_fs(void)
+ {
+ 	unregister_filesystem(&ext3_fs_type);
++	dynlock_cache_exit();
+ 	destroy_inodecache();
+ 	exit_ext3_xattr();
+ 	exit_ext3_proc();
+Index: linux-stage/fs/ext3/Makefile
+===================================================================
+--- linux-stage.orig/fs/ext3/Makefile	2007-11-26 22:38:32.000000000 +0300
++++ linux-stage/fs/ext3/Makefile	2007-11-26 22:47:52.000000000 +0300
+@@ -5,7 +5,7 @@
+ 
+ ext3-y	:= balloc.o bitmap.o dir.o file.o fsync.o ialloc.o inode.o iopen.o \
+ 	   ioctl.o namei.o super.o symlink.o hash.o resize.o ext3_jbd.o \
+-	   extents.o mballoc.o
++	   extents.o mballoc.o dynlocks.o
+ 
+ ext3-$(CONFIG_EXT3_FS_XATTR)	 += xattr.o xattr_user.o xattr_trusted.o
+ ext3-$(CONFIG_EXT3_FS_POSIX_ACL) += acl.o
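The two hunks above splice the (separately supplied) dynlock cache into ext3's module lifecycle: the cache is created right after the inode cache in init_ext3_fs(), and destroyed in reverse order in exit_ext3_fs(). The surrounding functions rely on ext3's goto-based unwinding, where a failure at any step jumps past the teardown of anything that was never set up. A minimal userspace sketch of that ordering discipline (all names here are illustrative, not part of the patch):

    #include <stdio.h>
    #include <stdlib.h>

    static char *cache;

    static int cache_init(void) { cache = malloc(64); return cache ? 0 : -1; }
    static void cache_exit(void) { free(cache); }

    static int register_fs(void) { puts("registered"); return 0; }
    static void unregister_fs(void) { puts("unregistered"); }

    /* mirrors init_ext3_fs(): set up caches first, register the fs last */
    static int init_fs(void)
    {
            int err;

            err = cache_init();        /* cf. init_inodecache() + dynlock_cache_init() */
            if (err)
                    return err;
            err = register_fs();       /* cf. register_filesystem(&ext3_fs_type) */
            if (err)
                    goto out;          /* unwind only what already succeeded */
            return 0;
    out:
            cache_exit();
            return err;
    }

    /* mirrors exit_ext3_fs(): strict reverse order of init_fs() */
    static void exit_fs(void)
    {
            unregister_fs();
            cache_exit();
    }

    int main(void)
    {
            if (init_fs() == 0)
                    exit_fs();
            return 0;
    }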
diff --git a/ldiskfs/kernel_patches/patches/ext3-export-journal-api.patch b/ldiskfs/kernel_patches/patches/ext3-export-journal-api.patch
new file mode 100644
index 0000000..7511213
--- /dev/null
+++ b/ldiskfs/kernel_patches/patches/ext3-export-journal-api.patch
@@ -0,0 +1,24 @@
+--- linux-src.org/fs/ext3/ext3_jbd.c
++++ linux-src/fs/ext3/ext3_jbd.c
+@@ -2,6 +2,7 @@
+  * Interface between ext3 and JBD
+  */
+ 
++#include <linux/module.h>
+ #include <linux/ext3_jbd.h>
+ 
+ int __ext3_journal_get_undo_access(const char *where, handle_t *handle,
+@@ -21,6 +21,7 @@ int __ext3_journal_get_write_access(cons
+ 		ext3_journal_abort_handle(where, __FUNCTION__, bh, handle,err);
+ 	return err;
+ }
++EXPORT_SYMBOL(__ext3_journal_get_write_access);
+ 
+ int __ext3_journal_forget(const char *where, handle_t *handle,
+ 				struct buffer_head *bh)
+@@ -57,3 +58,5 @@ int __ext3_journal_dirty_metadata(const
+ 	ext3_journal_abort_handle(where, __FUNCTION__, bh, handle,err);
+ 	return err;
+ }
++
++EXPORT_SYMBOL(__ext3_journal_dirty_metadata);
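This small export patch is what lets ldiskfs-based code outside fs/ext3 drive the journaling wrappers: EXPORT_SYMBOL() publishes __ext3_journal_get_write_access() and __ext3_journal_dirty_metadata() to other modules, and <linux/module.h> is included because it defines the EXPORT_SYMBOL macro itself. A consuming module could then journal a buffer roughly like this (a sketch only, not part of the patch; ext3_journal_get_write_access() is the ext3_jbd.h macro that wraps the exported function):

    #include <linux/module.h>
    #include <linux/ext3_jbd.h>

    /* Take journal write access to bh before modifying it; the call
     * resolves against ext3's newly exported symbol at module load. */
    static int demo_prepare_write(handle_t *handle, struct buffer_head *bh)
    {
            return ext3_journal_get_write_access(handle, bh);
    }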
diff --git a/ldiskfs/kernel_patches/patches/ext3-extents-2.6.22-vanilla.patch b/ldiskfs/kernel_patches/patches/ext3-extents-2.6.22-vanilla.patch
new file mode 100644
index 0000000..956fc07
--- /dev/null
+++ b/ldiskfs/kernel_patches/patches/ext3-extents-2.6.22-vanilla.patch
@@ -0,0 +1,2896 @@
+Index: linux-2.6.18.8/fs/ext3/dir.c
+===================================================================
+--- linux-2.6.18.8.orig/fs/ext3/dir.c	2007-02-24 00:52:30.000000000 +0100
++++ linux-2.6.18.8/fs/ext3/dir.c	2007-07-17 09:18:14.000000000 +0200
+@@ -131,8 +131,7 @@ static int ext3_readdir(struct file * fi
+ 			struct buffer_head *bh = NULL;
+ 
+ 			map_bh.b_state = 0;
+-			err = ext3_get_blocks_handle(NULL, inode, blk, 1,
+-						&map_bh, 0, 0);
++			err = ext3_get_blocks_wrap(NULL, inode, blk, 1, &map_bh, 0, 0);
+ 			if (err > 0) {
+ 				page_cache_readahead(sb->s_bdev->bd_inode->i_mapping,
+ 					&filp->f_ra,
+Index: linux-2.6.18.8/fs/ext3/extents.c
+===================================================================
+--- /dev/null	1970-01-01 00:00:00.000000000 +0000
++++ linux-2.6.18.8/fs/ext3/extents.c	2007-07-17 11:08:59.000000000 +0200
+@@ -0,0 +1,2272 @@
++/*
++ * Copyright (c) 2003-2006, Cluster File Systems, Inc, info@clusterfs.com
++ * Written by Alex Tomas <alex@clusterfs.com>
++ *
++ * Architecture independence:
++ *   Copyright (c) 2005, Bull S.A.
++ *   Written by Pierre Peiffer <pierre.peiffer@bull.net>
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License version 2 as
++ * published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software
++ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-
++ */
++
++/*
++ * Extents support for EXT3
++ *
++ * TODO:
++ *   - ext3*_error() should be used in some situations
++ *   - analyze all BUG()/BUG_ON(), use -EIO where appropriate
++ *   - smart tree reduction
++ */
++
++#include <linux/module.h>
++#include <linux/fs.h>
++#include <linux/time.h>
++#include <linux/ext3_jbd.h>
++#include <linux/jbd.h>
++#include <linux/smp_lock.h>
++#include <linux/highuid.h>
++#include <linux/pagemap.h>
++#include <linux/quotaops.h>
++#include <linux/string.h>
++#include <linux/slab.h>
++#include <linux/ext3_extents.h>
++#include <asm/uaccess.h>
++
++
++static handle_t *ext3_ext_journal_restart(handle_t *handle, int needed)
++{
++	int err;
++
++	if (handle->h_buffer_credits > needed)
++		return handle;
++	if (!ext3_journal_extend(handle, needed))
++		return handle;
++	err = ext3_journal_restart(handle, needed);
++
++	return handle;
++}
++
++/*
++ * could return:
++ *  - EROFS
++ *  - ENOMEM
++ */
++static int ext3_ext_get_access(handle_t *handle, struct inode *inode,
++				struct ext3_ext_path *path)
++{
++	if (path->p_bh) {
++		/* path points to block */
++		return ext3_journal_get_write_access(handle, path->p_bh);
++	}
++	/* path points to leaf/index in inode body */
++	/* we use in-core data, no need to protect them */
++	return 0;
++}
++
++/*
++ * could return:
++ *  - EROFS
++ *  - ENOMEM
++ *  - EIO
++ */
++static int ext3_ext_dirty(handle_t *handle, struct inode *inode,
++				struct ext3_ext_path *path)
++{
++	int err;
++	if (path->p_bh) {
++		/* path points to block */
++		err = ext3_journal_dirty_metadata(handle, path->p_bh);
++	} else {
++		/* path points to leaf/index in inode body */
++		err = ext3_mark_inode_dirty(handle, inode);
++	}
++	return err;
++}
++
++static int ext3_ext_find_goal(struct inode *inode,
++				struct ext3_ext_path *path,
++				unsigned long block)
++{
++	struct ext3_inode_info *ei = EXT3_I(inode);
++	unsigned long bg_start;
++	unsigned long colour;
++	int depth;
++
++	if (path) {
++		struct ext3_extent *ex;
++		depth = path->p_depth;
++
++		/* try to predict block placement */
++		if ((ex = path[depth].p_ext))
++			return le32_to_cpu(ex->ee_start)
++				+ (block - le32_to_cpu(ex->ee_block));
++
++		/* it looks index is empty
++		 * try to find starting from index itself */
++		if (path[depth].p_bh)
++			return path[depth].p_bh->b_blocknr;
++	}
++
++	/* OK.
use inode's group */ ++ bg_start = (ei->i_block_group * EXT3_BLOCKS_PER_GROUP(inode->i_sb)) + ++ le32_to_cpu(EXT3_SB(inode->i_sb)->s_es->s_first_data_block); ++ colour = (current->pid % 16) * ++ (EXT3_BLOCKS_PER_GROUP(inode->i_sb) / 16); ++ return bg_start + colour + block; ++} ++ ++static int ++ext3_ext_new_block(handle_t *handle, struct inode *inode, ++ struct ext3_ext_path *path, ++ struct ext3_extent *ex, int *err) ++{ ++ int goal, newblock; ++ ++ goal = ext3_ext_find_goal(inode, path, le32_to_cpu(ex->ee_block)); ++ newblock = ext3_new_block(handle, inode, goal, err); ++ return newblock; ++} ++ ++static inline int ext3_ext_space_block(struct inode *inode) ++{ ++ int size; ++ ++ size = (inode->i_sb->s_blocksize - sizeof(struct ext3_extent_header)) ++ / sizeof(struct ext3_extent); ++#ifdef AGRESSIVE_TEST ++ if (size > 6) ++ size = 6; ++#endif ++ return size; ++} ++ ++static inline int ext3_ext_space_block_idx(struct inode *inode) ++{ ++ int size; ++ ++ size = (inode->i_sb->s_blocksize - sizeof(struct ext3_extent_header)) ++ / sizeof(struct ext3_extent_idx); ++#ifdef AGRESSIVE_TEST ++ if (size > 5) ++ size = 5; ++#endif ++ return size; ++} ++ ++static inline int ext3_ext_space_root(struct inode *inode) ++{ ++ int size; ++ ++ size = sizeof(EXT3_I(inode)->i_data); ++ size -= sizeof(struct ext3_extent_header); ++ size /= sizeof(struct ext3_extent); ++#ifdef AGRESSIVE_TEST ++ if (size > 3) ++ size = 3; ++#endif ++ return size; ++} ++ ++static inline int ext3_ext_space_root_idx(struct inode *inode) ++{ ++ int size; ++ ++ size = sizeof(EXT3_I(inode)->i_data); ++ size -= sizeof(struct ext3_extent_header); ++ size /= sizeof(struct ext3_extent_idx); ++#ifdef AGRESSIVE_TEST ++ if (size > 4) ++ size = 4; ++#endif ++ return size; ++} ++ ++static inline int ++ext3_ext_max_entries(struct inode *inode, int depth) ++{ ++ int max; ++ ++ if (depth == ext_depth(inode)) { ++ if (depth == 0) ++ max = ext3_ext_space_root(inode); ++ else ++ max = ext3_ext_space_root_idx(inode); ++ } else { ++ if (depth == 0) ++ max = ext3_ext_space_block(inode); ++ else ++ max = ext3_ext_space_block_idx(inode); ++ } ++ ++ return max; ++} ++ ++static int __ext3_ext_check_header(const char *function, int line, struct inode *inode, ++ struct ext3_extent_header *eh, ++ int depth) ++{ ++ const char *error_msg = NULL; ++ int max = 0; ++ ++ if (unlikely(eh->eh_magic != cpu_to_le16(EXT3_EXT_MAGIC))) { ++ error_msg = "invalid magic"; ++ goto corrupted; ++ } ++ if (unlikely(le16_to_cpu(eh->eh_depth) != depth)) { ++ error_msg = "unexpected eh_depth"; ++ goto corrupted; ++ } ++ if (unlikely(eh->eh_max == 0)) { ++ error_msg = "invalid eh_max"; ++ goto corrupted; ++ } ++ max = ext3_ext_max_entries(inode, depth); ++#ifdef AGRESSIVE_TEST ++ if (eh->eh_max > 3) { ++ /* inode probably got extent without defining AGRESSIVE_TEST */ ++ max = eh->eh_max; ++ } ++#endif ++ if (unlikely(le16_to_cpu(eh->eh_max) > max)) { ++ error_msg = "too large eh_max"; ++ goto corrupted; ++ } ++ if (unlikely(le16_to_cpu(eh->eh_entries) > le16_to_cpu(eh->eh_max))) { ++ error_msg = "invalid eh_entries"; ++ goto corrupted; ++ } ++ return 0; ++ ++corrupted: ++ ext3_error(inode->i_sb, function, ++ ":%d: bad header in inode #%lu: %s - magic %x, " ++ "entries %u, max %u(%u), depth %u(%u)", line, ++ inode->i_ino, error_msg, le16_to_cpu(eh->eh_magic), ++ le16_to_cpu(eh->eh_entries), le16_to_cpu(eh->eh_max), ++ max, le16_to_cpu(eh->eh_depth), depth); ++ ++ return -EIO; ++} ++ ++#define ext3_ext_check_header(inode,eh,depth) \ ++ 
__ext3_ext_check_header(__FUNCTION__,__LINE__,inode,eh,depth) ++ ++#ifdef EXT_DEBUG ++static void ext3_ext_show_path(struct inode *inode, struct ext3_ext_path *path) ++{ ++ int k, l = path->p_depth; ++ ++ ext_debug(inode, "path:"); ++ for (k = 0; k <= l; k++, path++) { ++ if (path->p_idx) { ++ ext_debug(inode, " %d->%d", le32_to_cpu(path->p_idx->ei_block), ++ le32_to_cpu(path->p_idx->ei_leaf)); ++ } else if (path->p_ext) { ++ ext_debug(inode, " %d:%d:%d", ++ le32_to_cpu(path->p_ext->ee_block), ++ le16_to_cpu(path->p_ext->ee_len), ++ le32_to_cpu(path->p_ext->ee_start)); ++ } else ++ ext_debug(inode, " []"); ++ } ++ ext_debug(inode, "\n"); ++} ++ ++static void ext3_ext_show_leaf(struct inode *inode, struct ext3_ext_path *path) ++{ ++ int depth = ext_depth(inode); ++ struct ext3_extent_header *eh; ++ struct ext3_extent *ex; ++ int i; ++ ++ if (!path) ++ return; ++ ++ eh = path[depth].p_hdr; ++ ex = EXT_FIRST_EXTENT(eh); ++ ++ for (i = 0; i < le16_to_cpu(eh->eh_entries); i++, ex++) { ++ ext_debug(inode, "%d:%d:%d ", le32_to_cpu(ex->ee_block), ++ le16_to_cpu(ex->ee_len), ++ le32_to_cpu(ex->ee_start)); ++ } ++ ext_debug(inode, "\n"); ++} ++#else ++#define ext3_ext_show_path(inode,path) ++#define ext3_ext_show_leaf(inode,path) ++#endif ++ ++static void ext3_ext_drop_refs(struct ext3_ext_path *path) ++{ ++ int depth = path->p_depth; ++ int i; ++ ++ for (i = 0; i <= depth; i++, path++) ++ if (path->p_bh) { ++ brelse(path->p_bh); ++ path->p_bh = NULL; ++ } ++} ++ ++/* ++ * binary search for closest index by given block ++ * the header must be checked before calling this ++ */ ++static void ++ext3_ext_binsearch_idx(struct inode *inode, struct ext3_ext_path *path, int block) ++{ ++ struct ext3_extent_header *eh = path->p_hdr; ++ struct ext3_extent_idx *r, *l, *m; ++ ++ ext_debug(inode, "binsearch for %d(idx): ", block); ++ ++ l = EXT_FIRST_INDEX(eh) + 1; ++ r = EXT_FIRST_INDEX(eh) + le16_to_cpu(eh->eh_entries) - 1; ++ while (l <= r) { ++ m = l + (r - l) / 2; ++ if (block < le32_to_cpu(m->ei_block)) ++ r = m - 1; ++ else ++ l = m + 1; ++ ext_debug(inode, "%p(%u):%p(%u):%p(%u) ", l, l->ei_block, ++ m, m->ei_block, r, r->ei_block); ++ } ++ ++ path->p_idx = l - 1; ++ ext_debug(inode, " -> %d->%d ", le32_to_cpu(path->p_idx->ei_block), ++ le32_to_cpu(path->p_idx->ei_leaf)); ++ ++#ifdef CHECK_BINSEARCH ++ { ++ struct ext3_extent_idx *chix, *ix; ++ int k; ++ ++ chix = ix = EXT_FIRST_INDEX(eh); ++ for (k = 0; k < le16_to_cpu(eh->eh_entries); k++, ix++) { ++ if (k != 0 && ++ le32_to_cpu(ix->ei_block) <= le32_to_cpu(ix[-1].ei_block)) { ++ printk("k=%d, ix=0x%p, first=0x%p\n", k, ++ ix, EXT_FIRST_INDEX(eh)); ++ printk("%u <= %u\n", ++ le32_to_cpu(ix->ei_block), ++ le32_to_cpu(ix[-1].ei_block)); ++ } ++ BUG_ON(k && le32_to_cpu(ix->ei_block) ++ <= le32_to_cpu(ix[-1].ei_block)); ++ if (block < le32_to_cpu(ix->ei_block)) ++ break; ++ chix = ix; ++ } ++ BUG_ON(chix != path->p_idx); ++ } ++#endif ++ ++} ++ ++/* ++ * binary search for closest extent by given block ++ * the header must be checked before calling this ++ */ ++static void ++ext3_ext_binsearch(struct inode *inode, struct ext3_ext_path *path, int block) ++{ ++ struct ext3_extent_header *eh = path->p_hdr; ++ struct ext3_extent *r, *l, *m; ++ ++ if (eh->eh_entries == 0) { ++ /* ++ * this leaf is empty yet: ++ * we get such a leaf in split/add case ++ */ ++ return; ++ } ++ ++ ext_debug(inode, "binsearch for %d: ", block); ++ ++ l = EXT_FIRST_EXTENT(eh) + 1; ++ r = EXT_FIRST_EXTENT(eh) + le16_to_cpu(eh->eh_entries) - 1; ++ ++ while (l <= r) { ++ m = l + (r - l) / 
2; ++ if (block < le32_to_cpu(m->ee_block)) ++ r = m - 1; ++ else ++ l = m + 1; ++ ext_debug(inode, "%p(%u):%p(%u):%p(%u) ", l, l->ee_block, ++ m, m->ee_block, r, r->ee_block); ++ } ++ ++ path->p_ext = l - 1; ++ ext_debug(inode, " -> %d:%d:%d ", ++ le32_to_cpu(path->p_ext->ee_block), ++ le32_to_cpu(path->p_ext->ee_start), ++ le16_to_cpu(path->p_ext->ee_len)); ++ ++#ifdef CHECK_BINSEARCH ++ { ++ struct ext3_extent *chex, *ex; ++ int k; ++ ++ chex = ex = EXT_FIRST_EXTENT(eh); ++ for (k = 0; k < le16_to_cpu(eh->eh_entries); k++, ex++) { ++ BUG_ON(k && le32_to_cpu(ex->ee_block) ++ <= le32_to_cpu(ex[-1].ee_block)); ++ if (block < le32_to_cpu(ex->ee_block)) ++ break; ++ chex = ex; ++ } ++ BUG_ON(chex != path->p_ext); ++ } ++#endif ++ ++} ++ ++int ext3_ext_tree_init(handle_t *handle, struct inode *inode) ++{ ++ struct ext3_extent_header *eh; ++ ++ eh = ext_inode_hdr(inode); ++ eh->eh_depth = 0; ++ eh->eh_entries = 0; ++ eh->eh_magic = cpu_to_le16(EXT3_EXT_MAGIC); ++ eh->eh_max = cpu_to_le16(ext3_ext_space_root(inode)); ++ ext3_mark_inode_dirty(handle, inode); ++ ext3_ext_invalidate_cache(inode); ++ return 0; ++} ++ ++struct ext3_ext_path * ++ext3_ext_find_extent(struct inode *inode, int block, struct ext3_ext_path *path) ++{ ++ struct ext3_extent_header *eh; ++ struct buffer_head *bh; ++ short int depth, i, ppos = 0, alloc = 0; ++ ++ eh = ext_inode_hdr(inode); ++ i = depth = ext_depth(inode); ++ if (ext3_ext_check_header(inode, eh, depth)) ++ return ERR_PTR(-EIO); ++ ++ /* account possible depth increase */ ++ if (!path) { ++ path = kmalloc(sizeof(struct ext3_ext_path) * (depth + 2), ++ GFP_NOFS); ++ if (!path) ++ return ERR_PTR(-ENOMEM); ++ alloc = 1; ++ } ++ memset(path, 0, sizeof(struct ext3_ext_path) * (depth + 1)); ++ path[0].p_hdr = eh; ++ ++ /* walk through the tree */ ++ while (i) { ++ ext_debug(inode, "depth %d: num %d, max %d\n", ++ ppos, le16_to_cpu(eh->eh_entries), le16_to_cpu(eh->eh_max)); ++ ++ ext3_ext_binsearch_idx(inode, path + ppos, block); ++ path[ppos].p_block = le32_to_cpu(path[ppos].p_idx->ei_leaf); ++ path[ppos].p_depth = i; ++ path[ppos].p_ext = NULL; ++ ++ bh = sb_bread(inode->i_sb, path[ppos].p_block); ++ if (!bh) ++ goto err; ++ ++ eh = ext_block_hdr(bh); ++ ppos++; ++ BUG_ON(ppos > depth); ++ path[ppos].p_bh = bh; ++ path[ppos].p_hdr = eh; ++ i--; ++ ++ if (ext3_ext_check_header(inode, eh, i)) ++ goto err; ++ } ++ ++ path[ppos].p_depth = i; ++ path[ppos].p_hdr = eh; ++ path[ppos].p_ext = NULL; ++ path[ppos].p_idx = NULL; ++ ++ /* find extent */ ++ ext3_ext_binsearch(inode, path + ppos, block); ++ ++ ext3_ext_show_path(inode, path); ++ ++ return path; ++ ++err: ++ ext3_ext_drop_refs(path); ++ if (alloc) ++ kfree(path); ++ return ERR_PTR(-EIO); ++} ++ ++/* ++ * insert new index [logical;ptr] into the block at cupr ++ * it check where to insert: before curp or after curp ++ */ ++static int ext3_ext_insert_index(handle_t *handle, struct inode *inode, ++ struct ext3_ext_path *curp, ++ int logical, int ptr) ++{ ++ struct ext3_extent_idx *ix; ++ int len, err; ++ ++ if ((err = ext3_ext_get_access(handle, inode, curp))) ++ return err; ++ ++ BUG_ON(logical == le32_to_cpu(curp->p_idx->ei_block)); ++ len = EXT_MAX_INDEX(curp->p_hdr) - curp->p_idx; ++ if (logical > le32_to_cpu(curp->p_idx->ei_block)) { ++ /* insert after */ ++ if (curp->p_idx != EXT_LAST_INDEX(curp->p_hdr)) { ++ len = (len - 1) * sizeof(struct ext3_extent_idx); ++ len = len < 0 ? 0 : len; ++ ext_debug(inode, "insert new index %d after: %d. 
" ++ "move %d from 0x%p to 0x%p\n", ++ logical, ptr, len, ++ (curp->p_idx + 1), (curp->p_idx + 2)); ++ memmove(curp->p_idx + 2, curp->p_idx + 1, len); ++ } ++ ix = curp->p_idx + 1; ++ } else { ++ /* insert before */ ++ len = len * sizeof(struct ext3_extent_idx); ++ len = len < 0 ? 0 : len; ++ ext_debug(inode, "insert new index %d before: %d. " ++ "move %d from 0x%p to 0x%p\n", ++ logical, ptr, len, ++ curp->p_idx, (curp->p_idx + 1)); ++ memmove(curp->p_idx + 1, curp->p_idx, len); ++ ix = curp->p_idx; ++ } ++ ++ ix->ei_block = cpu_to_le32(logical); ++ ix->ei_leaf = cpu_to_le32(ptr); ++ ix->ei_leaf_hi = ix->ei_unused = 0; ++ curp->p_hdr->eh_entries = cpu_to_le16(le16_to_cpu(curp->p_hdr->eh_entries)+1); ++ ++ BUG_ON(le16_to_cpu(curp->p_hdr->eh_entries) ++ > le16_to_cpu(curp->p_hdr->eh_max)); ++ BUG_ON(ix > EXT_LAST_INDEX(curp->p_hdr)); ++ ++ err = ext3_ext_dirty(handle, inode, curp); ++ ext3_std_error(inode->i_sb, err); ++ ++ return err; ++} ++ ++/* ++ * routine inserts new subtree into the path, using free index entry ++ * at depth 'at: ++ * - allocates all needed blocks (new leaf and all intermediate index blocks) ++ * - makes decision where to split ++ * - moves remaining extens and index entries (right to the split point) ++ * into the newly allocated blocks ++ * - initialize subtree ++ */ ++static int ext3_ext_split(handle_t *handle, struct inode *inode, ++ struct ext3_ext_path *path, ++ struct ext3_extent *newext, int at) ++{ ++ struct buffer_head *bh = NULL; ++ int depth = ext_depth(inode); ++ struct ext3_extent_header *neh; ++ struct ext3_extent_idx *fidx; ++ struct ext3_extent *ex; ++ int i = at, k, m, a; ++ unsigned long newblock, oldblock; ++ __le32 border; ++ int *ablocks = NULL; /* array of allocated blocks */ ++ int err = 0; ++ ++ /* make decision: where to split? */ ++ /* FIXME: now desicion is simplest: at current extent */ ++ ++ /* if current leaf will be splitted, then we should use ++ * border from split point */ ++ BUG_ON(path[depth].p_ext > EXT_MAX_EXTENT(path[depth].p_hdr)); ++ if (path[depth].p_ext != EXT_MAX_EXTENT(path[depth].p_hdr)) { ++ border = path[depth].p_ext[1].ee_block; ++ ext_debug(inode, "leaf will be splitted." ++ " next leaf starts at %d\n", ++ le32_to_cpu(border)); ++ } else { ++ border = newext->ee_block; ++ ext_debug(inode, "leaf will be added." ++ " next leaf starts at %d\n", ++ le32_to_cpu(border)); ++ } ++ ++ /* ++ * if error occurs, then we break processing ++ * and turn filesystem read-only. so, index won't ++ * be inserted and tree will be in consistent ++ * state. 
next mount will repair buffers too ++ */ ++ ++ /* ++ * get array to track all allocated blocks ++ * we need this to handle errors and free blocks ++ * upon them ++ */ ++ ablocks = kmalloc(sizeof(unsigned long) * depth, GFP_NOFS); ++ if (!ablocks) ++ return -ENOMEM; ++ memset(ablocks, 0, sizeof(unsigned long) * depth); ++ ++ /* allocate all needed blocks */ ++ ext_debug(inode, "allocate %d blocks for indexes/leaf\n", depth - at); ++ for (a = 0; a < depth - at; a++) { ++ newblock = ext3_ext_new_block(handle, inode, path, newext, &err); ++ if (newblock == 0) ++ goto cleanup; ++ ablocks[a] = newblock; ++ } ++ ++ /* initialize new leaf */ ++ newblock = ablocks[--a]; ++ BUG_ON(newblock == 0); ++ bh = sb_getblk(inode->i_sb, newblock); ++ if (!bh) { ++ err = -EIO; ++ goto cleanup; ++ } ++ lock_buffer(bh); ++ ++ if ((err = ext3_journal_get_create_access(handle, bh))) ++ goto cleanup; ++ ++ neh = ext_block_hdr(bh); ++ neh->eh_entries = 0; ++ neh->eh_max = cpu_to_le16(ext3_ext_space_block(inode)); ++ neh->eh_magic = cpu_to_le16(EXT3_EXT_MAGIC); ++ neh->eh_depth = 0; ++ ex = EXT_FIRST_EXTENT(neh); ++ ++ /* move remain of path[depth] to the new leaf */ ++ BUG_ON(path[depth].p_hdr->eh_entries != path[depth].p_hdr->eh_max); ++ /* start copy from next extent */ ++ /* TODO: we could do it by single memmove */ ++ m = 0; ++ path[depth].p_ext++; ++ while (path[depth].p_ext <= ++ EXT_MAX_EXTENT(path[depth].p_hdr)) { ++ ext_debug(inode, "move %d:%d:%d in new leaf %lu\n", ++ le32_to_cpu(path[depth].p_ext->ee_block), ++ le32_to_cpu(path[depth].p_ext->ee_start), ++ le16_to_cpu(path[depth].p_ext->ee_len), ++ newblock); ++ /*memmove(ex++, path[depth].p_ext++, ++ sizeof(struct ext3_extent)); ++ neh->eh_entries++;*/ ++ path[depth].p_ext++; ++ m++; ++ } ++ if (m) { ++ memmove(ex, path[depth].p_ext-m, sizeof(struct ext3_extent)*m); ++ neh->eh_entries = cpu_to_le16(le16_to_cpu(neh->eh_entries)+m); ++ } ++ ++ set_buffer_uptodate(bh); ++ unlock_buffer(bh); ++ ++ if ((err = ext3_journal_dirty_metadata(handle, bh))) ++ goto cleanup; ++ brelse(bh); ++ bh = NULL; ++ ++ /* correct old leaf */ ++ if (m) { ++ if ((err = ext3_ext_get_access(handle, inode, path + depth))) ++ goto cleanup; ++ path[depth].p_hdr->eh_entries = ++ cpu_to_le16(le16_to_cpu(path[depth].p_hdr->eh_entries)-m); ++ if ((err = ext3_ext_dirty(handle, inode, path + depth))) ++ goto cleanup; ++ ++ } ++ ++ /* create intermediate indexes */ ++ k = depth - at - 1; ++ BUG_ON(k < 0); ++ if (k) ++ ext_debug(inode, "create %d intermediate indices\n", k); ++ /* insert new index into current index block */ ++ /* current depth stored in i var */ ++ i = depth - 1; ++ while (k--) { ++ oldblock = newblock; ++ newblock = ablocks[--a]; ++ bh = sb_getblk(inode->i_sb, newblock); ++ if (!bh) { ++ err = -EIO; ++ goto cleanup; ++ } ++ lock_buffer(bh); ++ ++ if ((err = ext3_journal_get_create_access(handle, bh))) ++ goto cleanup; ++ ++ neh = ext_block_hdr(bh); ++ neh->eh_entries = cpu_to_le16(1); ++ neh->eh_magic = cpu_to_le16(EXT3_EXT_MAGIC); ++ neh->eh_max = cpu_to_le16(ext3_ext_space_block_idx(inode)); ++ neh->eh_depth = cpu_to_le16(depth - i); ++ fidx = EXT_FIRST_INDEX(neh); ++ fidx->ei_block = border; ++ fidx->ei_leaf = cpu_to_le32(oldblock); ++ fidx->ei_leaf_hi = fidx->ei_unused = 0; ++ ++ ext_debug(inode, "int.index at %d (block %lu): %lu -> %lu\n", i, ++ newblock, (unsigned long) le32_to_cpu(border), ++ oldblock); ++ /* copy indexes */ ++ m = 0; ++ path[i].p_idx++; ++ ++ ext_debug(inode, "cur 0x%p, last 0x%p\n", path[i].p_idx, ++ EXT_MAX_INDEX(path[i].p_hdr)); ++ 
BUG_ON(EXT_MAX_INDEX(path[i].p_hdr) != ++ EXT_LAST_INDEX(path[i].p_hdr)); ++ while (path[i].p_idx <= EXT_MAX_INDEX(path[i].p_hdr)) { ++ ext_debug(inode, "%d: move %d:%d in new index %lu\n", i, ++ le32_to_cpu(path[i].p_idx->ei_block), ++ le32_to_cpu(path[i].p_idx->ei_leaf), ++ newblock); ++ /*memmove(++fidx, path[i].p_idx++, ++ sizeof(struct ext3_extent_idx)); ++ neh->eh_entries++; ++ BUG_ON(neh->eh_entries > neh->eh_max);*/ ++ path[i].p_idx++; ++ m++; ++ } ++ if (m) { ++ memmove(++fidx, path[i].p_idx - m, ++ sizeof(struct ext3_extent_idx) * m); ++ neh->eh_entries = ++ cpu_to_le16(le16_to_cpu(neh->eh_entries) + m); ++ } ++ set_buffer_uptodate(bh); ++ unlock_buffer(bh); ++ ++ if ((err = ext3_journal_dirty_metadata(handle, bh))) ++ goto cleanup; ++ brelse(bh); ++ bh = NULL; ++ ++ /* correct old index */ ++ if (m) { ++ err = ext3_ext_get_access(handle, inode, path + i); ++ if (err) ++ goto cleanup; ++ path[i].p_hdr->eh_entries = cpu_to_le16(le16_to_cpu(path[i].p_hdr->eh_entries)-m); ++ err = ext3_ext_dirty(handle, inode, path + i); ++ if (err) ++ goto cleanup; ++ } ++ ++ i--; ++ } ++ ++ /* insert new index */ ++ if (err) ++ goto cleanup; ++ ++ err = ext3_ext_insert_index(handle, inode, path + at, ++ le32_to_cpu(border), newblock); ++ ++cleanup: ++ if (bh) { ++ if (buffer_locked(bh)) ++ unlock_buffer(bh); ++ brelse(bh); ++ } ++ ++ if (err) { ++ /* free all allocated blocks in error case */ ++ for (i = 0; i < depth; i++) { ++ if (!ablocks[i]) ++ continue; ++ ext3_free_blocks(handle, inode, ablocks[i], 1); ++ } ++ } ++ kfree(ablocks); ++ ++ return err; ++} ++ ++/* ++ * routine implements tree growing procedure: ++ * - allocates new block ++ * - moves top-level data (index block or leaf) into the new block ++ * - initialize new top-level, creating index that points to the ++ * just created block ++ */ ++static int ext3_ext_grow_indepth(handle_t *handle, struct inode *inode, ++ struct ext3_ext_path *path, ++ struct ext3_extent *newext) ++{ ++ struct ext3_ext_path *curp = path; ++ struct ext3_extent_header *neh; ++ struct ext3_extent_idx *fidx; ++ struct buffer_head *bh; ++ unsigned long newblock; ++ int err = 0; ++ ++ newblock = ext3_ext_new_block(handle, inode, path, newext, &err); ++ if (newblock == 0) ++ return err; ++ ++ bh = sb_getblk(inode->i_sb, newblock); ++ if (!bh) { ++ err = -EIO; ++ ext3_std_error(inode->i_sb, err); ++ return err; ++ } ++ lock_buffer(bh); ++ ++ if ((err = ext3_journal_get_create_access(handle, bh))) { ++ unlock_buffer(bh); ++ goto out; ++ } ++ ++ /* move top-level index/leaf into new block */ ++ memmove(bh->b_data, curp->p_hdr, sizeof(EXT3_I(inode)->i_data)); ++ ++ /* set size of new block */ ++ neh = ext_block_hdr(bh); ++ /* old root could have indexes or leaves ++ * so calculate e_max right way */ ++ if (ext_depth(inode)) ++ neh->eh_max = cpu_to_le16(ext3_ext_space_block_idx(inode)); ++ else ++ neh->eh_max = cpu_to_le16(ext3_ext_space_block(inode)); ++ neh->eh_magic = cpu_to_le16(EXT3_EXT_MAGIC); ++ set_buffer_uptodate(bh); ++ unlock_buffer(bh); ++ ++ if ((err = ext3_journal_dirty_metadata(handle, bh))) ++ goto out; ++ ++ /* create index in new top-level index: num,max,pointer */ ++ if ((err = ext3_ext_get_access(handle, inode, curp))) ++ goto out; ++ ++ curp->p_hdr->eh_magic = cpu_to_le16(EXT3_EXT_MAGIC); ++ curp->p_hdr->eh_max = cpu_to_le16(ext3_ext_space_root_idx(inode)); ++ curp->p_hdr->eh_entries = cpu_to_le16(1); ++ curp->p_idx = EXT_FIRST_INDEX(curp->p_hdr); ++ /* FIXME: it works, but actually path[0] can be index */ ++ curp->p_idx->ei_block = 
EXT_FIRST_EXTENT(path[0].p_hdr)->ee_block; ++ curp->p_idx->ei_leaf = cpu_to_le32(newblock); ++ curp->p_idx->ei_leaf_hi = curp->p_idx->ei_unused = 0; ++ ++ neh = ext_inode_hdr(inode); ++ fidx = EXT_FIRST_INDEX(neh); ++ ext_debug(inode, "new root: num %d(%d), lblock %d, ptr %d\n", ++ le16_to_cpu(neh->eh_entries), le16_to_cpu(neh->eh_max), ++ le32_to_cpu(fidx->ei_block), le32_to_cpu(fidx->ei_leaf)); ++ ++ neh->eh_depth = cpu_to_le16(path->p_depth + 1); ++ err = ext3_ext_dirty(handle, inode, curp); ++out: ++ brelse(bh); ++ ++ return err; ++} ++ ++/* ++ * routine finds empty index and adds new leaf. if no free index found ++ * then it requests in-depth growing ++ */ ++static int ext3_ext_create_new_leaf(handle_t *handle, struct inode *inode, ++ struct ext3_ext_path *path, ++ struct ext3_extent *newext) ++{ ++ struct ext3_ext_path *curp; ++ int depth, i, err = 0; ++ ++repeat: ++ i = depth = ext_depth(inode); ++ ++ /* walk up to the tree and look for free index entry */ ++ curp = path + depth; ++ while (i > 0 && !EXT_HAS_FREE_INDEX(curp)) { ++ i--; ++ curp--; ++ } ++ ++ /* we use already allocated block for index block ++ * so, subsequent data blocks should be contigoues */ ++ if (EXT_HAS_FREE_INDEX(curp)) { ++ /* if we found index with free entry, then use that ++ * entry: create all needed subtree and add new leaf */ ++ err = ext3_ext_split(handle, inode, path, newext, i); ++ if (err) ++ goto out; ++ ++ /* refill path */ ++ ext3_ext_drop_refs(path); ++ path = ext3_ext_find_extent(inode, ++ le32_to_cpu(newext->ee_block), ++ path); ++ if (IS_ERR(path)) ++ err = PTR_ERR(path); ++ } else { ++ /* tree is full, time to grow in depth */ ++ err = ext3_ext_grow_indepth(handle, inode, path, newext); ++ if (err) ++ goto out; ++ ++ /* refill path */ ++ ext3_ext_drop_refs(path); ++ path = ext3_ext_find_extent(inode, ++ le32_to_cpu(newext->ee_block), ++ path); ++ if (IS_ERR(path)) { ++ err = PTR_ERR(path); ++ goto out; ++ } ++ ++ /* ++ * only first (depth 0 -> 1) produces free space ++ * in all other cases we have to split growed tree ++ */ ++ depth = ext_depth(inode); ++ if (path[depth].p_hdr->eh_entries == path[depth].p_hdr->eh_max) { ++ /* now we need split */ ++ goto repeat; ++ } ++ } ++ ++out: ++ return err; ++} ++ ++/* ++ * search the closest allocated block to the left for *logical ++ * and returns it at @logical + it's physical address at @phys ++ * if *logical is the smallest allocated block, the function ++ * returns 0 at @phys ++ * return value contains 0 (success) or error code ++ */ ++int ++ext3_ext_search_left(struct inode *inode, struct ext3_ext_path *path, ++ unsigned long *logical, unsigned long *phys) ++{ ++ struct ext3_extent_idx *ix; ++ struct ext3_extent *ex; ++ int depth; ++ ++ BUG_ON(path == NULL); ++ depth = path->p_depth; ++ *phys = 0; ++ ++ if (depth == 0 && path->p_ext == NULL) ++ return 0; ++ ++ /* usually extent in the path covers blocks smaller ++ * then *logical, but it can be that extent is the ++ * first one in the file */ ++ ++ ex = path[depth].p_ext; ++ if (*logical < le32_to_cpu(ex->ee_block)) { ++ BUG_ON(EXT_FIRST_EXTENT(path[depth].p_hdr) != ex); ++ while (--depth >= 0) { ++ ix = path[depth].p_idx; ++ BUG_ON(ix != EXT_FIRST_INDEX(path[depth].p_hdr)); ++ } ++ return 0; ++ } ++ ++ BUG_ON(*logical < le32_to_cpu(ex->ee_block) + le16_to_cpu(ex->ee_len)); ++ ++ *logical = le32_to_cpu(ex->ee_block) + le16_to_cpu(ex->ee_len) - 1; ++ *phys = le32_to_cpu(ex->ee_start) + le16_to_cpu(ex->ee_len) - 1; ++ return 0; ++} ++EXPORT_SYMBOL(ext3_ext_search_left); ++ ++/* ++ * search the 
closest allocated block to the right for *logical ++ * and returns it at @logical + it's physical address at @phys ++ * if *logical is the smallest allocated block, the function ++ * returns 0 at @phys ++ * return value contains 0 (success) or error code ++ */ ++int ++ext3_ext_search_right(struct inode *inode, struct ext3_ext_path *path, ++ unsigned long *logical, unsigned long *phys) ++{ ++ struct buffer_head *bh = NULL; ++ struct ext3_extent_header *eh; ++ struct ext3_extent_idx *ix; ++ struct ext3_extent *ex; ++ unsigned long block; ++ int depth; ++ ++ BUG_ON(path == NULL); ++ depth = path->p_depth; ++ *phys = 0; ++ ++ if (depth == 0 && path->p_ext == NULL) ++ return 0; ++ ++ /* usually extent in the path covers blocks smaller ++ * then *logical, but it can be that extent is the ++ * first one in the file */ ++ ++ ex = path[depth].p_ext; ++ if (*logical < le32_to_cpu(ex->ee_block)) { ++ BUG_ON(EXT_FIRST_EXTENT(path[depth].p_hdr) != ex); ++ while (--depth >= 0) { ++ ix = path[depth].p_idx; ++ BUG_ON(ix != EXT_FIRST_INDEX(path[depth].p_hdr)); ++ } ++ *logical = le32_to_cpu(ex->ee_block); ++ *phys = le32_to_cpu(ex->ee_start); ++ return 0; ++ } ++ ++ BUG_ON(*logical < le32_to_cpu(ex->ee_block) + le16_to_cpu(ex->ee_len)); ++ ++ if (ex != EXT_LAST_EXTENT(path[depth].p_hdr)) { ++ /* next allocated block in this leaf */ ++ ex++; ++ *logical = le32_to_cpu(ex->ee_block); ++ *phys = le32_to_cpu(ex->ee_start); ++ return 0; ++ } ++ ++ /* go up and search for index to the right */ ++ while (--depth >= 0) { ++ ix = path[depth].p_idx; ++ if (ix != EXT_LAST_INDEX(path[depth].p_hdr)) ++ break; ++ } ++ ++ if (depth < 0) { ++ /* we've gone up to the root and ++ * found no index to the right */ ++ return 0; ++ } ++ ++ /* we've found index to the right, let's ++ * follow it and find the closest allocated ++ * block to the right */ ++ ix++; ++ block = le32_to_cpu(ix->ei_leaf); ++ while (++depth < path->p_depth) { ++ bh = sb_bread(inode->i_sb, block); ++ if (bh == NULL) ++ return -EIO; ++ eh = ext_block_hdr(bh); ++ if (ext3_ext_check_header(inode, eh, path->p_depth - depth)) { ++ brelse(bh); ++ return -EIO; ++ } ++ ix = EXT_FIRST_INDEX(eh); ++ block = le32_to_cpu(ix->ei_leaf); ++ brelse(bh); ++ } ++ ++ bh = sb_bread(inode->i_sb, block); ++ if (bh == NULL) ++ return -EIO; ++ eh = ext_block_hdr(bh); ++ if (ext3_ext_check_header(inode, eh, 0)) { ++ brelse(bh); ++ return -EIO; ++ } ++ ex = EXT_FIRST_EXTENT(eh); ++ *logical = le32_to_cpu(ex->ee_block); ++ *phys = le32_to_cpu(ex->ee_start); ++ brelse(bh); ++ return 0; ++ ++} ++EXPORT_SYMBOL(ext3_ext_search_right); ++ ++ ++ ++/* ++ * returns allocated block in subsequent extent or EXT_MAX_BLOCK ++ * NOTE: it consider block number from index entry as ++ * allocated block. 
thus, index entries have to be consistent ++ * with leafs ++ */ ++static unsigned long ++ext3_ext_next_allocated_block(struct ext3_ext_path *path) ++{ ++ int depth; ++ ++ BUG_ON(path == NULL); ++ depth = path->p_depth; ++ ++ if (depth == 0 && path->p_ext == NULL) ++ return EXT_MAX_BLOCK; ++ ++ while (depth >= 0) { ++ if (depth == path->p_depth) { ++ /* leaf */ ++ if (path[depth].p_ext != ++ EXT_LAST_EXTENT(path[depth].p_hdr)) ++ return le32_to_cpu(path[depth].p_ext[1].ee_block); ++ } else { ++ /* index */ ++ if (path[depth].p_idx != ++ EXT_LAST_INDEX(path[depth].p_hdr)) ++ return le32_to_cpu(path[depth].p_idx[1].ei_block); ++ } ++ depth--; ++ } ++ ++ return EXT_MAX_BLOCK; ++} ++ ++/* ++ * returns first allocated block from next leaf or EXT_MAX_BLOCK ++ */ ++static unsigned ext3_ext_next_leaf_block(struct inode *inode, ++ struct ext3_ext_path *path) ++{ ++ int depth; ++ ++ BUG_ON(path == NULL); ++ depth = path->p_depth; ++ ++ /* zero-tree has no leaf blocks at all */ ++ if (depth == 0) ++ return EXT_MAX_BLOCK; ++ ++ /* go to index block */ ++ depth--; ++ ++ while (depth >= 0) { ++ if (path[depth].p_idx != ++ EXT_LAST_INDEX(path[depth].p_hdr)) ++ return le32_to_cpu(path[depth].p_idx[1].ei_block); ++ depth--; ++ } ++ ++ return EXT_MAX_BLOCK; ++} ++ ++/* ++ * if leaf gets modified and modified extent is first in the leaf ++ * then we have to correct all indexes above ++ * TODO: do we need to correct tree in all cases? ++ */ ++int ext3_ext_correct_indexes(handle_t *handle, struct inode *inode, ++ struct ext3_ext_path *path) ++{ ++ struct ext3_extent_header *eh; ++ int depth = ext_depth(inode); ++ struct ext3_extent *ex; ++ __le32 border; ++ int k, err = 0; ++ ++ eh = path[depth].p_hdr; ++ ex = path[depth].p_ext; ++ BUG_ON(ex == NULL); ++ BUG_ON(eh == NULL); ++ ++ if (depth == 0) { ++ /* there is no tree at all */ ++ return 0; ++ } ++ ++ if (ex != EXT_FIRST_EXTENT(eh)) { ++ /* we correct tree if first leaf got modified only */ ++ return 0; ++ } ++ ++ /* ++ * TODO: we need correction if border is smaller then current one ++ */ ++ k = depth - 1; ++ border = path[depth].p_ext->ee_block; ++ if ((err = ext3_ext_get_access(handle, inode, path + k))) ++ return err; ++ path[k].p_idx->ei_block = border; ++ if ((err = ext3_ext_dirty(handle, inode, path + k))) ++ return err; ++ ++ while (k--) { ++ /* change all left-side indexes */ ++ if (path[k+1].p_idx != EXT_FIRST_INDEX(path[k+1].p_hdr)) ++ break; ++ if ((err = ext3_ext_get_access(handle, inode, path + k))) ++ break; ++ path[k].p_idx->ei_block = border; ++ if ((err = ext3_ext_dirty(handle, inode, path + k))) ++ break; ++ } ++ ++ return err; ++} ++ ++static int inline ++ext3_can_extents_be_merged(struct inode *inode, struct ext3_extent *ex1, ++ struct ext3_extent *ex2) ++{ ++ /* FIXME: 48bit support */ ++ if (le32_to_cpu(ex1->ee_block) + le16_to_cpu(ex1->ee_len) ++ != le32_to_cpu(ex2->ee_block)) ++ return 0; ++ ++#ifdef AGRESSIVE_TEST ++ if (le16_to_cpu(ex1->ee_len) >= 4) ++ return 0; ++#endif ++ ++ if (le32_to_cpu(ex1->ee_start) + le16_to_cpu(ex1->ee_len) ++ == le32_to_cpu(ex2->ee_start)) ++ return 1; ++ return 0; ++} ++ ++/* ++ * this routine tries to merge requsted extent into the existing ++ * extent or inserts requested extent as new one into the tree, ++ * creating new leaf in no-space case ++ */ ++int ext3_ext_insert_extent(handle_t *handle, struct inode *inode, ++ struct ext3_ext_path *path, ++ struct ext3_extent *newext) ++{ ++ struct ext3_extent_header * eh; ++ struct ext3_extent *ex, *fex; ++ struct ext3_extent *nearex; /* nearest extent */ ++ 
struct ext3_ext_path *npath = NULL; ++ int depth, len, err, next; ++ ++ BUG_ON(newext->ee_len == 0); ++ depth = ext_depth(inode); ++ ex = path[depth].p_ext; ++ BUG_ON(path[depth].p_hdr == NULL); ++ ++ /* try to insert block into found extent and return */ ++ if (ex && ext3_can_extents_be_merged(inode, ex, newext)) { ++ ext_debug(inode, "append %d block to %d:%d (from %d)\n", ++ le16_to_cpu(newext->ee_len), ++ le32_to_cpu(ex->ee_block), ++ le16_to_cpu(ex->ee_len), ++ le32_to_cpu(ex->ee_start)); ++ if ((err = ext3_ext_get_access(handle, inode, path + depth))) ++ return err; ++ ex->ee_len = cpu_to_le16(le16_to_cpu(ex->ee_len) ++ + le16_to_cpu(newext->ee_len)); ++ eh = path[depth].p_hdr; ++ nearex = ex; ++ goto merge; ++ } ++ ++repeat: ++ depth = ext_depth(inode); ++ eh = path[depth].p_hdr; ++ if (le16_to_cpu(eh->eh_entries) < le16_to_cpu(eh->eh_max)) ++ goto has_space; ++ ++ /* probably next leaf has space for us? */ ++ fex = EXT_LAST_EXTENT(eh); ++ next = ext3_ext_next_leaf_block(inode, path); ++ if (le32_to_cpu(newext->ee_block) > le32_to_cpu(fex->ee_block) ++ && next != EXT_MAX_BLOCK) { ++ ext_debug(inode, "next leaf block - %d\n", next); ++ BUG_ON(npath != NULL); ++ npath = ext3_ext_find_extent(inode, next, NULL); ++ if (IS_ERR(npath)) ++ return PTR_ERR(npath); ++ BUG_ON(npath->p_depth != path->p_depth); ++ eh = npath[depth].p_hdr; ++ if (le16_to_cpu(eh->eh_entries) < le16_to_cpu(eh->eh_max)) { ++ ext_debug(inode, "next leaf isnt full(%d)\n", ++ le16_to_cpu(eh->eh_entries)); ++ path = npath; ++ goto repeat; ++ } ++ ext_debug(inode, "next leaf has no free space(%d,%d)\n", ++ le16_to_cpu(eh->eh_entries), le16_to_cpu(eh->eh_max)); ++ } ++ ++ /* ++ * there is no free space in found leaf ++ * we're gonna add new leaf in the tree ++ */ ++ err = ext3_ext_create_new_leaf(handle, inode, path, newext); ++ if (err) ++ goto cleanup; ++ depth = ext_depth(inode); ++ eh = path[depth].p_hdr; ++ ++has_space: ++ nearex = path[depth].p_ext; ++ ++ if ((err = ext3_ext_get_access(handle, inode, path + depth))) ++ goto cleanup; ++ ++ if (!nearex) { ++ /* there is no extent in this leaf, create first one */ ++ ext_debug(inode, "first extent in the leaf: %d:%d:%d\n", ++ le32_to_cpu(newext->ee_block), ++ le32_to_cpu(newext->ee_start), ++ le16_to_cpu(newext->ee_len)); ++ path[depth].p_ext = EXT_FIRST_EXTENT(eh); ++ } else if (le32_to_cpu(newext->ee_block) ++ > le32_to_cpu(nearex->ee_block)) { ++ /* BUG_ON(newext->ee_block == nearex->ee_block); */ ++ if (nearex != EXT_LAST_EXTENT(eh)) { ++ len = EXT_MAX_EXTENT(eh) - nearex; ++ len = (len - 1) * sizeof(struct ext3_extent); ++ len = len < 0 ? 0 : len; ++ ext_debug(inode, "insert %d:%d:%d after: nearest 0x%p, " ++ "move %d from 0x%p to 0x%p\n", ++ le32_to_cpu(newext->ee_block), ++ le32_to_cpu(newext->ee_start), ++ le16_to_cpu(newext->ee_len), ++ nearex, len, nearex + 1, nearex + 2); ++ memmove(nearex + 2, nearex + 1, len); ++ } ++ path[depth].p_ext = nearex + 1; ++ } else { ++ BUG_ON(newext->ee_block == nearex->ee_block); ++ len = (EXT_MAX_EXTENT(eh) - nearex) * sizeof(struct ext3_extent); ++ len = len < 0 ? 
0 : len; ++ ext_debug(inode, "insert %d:%d:%d before: nearest 0x%p, " ++ "move %d from 0x%p to 0x%p\n", ++ le32_to_cpu(newext->ee_block), ++ le32_to_cpu(newext->ee_start), ++ le16_to_cpu(newext->ee_len), ++ nearex, len, nearex + 1, nearex + 2); ++ memmove(nearex + 1, nearex, len); ++ path[depth].p_ext = nearex; ++ } ++ ++ eh->eh_entries = cpu_to_le16(le16_to_cpu(eh->eh_entries)+1); ++ nearex = path[depth].p_ext; ++ nearex->ee_block = newext->ee_block; ++ nearex->ee_start = newext->ee_start; ++ nearex->ee_len = newext->ee_len; ++ /* FIXME: support for large fs */ ++ nearex->ee_start_hi = 0; ++ ++merge: ++ /* try to merge extents to the right */ ++ while (nearex < EXT_LAST_EXTENT(eh)) { ++ if (!ext3_can_extents_be_merged(inode, nearex, nearex + 1)) ++ break; ++ /* merge with next extent! */ ++ nearex->ee_len = cpu_to_le16(le16_to_cpu(nearex->ee_len) ++ + le16_to_cpu(nearex[1].ee_len)); ++ if (nearex + 1 < EXT_LAST_EXTENT(eh)) { ++ len = (EXT_LAST_EXTENT(eh) - nearex - 1) ++ * sizeof(struct ext3_extent); ++ memmove(nearex + 1, nearex + 2, len); ++ } ++ eh->eh_entries = cpu_to_le16(le16_to_cpu(eh->eh_entries)-1); ++ BUG_ON(eh->eh_entries == 0); ++ } ++ ++ /* try to merge extents to the left */ ++ ++ /* time to correct all indexes above */ ++ err = ext3_ext_correct_indexes(handle, inode, path); ++ if (err) ++ goto cleanup; ++ ++ err = ext3_ext_dirty(handle, inode, path + depth); ++ ++cleanup: ++ if (npath) { ++ ext3_ext_drop_refs(npath); ++ kfree(npath); ++ } ++ ext3_ext_tree_changed(inode); ++ ext3_ext_invalidate_cache(inode); ++ return err; ++} ++ ++int ext3_ext_walk_space(struct inode *inode, unsigned long block, ++ unsigned long num, ext_prepare_callback func, ++ void *cbdata) ++{ ++ struct ext3_ext_path *path = NULL; ++ struct ext3_ext_cache cbex; ++ struct ext3_extent *ex; ++ unsigned long next, start = 0, end = 0; ++ unsigned long last = block + num; ++ int depth, exists, err = 0; ++ ++ BUG_ON(func == NULL); ++ BUG_ON(inode == NULL); ++ ++ while (block < last && block != EXT_MAX_BLOCK) { ++ num = last - block; ++ /* find extent for this block */ ++ path = ext3_ext_find_extent(inode, block, path); ++ if (IS_ERR(path)) { ++ err = PTR_ERR(path); ++ path = NULL; ++ break; ++ } ++ ++ depth = ext_depth(inode); ++ BUG_ON(path[depth].p_hdr == NULL); ++ ex = path[depth].p_ext; ++ next = ext3_ext_next_allocated_block(path); ++ ++ exists = 0; ++ if (!ex) { ++ /* there is no extent yet, so try to allocate ++ * all requested space */ ++ start = block; ++ end = block + num; ++ } else if (le32_to_cpu(ex->ee_block) > block) { ++ /* need to allocate space before found extent */ ++ start = block; ++ end = le32_to_cpu(ex->ee_block); ++ if (block + num < end) ++ end = block + num; ++ } else if (block >= ++ le32_to_cpu(ex->ee_block) + le16_to_cpu(ex->ee_len)) { ++ /* need to allocate space after found extent */ ++ start = block; ++ end = block + num; ++ if (end >= next) ++ end = next; ++ } else if (block >= le32_to_cpu(ex->ee_block)) { ++ /* ++ * some part of requested space is covered ++ * by found extent ++ */ ++ start = block; ++ end = le32_to_cpu(ex->ee_block) + le16_to_cpu(ex->ee_len); ++ if (block + num < end) ++ end = block + num; ++ exists = 1; ++ } else { ++ BUG(); ++ } ++ BUG_ON(end <= start); ++ ++ if (!exists) { ++ cbex.ec_block = start; ++ cbex.ec_len = end - start; ++ cbex.ec_start = 0; ++ cbex.ec_type = EXT3_EXT_CACHE_GAP; ++ } else { ++ cbex.ec_block = le32_to_cpu(ex->ee_block); ++ cbex.ec_len = le16_to_cpu(ex->ee_len); ++ cbex.ec_start = le32_to_cpu(ex->ee_start); ++ cbex.ec_type = 
EXT3_EXT_CACHE_EXTENT; ++ } ++ ++ BUG_ON(cbex.ec_len == 0); ++ err = func(inode, path, &cbex, cbdata); ++ ext3_ext_drop_refs(path); ++ ++ if (err < 0) ++ break; ++ if (err == EXT_REPEAT) ++ continue; ++ else if (err == EXT_BREAK) { ++ err = 0; ++ break; ++ } ++ ++ if (ext_depth(inode) != depth) { ++ /* depth was changed. we have to realloc path */ ++ kfree(path); ++ path = NULL; ++ } ++ ++ block = cbex.ec_block + cbex.ec_len; ++ } ++ ++ if (path) { ++ ext3_ext_drop_refs(path); ++ kfree(path); ++ } ++ ++ return err; ++} ++ ++static inline void ++ext3_ext_put_in_cache(struct inode *inode, __u32 block, ++ __u32 len, __u32 start, int type) ++{ ++ struct ext3_ext_cache *cex; ++ BUG_ON(len == 0); ++ cex = &EXT3_I(inode)->i_cached_extent; ++ cex->ec_type = type; ++ cex->ec_block = block; ++ cex->ec_len = len; ++ cex->ec_start = start; ++} ++ ++/* ++ * this routine calculate boundaries of the gap requested block fits into ++ * and cache this gap ++ */ ++static inline void ++ext3_ext_put_gap_in_cache(struct inode *inode, struct ext3_ext_path *path, ++ unsigned long block) ++{ ++ int depth = ext_depth(inode); ++ unsigned long lblock, len; ++ struct ext3_extent *ex; ++ ++ ex = path[depth].p_ext; ++ if (ex == NULL) { ++ /* there is no extent yet, so gap is [0;-] */ ++ lblock = 0; ++ len = EXT_MAX_BLOCK; ++ ext_debug(inode, "cache gap(whole file):"); ++ } else if (block < le32_to_cpu(ex->ee_block)) { ++ lblock = block; ++ len = le32_to_cpu(ex->ee_block) - block; ++ ext_debug(inode, "cache gap(before): %lu [%lu:%lu]", ++ (unsigned long) block, ++ (unsigned long) le32_to_cpu(ex->ee_block), ++ (unsigned long) le16_to_cpu(ex->ee_len)); ++ } else if (block >= le32_to_cpu(ex->ee_block) ++ + le16_to_cpu(ex->ee_len)) { ++ lblock = le32_to_cpu(ex->ee_block) ++ + le16_to_cpu(ex->ee_len); ++ len = ext3_ext_next_allocated_block(path); ++ ext_debug(inode, "cache gap(after): [%lu:%lu] %lu", ++ (unsigned long) le32_to_cpu(ex->ee_block), ++ (unsigned long) le16_to_cpu(ex->ee_len), ++ (unsigned long) block); ++ BUG_ON(len == lblock); ++ len = len - lblock; ++ } else { ++ lblock = len = 0; ++ BUG(); ++ } ++ ++ ext_debug(inode, " -> %lu:%lu\n", (unsigned long) lblock, len); ++ ext3_ext_put_in_cache(inode, lblock, len, 0, EXT3_EXT_CACHE_GAP); ++} ++ ++static inline int ++ext3_ext_in_cache(struct inode *inode, unsigned long block, ++ struct ext3_extent *ex) ++{ ++ struct ext3_ext_cache *cex; ++ ++ cex = &EXT3_I(inode)->i_cached_extent; ++ ++ /* has cache valid data? */ ++ if (cex->ec_type == EXT3_EXT_CACHE_NO) ++ return EXT3_EXT_CACHE_NO; ++ ++ BUG_ON(cex->ec_type != EXT3_EXT_CACHE_GAP && ++ cex->ec_type != EXT3_EXT_CACHE_EXTENT); ++ if (block >= cex->ec_block && block < cex->ec_block + cex->ec_len) { ++ ex->ee_block = cpu_to_le32(cex->ec_block); ++ ex->ee_start = cpu_to_le32(cex->ec_start); ++ ex->ee_start_hi = 0; ++ ex->ee_len = cpu_to_le16(cex->ec_len); ++ ext_debug(inode, "%lu cached by %lu:%lu:%lu\n", ++ (unsigned long) block, ++ (unsigned long) cex->ec_block, ++ (unsigned long) cex->ec_len, ++ (unsigned long) cex->ec_start); ++ return cex->ec_type; ++ } ++ ++ /* not in cache */ ++ return EXT3_EXT_CACHE_NO; ++} ++ ++/* ++ * routine removes index from the index block ++ * it's used in truncate case only. 
thus all requests are for ++ * last index in the block only ++ */ ++int ext3_ext_rm_idx(handle_t *handle, struct inode *inode, ++ struct ext3_ext_path *path) ++{ ++ struct buffer_head *bh; ++ int err; ++ unsigned long leaf; ++ ++ /* free index block */ ++ path--; ++ leaf = le32_to_cpu(path->p_idx->ei_leaf); ++ BUG_ON(path->p_hdr->eh_entries == 0); ++ if ((err = ext3_ext_get_access(handle, inode, path))) ++ return err; ++ path->p_hdr->eh_entries = cpu_to_le16(le16_to_cpu(path->p_hdr->eh_entries)-1); ++ if ((err = ext3_ext_dirty(handle, inode, path))) ++ return err; ++ ext_debug(inode, "index is empty, remove it, free block %lu\n", leaf); ++ bh = sb_find_get_block(inode->i_sb, leaf); ++ ext3_forget(handle, 1, inode, bh, leaf); ++ ext3_free_blocks(handle, inode, leaf, 1); ++ return err; ++} ++ ++/* ++ * This routine returns max. credits extent tree can consume. ++ * It should be OK for low-performance paths like ->writepage() ++ * To allow many writing process to fit a single transaction, ++ * caller should calculate credits under truncate_mutex and ++ * pass actual path. ++ */ ++int inline ext3_ext_calc_credits_for_insert(struct inode *inode, ++ struct ext3_ext_path *path) ++{ ++ int depth, needed; ++ ++ if (path) { ++ /* probably there is space in leaf? */ ++ depth = ext_depth(inode); ++ if (le16_to_cpu(path[depth].p_hdr->eh_entries) ++ < le16_to_cpu(path[depth].p_hdr->eh_max)) ++ return 1; ++ } ++ ++ /* ++ * given 32bit logical block (4294967296 blocks), max. tree ++ * can be 4 levels in depth -- 4 * 340^4 == 53453440000. ++ * let's also add one more level for imbalance. ++ */ ++ depth = 5; ++ ++ /* allocation of new data block(s) */ ++ needed = 2; ++ ++ /* ++ * tree can be full, so it'd need to grow in depth: ++ * we need one credit to modify old root, credits for ++ * new root will be added in split accounting ++ */ ++ needed += 1; ++ ++ /* ++ * Index split can happen, we'd need: ++ * allocate intermediate indexes (bitmap + group) ++ * + change two blocks at each level, but root (already included) ++ */ ++ needed += (depth * 2) + (depth * 2); ++ ++ /* any allocation modifies superblock */ ++ needed += 1; ++ ++ return needed; ++} ++ ++static int ext3_remove_blocks(handle_t *handle, struct inode *inode, ++ struct ext3_extent *ex, ++ unsigned long from, unsigned long to) ++{ ++ struct buffer_head *bh; ++ int i; ++ ++#ifdef EXTENTS_STATS ++ { ++ struct ext3_sb_info *sbi = EXT3_SB(inode->i_sb); ++ unsigned short ee_len = le16_to_cpu(ex->ee_len); ++ spin_lock(&sbi->s_ext_stats_lock); ++ sbi->s_ext_blocks += ee_len; ++ sbi->s_ext_extents++; ++ if (ee_len < sbi->s_ext_min) ++ sbi->s_ext_min = ee_len; ++ if (ee_len > sbi->s_ext_max) ++ sbi->s_ext_max = ee_len; ++ if (ext_depth(inode) > sbi->s_depth_max) ++ sbi->s_depth_max = ext_depth(inode); ++ spin_unlock(&sbi->s_ext_stats_lock); ++ } ++#endif ++ if (from >= le32_to_cpu(ex->ee_block) ++ && to == le32_to_cpu(ex->ee_block) + le16_to_cpu(ex->ee_len) - 1) { ++ /* tail removal */ ++ unsigned long num, start; ++ num = le32_to_cpu(ex->ee_block) + le16_to_cpu(ex->ee_len) - from; ++ start = le32_to_cpu(ex->ee_start) + le16_to_cpu(ex->ee_len) - num; ++ ext_debug(inode, "free last %lu blocks starting %lu\n", num, start); ++ for (i = 0; i < num; i++) { ++ bh = sb_find_get_block(inode->i_sb, start + i); ++ ext3_forget(handle, 0, inode, bh, start + i); ++ } ++ ext3_free_blocks(handle, inode, start, num); ++ } else if (from == le32_to_cpu(ex->ee_block) ++ && to <= le32_to_cpu(ex->ee_block) + le16_to_cpu(ex->ee_len) - 1) { ++ printk("strange request: removal 
%lu-%lu from %u:%u\n", ++ from, to, le32_to_cpu(ex->ee_block), le16_to_cpu(ex->ee_len)); ++ } else { ++ printk("strange request: removal(2) %lu-%lu from %u:%u\n", ++ from, to, le32_to_cpu(ex->ee_block), le16_to_cpu(ex->ee_len)); ++ } ++ return 0; ++} ++ ++static int ++ext3_ext_rm_leaf(handle_t *handle, struct inode *inode, ++ struct ext3_ext_path *path, unsigned long start) ++{ ++ int err = 0, correct_index = 0; ++ int depth = ext_depth(inode), credits; ++ struct ext3_extent_header *eh; ++ unsigned a, b, block, num; ++ unsigned long ex_ee_block; ++ unsigned short ex_ee_len; ++ struct ext3_extent *ex; ++ ++ /* the header must be checked already in ext3_ext_remove_space() */ ++ ext_debug(inode, "truncate since %lu in leaf\n", start); ++ if (!path[depth].p_hdr) ++ path[depth].p_hdr = ext_block_hdr(path[depth].p_bh); ++ eh = path[depth].p_hdr; ++ BUG_ON(eh == NULL); ++ ++ /* find where to start removing */ ++ ex = EXT_LAST_EXTENT(eh); ++ ++ ex_ee_block = le32_to_cpu(ex->ee_block); ++ ex_ee_len = le16_to_cpu(ex->ee_len); ++ ++ while (ex >= EXT_FIRST_EXTENT(eh) && ++ ex_ee_block + ex_ee_len > start) { ++ ext_debug(inode, "remove ext %lu:%u\n", ex_ee_block, ex_ee_len); ++ path[depth].p_ext = ex; ++ ++ a = ex_ee_block > start ? ex_ee_block : start; ++ b = ex_ee_block + ex_ee_len - 1 < EXT_MAX_BLOCK ? ++ ex_ee_block + ex_ee_len - 1 : EXT_MAX_BLOCK; ++ ++ ext_debug(inode, " border %u:%u\n", a, b); ++ ++ if (a != ex_ee_block && b != ex_ee_block + ex_ee_len - 1) { ++ block = 0; ++ num = 0; ++ BUG(); ++ } else if (a != ex_ee_block) { ++ /* remove tail of the extent */ ++ block = ex_ee_block; ++ num = a - block; ++ } else if (b != ex_ee_block + ex_ee_len - 1) { ++ /* remove head of the extent */ ++ block = a; ++ num = b - a; ++ /* there is no "make a hole" API yet */ ++ BUG(); ++ } else { ++ /* remove whole extent: excellent! 
*/ ++ block = ex_ee_block; ++ num = 0; ++ BUG_ON(a != ex_ee_block); ++ BUG_ON(b != ex_ee_block + ex_ee_len - 1); ++ } ++ ++ /* at present, extent can't cross block group */ ++ /* leaf + bitmap + group desc + sb + inode */ ++ credits = 5; ++ if (ex == EXT_FIRST_EXTENT(eh)) { ++ correct_index = 1; ++ credits += (ext_depth(inode)) + 1; ++ } ++#ifdef CONFIG_QUOTA ++ credits += 2 * EXT3_QUOTA_TRANS_BLOCKS(inode->i_sb); ++#endif ++ ++ handle = ext3_ext_journal_restart(handle, credits); ++ if (IS_ERR(handle)) { ++ err = PTR_ERR(handle); ++ goto out; ++ } ++ ++ err = ext3_ext_get_access(handle, inode, path + depth); ++ if (err) ++ goto out; ++ ++ err = ext3_remove_blocks(handle, inode, ex, a, b); ++ if (err) ++ goto out; ++ ++ if (num == 0) { ++ /* this extent is removed entirely mark slot unused */ ++ ex->ee_start = ex->ee_start_hi = 0; ++ eh->eh_entries = cpu_to_le16(le16_to_cpu(eh->eh_entries)-1); ++ } ++ ++ ex->ee_block = cpu_to_le32(block); ++ ex->ee_len = cpu_to_le16(num); ++ ++ err = ext3_ext_dirty(handle, inode, path + depth); ++ if (err) ++ goto out; ++ ++ ext_debug(inode, "new extent: %u:%u:%u\n", block, num, ++ le32_to_cpu(ex->ee_start)); ++ ex--; ++ ex_ee_block = le32_to_cpu(ex->ee_block); ++ ex_ee_len = le16_to_cpu(ex->ee_len); ++ } ++ ++ if (correct_index && eh->eh_entries) ++ err = ext3_ext_correct_indexes(handle, inode, path); ++ ++ /* if this leaf is free, then we should ++ * remove it from index block above */ ++ if (err == 0 && eh->eh_entries == 0 && path[depth].p_bh != NULL) ++ err = ext3_ext_rm_idx(handle, inode, path + depth); ++ ++out: ++ return err; ++} ++ ++/* ++ * returns 1 if current index have to be freed (even partial) ++ */ ++static int inline ++ext3_ext_more_to_rm(struct ext3_ext_path *path) ++{ ++ BUG_ON(path->p_idx == NULL); ++ ++ if (path->p_idx < EXT_FIRST_INDEX(path->p_hdr)) ++ return 0; ++ ++ /* ++ * if truncate on deeper level happened it it wasn't partial ++ * so we have to consider current index for truncation ++ */ ++ if (le16_to_cpu(path->p_hdr->eh_entries) == path->p_block) ++ return 0; ++ return 1; ++} ++ ++int ext3_ext_remove_space(struct inode *inode, unsigned long start) ++{ ++ struct super_block *sb = inode->i_sb; ++ int depth = ext_depth(inode); ++ struct ext3_ext_path *path; ++ handle_t *handle; ++ int i = 0, err = 0; ++ ++ ext_debug(inode, "truncate since %lu\n", start); ++ ++ /* probably first extent we're gonna free will be last in block */ ++ handle = ext3_journal_start(inode, depth + 1); ++ if (IS_ERR(handle)) ++ return PTR_ERR(handle); ++ ++ ext3_ext_invalidate_cache(inode); ++ ++ /* ++ * we start scanning from right side freeing all the blocks ++ * after i_size and walking into the deep ++ */ ++ path = kmalloc(sizeof(struct ext3_ext_path) * (depth + 1), GFP_KERNEL); ++ if (path == NULL) { ++ ext3_journal_stop(handle); ++ return -ENOMEM; ++ } ++ memset(path, 0, sizeof(struct ext3_ext_path) * (depth + 1)); ++ path[0].p_hdr = ext_inode_hdr(inode); ++ if (ext3_ext_check_header(inode, path[0].p_hdr, depth)) { ++ err = -EIO; ++ goto out; ++ } ++ path[0].p_depth = depth; ++ ++ while (i >= 0 && err == 0) { ++ if (i == depth) { ++ /* this is leaf block */ ++ err = ext3_ext_rm_leaf(handle, inode, path, start); ++ /* root level have p_bh == NULL, brelse() eats this */ ++ brelse(path[i].p_bh); ++ path[i].p_bh = NULL; ++ i--; ++ continue; ++ } ++ ++ /* this is index block */ ++ if (!path[i].p_hdr) { ++ ext_debug(inode, "initialize header\n"); ++ path[i].p_hdr = ext_block_hdr(path[i].p_bh); ++ } ++ ++ if (!path[i].p_idx) { ++ /* this level hasn't touched 
++ path[i].p_idx = EXT_LAST_INDEX(path[i].p_hdr);
++ path[i].p_block = le16_to_cpu(path[i].p_hdr->eh_entries)+1;
++ ext_debug(inode, "init index ptr: hdr 0x%p, num %d\n",
++ path[i].p_hdr,
++ le16_to_cpu(path[i].p_hdr->eh_entries));
++ } else {
++ /* we've already been here, see the next index */
++ path[i].p_idx--;
++ }
++
++ ext_debug(inode, "level %d - index, first 0x%p, cur 0x%p\n",
++ i, EXT_FIRST_INDEX(path[i].p_hdr),
++ path[i].p_idx);
++ if (ext3_ext_more_to_rm(path + i)) {
++ struct buffer_head *bh;
++ /* go to the next level */
++ ext_debug(inode, "move to level %d (block %d)\n",
++ i + 1, le32_to_cpu(path[i].p_idx->ei_leaf));
++ memset(path + i + 1, 0, sizeof(*path));
++ bh = sb_bread(sb, le32_to_cpu(path[i].p_idx->ei_leaf));
++ if (!bh) {
++ /* should we reset i_size? */
++ err = -EIO;
++ break;
++ }
++ BUG_ON(i + 1 > depth);
++ if (ext3_ext_check_header(inode, ext_block_hdr(bh),
++ depth - i - 1)) {
++ err = -EIO;
++ break;
++ }
++ path[i+1].p_bh = bh;
++
++ /* store the actual number of indexes to know whether this
++ * number got changed at the next iteration */
++ path[i].p_block = le16_to_cpu(path[i].p_hdr->eh_entries);
++ i++;
++ } else {
++ /* we've finished processing this index, go up */
++ if (path[i].p_hdr->eh_entries == 0 && i > 0) {
++ /* index is empty, remove it;
++ * handle must already be prepared by the
++ * truncatei_leaf() */
++ err = ext3_ext_rm_idx(handle, inode, path + i);
++ }
++ /* root level has p_bh == NULL, brelse() eats this */
++ brelse(path[i].p_bh);
++ path[i].p_bh = NULL;
++ i--;
++ ext_debug(inode, "return to level %d\n", i);
++ }
++ }
++
++ /* TODO: flexible tree reduction should be here */
++ if (path->p_hdr->eh_entries == 0) {
++ /*
++ * truncate to zero freed the whole tree,
++ * so we need to correct eh_depth
++ */
++ err = ext3_ext_get_access(handle, inode, path);
++ if (err == 0) {
++ ext_inode_hdr(inode)->eh_depth = 0;
++ ext_inode_hdr(inode)->eh_max =
++ cpu_to_le16(ext3_ext_space_root(inode));
++ err = ext3_ext_dirty(handle, inode, path);
++ }
++ }
++out:
++ ext3_ext_tree_changed(inode);
++ ext3_ext_drop_refs(path);
++ kfree(path);
++ ext3_journal_stop(handle);
++
++ return err;
++}
++
++/*
++ * called at mount time
++ */
++void ext3_ext_init(struct super_block *sb)
++{
++ /*
++ * possible initialization would be here
++ */
++
++ if (test_opt(sb, EXTENTS)) {
++ printk("EXT3-fs: file extents enabled");
++#ifdef AGRESSIVE_TEST
++ printk(", aggressive tests");
++#endif
++#ifdef CHECK_BINSEARCH
++ printk(", check binsearch");
++#endif
++#ifdef EXTENTS_STATS
++ printk(", stats");
++#endif
++ printk("\n");
++#ifdef EXTENTS_STATS
++ spin_lock_init(&EXT3_SB(sb)->s_ext_stats_lock);
++ EXT3_SB(sb)->s_ext_min = 1 << 30;
++ EXT3_SB(sb)->s_ext_max = 0;
++#endif
++ }
++}
++
++/*
++ * called at umount time
++ */
++void ext3_ext_release(struct super_block *sb)
++{
++ if (!test_opt(sb, EXTENTS))
++ return;
++
++#ifdef EXTENTS_STATS
++ if (EXT3_SB(sb)->s_ext_blocks && EXT3_SB(sb)->s_ext_extents) {
++ struct ext3_sb_info *sbi = EXT3_SB(sb);
++ printk(KERN_ERR "EXT3-fs: %lu blocks in %lu extents (%lu ave)\n",
++ sbi->s_ext_blocks, sbi->s_ext_extents,
++ sbi->s_ext_blocks / sbi->s_ext_extents);
++ printk(KERN_ERR "EXT3-fs: extents: %lu min, %lu max, max depth %lu\n",
++ sbi->s_ext_min, sbi->s_ext_max, sbi->s_depth_max);
++ }
++#endif
++}
++
++int ext3_ext_get_blocks(handle_t *handle, struct inode *inode, sector_t iblock,
++ unsigned long max_blocks, struct buffer_head *bh_result,
++ int create, int extend_disksize)
++{
++ struct ext3_ext_path *path = NULL;
++ struct ext3_extent newex, *ex;
++ int goal, newblock, err = 0, depth;
++ unsigned long allocated = 0;
++ unsigned long next;
++
++ __clear_bit(BH_New, &bh_result->b_state);
++ ext_debug(inode, "blocks %d/%lu requested for inode %u\n", (int) iblock,
++ max_blocks, (unsigned) inode->i_ino);
++ mutex_lock(&EXT3_I(inode)->truncate_mutex);
++
++ /* check in cache */
++ if ((goal = ext3_ext_in_cache(inode, iblock, &newex))) {
++ if (goal == EXT3_EXT_CACHE_GAP) {
++ if (!create) {
++ /* block isn't allocated yet and
++ * user doesn't want to allocate it */
++ goto out2;
++ }
++ /* we should allocate requested block */
++ } else if (goal == EXT3_EXT_CACHE_EXTENT) {
++ /* block is already allocated */
++ newblock = iblock
++ - le32_to_cpu(newex.ee_block)
++ + le32_to_cpu(newex.ee_start);
++ /* number of remaining blocks in the extent */
++ BUG_ON(iblock < le32_to_cpu(newex.ee_block));
++ allocated = le16_to_cpu(newex.ee_len) -
++ (iblock - le32_to_cpu(newex.ee_block));
++ goto out;
++ } else {
++ BUG();
++ }
++ }
++
++ /* find extent for this block */
++ path = ext3_ext_find_extent(inode, iblock, NULL);
++ if (IS_ERR(path)) {
++ err = PTR_ERR(path);
++ path = NULL;
++ goto out2;
++ }
++
++ depth = ext_depth(inode);
++
++ /*
++ * consistent leaf must not be empty
++ * this situation is possible, though, _during_ tree modification
++ * this is why assert can't be put in ext3_ext_find_extent()
++ */
++ BUG_ON(path[depth].p_ext == NULL && depth != 0);
++
++ if ((ex = path[depth].p_ext)) {
++ unsigned long ee_block = le32_to_cpu(ex->ee_block);
++ unsigned long ee_start = le32_to_cpu(ex->ee_start);
++ unsigned short ee_len = le16_to_cpu(ex->ee_len);
++ /* if found extent covers block, simply return it */
++ if (iblock >= ee_block && iblock < ee_block + ee_len) {
++ newblock = iblock - ee_block + ee_start;
++ /* number of remaining blocks in the extent */
++ allocated = ee_len - (iblock - ee_block);
++ ext_debug(inode, "%d fit into %lu:%d -> %d\n", (int) iblock,
++ ee_block, ee_len, newblock);
++ ext3_ext_put_in_cache(inode, ee_block, ee_len,
++ ee_start, EXT3_EXT_CACHE_EXTENT);
++ goto out;
++ }
++ }
++
++ /*
++ * requested block isn't allocated yet
++ * we can't try to create block if create flag is zero
++ */
++ if (!create) {
++ /* put just found gap into cache to speed up subsequent reqs */
++ ext3_ext_put_gap_in_cache(inode, path, iblock);
++ goto out2;
++ }
++
++ /*
++ * Okay, we need to do block allocation. Lazily initialize the block
++ * allocation info here if necessary
++ */
++ if (S_ISREG(inode->i_mode) && (!EXT3_I(inode)->i_block_alloc_info))
++ ext3_init_block_alloc_info(inode);
++
++ /* find next allocated block so that we know how many
++ * blocks we can allocate without overlapping next extent */
++ BUG_ON(iblock < le32_to_cpu(ex->ee_block) + le16_to_cpu(ex->ee_len));
++ next = ext3_ext_next_allocated_block(path);
++ BUG_ON(next <= iblock);
++ allocated = next - iblock;
++ if (allocated > max_blocks)
++ allocated = max_blocks;
++
++ /* allocate new block */
++ goal = ext3_ext_find_goal(inode, path, iblock);
++ newblock = ext3_new_blocks(handle, inode, goal, &allocated, &err);
++ if (!newblock)
++ goto out2;
++ ext_debug(inode, "allocate new block: goal %d, found %d/%lu\n",
++ goal, newblock, allocated);
++
++ /* try to insert new extent into found leaf and return */
++ newex.ee_block = cpu_to_le32(iblock);
++ newex.ee_start = cpu_to_le32(newblock);
++ newex.ee_start_hi = 0;
++ newex.ee_len = cpu_to_le16(allocated);
++ err = ext3_ext_insert_extent(handle, inode, path, &newex);
++ if (err) {
++ /* free data blocks we just allocated */
++ ext3_free_blocks(handle, inode, le32_to_cpu(newex.ee_start),
++ le16_to_cpu(newex.ee_len));
++ goto out2;
++ }
++
++ if (extend_disksize && inode->i_size > EXT3_I(inode)->i_disksize)
++ EXT3_I(inode)->i_disksize = inode->i_size;
++
++ /* previous routine could have used the block we allocated */
++ newblock = le32_to_cpu(newex.ee_start);
++ __set_bit(BH_New, &bh_result->b_state);
++
++ ext3_ext_put_in_cache(inode, iblock, allocated, newblock,
++ EXT3_EXT_CACHE_EXTENT);
++out:
++ if (allocated > max_blocks)
++ allocated = max_blocks;
++ ext3_ext_show_leaf(inode, path);
++ __set_bit(BH_Mapped, &bh_result->b_state);
++ bh_result->b_bdev = inode->i_sb->s_bdev;
++ bh_result->b_blocknr = newblock;
++ bh_result->b_size = (allocated << inode->i_blkbits);
++out2:
++ if (path) {
++ ext3_ext_drop_refs(path);
++ kfree(path);
++ }
++ mutex_unlock(&EXT3_I(inode)->truncate_mutex);
++
++ return err ? err : allocated;
++}
++
++void ext3_ext_truncate(struct inode * inode, struct page *page)
++{
++ struct address_space *mapping = inode->i_mapping;
++ struct super_block *sb = inode->i_sb;
++ unsigned long last_block;
++ handle_t *handle;
++ int err = 0;
++
++ /*
++ * probably first extent we're gonna free will be last in block
++ */
++ err = ext3_writepage_trans_blocks(inode) + 3;
++ handle = ext3_journal_start(inode, err);
++ if (IS_ERR(handle)) {
++ if (page) {
++ clear_highpage(page);
++ flush_dcache_page(page);
++ unlock_page(page);
++ page_cache_release(page);
++ }
++ return;
++ }
++
++ if (page)
++ ext3_block_truncate_page(handle, page, mapping, inode->i_size);
++
++ mutex_lock(&EXT3_I(inode)->truncate_mutex);
++ ext3_ext_invalidate_cache(inode);
++
++ /*
++ * TODO: optimization is possible here;
++ * probably we need not scan at all,
++ * because page truncation is enough
++ */
++ if (ext3_orphan_add(handle, inode))
++ goto out_stop;
++
++ /* we have to know where to truncate from in crash case */
++ EXT3_I(inode)->i_disksize = inode->i_size;
++ ext3_mark_inode_dirty(handle, inode);
++
++ last_block = (inode->i_size + sb->s_blocksize - 1)
++ >> EXT3_BLOCK_SIZE_BITS(sb);
++ err = ext3_ext_remove_space(inode, last_block);
++
++ /* In a multi-transaction truncate, we only make the final
++ * transaction synchronous */
++ if (IS_SYNC(inode))
++ handle->h_sync = 1;
++
++out_stop:
++ /*
++ * If this was a simple ftruncate(), and the file will remain alive
++ * then we need to clear up the orphan record which we created above.
++ * However, if this was a real unlink then we were called by
++ * ext3_delete_inode(), and we allow that function to clean up the
++ * orphan info for us.
++ */
++ if (inode->i_nlink)
++ ext3_orphan_del(handle, inode);
++
++ mutex_unlock(&EXT3_I(inode)->truncate_mutex);
++ ext3_journal_stop(handle);
++}
++
++/*
++ * this routine calculates the max number of blocks we could modify
++ * in order to allocate new block for an inode
++ */
++int ext3_ext_writepage_trans_blocks(struct inode *inode, int num)
++{
++ int needed;
++
++ needed = ext3_ext_calc_credits_for_insert(inode, NULL);
++
++ /* caller wants to allocate num blocks, but note it includes sb */
++ needed = needed * num - (num - 1);
++
++#ifdef CONFIG_QUOTA
++ needed += 2 * EXT3_QUOTA_TRANS_BLOCKS(inode->i_sb);
++#endif
++
++ return needed;
++}
++
++EXPORT_SYMBOL(ext3_mark_inode_dirty);
++EXPORT_SYMBOL(ext3_ext_invalidate_cache);
++EXPORT_SYMBOL(ext3_ext_insert_extent);
++EXPORT_SYMBOL(ext3_ext_walk_space);
++EXPORT_SYMBOL(ext3_ext_find_goal);
++EXPORT_SYMBOL(ext3_ext_calc_credits_for_insert);
+Index: linux-2.6.18.8/fs/ext3/ialloc.c
+===================================================================
+--- linux-2.6.18.8.orig/fs/ext3/ialloc.c 2007-07-17 09:18:09.000000000 +0200
++++ linux-2.6.18.8/fs/ext3/ialloc.c 2007-07-17 11:08:09.000000000 +0200
+@@ -652,6 +652,17 @@ got:
+ ext3_std_error(sb, err);
+ goto fail_free_drop;
+ }
++ if (test_opt(sb, EXTENTS) && S_ISREG(inode->i_mode)) {
++ EXT3_I(inode)->i_flags |= EXT3_EXTENTS_FL;
++ ext3_ext_tree_init(handle, inode);
++ if (!EXT3_HAS_INCOMPAT_FEATURE(sb, EXT3_FEATURE_INCOMPAT_EXTENTS)) {
++ err = ext3_journal_get_write_access(handle, EXT3_SB(sb)->s_sbh);
++ if (err) goto fail;
++ EXT3_SET_INCOMPAT_FEATURE(sb, EXT3_FEATURE_INCOMPAT_EXTENTS);
++ BUFFER_TRACE(EXT3_SB(sb)->s_sbh, "call ext3_journal_dirty_metadata");
++ err = ext3_journal_dirty_metadata(handle, EXT3_SB(sb)->s_sbh);
++ }
++ }
+
+ ext3_debug("allocating inode %lu\n", inode->i_ino);
+ goto really_out;
+Index: linux-2.6.18.8/fs/ext3/inode.c +=================================================================== +--- linux-2.6.18.8.orig/fs/ext3/inode.c 2007-07-17 09:18:12.000000000 +0200 ++++ linux-2.6.18.8/fs/ext3/inode.c 2007-07-17 11:08:11.000000000 +0200 +@@ -40,8 +40,6 @@ + #include "iopen.h" + #include "acl.h" + +-static int ext3_writepage_trans_blocks(struct inode *inode); +- + /* + * Test whether an inode is a fast symlink. + */ +@@ -804,6 +802,7 @@ int ext3_get_blocks_handle(handle_t *han + ext3_fsblk_t first_block = 0; + + ++ J_ASSERT(!(EXT3_I(inode)->i_flags & EXT3_EXTENTS_FL)); + J_ASSERT(handle != NULL || create == 0); + depth = ext3_block_to_path(inode,iblock,offsets,&blocks_to_boundary); + +@@ -984,12 +983,10 @@ static int ext3_get_block(struct inode * + + get_block: + if (ret == 0) { +- ret = ext3_get_blocks_handle(handle, inode, iblock, ++ ret = ext3_get_blocks_wrap(handle, inode, iblock, + max_blocks, bh_result, create, 0); +- if (ret > 0) { +- bh_result->b_size = (ret << inode->i_blkbits); ++ if (ret > 0) + ret = 0; +- } + } + return ret; + } +@@ -1008,7 +1005,7 @@ struct buffer_head *ext3_getblk(handle_t + dummy.b_state = 0; + dummy.b_blocknr = -1000; + buffer_trace_init(&dummy.b_history); +- err = ext3_get_blocks_handle(handle, inode, block, 1, ++ err = ext3_get_blocks_wrap(handle, inode, block, 1, + &dummy, create, 1); + /* + * ext3_get_blocks_handle() returns number of blocks +@@ -1759,7 +1756,7 @@ void ext3_set_aops(struct inode *inode) + * This required during truncate. We need to physically zero the tail end + * of that block so it doesn't yield old data if the file is later grown. + */ +-static int ext3_block_truncate_page(handle_t *handle, struct page *page, ++int ext3_block_truncate_page(handle_t *handle, struct page *page, + struct address_space *mapping, loff_t from) + { + ext3_fsblk_t index = from >> PAGE_CACHE_SHIFT; +@@ -2263,6 +2260,9 @@ void ext3_truncate(struct inode *inode) + return; + } + ++ if (EXT3_I(inode)->i_flags & EXT3_EXTENTS_FL) ++ return ext3_ext_truncate(inode, page); ++ + handle = start_transaction(inode); + if (IS_ERR(handle)) { + if (page) { +@@ -3008,12 +3008,15 @@ err_out: + * block and work out the exact number of indirects which are touched. Pah. + */ + +-static int ext3_writepage_trans_blocks(struct inode *inode) ++int ext3_writepage_trans_blocks(struct inode *inode) + { + int bpp = ext3_journal_blocks_per_page(inode); + int indirects = (EXT3_NDIR_BLOCKS % bpp) ? 
5 : 3; + int ret; + ++ if (EXT3_I(inode)->i_flags & EXT3_EXTENTS_FL) ++ return ext3_ext_writepage_trans_blocks(inode, bpp); ++ + if (ext3_should_journal_data(inode)) + ret = 3 * (bpp + indirects) + 2; + else +@@ -3260,7 +3263,7 @@ int ext3_map_inode_page(struct inode *in + if (blocks[i] != 0) + continue; + +- rc = ext3_get_blocks_handle(handle, inode, iblock, 1, &dummy, 1, 1); ++ rc = ext3_get_blocks_wrap(handle, inode, iblock, 1, &dummy, 1, 1); + if (rc < 0) { + printk(KERN_INFO "ext3_map_inode_page: error reading " + "block %ld\n", iblock); +Index: linux-2.6.18.8/fs/ext3/Makefile +=================================================================== +--- linux-2.6.18.8.orig/fs/ext3/Makefile 2007-07-17 09:18:11.000000000 +0200 ++++ linux-2.6.18.8/fs/ext3/Makefile 2007-07-17 11:08:11.000000000 +0200 +@@ -5,7 +5,8 @@ + obj-$(CONFIG_EXT3_FS) += ext3.o + + ext3-y := balloc.o bitmap.o dir.o file.o fsync.o ialloc.o inode.o iopen.o \ +- ioctl.o namei.o super.o symlink.o hash.o resize.o ext3_jbd.o ++ ioctl.o namei.o super.o symlink.o hash.o resize.o ext3_jbd.o \ ++ extents.o + + ext3-$(CONFIG_EXT3_FS_XATTR) += xattr.o xattr_user.o xattr_trusted.o + ext3-$(CONFIG_EXT3_FS_POSIX_ACL) += acl.o +Index: linux-2.6.18.8/fs/ext3/super.c +=================================================================== +--- linux-2.6.18.8.orig/fs/ext3/super.c 2007-07-17 09:18:12.000000000 +0200 ++++ linux-2.6.18.8/fs/ext3/super.c 2007-07-17 11:08:12.000000000 +0200 +@@ -391,6 +391,7 @@ static void ext3_put_super (struct super + struct ext3_super_block *es = sbi->s_es; + int i; + ++ ext3_ext_release(sb); + ext3_xattr_put_super(sb); + journal_destroy(sbi->s_journal); + if (!(sb->s_flags & MS_RDONLY)) { +@@ -455,6 +456,8 @@ static struct inode *ext3_alloc_inode(st + #endif + ei->i_block_alloc_info = NULL; + ei->vfs_inode.i_version = 1; ++ ++ memset(&ei->i_cached_extent, 0, sizeof(ei->i_cached_extent)); + return &ei->vfs_inode; + } + +@@ -680,7 +683,8 @@ enum { + Opt_jqfmt_vfsold, Opt_jqfmt_vfsv0, Opt_quota, Opt_noquota, + Opt_ignore, Opt_barrier, Opt_err, Opt_resize, Opt_usrquota, + Opt_iopen, Opt_noiopen, Opt_iopen_nopriv, +- Opt_grpquota ++ Opt_grpquota, ++ Opt_extents, Opt_noextents, Opt_extdebug, + }; + + static match_table_t tokens = { +@@ -733,6 +737,9 @@ static match_table_t tokens = { + {Opt_noiopen, "noiopen"}, + {Opt_iopen_nopriv, "iopen_nopriv"}, + {Opt_barrier, "barrier=%u"}, ++ {Opt_extents, "extents"}, ++ {Opt_noextents, "noextents"}, ++ {Opt_extdebug, "extdebug"}, + {Opt_err, NULL}, + {Opt_resize, "resize"}, + }; +@@ -1077,6 +1084,15 @@ clear_qf_name: + case Opt_bh: + clear_opt(sbi->s_mount_opt, NOBH); + break; ++ case Opt_extents: ++ set_opt (sbi->s_mount_opt, EXTENTS); ++ break; ++ case Opt_noextents: ++ clear_opt (sbi->s_mount_opt, EXTENTS); ++ break; ++ case Opt_extdebug: ++ set_opt (sbi->s_mount_opt, EXTDEBUG); ++ break; + default: + printk (KERN_ERR + "EXT3-fs: Unrecognized mount option \"%s\" " +@@ -1806,6 +1822,8 @@ static int ext3_fill_super (struct super + test_opt(sb,DATA_FLAGS) == EXT3_MOUNT_ORDERED_DATA ? 
"ordered": + "writeback"); + ++ ext3_ext_init(sb); ++ + lock_kernel(); + return 0; + +Index: linux-2.6.18.8/include/linux/ext3_extents.h +=================================================================== +--- /dev/null 1970-01-01 00:00:00.000000000 +0000 ++++ linux-2.6.18.8/include/linux/ext3_extents.h 2007-07-17 09:18:14.000000000 +0200 +@@ -0,0 +1,231 @@ ++/* ++ * Copyright (c) 2003-2006, Cluster File Systems, Inc, info@clusterfs.com ++ * Written by Alex Tomas ++ * ++ * This program is free software; you can redistribute it and/or modify ++ * it under the terms of the GNU General Public License version 2 as ++ * published by the Free Software Foundation. ++ * ++ * This program is distributed in the hope that it will be useful, ++ * but WITHOUT ANY WARRANTY; without even the implied warranty of ++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the ++ * GNU General Public License for more details. ++ * ++ * You should have received a copy of the GNU General Public Licens ++ * along with this program; if not, write to the Free Software ++ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111- ++ */ ++ ++#ifndef _LINUX_EXT3_EXTENTS ++#define _LINUX_EXT3_EXTENTS ++ ++#include ++ ++/* ++ * with AGRESSIVE_TEST defined capacity of index/leaf blocks ++ * become very little, so index split, in-depth growing and ++ * other hard changes happens much more often ++ * this is for debug purposes only ++ */ ++#define AGRESSIVE_TEST_ ++ ++/* ++ * with EXTENTS_STATS defined number of blocks and extents ++ * are collected in truncate path. they'll be showed at ++ * umount time ++ */ ++#define EXTENTS_STATS__ ++ ++/* ++ * if CHECK_BINSEARCH defined, then results of binary search ++ * will be checked by linear search ++ */ ++#define CHECK_BINSEARCH__ ++ ++/* ++ * if EXT_DEBUG is defined you can use 'extdebug' mount option ++ * to get lots of info what's going on ++ */ ++#define EXT_DEBUG_ ++#ifdef EXT_DEBUG ++#define ext_debug(inode,fmt,a...) \ ++do { \ ++ if (test_opt(inode->i_sb, EXTDEBUG)) \ ++ printk(fmt, ##a); \ ++} while (0); ++#else ++#define ext_debug(inode,fmt,a...) ++#endif ++ ++ ++/* ++ * if EXT_STATS is defined then stats numbers are collected ++ * these number will be displayed at umount time ++ */ ++#define EXT_STATS_ ++ ++/* ++ * define EXT3_ALLOC_NEEDED to 0 since block bitmap, group desc. and sb ++ * are now accounted in ext3_ext_calc_credits_for_insert() ++ */ ++#define EXT3_ALLOC_NEEDED 0 ++ ++/* ++ * ext3_inode has i_block array (60 bytes total) ++ * first 12 bytes store ext3_extent_header ++ * the remain stores array of ext3_extent ++ */ ++ ++/* ++ * this is extent on-disk structure ++ * it's used at the bottom of the tree ++ */ ++struct ext3_extent { ++ __le32 ee_block; /* first logical block extent covers */ ++ __le16 ee_len; /* number of blocks covered by extent */ ++ __le16 ee_start_hi; /* high 16 bits of physical block */ ++ __le32 ee_start; /* low 32 bigs of physical block */ ++}; ++ ++/* ++ * this is index on-disk structure ++ * it's used at all the levels, but the bottom ++ */ ++struct ext3_extent_idx { ++ __le32 ei_block; /* index covers logical blocks from 'block' */ ++ __le32 ei_leaf; /* pointer to the physical block of the next * ++ * level. 
++ * level. leaf or next index could be here */
++ __le16 ei_leaf_hi; /* high 16 bits of physical block */
++ __u16 ei_unused;
++};
++
++/*
++ * each block (leaves and indexes), even inode-stored, has a header
++ */
++struct ext3_extent_header {
++ __le16 eh_magic; /* probably will support different formats */
++ __le16 eh_entries; /* number of valid entries */
++ __le16 eh_max; /* capacity of store in entries */
++ __le16 eh_depth; /* has tree real underlying blocks? */
++ __le32 eh_generation; /* flags(8 bits) | generation of the tree */
++};
++
++#define EXT3_EXT_MAGIC 0xf30a
++
++/*
++ * array of ext3_ext_path contains path to some extent
++ * creation/lookup routines use it for traversal/splitting/etc
++ * truncate uses it to simulate recursive walking
++ */
++struct ext3_ext_path {
++ __u32 p_block;
++ __u16 p_depth;
++ struct ext3_extent *p_ext;
++ struct ext3_extent_idx *p_idx;
++ struct ext3_extent_header *p_hdr;
++ struct buffer_head *p_bh;
++};
++
++/*
++ * structure for external API
++ */
++
++#define EXT3_EXT_CACHE_NO 0
++#define EXT3_EXT_CACHE_GAP 1
++#define EXT3_EXT_CACHE_EXTENT 2
++#define EXT3_EXT_HAS_NO_TREE /* ext3_extents_tree struct is not used */
++
++/*
++ * to be called by ext3_ext_walk_space()
++ * negative retcode - error
++ * positive retcode - signal for ext3_ext_walk_space(), see below
++ * callback must return valid extent (passed or newly created)
++ */
++typedef int (*ext_prepare_callback)(struct inode *, struct ext3_ext_path *,
++ struct ext3_ext_cache *,
++ void *);
++
++#define EXT_CONTINUE 0
++#define EXT_BREAK 1
++#define EXT_REPEAT 2
++
++
++#define EXT_MAX_BLOCK 0xffffffff
++
++#define EXT_FLAGS_CLR_UNKNOWN 0x7 /* Flags cleared on modification */
++#define EXT_HDR_GEN_BITS 24
++#define EXT_HDR_GEN_MASK ((1 << EXT_HDR_GEN_BITS) - 1)
++
++#define EXT_FIRST_EXTENT(__hdr__) \
++ ((struct ext3_extent *) (((char *) (__hdr__)) + \
++ sizeof(struct ext3_extent_header)))
++#define EXT_FIRST_INDEX(__hdr__) \
++ ((struct ext3_extent_idx *) (((char *) (__hdr__)) + \
++ sizeof(struct ext3_extent_header)))
++#define EXT_HAS_FREE_INDEX(__path__) \
++ (le16_to_cpu((__path__)->p_hdr->eh_entries) \
++ < le16_to_cpu((__path__)->p_hdr->eh_max))
++#define EXT_LAST_EXTENT(__hdr__) \
++ (EXT_FIRST_EXTENT((__hdr__)) + le16_to_cpu((__hdr__)->eh_entries) - 1)
++#define EXT_LAST_INDEX(__hdr__) \
++ (EXT_FIRST_INDEX((__hdr__)) + le16_to_cpu((__hdr__)->eh_entries) - 1)
++#define EXT_MAX_EXTENT(__hdr__) \
++ (EXT_FIRST_EXTENT((__hdr__)) + le16_to_cpu((__hdr__)->eh_max) - 1)
++#define EXT_MAX_INDEX(__hdr__) \
++ (EXT_FIRST_INDEX((__hdr__)) + le16_to_cpu((__hdr__)->eh_max) - 1)
++
++
++static inline struct ext3_extent_header *ext_inode_hdr(struct inode *inode)
++{
++ return (struct ext3_extent_header *) EXT3_I(inode)->i_data;
++}
++
++static inline struct ext3_extent_header *ext_block_hdr(struct buffer_head *bh)
++{
++ return (struct ext3_extent_header *) bh->b_data;
++}
++
++static inline unsigned short ext_depth(struct inode *inode)
++{
++ return le16_to_cpu(ext_inode_hdr(inode)->eh_depth);
++}
++
++static inline unsigned short ext_flags(struct ext3_extent_header *neh)
++{
++ return le16_to_cpu(neh->eh_generation) >> EXT_HDR_GEN_BITS;
++}
++
++static inline unsigned short ext_hdr_gen(struct ext3_extent_header *neh)
++{
++ return le16_to_cpu(neh->eh_generation) & EXT_HDR_GEN_MASK;
++}
++
++static inline unsigned short ext_generation(struct inode *inode)
++{
++ return ext_hdr_gen(ext_inode_hdr(inode));
++}
++
++static inline void ext3_ext_tree_changed(struct inode *inode)
++{
++ struct
ext3_extent_header *neh = ext_inode_hdr(inode); ++ neh->eh_generation = cpu_to_le32( ++ ((ext_flags(neh) & ~EXT_FLAGS_CLR_UNKNOWN) << EXT_HDR_GEN_BITS) ++ | ((ext_hdr_gen(neh) + 1) & EXT_HDR_GEN_MASK)); ++} ++ ++static inline void ++ext3_ext_invalidate_cache(struct inode *inode) ++{ ++ EXT3_I(inode)->i_cached_extent.ec_type = EXT3_EXT_CACHE_NO; ++} ++ ++extern int ext3_ext_search_left(struct inode *, struct ext3_ext_path *, unsigned long *, unsigned long *); ++extern int ext3_ext_search_right(struct inode *, struct ext3_ext_path *, unsigned long *, unsigned long *); ++extern int ext3_extent_tree_init(handle_t *, struct inode *); ++extern int ext3_ext_calc_credits_for_insert(struct inode *, struct ext3_ext_path *); ++extern int ext3_ext_insert_extent(handle_t *, struct inode *, struct ext3_ext_path *, struct ext3_extent *); ++extern int ext3_ext_walk_space(struct inode *, unsigned long, unsigned long, ext_prepare_callback, void *); ++extern struct ext3_ext_path * ext3_ext_find_extent(struct inode *, int, struct ext3_ext_path *); ++ ++#endif /* _LINUX_EXT3_EXTENTS */ ++ +Index: linux-2.6.18.8/include/linux/ext3_fs.h +=================================================================== +--- linux-2.6.18.8.orig/include/linux/ext3_fs.h 2007-07-17 09:18:13.000000000 +0200 ++++ linux-2.6.18.8/include/linux/ext3_fs.h 2007-07-17 11:08:12.000000000 +0200 +@@ -182,8 +182,10 @@ struct ext3_group_desc + #define EXT3_DIRSYNC_FL 0x00010000 /* dirsync behaviour (directories only) */ + #define EXT3_TOPDIR_FL 0x00020000 /* Top of directory hierarchies*/ + #define EXT3_RESERVED_FL 0x80000000 /* reserved for ext3 lib */ ++#define EXT3_EXTENTS_FL 0x00080000 /* Inode uses extents */ ++#define EXT3_SUPER_MAGIC 0xEF53 + +-#define EXT3_FL_USER_VISIBLE 0x0003DFFF /* User visible flags */ ++#define EXT3_FL_USER_VISIBLE 0x000BDFFF /* User visible flags */ + #define EXT3_FL_USER_MODIFIABLE 0x000380FF /* User modifiable flags */ + + /* +@@ -373,6 +374,8 @@ struct ext3_inode { + #define EXT3_MOUNT_GRPQUOTA 0x200000 /* "old" group quota */ + #define EXT3_MOUNT_IOPEN 0x400000 /* Allow access via iopen */ + #define EXT3_MOUNT_IOPEN_NOPRIV 0x800000/* Make iopen world-readable */ ++#define EXT3_MOUNT_EXTENTS 0x2000000/* Extents support */ ++#define EXT3_MOUNT_EXTDEBUG 0x4000000/* Extents debug */ + + /* Compatibility, for having both ext2_fs.h and ext3_fs.h included at once */ + #ifndef clear_opt +@@ -572,11 +575,13 @@ static inline int ext3_valid_inum(struct + #define EXT3_FEATURE_INCOMPAT_RECOVER 0x0004 /* Needs recovery */ + #define EXT3_FEATURE_INCOMPAT_JOURNAL_DEV 0x0008 /* Journal device */ + #define EXT3_FEATURE_INCOMPAT_META_BG 0x0010 ++#define EXT3_FEATURE_INCOMPAT_EXTENTS 0x0040 /* extents support */ + + #define EXT3_FEATURE_COMPAT_SUPP EXT2_FEATURE_COMPAT_EXT_ATTR + #define EXT3_FEATURE_INCOMPAT_SUPP (EXT3_FEATURE_INCOMPAT_FILETYPE| \ + EXT3_FEATURE_INCOMPAT_RECOVER| \ +- EXT3_FEATURE_INCOMPAT_META_BG) ++ EXT3_FEATURE_INCOMPAT_META_BG| \ ++ EXT3_FEATURE_INCOMPAT_EXTENTS) + #define EXT3_FEATURE_RO_COMPAT_SUPP (EXT3_FEATURE_RO_COMPAT_SPARSE_SUPER| \ + EXT3_FEATURE_RO_COMPAT_LARGE_FILE| \ + EXT3_FEATURE_RO_COMPAT_BTREE_DIR) +@@ -816,6 +821,9 @@ extern int ext3_get_inode_loc(struct ino + extern void ext3_truncate (struct inode *); + extern void ext3_set_inode_flags(struct inode *); + extern void ext3_set_aops(struct inode *inode); ++extern int ext3_writepage_trans_blocks(struct inode *); ++extern int ext3_block_truncate_page(handle_t *handle, struct page *page, ++ struct address_space *mapping, loff_t from); + + /* 
ioctl.c */ + extern int ext3_ioctl (struct inode *, struct file *, unsigned int, +@@ -869,6 +877,30 @@ extern struct inode_operations ext3_spec + extern struct inode_operations ext3_symlink_inode_operations; + extern struct inode_operations ext3_fast_symlink_inode_operations; + ++/* extents.c */ ++extern int ext3_ext_tree_init(handle_t *handle, struct inode *); ++extern int ext3_ext_writepage_trans_blocks(struct inode *, int); ++extern int ext3_ext_get_blocks(handle_t *, struct inode *, sector_t, ++ unsigned long, struct buffer_head *, int, int); ++extern void ext3_ext_truncate(struct inode *, struct page *); ++extern void ext3_ext_init(struct super_block *); ++extern void ext3_ext_release(struct super_block *); ++static inline int ++ext3_get_blocks_wrap(handle_t *handle, struct inode *inode, sector_t block, ++ unsigned long max_blocks, struct buffer_head *bh, ++ int create, int extend_disksize) ++{ ++ int ret; ++ if (EXT3_I(inode)->i_flags & EXT3_EXTENTS_FL) ++ return ext3_ext_get_blocks(handle, inode, block, max_blocks, ++ bh, create, extend_disksize); ++ ret = ext3_get_blocks_handle(handle, inode, block, max_blocks, bh, create, ++ extend_disksize); ++ if (ret > 0) ++ bh->b_size = (ret << inode->i_blkbits); ++ return ret; ++} ++ + + #endif /* __KERNEL__ */ + +Index: linux-2.6.18.8/include/linux/ext3_fs_i.h +=================================================================== +--- linux-2.6.18.8.orig/include/linux/ext3_fs_i.h 2007-02-24 00:52:30.000000000 +0100 ++++ linux-2.6.18.8/include/linux/ext3_fs_i.h 2007-07-17 11:08:11.000000000 +0200 +@@ -65,6 +65,16 @@ struct ext3_block_alloc_info { + #define rsv_end rsv_window._rsv_end + + /* ++ * storage for cached extent ++ */ ++struct ext3_ext_cache { ++ __u32 ec_start; ++ __u32 ec_block; ++ __u32 ec_len; /* must be 32bit to return holes */ ++ __u32 ec_type; ++}; ++ ++/* + * third extended file system inode data in memory + */ + struct ext3_inode_info { +@@ -142,6 +152,8 @@ struct ext3_inode_info { + */ + struct mutex truncate_mutex; + struct inode vfs_inode; ++ ++ struct ext3_ext_cache i_cached_extent; + }; + + #endif /* _LINUX_EXT3_FS_I */ +Index: linux-2.6.18.8/include/linux/ext3_fs_sb.h +=================================================================== +--- linux-2.6.18.8.orig/include/linux/ext3_fs_sb.h 2007-02-24 00:52:30.000000000 +0100 ++++ linux-2.6.18.8/include/linux/ext3_fs_sb.h 2007-07-17 11:08:12.000000000 +0200 +@@ -78,6 +78,16 @@ struct ext3_sb_info { + char *s_qf_names[MAXQUOTAS]; /* Names of quota files with journalled quota */ + int s_jquota_fmt; /* Format of quota to use */ + #endif ++ ++#ifdef EXTENTS_STATS ++ /* ext3 extents stats */ ++ unsigned long s_ext_min; ++ unsigned long s_ext_max; ++ unsigned long s_depth_max; ++ spinlock_t s_ext_stats_lock; ++ unsigned long s_ext_blocks; ++ unsigned long s_ext_extents; ++#endif + }; + + #endif /* _LINUX_EXT3_FS_SB */ +Index: linux-2.6.18.8/include/linux/ext3_jbd.h +=================================================================== +--- linux-2.6.18.8.orig/include/linux/ext3_jbd.h 2007-02-24 00:52:30.000000000 +0100 ++++ linux-2.6.18.8/include/linux/ext3_jbd.h 2007-07-17 09:18:14.000000000 +0200 +@@ -23,9 +23,17 @@ + * + * We may have to touch one inode, one bitmap buffer, up to three + * indirection blocks, the group and superblock summaries, and the data +- * block to complete the transaction. */ ++ * block to complete the transaction. ++ * ++ * For extents-enabled fs we may have to allocate and modify upto ++ * 5 levels of tree + root which is stored in inode. 
*/ ++ ++#define EXT3_SINGLEDATA_TRANS_BLOCKS(sb) \ ++ (EXT3_HAS_INCOMPAT_FEATURE(sb, EXT3_FEATURE_INCOMPAT_EXTENTS) \ ++ || test_opt(sb, EXTENTS) ? 27U : 8U) + +-#define EXT3_SINGLEDATA_TRANS_BLOCKS 8U ++/* Indicate that EXT3_SINGLEDATA_TRANS_BLOCKS takes the sb as argument */ ++#define EXT3_SINGLEDATA_TRANS_BLOCKS_HAS_SB + + /* Extended attribute operations touch at most two data buffers, + * two bitmap buffers, and two group summaries, in addition to the inode +@@ -42,7 +50,7 @@ + * superblock only gets updated once, of course, so don't bother + * counting that again for the quota updates. */ + +-#define EXT3_DATA_TRANS_BLOCKS(sb) (EXT3_SINGLEDATA_TRANS_BLOCKS + \ ++#define EXT3_DATA_TRANS_BLOCKS(sb) (EXT3_SINGLEDATA_TRANS_BLOCKS(sb) + \ + EXT3_XATTR_TRANS_BLOCKS - 2 + \ + 2*EXT3_QUOTA_TRANS_BLOCKS(sb)) + +@@ -78,9 +86,9 @@ + /* Amount of blocks needed for quota insert/delete - we do some block writes + * but inode, sb and group updates are done only once */ + #define EXT3_QUOTA_INIT_BLOCKS(sb) (test_opt(sb, QUOTA) ? (DQUOT_INIT_ALLOC*\ +- (EXT3_SINGLEDATA_TRANS_BLOCKS-3)+3+DQUOT_INIT_REWRITE) : 0) ++ (EXT3_SINGLEDATA_TRANS_BLOCKS(sb)-3)+3+DQUOT_INIT_REWRITE) : 0) + #define EXT3_QUOTA_DEL_BLOCKS(sb) (test_opt(sb, QUOTA) ? (DQUOT_DEL_ALLOC*\ +- (EXT3_SINGLEDATA_TRANS_BLOCKS-3)+3+DQUOT_DEL_REWRITE) : 0) ++ (EXT3_SINGLEDATA_TRANS_BLOCKS(sb)-3)+3+DQUOT_DEL_REWRITE) : 0) + #else + #define EXT3_QUOTA_TRANS_BLOCKS(sb) 0 + #define EXT3_QUOTA_INIT_BLOCKS(sb) 0 diff --git a/ldiskfs/kernel_patches/patches/ext3-fiemap-2.6.22-vanilla.patch b/ldiskfs/kernel_patches/patches/ext3-fiemap-2.6.22-vanilla.patch new file mode 100644 index 0000000..101d983 --- /dev/null +++ b/ldiskfs/kernel_patches/patches/ext3-fiemap-2.6.22-vanilla.patch @@ -0,0 +1,364 @@ +Index: linux-2.6.18/fs/ext3/ioctl.c +=================================================================== +--- linux-2.6.18.orig/fs/ext3/ioctl.c ++++ linux-2.6.18/fs/ext3/ioctl.c +@@ -15,6 +15,7 @@ + #include + #include + #include ++#include "fiemap.h" + + int ext3_ioctl (struct inode * inode, struct file * filp, unsigned int cmd, + unsigned long arg) +@@ -272,6 +272,9 @@ flags_err: + + return err; + } ++ case EXT3_IOC_FIEMAP: { ++ return ext3_fiemap(inode, filp, cmd, arg); ++ } + + + default: +Index: linux-2.6.18/include/linux/ext3_fs.h +=================================================================== +--- linux-2.6.18.orig/include/linux/ext3_fs.h ++++ linux-2.6.18/include/linux/ext3_fs.h +@@ -249,7 +249,6 @@ struct ext3_new_group_data { + __u32 free_blocks_count; + }; + +- + /* + * ioctl commands + */ +@@ -257,15 +256,16 @@ struct ext3_new_group_data { + #define EXT3_IOC_SETFLAGS FS_IOC_SETFLAGS + #define EXT3_IOC_GETVERSION _IOR('f', 3, long) + #define EXT3_IOC_SETVERSION _IOW('f', 4, long) +-#define EXT3_IOC_GROUP_EXTEND _IOW('f', 7, unsigned long) ++#define EXT3_IOC_GETRSVSZ _IOR('f', 5, long) ++#define EXT3_IOC_SETRSVSZ _IOW('f', 6, long) ++#define EXT3_IOC_GROUP_EXTEND _IOW('f', 7, unsigned long) + #define EXT3_IOC_GROUP_ADD _IOW('f', 8,struct ext3_new_group_input) ++#define EXT3_IOC_FIEMAP _IOWR('f', 10, struct fiemap) + #define EXT3_IOC_GETVERSION_OLD FS_IOC_GETVERSION + #define EXT3_IOC_SETVERSION_OLD FS_IOC_SETVERSION + #ifdef CONFIG_JBD_DEBUG + #define EXT3_IOC_WAIT_FOR_READONLY _IOR('f', 99, long) + #endif +-#define EXT3_IOC_GETRSVSZ _IOR('f', 5, long) +-#define EXT3_IOC_SETRSVSZ _IOW('f', 6, long) + + /* + * ioctl commands in 32 bit emulation +@@ -1117,6 +1117,8 @@ ext3_get_blocks_wrap(handle_t *handle, s + bh->b_size = (ret << 
inode->i_blkbits); + return ret; + } ++extern int ext3_fiemap(struct inode *, struct file *, unsigned int, ++ unsigned long); + + + #endif /* __KERNEL__ */ +Index: linux-2.6.18/include/linux/ext3_extents.h +=================================================================== +--- linux-2.6.18.orig/include/linux/ext3_extents.h ++++ linux-2.6.18/include/linux/ext3_extents.h +@@ -142,8 +142,9 @@ struct ext3_ext_path { + * callback must return valid extent (passed or newly created) + */ + typedef int (*ext_prepare_callback)(struct inode *, struct ext3_ext_path *, +- struct ext3_ext_cache *, +- void *); ++ struct ext3_ext_cache *, ++ struct ext3_extent *, void *); ++#define HAVE_EXT_PREPARE_CB_EXTENT + + #define EXT_CONTINUE 0 + #define EXT_BREAK 1 +@@ -152,6 +152,26 @@ typedef int (*ext_prepare_callback)(stru + + #define EXT_MAX_BLOCK 0xffffffff + ++/* ++ * EXT_INIT_MAX_LEN is the maximum number of blocks we can have in an ++ * initialized extent. This is 2^15 and not (2^16 - 1), since we use the ++ * MSB of ee_len field in the extent datastructure to signify if this ++ * particular extent is an initialized extent or an uninitialized (i.e. ++ * preallocated). ++ * EXT_UNINIT_MAX_LEN is the maximum number of blocks we can have in an ++ * uninitialized extent. ++ * If ee_len is <= 0x8000, it is an initialized extent. Otherwise, it is an ++ * uninitialized one. In other words, if MSB of ee_len is set, it is an ++ * uninitialized extent with only one special scenario when ee_len = 0x8000. ++ * In this case we can not have an uninitialized extent of zero length and ++ * thus we make it as a special case of initialized extent with 0x8000 length. ++ * This way we get better extent-to-group alignment for initialized extents. ++ * Hence, the maximum number of blocks we can have in an *initialized* ++ * extent is 2^15 (32768) and in an *uninitialized* extent is 2^15-1 (32767). 
++ */ ++#define EXT_INIT_MAX_LEN (1UL << 15) ++#define EXT_UNINIT_MAX_LEN (EXT_INIT_MAX_LEN - 1) ++ + #define EXT_FLAGS_CLR_UNKNOWN 0x7 /* Flags cleared on modification */ + #define EXT_HDR_GEN_BITS 24 + #define EXT_HDR_GEN_MASK ((1 << EXT_HDR_GEN_BITS) - 1) +@@ -219,6 +239,13 @@ ext3_ext_invalidate_cache(struct inode * + EXT3_I(inode)->i_cached_extent.ec_type = EXT3_EXT_CACHE_NO; + } + ++static inline int ext3_ext_is_uninitialized(struct ext3_extent *ext) ++{ ++ /* Extent with ee_len of 0x8000 is treated as an initialized extent */ ++ return (le16_to_cpu(ext->ee_len) > EXT_INIT_MAX_LEN); ++} ++ ++ + extern int ext3_ext_search_left(struct inode *, struct ext3_ext_path *, unsigned long *, unsigned long *); + extern int ext3_ext_search_right(struct inode *, struct ext3_ext_path *, unsigned long *, unsigned long *); + extern int ext3_extent_tree_init(handle_t *, struct inode *); +Index: linux-2.6.18/fs/ext3/extents.c +=================================================================== +--- linux-2.6.18.orig/fs/ext3/extents.c ++++ linux-2.6.18/fs/ext3/extents.c +@@ -42,7 +42,7 @@ + #include + #include + #include +- ++#include "fiemap.h" + + static handle_t *ext3_ext_journal_restart(handle_t *handle, int needed) + { +@@ -1477,7 +1477,7 @@ int ext3_ext_walk_space(struct inode *in + } + + BUG_ON(cbex.ec_len == 0); +- err = func(inode, path, &cbex, cbdata); ++ err = func(inode, path, &cbex, ex, cbdata); + ext3_ext_drop_refs(path); + + if (err < 0) +@@ -2289,6 +2289,143 @@ int ext3_ext_writepage_trans_blocks(stru + return needed; + } + ++struct fiemap_internal { ++ struct fiemap *fiemap_s; ++ struct fiemap_extent fm_extent; ++ size_t tot_mapping_len; ++ char *cur_ext_ptr; ++ int current_extent; ++ int err; ++}; ++ ++/* ++ * Callback function called for each extent to gather fiemap information. ++ */ ++int ext3_ext_fiemap_cb(struct inode *inode, struct ext3_ext_path *path, ++ struct ext3_ext_cache *newex, struct ext3_extent *ex, ++ void *data) ++{ ++ struct fiemap_internal *fiemap_i = data; ++ struct fiemap *fiemap_s = fiemap_i->fiemap_s; ++ struct fiemap_extent *fm_extent = &fiemap_i->fm_extent; ++ int current_extent = fiemap_i->current_extent; ++ unsigned long blksize_bits = inode->i_sb->s_blocksize_bits; ++ ++ /* ++ * ext3_ext_walk_space returns a hole for extents that have not been ++ * allocated yet. ++ */ ++ if (((u64)(newex->ec_block + newex->ec_len) << blksize_bits >= ++ inode->i_size) && !ext3_ext_is_uninitialized(ex) && ++ newex->ec_type == EXT3_EXT_CACHE_GAP) ++ return EXT_BREAK; ++ ++ /* ++ * We only need to return number of extents. ++ */ ++ if (fiemap_s->fm_flags & FIEMAP_FLAG_NUM_EXTENTS) ++ goto count_extents; ++ ++ if (current_extent >= fiemap_s->fm_extent_count) ++ return EXT_BREAK; ++ ++ memset(fm_extent, 0, sizeof(*fm_extent)); ++ fm_extent->fe_offset = (__u64)newex->ec_start << blksize_bits; ++ fm_extent->fe_length = (__u64)newex->ec_len << blksize_bits; ++ fiemap_i->tot_mapping_len += fm_extent->fe_length; ++ ++ if (newex->ec_type == EXT3_EXT_CACHE_GAP) ++ fm_extent->fe_flags |= FIEMAP_EXTENT_HOLE; ++ ++ if (ext3_ext_is_uninitialized(ex)) ++ fm_extent->fe_flags |= (FIEMAP_EXTENT_DELALLOC | ++ FIEMAP_EXTENT_UNMAPPED); ++ ++ /* ++ * Mark this fiemap_extent as FIEMAP_EXTENT_EOF if it's past the end ++ * of file. 
++ */ ++ if ((u64)(newex->ec_block + newex->ec_len) << blksize_bits >= ++ inode->i_size) ++ fm_extent->fe_flags |= FIEMAP_EXTENT_EOF; ++ ++ if (!copy_to_user(fiemap_i->cur_ext_ptr, fm_extent, ++ sizeof(struct fiemap_extent))) { ++ fiemap_i->cur_ext_ptr += sizeof(struct fiemap_extent); ++ } else { ++ fiemap_i->err = -EFAULT; ++ return EXT_BREAK; ++ } ++ ++count_extents: ++ fiemap_i->current_extent++; ++ ++ /* ++ * Stop if we are beyond requested mapping size but return complete last ++ * extent. ++ */ ++ if ((u64)(newex->ec_block + newex->ec_len) << blksize_bits >= ++ fiemap_s->fm_length) ++ return EXT_BREAK; ++ ++ return EXT_CONTINUE; ++} ++ ++int ext3_fiemap(struct inode *inode, struct file *filp, unsigned int cmd, ++ unsigned long arg) ++{ ++ struct fiemap *fiemap_s; ++ struct fiemap_internal fiemap_i; ++ struct fiemap_extent *last_extent; ++ ext3_fsblk_t start_blk; ++ int err = 0; ++ ++ if (!(EXT3_I(inode)->i_flags & EXT3_EXTENTS_FL)) ++ return -EOPNOTSUPP; ++ ++ fiemap_s = kmalloc(sizeof(*fiemap_s), GFP_KERNEL); ++ if (fiemap_s == NULL) ++ return -ENOMEM; ++ if (copy_from_user(fiemap_s, (struct fiemap __user *)arg, ++ sizeof(*fiemap_s))) ++ return -EFAULT; ++ ++ if (fiemap_s->fm_flags & FIEMAP_FLAG_INCOMPAT) ++ return -EOPNOTSUPP; ++ ++ if (fiemap_s->fm_flags & FIEMAP_FLAG_SYNC) ++ ext3_sync_file(filp, filp->f_dentry, 1); ++ ++ start_blk = (fiemap_s->fm_start + inode->i_sb->s_blocksize - 1) >> ++ inode->i_sb->s_blocksize_bits; ++ fiemap_i.fiemap_s = fiemap_s; ++ fiemap_i.tot_mapping_len = 0; ++ fiemap_i.cur_ext_ptr = (char *)(arg + sizeof(*fiemap_s)); ++ fiemap_i.current_extent = 0; ++ fiemap_i.err = 0; ++ ++ /* ++ * Walk the extent tree gathering extent information ++ */ ++ mutex_lock(&EXT3_I(inode)->truncate_mutex); ++ err = ext3_ext_walk_space(inode, start_blk , EXT_MAX_BLOCK - start_blk, ++ ext3_ext_fiemap_cb, &fiemap_i); ++ mutex_unlock(&EXT3_I(inode)->truncate_mutex); ++ if (err) ++ return err; ++ ++ fiemap_s->fm_extent_count = fiemap_i.current_extent; ++ fiemap_s->fm_length = fiemap_i.tot_mapping_len; ++ if (fiemap_i.current_extent != 0 && ++ !(fiemap_s->fm_flags & FIEMAP_FLAG_NUM_EXTENTS)) { ++ last_extent = &fiemap_i.fm_extent; ++ last_extent->fe_flags |= FIEMAP_EXTENT_LAST; ++ } ++ err = copy_to_user((void *)arg, fiemap_s, sizeof(*fiemap_s)); ++ ++ return err; ++} ++ + EXPORT_SYMBOL(ext3_mark_inode_dirty); + EXPORT_SYMBOL(ext3_ext_invalidate_cache); + EXPORT_SYMBOL(ext3_ext_insert_extent); +Index: linux-2.6.18/fs/ext3/fiemap.h +=================================================================== +--- /dev/null ++++ linux-2.6.18/fs/ext3/fiemap.h +@@ -0,0 +1,49 @@ ++/* ++ * linux/fs/ext3/fiemap.h ++ * ++ * Copyright (C) 2007 Cluster File Systems, Inc ++ * ++ * Author: Kalpak Shah ++ */ ++ ++#ifndef _LINUX_EXT3_FIEMAP_H ++#define _LINUX_EXT3_FIEMAP_H ++ ++struct fiemap_extent { ++ __u64 fe_offset; /* offset in bytes for the start of the extent */ ++ __u64 fe_length; /* length in bytes for the extent */ ++ __u32 fe_flags; /* returned FIEMAP_EXTENT_* flags for the extent */ ++ __u32 fe_lun; /* logical device number for extent (starting at 0)*/ ++}; ++ ++/* ++ * fiemap is not ext3-specific and should be moved into fs.h eventually. 
++ */ ++ ++struct fiemap { ++ __u64 fm_start; /* logical starting byte offset (in/out) */ ++ __u64 fm_length; /* logical length of map (in/out) */ ++ __u32 fm_flags; /* FIEMAP_FLAG_* flags for request (in/out) */ ++ __u32 fm_extent_count; /* number of extents in fm_extents (in/out) */ ++ __u64 fm_unused; ++ struct fiemap_extent fm_extents[0]; ++}; ++ ++#define FIEMAP_FLAG_SYNC 0x00000001 /* sync file data before map */ ++#define FIEMAP_FLAG_HSM_READ 0x00000002 /* get data from HSM before map */ ++#define FIEMAP_FLAG_NUM_EXTENTS 0x00000004 /* return only number of extents */ ++#define FIEMAP_FLAG_INCOMPAT 0xff000000 /* error for unknown flags in here */ ++ ++#define FIEMAP_EXTENT_HOLE 0x00000001 /* has no data or space allocation */ ++#define FIEMAP_EXTENT_UNWRITTEN 0x00000002 /* space allocated, but no data */ ++#define FIEMAP_EXTENT_UNMAPPED 0x00000004 /* has data but no space allocation*/ ++#define FIEMAP_EXTENT_ERROR 0x00000008 /* mapping error, errno in fe_start*/ ++#define FIEMAP_EXTENT_NO_DIRECT 0x00000010 /* cannot access data directly */ ++#define FIEMAP_EXTENT_LAST 0x00000020 /* last extent in the file */ ++#define FIEMAP_EXTENT_DELALLOC 0x00000040 /* has data but not yet written, ++ * must have EXTENT_UNKNOWN set */ ++#define FIEMAP_EXTENT_SECONDARY 0x00000080 /* data (also) in secondary storage, ++ * not in primary if EXTENT_UNKNOWN*/ ++#define FIEMAP_EXTENT_EOF 0x00000100 /* if fm_start+fm_len is beyond EOF*/ ++ ++#endif /* _LINUX_EXT3_FIEMAP_H */ +--- linux-src.org/fs/ext3/ext3_jbd.c ++++ linux-src/fs/ext3/ext3_jbd.c +@@ -2,6 +2,7 @@ + * Interface between ext3 and JBD + */ + ++#include + #include + + int __ext3_journal_get_undo_access(const char *where, handle_t *handle, +@@ -21,6 +21,7 @@ int __ext3_journal_get_write_access(cons + ext3_journal_abort_handle(where, __FUNCTION__, bh, handle,err); + return err; + } ++EXPORT_SYMBOL(__ext3_journal_get_write_access); + + int __ext3_journal_forget(const char *where, handle_t *handle, + struct buffer_head *bh) +@@ -57,3 +58,5 @@ int __ext3_journal_dirty_metadata(const + ext3_journal_abort_handle(where, __FUNCTION__, bh, handle,err); + return err; + } ++ ++EXPORT_SYMBOL(__ext3_journal_dirty_metadata); diff --git a/ldiskfs/kernel_patches/patches/ext3-ialloc-2.6.22-vanilla.patch b/ldiskfs/kernel_patches/patches/ext3-ialloc-2.6.22-vanilla.patch new file mode 100644 index 0000000..6ea00f3 --- /dev/null +++ b/ldiskfs/kernel_patches/patches/ext3-ialloc-2.6.22-vanilla.patch @@ -0,0 +1,128 @@ +Index: linux-2.6.5-SLES9_SP1_BRANCH_2004111114454891/fs/ext3/ialloc.c +=================================================================== +--- linux-2.6.5-SLES9_SP1_BRANCH_2004111114454891.orig/fs/ext3/ialloc.c 2005-05-16 14:10:54.000000000 -0600 ++++ linux-2.6.5-SLES9_SP1_BRANCH_2004111114454891/fs/ext3/ialloc.c 2005-05-16 14:18:29.000000000 -0600 +@@ -352,13 +352,17 @@ + return -1; + } + +-static int find_group_other(struct super_block *sb, struct inode *parent) ++static int find_group_other(struct super_block *sb, struct inode *parent, ++ int mode) + { + int parent_group = EXT3_I(parent)->i_block_group; ++ struct ext3_sb_info *sbi = EXT3_SB(sb); + int ngroups = EXT3_SB(sb)->s_groups_count; + struct ext3_group_desc *desc; + struct buffer_head *bh; + int group, i; ++ int best_group = -1; ++ int avefreeb, freeb, best_group_freeb = 0; + + /* + * Try to place the inode in its parent directory +@@ -366,9 +370,9 @@ + group = parent_group; + desc = ext3_get_group_desc (sb, group, &bh); + if (desc && le16_to_cpu(desc->bg_free_inodes_count) && +- 
le16_to_cpu(desc->bg_free_blocks_count))
++ (!S_ISREG(mode) || le16_to_cpu(desc->bg_free_blocks_count)))
+ return group;
+-
++ avefreeb = le32_to_cpu(sbi->s_es->s_free_blocks_count) / ngroups;
+ /*
+ * We're going to place this inode in a different blockgroup from its
+ * parent. We want to cause files in a common directory to all land in
+@@ -381,33 +385,47 @@
+ group = (group + parent->i_ino) % ngroups;
+
+ /*
+- * Use a quadratic hash to find a group with a free inode and some free
+- * blocks.
++ * Use a quadratic hash to find a group with a free inode and
++ * an average number of free blocks.
+ */
+ for (i = 1; i < ngroups; i <<= 1) {
+ group += i;
+ if (group >= ngroups)
+ group -= ngroups;
+ desc = ext3_get_group_desc (sb, group, &bh);
+- if (desc && le16_to_cpu(desc->bg_free_inodes_count) &&
+- le16_to_cpu(desc->bg_free_blocks_count))
++ if (!desc || !desc->bg_free_inodes_count)
++ continue;
++ if (!S_ISREG(mode))
++ return group;
++ if (le16_to_cpu(desc->bg_free_blocks_count) >= avefreeb)
+ return group;
+ }
+
+ /*
+- * That failed: try linear search for a free inode, even if that group
+- * has no free blocks.
++ * That failed: start from the last group used to allocate an inode;
++ * try a linear search for a free inode and, preferably,
++ * free blocks.
+ */
+- group = parent_group;
++ group = sbi->s_last_alloc_group;
++ if (group == -1)
++ group = parent_group;
++
+ for (i = 0; i < ngroups; i++) {
+ if (++group >= ngroups)
+ group = 0;
+ desc = ext3_get_group_desc (sb, group, &bh);
+- if (desc && le16_to_cpu(desc->bg_free_inodes_count))
+- return group;
++ if (!desc || !desc->bg_free_inodes_count)
++ continue;
++ freeb = le16_to_cpu(desc->bg_free_blocks_count);
++ if (freeb > best_group_freeb) {
++ best_group_freeb = freeb;
++ best_group = group;
++ if (freeb >= avefreeb || !S_ISREG(mode))
++ break;
++ }
+ }
+-
+- return -1;
++ sbi->s_last_alloc_group = best_group;
++ return best_group;
+ }
+
+ /*
+@@ -454,7 +472,7 @@
+ else
+ group = find_group_orlov(sb, dir);
+ } else
+- group = find_group_other(sb, dir);
++ group = find_group_other(sb, dir, mode);
+
+ err = -ENOSPC;
+ if (group == -1)
+Index: linux-2.6.5-SLES9_SP1_BRANCH_2004111114454891/fs/ext3/super.c
+===================================================================
+--- linux-2.6.5-SLES9_SP1_BRANCH_2004111114454891.orig/fs/ext3/super.c 2005-05-16 14:10:54.000000000 -0600
++++ linux-2.6.5-SLES9_SP1_BRANCH_2004111114454891/fs/ext3/super.c 2005-05-16 14:17:14.000000000 -0600
+@@ -1297,6 +1297,7 @@
+ percpu_counter_init(&sbi->s_dirs_counter);
+ bgl_lock_init(&sbi->s_blockgroup_lock);
+
++ sbi->s_last_alloc_group = -1;
+ for (i = 0; i < db_count; i++) {
+ block = descriptor_loc(sb, logic_sb_block, i);
+ sbi->s_group_desc[i] = sb_bread(sb, block);
+Index: linux-2.6.5-SLES9_SP1_BRANCH_2004111114454891/include/linux/ext3_fs_sb.h
+===================================================================
+--- linux-2.6.5-SLES9_SP1_BRANCH_2004111114454891.orig/include/linux/ext3_fs_sb.h 2005-05-16 14:10:54.000000000 -0600
++++ linux-2.6.5-SLES9_SP1_BRANCH_2004111114454891/include/linux/ext3_fs_sb.h 2005-05-16 14:17:14.000000000 -0600
+@@ -59,6 +59,8 @@
+ struct percpu_counter s_freeinodes_counter;
+ struct percpu_counter s_dirs_counter;
+ struct blockgroup_lock s_blockgroup_lock;
++ /* Last group used to allocate inode */
++ int s_last_alloc_group;
+
+ /* root of the per fs reservation window tree */
+ spinlock_t s_rsv_window_lock;
diff --git a/ldiskfs/kernel_patches/patches/ext3-iam-2.6.22-vanilla.patch
b/ldiskfs/kernel_patches/patches/ext3-iam-2.6.22-vanilla.patch new file mode 100644 index 0000000..d2b4c0b --- /dev/null +++ b/ldiskfs/kernel_patches/patches/ext3-iam-2.6.22-vanilla.patch @@ -0,0 +1,2301 @@ +Index: linux-stage/include/linux/ext3_fs.h +=================================================================== +--- linux-stage.orig/include/linux/ext3_fs.h 2007-11-26 23:09:05.000000000 +0300 ++++ linux-stage/include/linux/ext3_fs.h 2007-11-26 23:09:06.000000000 +0300 +@@ -812,6 +812,9 @@ + #define DX_HASH_LEGACY 0 + #define DX_HASH_HALF_MD4 1 + #define DX_HASH_TEA 2 ++#define DX_HASH_R5 3 ++#define DX_HASH_SAME 4 ++#define DX_HASH_MAX 4 + + #ifdef __KERNEL__ + +@@ -942,9 +945,6 @@ + extern void ext3_rsv_window_add(struct super_block *sb, struct ext3_reserve_window_node *rsv); + + /* dir.c */ +-extern int ext3_check_dir_entry(const char *, struct inode *, +- struct ext3_dir_entry_2 *, +- struct buffer_head *, unsigned long); + extern int ext3_htree_store_dirent(struct file *dir_file, __u32 hash, + __u32 minor_hash, + struct ext3_dir_entry_2 *dirent); +Index: linux-stage/include/linux/ext3_fs_i.h +=================================================================== +--- linux-stage.orig/include/linux/ext3_fs_i.h 2007-11-26 23:09:04.000000000 +0300 ++++ linux-stage/include/linux/ext3_fs_i.h 2007-11-26 23:16:00.000000000 +0300 +@@ -20,6 +20,7 @@ + #include + #include + #include ++#include + + #define HAVE_DISK_INODE_VERSION + +@@ -157,6 +157,11 @@ + struct mutex truncate_mutex; + struct inode vfs_inode; + ++ /* following fields for parallel directory operations -bzzz */ ++ struct dynlock i_htree_lock; ++ struct semaphore i_append_sem; ++ struct semaphore i_rename_sem; ++ + struct ext3_ext_cache i_cached_extent; + + /* mballoc */ +Index: linux-stage/fs/ext3/super.c +=================================================================== +--- linux-stage.orig/fs/ext3/super.c 2007-11-26 23:09:05.000000000 +0300 ++++ linux-stage/fs/ext3/super.c 2007-11-26 23:09:06.000000000 +0300 +@@ -464,6 +464,10 @@ + ei->i_block_alloc_info = NULL; + ei->vfs_inode.i_version = 1; + ++ dynlock_init(&ei->i_htree_lock); ++ sema_init(&ei->i_rename_sem, 1); ++ sema_init(&ei->i_append_sem, 1); ++ + memset(&ei->i_cached_extent, 0, sizeof(ei->i_cached_extent)); + INIT_LIST_HEAD(&ei->i_prealloc_list); + spin_lock_init(&ei->i_prealloc_lock); +@@ -695,6 +699,7 @@ + Opt_grpquota, + Opt_extents, Opt_noextents, Opt_extdebug, + Opt_mballoc, Opt_nomballoc, Opt_stripe, ++ Opt_hashfunc, + }; + + static match_table_t tokens = { +@@ -756,6 +760,7 @@ + {Opt_stripe, "stripe=%u"}, + {Opt_err, NULL}, + {Opt_resize, "resize"}, ++ {Opt_hashfunc,"hash=%s"}, + }; + + static ext3_fsblk_t get_sb_block(void **data) +@@ -779,6 +784,7 @@ + return sb_block; + } + ++int user_selected_hash_function = -1; + static int parse_options (char *options, struct super_block *sb, + unsigned int *inum, unsigned long *journal_devnum, + ext3_fsblk_t *n_blocks_count, int is_remount) +@@ -1120,6 +1126,22 @@ + return 0; + sbi->s_stripe = option; + break; ++ case Opt_hashfunc: ++ if (strncmp (args[0].from,"legacy",6) == 0){ ++ user_selected_hash_function = 0; ++ } else if (strncmp (args[0].from,"half_md4",8) == 0){ ++ user_selected_hash_function = 1; ++ } else if (strncmp (args[0].from,"tea",3) == 0){ ++ user_selected_hash_function = 2; ++ } else if (strncmp (args[0].from,"r5",2) == 0){ ++ user_selected_hash_function = 3; ++ } else if (strncmp (args[0].from,"same",4) == 0){ ++ user_selected_hash_function = 4; ++ } else { ++ printk ("Hashfunc name wrong\n"); ++ 
return 0; ++ } ++ break; + default: + printk (KERN_ERR + "EXT3-fs: Unrecognized mount option \"%s\" " +Index: linux-stage/fs/ext3/namei.c +=================================================================== +--- linux-stage.orig/fs/ext3/namei.c 2007-11-26 23:09:04.000000000 +0300 ++++ linux-stage/fs/ext3/namei.c 2007-11-26 23:09:06.000000000 +0300 +@@ -24,6 +24,7 @@ + * Theodore Ts'o, 2002 + */ + ++#include + #include + #include + #include +@@ -36,6 +37,7 @@ + #include + #include + #include ++#include + + #include "namei.h" + #include "xattr.h" +@@ -50,25 +52,29 @@ + #define NAMEI_RA_SIZE (NAMEI_RA_CHUNKS * NAMEI_RA_BLOCKS) + #define NAMEI_RA_INDEX(c,b) (((c) * NAMEI_RA_BLOCKS) + (b)) + +-static struct buffer_head *ext3_append(handle_t *handle, ++ ++struct buffer_head *ext3_append(handle_t *handle, + struct inode *inode, + u32 *block, int *err) + { + struct buffer_head *bh; ++ struct ext3_inode_info *ei = EXT3_I(inode); + ++ /* with parallel dir operations all appends ++ * have to be serialized -bzzz */ ++ down(&ei->i_append_sem); + *block = inode->i_size >> inode->i_sb->s_blocksize_bits; + +- if ((bh = ext3_bread(handle, inode, *block, 1, err))) { ++ bh = ext3_bread(handle, inode, *block, 1, err); ++ if (bh != NULL) { + inode->i_size += inode->i_sb->s_blocksize; +- EXT3_I(inode)->i_disksize = inode->i_size; +- ext3_journal_get_write_access(handle,bh); ++ ei->i_disksize = inode->i_size; + } ++ up(&ei->i_append_sem); ++ + return bh; + } + +-#ifndef assert +-#define assert(test) J_ASSERT(test) +-#endif + + #ifndef swap + #define swap(x, y) do { typeof(x) z = x; x = y; y = z; } while (0) +@@ -77,167 +83,84 @@ + #define dxtrace(command) + #endif + +-struct fake_dirent +-{ +- __le32 inode; +- __le16 rec_len; +- u8 name_len; +- u8 file_type; +-}; +- +-struct dx_countlimit +-{ +- __le16 limit; +- __le16 count; +-}; +- +-struct dx_entry +-{ +- __le32 hash; +- __le32 block; +-}; +- +-/* +- * dx_root_info is laid out so that if it should somehow get overlaid by a +- * dirent the two low bits of the hash version will be zero. Therefore, the +- * hash version mod 4 should never be 0. Sincerely, the paranoia department. 
+- */ +- +-struct dx_root +-{ +- struct fake_dirent dot; +- char dot_name[4]; +- struct fake_dirent dotdot; +- char dotdot_name[4]; +- struct dx_root_info +- { +- __le32 reserved_zero; +- u8 hash_version; +- u8 info_length; /* 8 */ +- u8 indirect_levels; +- u8 unused_flags; +- } +- info; +- struct dx_entry entries[0]; +-}; +- +-struct dx_node +-{ +- struct fake_dirent fake; +- struct dx_entry entries[0]; +-}; +- +- +-struct dx_frame +-{ +- struct buffer_head *bh; +- struct dx_entry *entries; +- struct dx_entry *at; +-}; +- +-struct dx_map_entry +-{ +- u32 hash; +- u16 offs; +- u16 size; +-}; +- + #ifdef CONFIG_EXT3_INDEX +-static inline unsigned dx_get_block (struct dx_entry *entry); +-static void dx_set_block (struct dx_entry *entry, unsigned value); +-static inline unsigned dx_get_hash (struct dx_entry *entry); +-static void dx_set_hash (struct dx_entry *entry, unsigned value); +-static unsigned dx_get_count (struct dx_entry *entries); +-static unsigned dx_get_limit (struct dx_entry *entries); +-static void dx_set_count (struct dx_entry *entries, unsigned value); +-static void dx_set_limit (struct dx_entry *entries, unsigned value); +-static unsigned dx_root_limit (struct inode *dir, unsigned infosize); +-static unsigned dx_node_limit (struct inode *dir); +-static struct dx_frame *dx_probe(struct dentry *dentry, +- struct inode *dir, +- struct dx_hash_info *hinfo, +- struct dx_frame *frame, +- int *err); +-static void dx_release (struct dx_frame *frames); ++static inline unsigned dx_get_block(struct iam_path *p, struct iam_entry *entry); ++static void dx_set_block(struct iam_path *p, ++ struct iam_entry *entry, unsigned value); ++static unsigned dx_get_limit(struct iam_entry *entries); ++static void dx_set_count(struct iam_entry *entries, unsigned value); ++static void dx_set_limit(struct iam_entry *entries, unsigned value); ++static unsigned dx_root_limit(struct iam_path *p); ++static unsigned dx_node_limit(struct iam_path *p); ++static int dx_probe(struct qstr *name, ++ struct inode *dir, ++ struct dx_hash_info *hinfo, ++ struct iam_path *path); + static int dx_make_map (struct ext3_dir_entry_2 *de, int size, + struct dx_hash_info *hinfo, struct dx_map_entry map[]); + static void dx_sort_map(struct dx_map_entry *map, unsigned count); + static struct ext3_dir_entry_2 *dx_move_dirents (char *from, char *to, + struct dx_map_entry *offsets, int count); + static struct ext3_dir_entry_2* dx_pack_dirents (char *base, int size); +-static void dx_insert_block (struct dx_frame *frame, u32 hash, u32 block); +-static int ext3_htree_next_block(struct inode *dir, __u32 hash, +- struct dx_frame *frame, +- struct dx_frame *frames, +- __u32 *start_hash); + static struct buffer_head * ext3_dx_find_entry(struct dentry *dentry, + struct ext3_dir_entry_2 **res_dir, int *err); + static int ext3_dx_add_entry(handle_t *handle, struct dentry *dentry, + struct inode *inode); +- +-/* +- * Future: use high four bits of block for coalesce-on-delete flags +- * Mask them off for now. 
+- */ +- +-static inline unsigned dx_get_block (struct dx_entry *entry) ++static inline void dx_set_limit(struct iam_entry *entries, unsigned value) + { +- return le32_to_cpu(entry->block) & 0x00ffffff; +-} +- +-static inline void dx_set_block (struct dx_entry *entry, unsigned value) +-{ +- entry->block = cpu_to_le32(value); +-} +- +-static inline unsigned dx_get_hash (struct dx_entry *entry) +-{ +- return le32_to_cpu(entry->hash); +-} +- +-static inline void dx_set_hash (struct dx_entry *entry, unsigned value) +-{ +- entry->hash = cpu_to_le32(value); +-} +- +-static inline unsigned dx_get_count (struct dx_entry *entries) +-{ +- return le16_to_cpu(((struct dx_countlimit *) entries)->count); +-} +- +-static inline unsigned dx_get_limit (struct dx_entry *entries) +-{ +- return le16_to_cpu(((struct dx_countlimit *) entries)->limit); ++ ((struct dx_countlimit *) entries)->limit = cpu_to_le16(value); + } + +-static inline void dx_set_count (struct dx_entry *entries, unsigned value) ++int dx_index_is_compat(struct iam_path *path) + { +- ((struct dx_countlimit *) entries)->count = cpu_to_le16(value); ++ return iam_path_descr(path) == &iam_htree_compat_param; + } + +-static inline void dx_set_limit (struct dx_entry *entries, unsigned value) +-{ +- ((struct dx_countlimit *) entries)->limit = cpu_to_le16(value); +-} + +-static inline unsigned dx_root_limit (struct inode *dir, unsigned infosize) ++int dx_node_check(struct iam_path *p, struct iam_frame *f) + { +- unsigned entry_space = dir->i_sb->s_blocksize - EXT3_DIR_REC_LEN(1) - +- EXT3_DIR_REC_LEN(2) - infosize; +- return 0? 20: entry_space / sizeof(struct dx_entry); +-} ++ struct iam_entry *e; ++ struct iam_container *c; ++ unsigned count; ++ unsigned i; ++ iam_ptr_t blk; ++ iam_ptr_t root; ++ struct inode *inode; + +-static inline unsigned dx_node_limit (struct inode *dir) +-{ +- unsigned entry_space = dir->i_sb->s_blocksize - EXT3_DIR_REC_LEN(0); +- return 0? 22: entry_space / sizeof(struct dx_entry); ++ c = p->ip_container; ++ e = dx_node_get_entries(p, f); ++ count = dx_get_count(e); ++ e = iam_entry_shift(p, e, 1); ++ root = iam_path_descr(p)->id_ops->id_root_ptr(c); ++ ++ inode = iam_path_obj(p); ++ for (i = 0; i < count - 1; ++i, e = iam_entry_shift(p, e, 1)) { ++ iam_ikeycpy(c, iam_path_ikey(p, 0), iam_path_ikey(p, 1)); ++ iam_get_ikey(p, e, iam_path_ikey(p, 1)); ++ if (i > 0 && ++ iam_ikeycmp(c, iam_path_ikey(p, 0), ++ iam_path_ikey(p, 1)) > 0) ++ return 0; ++ blk = dx_get_block(p, e); ++ /* ++ * Disable this check as it is racy. ++ */ ++ if (0 && inode->i_size < (blk + 1) * inode->i_sb->s_blocksize) ++ return 0; ++ /* ++ * By definition of a tree, no node points to the root. ++ */ ++ if (blk == root) ++ return 0; ++ } ++ return 1; + } + + /* + * Debug + */ + #ifdef DX_DEBUG +-static void dx_show_index (char * label, struct dx_entry *entries) ++static void dx_show_index (char * label, struct iam_entry *entries) + { + int i, n = dx_get_count (entries); + printk("%s index ", label); +@@ -288,7 +212,7 @@ + } + + struct stats dx_show_entries(struct dx_hash_info *hinfo, struct inode *dir, +- struct dx_entry *entries, int levels) ++ struct iam_entry *entries, int levels) + { + unsigned blocksize = dir->i_sb->s_blocksize; + unsigned count = dx_get_count (entries), names = 0, space = 0, i; +@@ -334,160 +243,369 @@ + #endif /* DX_DEBUG */ + + /* +- * Probe for a directory leaf block to search. ++ * Per-node tree locking. 
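++ *
++ * Two mechanisms are combined: a short-term spinlock on a spare
++ * buffer_head state bit (BH_DXLock, see dx_lock_bh() below) protects
++ * in-place reads and updates of a single index block, while long-term
++ * per-block dynlocks (dx_lock_htree()) serialize splits of a block
++ * against concurrent lookups.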
+ * +- * dx_probe can return ERR_BAD_DX_DIR, which means there was a format +- * error in the directory index, and the caller should fall back to +- * searching the directory normally. The callers of dx_probe **MUST** +- * check for this error code, and make sure it never gets reflected +- * back to userspace. + */ +-static struct dx_frame * +-dx_probe(struct dentry *dentry, struct inode *dir, +- struct dx_hash_info *hinfo, struct dx_frame *frame_in, int *err) +-{ +- unsigned count, indirect; +- struct dx_entry *at, *entries, *p, *q, *m; +- struct dx_root *root; +- struct buffer_head *bh; +- struct dx_frame *frame = frame_in; +- u32 hash; + +- frame->bh = NULL; +- if (dentry) +- dir = dentry->d_parent->d_inode; +- if (!(bh = ext3_bread (NULL,dir, 0, 0, err))) +- goto fail; +- root = (struct dx_root *) bh->b_data; +- if (root->info.hash_version != DX_HASH_TEA && +- root->info.hash_version != DX_HASH_HALF_MD4 && +- root->info.hash_version != DX_HASH_LEGACY) { +- ext3_warning(dir->i_sb, __FUNCTION__, +- "Unrecognised inode hash code %d", +- root->info.hash_version); +- brelse(bh); +- *err = ERR_BAD_DX_DIR; +- goto fail; ++/* FIXME: this should be reworked using bb_spin_lock ++ * introduced in -mm tree ++ */ ++#define BH_DXLock 25 ++ ++#define DX_DEBUG (1) ++ ++#if DX_DEBUG ++static struct dx_lock_stats { ++ unsigned dls_bh_lock; ++ unsigned dls_bh_busy; ++ unsigned dls_bh_again; ++ unsigned dls_bh_full_again; ++} dx_lock_stats = { 0, }; ++#define DX_DEVAL(x) x ++#else ++#define DX_DEVAL(x) ++#endif ++ ++static inline void dx_lock_bh(struct buffer_head volatile *bh) ++{ ++ DX_DEVAL(dx_lock_stats.dls_bh_lock++); ++#ifdef CONFIG_SMP ++ while (test_and_set_bit(BH_DXLock, &bh->b_state)) { ++ DX_DEVAL(dx_lock_stats.dls_bh_busy++); ++ while (test_bit(BH_DXLock, &bh->b_state)) ++ cpu_relax(); ++ } ++#endif ++} ++ ++static inline void dx_unlock_bh(struct buffer_head *bh) ++{ ++#ifdef CONFIG_SMP ++ smp_mb__before_clear_bit(); ++ clear_bit(BH_DXLock, &bh->b_state); ++#endif ++} ++ ++/* ++ * this locking primitives are used to protect parts ++ * of dir's htree. 
protection unit is block: leaf or index ++ */ ++struct dynlock_handle *dx_lock_htree(struct inode *dir, unsigned long value, ++ enum dynlock_type lt) ++{ ++ return dynlock_lock(&EXT3_I(dir)->i_htree_lock, value, lt, GFP_NOFS); ++} ++ ++void dx_unlock_htree(struct inode *dir, struct dynlock_handle *lh) ++{ ++ if (lh != NULL) ++ dynlock_unlock(&EXT3_I(dir)->i_htree_lock, lh); ++} ++ ++static void dx_unlock_array(struct inode *dir, struct dynlock_handle **lh) ++{ ++ int i; ++ ++ for (i = 0; i < DX_MAX_TREE_HEIGHT; ++i, ++lh) { ++ if (*lh != NULL) { ++ dx_unlock_htree(dir, *lh); ++ *lh = NULL; ++ } + } +- hinfo->hash_version = root->info.hash_version; +- hinfo->seed = EXT3_SB(dir->i_sb)->s_hash_seed; +- if (dentry) +- ext3fs_dirhash(dentry->d_name.name, dentry->d_name.len, hinfo); +- hash = hinfo->hash; +- +- if (root->info.unused_flags & 1) { +- ext3_warning(dir->i_sb, __FUNCTION__, +- "Unimplemented inode hash flags: %#06x", +- root->info.unused_flags); +- brelse(bh); +- *err = ERR_BAD_DX_DIR; +- goto fail; ++} ++ ++/* ++ * dx_find_position ++ * ++ * search position of specified hash in index ++ * ++ */ ++ ++struct iam_entry *dx_find_position(struct iam_path *path, ++ struct iam_frame *frame) ++{ ++ int count; ++ struct iam_entry *p; ++ struct iam_entry *q; ++ struct iam_entry *m; ++ ++ count = dx_get_count(frame->entries); ++ assert_corr(count && count <= dx_get_limit(frame->entries)); ++ p = iam_entry_shift(path, frame->entries, ++ dx_index_is_compat(path) ? 1 : 2); ++ q = iam_entry_shift(path, frame->entries, count - 1); ++ while (p <= q) { ++ m = iam_entry_shift(path, p, iam_entry_diff(path, q, p) / 2); ++ if (iam_ikeycmp(path->ip_container, iam_ikey_at(path, m), ++ path->ip_ikey_target) > 0) ++ q = iam_entry_shift(path, m, -1); ++ else ++ p = iam_entry_shift(path, m, +1); + } ++ return iam_entry_shift(path, p, -1); ++} + +- if ((indirect = root->info.indirect_levels) > 1) { +- ext3_warning(dir->i_sb, __FUNCTION__, +- "Unimplemented inode hash depth: %#06x", +- root->info.indirect_levels); +- brelse(bh); +- *err = ERR_BAD_DX_DIR; +- goto fail; ++static iam_ptr_t dx_find_ptr(struct iam_path *path, struct iam_frame *frame) ++{ ++ return dx_get_block(path, dx_find_position(path, frame)); ++} ++ ++/* ++ * Fast check for frame consistency. 
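++ *
++ * Verifies, under the caller's dx_lock_bh(), that frame->at still lies
++ * within the entry array, that it still refers to the leaf remembered
++ * in frame->leaf, and that the target key sorts between the keys at
++ * frame->at and the next entry; -EAGAIN means a concurrent split moved
++ * things and the path has to be re-validated.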
++ */ ++static int dx_check_fast(struct iam_path *path, struct iam_frame *frame) ++{ ++ struct iam_container *bag; ++ struct iam_entry *next; ++ struct iam_entry *last; ++ struct iam_entry *entries; ++ struct iam_entry *at; ++ ++ bag = path->ip_container; ++ at = frame->at; ++ entries = frame->entries; ++ last = iam_entry_shift(path, entries, dx_get_count(entries) - 1); ++ ++ if (unlikely(at > last)) ++ return -EAGAIN; ++ ++ if (unlikely(dx_get_block(path, at) != frame->leaf)) ++ return -EAGAIN; ++ ++ if (unlikely(iam_ikeycmp(bag, iam_ikey_at(path, at), ++ path->ip_ikey_target) > 0)) ++ return -EAGAIN; ++ ++ next = iam_entry_shift(path, at, +1); ++ if (next <= last) { ++ if (unlikely(iam_ikeycmp(bag, iam_ikey_at(path, next), ++ path->ip_ikey_target) <= 0)) ++ return -EAGAIN; + } ++ return 0; ++} + +- entries = (struct dx_entry *) (((char *)&root->info) + +- root->info.info_length); +- +- if (dx_get_limit(entries) != dx_root_limit(dir, +- root->info.info_length)) { +- ext3_warning(dir->i_sb, __FUNCTION__, +- "dx entry: limit != root limit"); +- brelse(bh); +- *err = ERR_BAD_DX_DIR; +- goto fail; +- } +- +- dxtrace (printk("Look up %x", hash)); +- while (1) +- { +- count = dx_get_count(entries); +- if (!count || count > dx_get_limit(entries)) { +- ext3_warning(dir->i_sb, __FUNCTION__, +- "dx entry: no count or count > limit"); +- brelse(bh); +- *err = ERR_BAD_DX_DIR; +- goto fail2; +- } +- +- p = entries + 1; +- q = entries + count - 1; +- while (p <= q) +- { +- m = p + (q - p)/2; +- dxtrace(printk(".")); +- if (dx_get_hash(m) > hash) +- q = m - 1; +- else +- p = m + 1; +- } ++/* ++ * returns 0 if path was unchanged, -EAGAIN otherwise. ++ */ ++static int dx_check_path(struct iam_path *path, struct iam_frame *frame) ++{ ++ int equal; + +- if (0) // linear search cross check +- { +- unsigned n = count - 1; +- at = entries; +- while (n--) +- { +- dxtrace(printk(",")); +- if (dx_get_hash(++at) > hash) +- { +- at--; +- break; +- } ++ dx_lock_bh(frame->bh); ++ equal = dx_check_fast(path, frame) == 0 || ++ frame->leaf == dx_find_ptr(path, frame); ++ DX_DEVAL(dx_lock_stats.dls_bh_again += !equal); ++ dx_unlock_bh(frame->bh); ++ ++ return equal ? 0 : -EAGAIN; ++} ++ ++/* ++ * returns 0 if path was unchanged, -EAGAIN otherwise. ++ */ ++static int dx_check_full_path(struct iam_path *path, int search) ++{ ++ struct iam_frame *bottom; ++ struct iam_frame *scan; ++ int i; ++ int result; ++ ++ do_corr(schedule()); ++ ++ for (bottom = path->ip_frames, i = 0; ++ i < DX_MAX_TREE_HEIGHT && bottom->bh != NULL; ++bottom, ++i) { ++ ; /* find last filled in frame */ ++ } ++ ++ /* ++ * Lock frames, bottom to top. ++ */ ++ for (scan = bottom - 1; scan >= path->ip_frames; --scan) ++ dx_lock_bh(scan->bh); ++ /* ++ * Check them top to bottom. ++ */ ++ result = 0; ++ for (scan = path->ip_frames; scan < bottom; ++scan) { ++ struct iam_entry *pos; ++ ++ if (search) { ++ if (dx_check_fast(path, scan) == 0) ++ continue; ++ ++ pos = dx_find_position(path, scan); ++ if (scan->leaf != dx_get_block(path, pos)) { ++ result = -EAGAIN; ++ break; ++ } ++ scan->at = pos; ++ } else { ++ pos = iam_entry_shift(path, scan->entries, ++ dx_get_count(scan->entries) - 1); ++ if (scan->at > pos || ++ scan->leaf != dx_get_block(path, scan->at)) { ++ result = -EAGAIN; ++ break; + } +- assert (at == p - 1); + } +- +- at = p - 1; +- dxtrace(printk(" %x->%u\n", at == entries? 
0: dx_get_hash(at), dx_get_block(at))); +- frame->bh = bh; +- frame->entries = entries; +- frame->at = at; +- if (!indirect--) return frame; +- if (!(bh = ext3_bread (NULL,dir, dx_get_block(at), 0, err))) +- goto fail2; +- at = entries = ((struct dx_node *) bh->b_data)->entries; +- if (dx_get_limit(entries) != dx_node_limit (dir)) { +- ext3_warning(dir->i_sb, __FUNCTION__, +- "dx entry: limit != node limit"); +- brelse(bh); +- *err = ERR_BAD_DX_DIR; +- goto fail2; +- } +- frame++; +- frame->bh = NULL; +- } +-fail2: +- while (frame >= frame_in) { +- brelse(frame->bh); +- frame--; + } +-fail: +- if (*err == ERR_BAD_DX_DIR) +- ext3_warning(dir->i_sb, __FUNCTION__, +- "Corrupt dir inode %ld, running e2fsck is " +- "recommended.", dir->i_ino); +- return NULL; ++ ++ /* ++ * Unlock top to bottom. ++ */ ++ for (scan = path->ip_frames; scan < bottom; ++scan) ++ dx_unlock_bh(scan->bh); ++ DX_DEVAL(dx_lock_stats.dls_bh_full_again += !!result); ++ do_corr(schedule()); ++ ++ return result; + } + +-static void dx_release (struct dx_frame *frames) ++static int dx_lookup_try(struct iam_path *path) ++{ ++ u32 ptr; ++ int err = 0; ++ int i; ++ ++ struct iam_descr *param; ++ struct iam_frame *frame; ++ struct iam_container *c; ++ ++ param = iam_path_descr(path); ++ c = path->ip_container; ++ ++ ptr = param->id_ops->id_root_ptr(c); ++ for (frame = path->ip_frames, i = 0; i <= path->ip_indirect; ++ ++frame, ++i) { ++ err = param->id_ops->id_node_read(c, (iam_ptr_t)ptr, NULL, ++ &frame->bh); ++ do_corr(schedule()); ++ ++ dx_lock_bh(frame->bh); ++ /* ++ * node must be initialized under bh lock because concurrent ++ * creation procedure may change it and dx_lookup_try() will ++ * see obsolete tree height. -bzzz ++ */ ++ if (err != 0) ++ break; ++ ++ if (EXT3_INVARIANT_ON) { ++ err = param->id_ops->id_node_check(path, frame); ++ if (err != 0) ++ break; ++ } ++ ++ err = param->id_ops->id_node_load(path, frame); ++ if (err != 0) ++ break; ++ ++ assert_inv(dx_node_check(path, frame)); ++ /* ++ * splitting may change root index block and move hash we're ++ * looking for into another index block so, we have to check ++ * this situation and repeat from begining if path got changed ++ * -bzzz ++ */ ++ if (i > 0) { ++ err = dx_check_path(path, frame - 1); ++ if (err != 0) ++ break; ++ } ++ ++ frame->at = dx_find_position(path, frame); ++ frame->curidx = ptr; ++ frame->leaf = ptr = dx_get_block(path, frame->at); ++ ++ dx_unlock_bh(frame->bh); ++ do_corr(schedule()); ++ } ++ if (err != 0) ++ dx_unlock_bh(frame->bh); ++ path->ip_frame = --frame; ++ return err; ++} ++ ++static int dx_lookup(struct iam_path *path) ++{ ++ int err; ++ int i; ++ ++ for (i = 0; i < DX_MAX_TREE_HEIGHT; ++ i) ++ assert(path->ip_frames[i].bh == NULL); ++ ++ do { ++ err = dx_lookup_try(path); ++ do_corr(schedule()); ++ if (err != 0) ++ iam_path_fini(path); ++ } while (err == -EAGAIN); ++ ++ return err; ++} ++ ++/* ++ * Performs path lookup and returns with found leaf (if any) locked by htree ++ * lock. 
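++ *
++ * The lock is taken optimistically: once the leaf found by dx_lookup()
++ * is locked, the whole path is re-checked with dx_check_full_path(),
++ * and the lookup is restarted from the root if a concurrent split has
++ * moved the leaf in the meantime.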
++ */ ++int dx_lookup_lock(struct iam_path *path, ++ struct dynlock_handle **dl, enum dynlock_type lt) + { +- if (frames[0].bh == NULL) +- return; ++ int result; ++ struct inode *dir; + +- if (((struct dx_root *) frames[0].bh->b_data)->info.indirect_levels) +- brelse(frames[1].bh); +- brelse(frames[0].bh); ++ dir = iam_path_obj(path); ++ while ((result = dx_lookup(path)) == 0) { ++ do_corr(schedule()); ++ *dl = dx_lock_htree(dir, path->ip_frame->leaf, lt); ++ if (*dl == NULL) { ++ iam_path_fini(path); ++ result = -ENOMEM; ++ break; ++ } ++ do_corr(schedule()); ++ /* ++ * while locking leaf we just found may get split so we need ++ * to check this -bzzz ++ */ ++ if (dx_check_full_path(path, 1) == 0) ++ break; ++ dx_unlock_htree(dir, *dl); ++ *dl = NULL; ++ iam_path_fini(path); ++ } ++ return result; + } + + /* ++ * Probe for a directory leaf block to search. ++ * ++ * dx_probe can return ERR_BAD_DX_DIR, which means there was a format ++ * error in the directory index, and the caller should fall back to ++ * searching the directory normally. The callers of dx_probe **MUST** ++ * check for this error code, and make sure it never gets reflected ++ * back to userspace. ++ */ ++static int dx_probe(struct qstr *name, struct inode *dir, ++ struct dx_hash_info *hinfo, struct iam_path *path) ++{ ++ int err; ++ struct iam_path_compat *ipc; ++ ++ assert_corr(path->ip_data != NULL); ++ ipc = container_of(path->ip_data, struct iam_path_compat, ipc_descr); ++ ipc->ipc_qstr = name; ++ ipc->ipc_hinfo = hinfo; ++ ++ assert_corr(dx_index_is_compat(path)); ++ err = dx_lookup(path); ++ assert_corr(err != 0 || path->ip_frames[path->ip_indirect].bh != NULL); ++ return err; ++} ++ ++ ++/* + * This function increments the frame pointer to search the next leaf + * block, and reads in the necessary intervening nodes if the search + * should be necessary. Whether or not the search is necessary is +@@ -463,17 +632,16 @@ + * If start_hash is non-null, it will be filled in with the starting + * hash of the next page. + */ +-static int ext3_htree_next_block(struct inode *dir, __u32 hash, +- struct dx_frame *frame, +- struct dx_frame *frames, +- __u32 *start_hash) ++static int ext3_htree_advance(struct inode *dir, __u32 hash, ++ struct iam_path *path, __u32 *start_hash, ++ int compat) + { +- struct dx_frame *p; ++ struct iam_frame *p; + struct buffer_head *bh; + int err, num_frames = 0; + __u32 bhash; + +- p = frame; ++ p = path->ip_frame; + /* + * Find the next leaf page by incrementing the frame pointer. + * If we run out of entries in the interior node, loop around and +@@ -482,14 +650,26 @@ + * nodes need to be read. + */ + while (1) { +- if (++(p->at) < p->entries + dx_get_count(p->entries)) ++ do_corr(schedule()); ++ dx_lock_bh(p->bh); ++ p->at = iam_entry_shift(path, p->at, +1); ++ if (p->at < iam_entry_shift(path, p->entries, ++ dx_get_count(p->entries))) { ++ p->leaf = dx_get_block(path, p->at); ++ dx_unlock_bh(p->bh); + break; +- if (p == frames) ++ } ++ dx_unlock_bh(p->bh); ++ if (p == path->ip_frames) + return 0; + num_frames++; +- p--; ++ --p; + } + ++ if (compat) { ++ /* ++ * Htree hash magic. ++ */ + /* + * If the hash is 1, then continue only if the next page has a + * continuation hash of any value. This is used for readdir +@@ -497,30 +677,146 @@ + * desired contiuation hash. If it doesn't, return since + * there's no point to read in the successive index pages. 
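++ *
++ * (Stored index keys reserve the low bit as a continuation marker: an
++ * odd key denotes a block that continues entries of the same hash as
++ * its predecessor, cf. the "hash2 + continued" delimiter computed in
++ * move_entries() below.)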
+ */ +- bhash = dx_get_hash(p->at); ++ iam_get_ikey(path, p->at, (struct iam_ikey *)&bhash); + if (start_hash) + *start_hash = bhash; + if ((hash & 1) == 0) { + if ((bhash & ~1) != hash) + return 0; + } ++ } + /* + * If the hash is HASH_NB_ALWAYS, we always go to the next + * block so no check is necessary + */ + while (num_frames--) { +- if (!(bh = ext3_bread(NULL, dir, dx_get_block(p->at), +- 0, &err))) ++ iam_ptr_t idx; ++ ++ do_corr(schedule()); ++ dx_lock_bh(p->bh); ++ idx = p->leaf = dx_get_block(path, p->at); ++ dx_unlock_bh(p->bh); ++ err = iam_path_descr(path)->id_ops-> ++ id_node_read(path->ip_container, idx, NULL, &bh); ++ if (err != 0) + return err; /* Failure */ +- p++; +- brelse (p->bh); ++ ++p; ++ brelse(p->bh); ++ assert_corr(p->bh != bh); + p->bh = bh; +- p->at = p->entries = ((struct dx_node *) bh->b_data)->entries; ++ p->entries = dx_node_get_entries(path, p); ++ p->at = iam_entry_shift(path, p->entries, !compat); ++ assert_corr(p->curidx != idx); ++ p->curidx = idx; ++ dx_lock_bh(p->bh); ++ assert_corr(p->leaf != dx_get_block(path, p->at)); ++ p->leaf = dx_get_block(path, p->at); ++ dx_unlock_bh(p->bh); ++ assert_inv(dx_node_check(path, p)); + } + return 1; + } + +- ++int iam_index_lock(struct iam_path *path, struct dynlock_handle **lh) ++{ ++ struct iam_frame *f; ++ ++ for (f = path->ip_frame; f >= path->ip_frames; --f, ++lh) { ++ do_corr(schedule()); ++ *lh = dx_lock_htree(iam_path_obj(path), f->curidx, DLT_READ); ++ if (*lh == NULL) ++ return -ENOMEM; ++ } ++ return 0; ++} ++ ++static int iam_index_advance(struct iam_path *path) ++{ ++ return ext3_htree_advance(iam_path_obj(path), 0, path, NULL, 0); ++} ++ ++/* ++ * Advance index part of @path to point to the next leaf. Returns 1 on ++ * success, 0, when end of container was reached. Leaf node is locked. ++ */ ++int iam_index_next(struct iam_container *c, struct iam_path *path) ++{ ++ iam_ptr_t cursor; ++ struct dynlock_handle *lh[DX_MAX_TREE_HEIGHT] = { 0, }; ++ int result; ++ struct inode *object; ++ ++ /* ++ * Locking for iam_index_next()... is to be described. 
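++ *
++ * In short: the index frames on the path are read-locked bottom-up via
++ * iam_index_lock(), the path is re-validated with dx_check_full_path(),
++ * and on any detected race the lookup is redone from the root until the
++ * remembered cursor block is reached again.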
++ */ ++ ++ object = c->ic_object; ++ cursor = path->ip_frame->leaf; ++ ++ while (1) { ++ result = iam_index_lock(path, lh); ++ do_corr(schedule()); ++ if (result < 0) ++ break; ++ ++ result = dx_check_full_path(path, 0); ++ if (result == 0 && cursor == path->ip_frame->leaf) { ++ result = iam_index_advance(path); ++ ++ assert_corr(result == 0 || ++ cursor != path->ip_frame->leaf); ++ break; ++ } ++ do { ++ dx_unlock_array(object, lh); ++ ++ iam_path_release(path); ++ do_corr(schedule()); ++ ++ result = dx_lookup(path); ++ if (result < 0) ++ break; ++ ++ while (path->ip_frame->leaf != cursor) { ++ do_corr(schedule()); ++ ++ result = iam_index_lock(path, lh); ++ do_corr(schedule()); ++ if (result < 0) ++ break; ++ ++ result = dx_check_full_path(path, 0); ++ if (result != 0) ++ break; ++ ++ result = iam_index_advance(path); ++ if (result == 0) { ++ ext3_error(object->i_sb, __FUNCTION__, ++ "cannot find cursor: %u\n", ++ cursor); ++ result = -EIO; ++ } ++ if (result < 0) ++ break; ++ result = dx_check_full_path(path, 0); ++ if (result != 0) ++ break; ++ dx_unlock_array(object, lh); ++ } ++ } while (result == -EAGAIN); ++ if (result < 0) ++ break; ++ } ++ dx_unlock_array(object, lh); ++ return result; ++} ++ ++int ext3_htree_next_block(struct inode *dir, __u32 hash, ++ struct iam_path *path, __u32 *start_hash) ++{ ++ return ext3_htree_advance(dir, hash, path, start_hash, 1); ++} ++ + /* + * p is at least 6 bytes before the end of page + */ +@@ -593,7 +889,8 @@ + { + struct dx_hash_info hinfo; + struct ext3_dir_entry_2 *de; +- struct dx_frame frames[2], *frame; ++ struct iam_path_compat cpath; ++ struct iam_path *path = &cpath.ipc_path; + struct inode *dir; + int block, err; + int count = 0; +@@ -603,6 +900,7 @@ + dxtrace(printk("In htree_fill_tree, start hash: %x:%x\n", start_hash, + start_minor_hash)); + dir = dir_file->f_path.dentry->d_inode; ++ iam_path_compat_init(&cpath, dir); + if (!(EXT3_I(dir)->i_flags & EXT3_INDEX_FL)) { + hinfo.hash_version = EXT3_SB(dir->i_sb)->s_def_hash_version; + hinfo.seed = EXT3_SB(dir->i_sb)->s_hash_seed; +@@ -613,19 +911,19 @@ + } + hinfo.hash = start_hash; + hinfo.minor_hash = 0; +- frame = dx_probe(NULL, dir_file->f_path.dentry->d_inode, &hinfo, frames, &err); +- if (!frame) ++ err = dx_probe(NULL, dir_file->f_path.dentry->d_inode, &hinfo, path); ++ if (err != 0) + return err; + + /* Add '.' and '..' 
from the htree header */ + if (!start_hash && !start_minor_hash) { +- de = (struct ext3_dir_entry_2 *) frames[0].bh->b_data; ++ de = (struct ext3_dir_entry_2 *) path->ip_frames[0].bh->b_data; + if ((err = ext3_htree_store_dirent(dir_file, 0, 0, de)) != 0) + goto errout; + count++; + } + if (start_hash < 2 || (start_hash ==2 && start_minor_hash==0)) { +- de = (struct ext3_dir_entry_2 *) frames[0].bh->b_data; ++ de = (struct ext3_dir_entry_2 *) path->ip_frames[0].bh->b_data; + de = ext3_next_entry(de); + if ((err = ext3_htree_store_dirent(dir_file, 2, 0, de)) != 0) + goto errout; +@@ -633,7 +931,7 @@ + } + + while (1) { +- block = dx_get_block(frame->at); ++ block = dx_get_block(path, path->ip_frame->at); + ret = htree_dirblock_to_tree(dir_file, dir, block, &hinfo, + start_hash, start_minor_hash); + if (ret < 0) { +@@ -642,8 +940,8 @@ + } + count += ret; + hashval = ~0; +- ret = ext3_htree_next_block(dir, HASH_NB_ALWAYS, +- frame, frames, &hashval); ++ ret = ext3_htree_next_block(dir, ++ HASH_NB_ALWAYS, path, &hashval); + *next_hash = hashval; + if (ret < 0) { + err = ret; +@@ -658,12 +956,12 @@ + (count && ((hashval & 1) == 0))) + break; + } +- dx_release(frames); ++ iam_path_fini(path); + dxtrace(printk("Fill tree: returned %d entries, next hash: %x\n", + count, *next_hash)); + return count; + errout: +- dx_release(frames); ++ iam_path_fini(path); + return (err); + } + +@@ -723,19 +1021,45 @@ + } while(more); + } + +-static void dx_insert_block(struct dx_frame *frame, u32 hash, u32 block) ++void iam_insert_key(struct iam_path *path, struct iam_frame *frame, ++ const struct iam_ikey *key, iam_ptr_t ptr) + { +- struct dx_entry *entries = frame->entries; +- struct dx_entry *old = frame->at, *new = old + 1; ++ struct iam_entry *entries = frame->entries; ++ struct iam_entry *new = iam_entry_shift(path, frame->at, +1); + int count = dx_get_count(entries); + +- assert(count < dx_get_limit(entries)); +- assert(old < entries + count); +- memmove(new + 1, new, (char *)(entries + count) - (char *)(new)); +- dx_set_hash(new, hash); +- dx_set_block(new, block); ++ /* ++ * Unfortunately we cannot assert this, as this function is sometimes ++ * called by VFS under i_sem and without pdirops lock. 
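++ * Hence the "1 ||" below: the check stays in the source as
++ * documentation of the intended invariant, but never actually fires.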
++ */ ++ assert_corr(1 || iam_frame_is_locked(path, frame)); ++ assert_corr(count < dx_get_limit(entries)); ++ assert_corr(frame->at < iam_entry_shift(path, entries, count)); ++ assert_inv(dx_node_check(path, frame)); ++ ++ memmove(iam_entry_shift(path, new, 1), new, ++ (char *)iam_entry_shift(path, entries, count) - (char *)new); ++ dx_set_ikey(path, new, key); ++ dx_set_block(path, new, ptr); + dx_set_count(entries, count + 1); ++ assert_inv(dx_node_check(path, frame)); ++} ++ ++void iam_insert_key_lock(struct iam_path *path, struct iam_frame *frame, ++ const struct iam_ikey *key, iam_ptr_t ptr) ++{ ++ dx_lock_bh(frame->bh); ++ iam_insert_key(path, frame, key, ptr); ++ dx_unlock_bh(frame->bh); ++} ++ ++void dx_insert_block(struct iam_path *path, struct iam_frame *frame, ++ u32 hash, u32 block) ++{ ++ assert_corr(dx_index_is_compat(path)); ++ iam_insert_key(path, frame, (struct iam_ikey *)&hash, block); + } ++ + #endif + + +@@ -934,7 +1258,11 @@ + struct super_block * sb; + struct dx_hash_info hinfo; + u32 hash; +- struct dx_frame frames[2], *frame; ++ struct iam_path_compat cpath; ++ struct iam_path *path = &cpath.ipc_path; ++ struct iam_entry_compat dummy_dot = { ++ .block = 0 ++ }; + struct ext3_dir_entry_2 *de, *top; + struct buffer_head *bh; + unsigned long block; +@@ -943,21 +1271,25 @@ + const u8 *name = dentry->d_name.name; + struct inode *dir = dentry->d_parent->d_inode; + ++ iam_path_compat_init(&cpath, dir); ++ + sb = dir->i_sb; + /* NFS may look up ".." - look at dx_root directory block */ + if (namelen > 2 || name[0] != '.'||(name[1] != '.' && name[1] != '\0')){ +- if (!(frame = dx_probe(dentry, NULL, &hinfo, frames, err))) ++ *err = dx_probe(&dentry->d_name, NULL, &hinfo, path); ++ if (*err != 0) + return NULL; + } else { +- frame = frames; +- frame->bh = NULL; /* for dx_release() */ +- frame->at = (struct dx_entry *)frames; /* hack for zero entry*/ +- dx_set_block(frame->at, 0); /* dx_root block is 0 */ ++ path->ip_frame->bh = NULL; /* for iam_path_fini() */ ++ path->ip_frame->at = (void *)&dummy_dot;/* hack for zero entry*/ + } + hash = hinfo.hash; + do { +- block = dx_get_block(frame->at); +- if (!(bh = ext3_bread (NULL,dir, block, 0, err))) ++ block = dx_get_block(path, path->ip_frame->at); ++ *err = iam_path_descr(path)->id_ops->id_node_read(path->ip_container, ++ (iam_ptr_t)block, ++ NULL, &bh); ++ if (*err != 0) + goto errout; + de = (struct ext3_dir_entry_2 *) bh->b_data; + top = (struct ext3_dir_entry_2 *) ((char *) de + sb->s_blocksize - +@@ -972,13 +1304,12 @@ + goto errout; + } + *res_dir = de; +- dx_release (frames); ++ iam_path_fini(path); + return bh; + } + brelse (bh); + /* Check to see if we should continue to search */ +- retval = ext3_htree_next_block(dir, hash, frame, +- frames, NULL); ++ retval = ext3_htree_next_block(dir, hash, path, NULL); + if (retval < 0) { + ext3_warning(sb, __FUNCTION__, + "error reading index page in directory #%lu", +@@ -991,7 +1322,7 @@ + *err = -ENOENT; + errout: + dxtrace(printk("%s not found\n", name)); +- dx_release (frames); ++ iam_path_fini(path); + return NULL; + } + #endif +@@ -1124,19 +1455,69 @@ + * Allocate a new block, and move entries so that they are approx. equally full. + * Returns pointer to de in block into which the new entry will be inserted. 
+ */ +-static struct ext3_dir_entry_2 *do_split(handle_t *handle, struct inode *dir, +- struct buffer_head **bh,struct dx_frame *frame, +- struct dx_hash_info *hinfo, int *error) ++struct ext3_dir_entry_2 *move_entries(struct inode *dir, ++ struct dx_hash_info *hinfo, ++ struct buffer_head **bh1, ++ struct buffer_head **bh2, ++ __u32 *delim_hash) + { ++ char *data1; ++ char *data2; + unsigned blocksize = dir->i_sb->s_blocksize; +- unsigned count, continued; ++ unsigned count; ++ unsigned continued; ++ unsigned split; ++ u32 hash2; ++ ++ struct dx_map_entry *map; ++ struct ext3_dir_entry_2 *de1; ++ struct ext3_dir_entry_2 *de2; ++ ++ data1 = (*bh1)->b_data; ++ data2 = (*bh2)->b_data; ++ ++ /* create map in the end of data2 block */ ++ map = (struct dx_map_entry *) (data2 + blocksize); ++ count = dx_make_map((struct ext3_dir_entry_2 *) data1, ++ blocksize, hinfo, map); ++ map -= count; ++ split = count/2; // need to adjust to actual middle ++ dx_sort_map(map, count); ++ hash2 = map[split].hash; ++ continued = hash2 == map[split - 1].hash; ++ dxtrace(printk("Split block %i at %x, %i/%i\n", ++ frame->leaf, hash2, split, count - split)); ++ ++ /* Fancy dance to stay within two buffers */ ++ de2 = dx_move_dirents(data1, data2, map + split, count - split); ++ de1 = dx_pack_dirents(data1, blocksize); ++ de1->rec_len = cpu_to_le16(data1 + blocksize - (char *) de1); ++ de2->rec_len = cpu_to_le16(data2 + blocksize - (char *) de2); ++ dxtrace(dx_show_leaf(hinfo, ++ (struct ext3_dir_entry_2 *) data1, blocksize, 1)); ++ dxtrace(dx_show_leaf(hinfo, ++ (struct ext3_dir_entry_2 *) data2, blocksize, 1)); ++ ++ /* Which block gets the new entry? */ ++ if (hinfo->hash >= hash2) { ++ swap(*bh1, *bh2); ++ de1 = de2; ++ } ++ *delim_hash = hash2 + continued; ++ return de1; ++} ++ ++/* Allocate new node, and split leaf node @bh into it, inserting new pointer ++ * into parent node identified by @frame */ ++static struct ext3_dir_entry_2 *do_split(handle_t *handle, struct iam_path *path, ++ struct buffer_head **bh,struct iam_frame *frame, ++ struct dx_hash_info *hinfo, int *error) ++{ ++ struct inode *dir = iam_path_obj(path); + struct buffer_head *bh2; + u32 newblock; + u32 hash2; +- struct dx_map_entry *map; +- char *data1 = (*bh)->b_data, *data2; +- unsigned split, move, size, i; +- struct ext3_dir_entry_2 *de = NULL, *de2; ++ struct ext3_dir_entry_2 *de = NULL; + int err = 0; + + bh2 = ext3_append (handle, dir, &newblock, error); +@@ -1161,46 +1542,9 @@ + if (err) + goto journal_error; + +- data2 = bh2->b_data; +- +- /* create map in the end of data2 block */ +- map = (struct dx_map_entry *) (data2 + blocksize); +- count = dx_make_map ((struct ext3_dir_entry_2 *) data1, +- blocksize, hinfo, map); +- map -= count; +- dx_sort_map (map, count); +- /* Split the existing block in the middle, size-wise */ +- size = 0; +- move = 0; +- for (i = count-1; i >= 0; i--) { +- /* is more than half of this entry in 2nd half of the block? 
*/ +- if (size + map[i].size/2 > blocksize/2) +- break; +- size += map[i].size; +- move++; +- } +- /* map index at which we will split */ +- split = count - move; +- hash2 = map[split].hash; +- continued = hash2 == map[split - 1].hash; +- dxtrace(printk("Split block %i at %x, %i/%i\n", +- dx_get_block(frame->at), hash2, split, count-split)); +- +- /* Fancy dance to stay within two buffers */ +- de2 = dx_move_dirents(data1, data2, map + split, count - split); +- de = dx_pack_dirents(data1,blocksize); +- de->rec_len = cpu_to_le16(data1 + blocksize - (char *) de); +- de2->rec_len = cpu_to_le16(data2 + blocksize - (char *) de2); +- dxtrace(dx_show_leaf (hinfo, (struct ext3_dir_entry_2 *) data1, blocksize, 1)); +- dxtrace(dx_show_leaf (hinfo, (struct ext3_dir_entry_2 *) data2, blocksize, 1)); ++ de = move_entries(dir, hinfo, bh, &bh2, &hash2); + +- /* Which block gets the new entry? */ +- if (hinfo->hash >= hash2) +- { +- swap(*bh, bh2); +- de = de2; +- } +- dx_insert_block (frame, hash2 + continued, newblock); ++ dx_insert_block(path, frame, hash2, newblock); + err = ext3_journal_dirty_metadata (handle, bh2); + if (err) + goto journal_error; +@@ -1203,6 +1558,63 @@ + } + #endif + ++struct ext3_dir_entry_2 *find_insertion_point(struct inode *dir, ++ struct buffer_head *bh, ++ const char *name, int namelen) ++{ ++ struct ext3_dir_entry_2 *de; ++ char *top; ++ unsigned long offset; ++ int nlen; ++ int rlen; ++ int reclen; ++ ++ reclen = EXT3_DIR_REC_LEN(namelen); ++ de = (struct ext3_dir_entry_2 *)bh->b_data; ++ top = bh->b_data + dir->i_sb->s_blocksize - reclen; ++ offset = 0; ++ while ((char *) de <= top) { ++ if (!ext3_check_dir_entry("ext3_add_entry", ++ dir, de, bh, offset)) ++ return ERR_PTR(-EIO); ++ if (ext3_match(namelen, name, de)) ++ return ERR_PTR(-EEXIST); ++ nlen = EXT3_DIR_REC_LEN(de->name_len); ++ rlen = le16_to_cpu(de->rec_len); ++ if ((de->inode? rlen - nlen: rlen) >= reclen) ++ return de; ++ de = (struct ext3_dir_entry_2 *)((char *)de + rlen); ++ offset += rlen; ++ } ++ return ERR_PTR(-ENOSPC); ++} ++ ++struct ext3_dir_entry_2 *split_entry(struct inode *dir, ++ struct ext3_dir_entry_2 *de, ++ unsigned long ino, mode_t mode, ++ const char *name, int namelen) ++{ ++ int nlen; ++ int rlen; ++ ++ nlen = EXT3_DIR_REC_LEN(de->name_len); ++ rlen = le16_to_cpu(de->rec_len); ++ if (de->inode) { ++ struct ext3_dir_entry_2 *de1; ++ ++ de1 = (struct ext3_dir_entry_2 *)((char *)de + nlen); ++ de1->rec_len = cpu_to_le16(rlen - nlen); ++ de->rec_len = cpu_to_le16(nlen); ++ de = de1; ++ } ++ de->file_type = EXT3_FT_UNKNOWN; ++ de->inode = cpu_to_le32(ino); ++ if (ino != 0) ++ ext3_set_de_type(dir->i_sb, de, mode); ++ de->name_len = namelen; ++ memcpy(de->name, name, namelen); ++ return de; ++} + + /* + * Add a new entry into a directory (leaf) block. If de is non-NULL, +@@ -1222,34 +1634,16 @@ + struct inode *dir = dentry->d_parent->d_inode; + const char *name = dentry->d_name.name; + int namelen = dentry->d_name.len; +- unsigned long offset = 0; +- unsigned short reclen; +- int nlen, rlen, err; +- char *top; ++ int err; + +- reclen = EXT3_DIR_REC_LEN(namelen); + if (!de) { +- de = (struct ext3_dir_entry_2 *)bh->b_data; +- top = bh->b_data + dir->i_sb->s_blocksize - reclen; +- while ((char *) de <= top) { +- if (!ext3_check_dir_entry("ext3_add_entry", dir, de, +- bh, offset)) { +- brelse (bh); +- return -EIO; +- } +- if (ext3_match (namelen, name, de)) { +- brelse (bh); +- return -EEXIST; +- } +- nlen = EXT3_DIR_REC_LEN(de->name_len); +- rlen = le16_to_cpu(de->rec_len); +- if ((de->inode? 
rlen - nlen: rlen) >= reclen) +- break; +- de = (struct ext3_dir_entry_2 *)((char *)de + rlen); +- offset += rlen; ++ de = find_insertion_point(dir, bh, name, namelen); ++ if (IS_ERR(de)) { ++ err = PTR_ERR(de); ++ if (err != -ENOSPC) ++ brelse(bh); ++ return err; + } +- if ((char *) de > top) +- return -ENOSPC; + } + BUFFER_TRACE(bh, "get_write_access"); + err = ext3_journal_get_write_access(handle, bh); +@@ -1260,22 +1654,9 @@ + } + + /* By now the buffer is marked for journaling */ +- nlen = EXT3_DIR_REC_LEN(de->name_len); +- rlen = le16_to_cpu(de->rec_len); +- if (de->inode) { +- struct ext3_dir_entry_2 *de1 = (struct ext3_dir_entry_2 *)((char *)de + nlen); +- de1->rec_len = cpu_to_le16(rlen - nlen); +- de->rec_len = cpu_to_le16(nlen); +- de = de1; +- } +- de->file_type = EXT3_FT_UNKNOWN; +- if (inode) { +- de->inode = cpu_to_le32(inode->i_ino); +- ext3_set_de_type(dir->i_sb, de, inode->i_mode); +- } else +- de->inode = 0; +- de->name_len = namelen; +- memcpy (de->name, name, namelen); ++ ++ split_entry(dir, de, inode ? inode->i_ino : 0, ++ inode ? inode->i_mode : 0, name, namelen); + /* + * XXX shouldn't update any times until successful + * completion of syscall, but too many callers depend +@@ -1304,6 +1685,7 @@ + * This converts a one block unindexed directory to a 3 block indexed + * directory, and adds the dentry to the indexed directory. + */ ++extern int user_selected_hash_function; + static int make_indexed_dir(handle_t *handle, struct dentry *dentry, + struct inode *inode, struct buffer_head *bh) + { +@@ -1312,8 +1694,9 @@ + int namelen = dentry->d_name.len; + struct buffer_head *bh2; + struct dx_root *root; +- struct dx_frame frames[2], *frame; +- struct dx_entry *entries; ++ struct iam_path_compat cpath; ++ struct iam_path *path = &cpath.ipc_path; ++ struct iam_entry *entries; + struct ext3_dir_entry_2 *de, *de2; + char *data1, *top; + unsigned len; +@@ -1323,6 +1706,7 @@ + u32 block; + struct fake_dirent *fde; + ++ iam_path_compat_init(&cpath, dir); + blocksize = dir->i_sb->s_blocksize; + dxtrace(printk("Creating index\n")); + retval = ext3_journal_get_write_access(handle, bh); +@@ -1357,23 +1741,25 @@ + memset (&root->info, 0, sizeof(root->info)); + root->info.info_length = sizeof(root->info); + root->info.hash_version = EXT3_SB(dir->i_sb)->s_def_hash_version; +- entries = root->entries; +- dx_set_block (entries, 1); ++ if (user_selected_hash_function >= 0 && ++ user_selected_hash_function <= DX_HASH_MAX) ++ root->info.hash_version = user_selected_hash_function; ++ entries = (void *)root->entries; ++ dx_set_block (path, entries, 1); + dx_set_count (entries, 1); +- dx_set_limit (entries, dx_root_limit(dir, sizeof(root->info))); ++ dx_set_limit (entries, dx_root_limit(path)); + + /* Initialize as for dx_probe */ + hinfo.hash_version = root->info.hash_version; + hinfo.seed = EXT3_SB(dir->i_sb)->s_hash_seed; + ext3fs_dirhash(name, namelen, &hinfo); +- frame = frames; +- frame->entries = entries; +- frame->at = entries; +- frame->bh = bh; ++ path->ip_frame->entries = entries; ++ path->ip_frame->at = entries; ++ path->ip_frame->bh = bh; + bh = bh2; +- de = do_split(handle,dir, &bh, frame, &hinfo, &retval); +- dx_release (frames); +- if (!(de)) ++ de = do_split(handle, path, &bh, path->ip_frame, &hinfo, &retval); ++ iam_path_fini(path); ++ if (!de) + return retval; + + return add_dirent_to_buf(handle, dentry, inode, de, bh); +@@ -1444,139 +1830,384 @@ + return add_dirent_to_buf(handle, dentry, inode, de, bh); + } + ++static int shift_entries(struct iam_path *path, ++ struct 
iam_frame *frame, unsigned count, ++ struct iam_entry *entries, struct iam_entry *entries2, ++ u32 newblock) ++{ ++ unsigned count1; ++ unsigned count2; ++ int delta; ++ ++ struct iam_frame *parent = frame - 1; ++ struct iam_ikey *pivot = iam_path_ikey(path, 3); ++ ++ delta = dx_index_is_compat(path) ? 0 : +1; ++ ++ count1 = count/2 + delta; ++ count2 = count - count1; ++ iam_get_ikey(path, iam_entry_shift(path, entries, count1), pivot); ++ ++ dxtrace(printk("Split index %i/%i\n", count1, count2)); ++ ++ memcpy((char *) iam_entry_shift(path, entries2, delta), ++ (char *) iam_entry_shift(path, entries, count1), ++ count2 * iam_entry_size(path)); ++ ++ dx_set_count(entries2, count2 + delta); ++ dx_set_limit(entries2, dx_node_limit(path)); ++ ++ /* ++ * NOTE: very subtle piece of code competing dx_probe() may find 2nd ++ * level index in root index, then we insert new index here and set ++ * new count in that 2nd level index. so, dx_probe() may see 2nd level ++ * index w/o hash it looks for. the solution is to check root index ++ * after we locked just founded 2nd level index -bzzz ++ */ ++ iam_insert_key_lock(path, parent, pivot, newblock); ++ ++ /* ++ * now old and new 2nd level index blocks contain all pointers, so ++ * dx_probe() may find it in the both. it's OK -bzzz ++ */ ++ dx_lock_bh(frame->bh); ++ dx_set_count(entries, count1); ++ dx_unlock_bh(frame->bh); ++ ++ /* ++ * now old 2nd level index block points to first half of leafs. it's ++ * importand that dx_probe() must check root index block for changes ++ * under dx_lock_bh(frame->bh) -bzzz ++ */ ++ ++ return count1; ++} ++ + #ifdef CONFIG_EXT3_INDEX +-/* +- * Returns 0 for success, or a negative error value +- */ +-static int ext3_dx_add_entry(handle_t *handle, struct dentry *dentry, +- struct inode *inode) ++int split_index_node(handle_t *handle, struct iam_path *path, ++ struct dynlock_handle **lh) + { +- struct dx_frame frames[2], *frame; +- struct dx_entry *entries, *at; +- struct dx_hash_info hinfo; +- struct buffer_head * bh; +- struct inode *dir = dentry->d_parent->d_inode; +- struct super_block * sb = dir->i_sb; +- struct ext3_dir_entry_2 *de; +- int err; + +- frame = dx_probe(dentry, NULL, &hinfo, frames, &err); +- if (!frame) +- return err; +- entries = frame->entries; +- at = frame->at; ++ struct iam_entry *entries; /* old block contents */ ++ struct iam_entry *entries2; /* new block contents */ ++ struct iam_frame *frame, *safe; ++ struct buffer_head *bh_new[DX_MAX_TREE_HEIGHT] = {0}; ++ u32 newblock[DX_MAX_TREE_HEIGHT] = {0}; ++ struct dynlock_handle *lock[DX_MAX_TREE_HEIGHT] = {NULL,}; ++ struct dynlock_handle *new_lock[DX_MAX_TREE_HEIGHT] = {NULL,}; ++ struct inode *dir = iam_path_obj(path); ++ struct iam_descr *descr; ++ int nr_splet; ++ int i, err; + +- if (!(bh = ext3_bread(handle,dir, dx_get_block(frame->at), 0, &err))) +- goto cleanup; ++ descr = iam_path_descr(path); ++ /* ++ * Algorithm below depends on this. ++ */ ++ assert_corr(dx_root_limit(path) < dx_node_limit(path)); + +- BUFFER_TRACE(bh, "get_write_access"); +- err = ext3_journal_get_write_access(handle, bh); +- if (err) +- goto journal_error; ++ frame = path->ip_frame; ++ entries = frame->entries; + +- err = add_dirent_to_buf(handle, dentry, inode, NULL, bh); +- if (err != -ENOSPC) { +- bh = NULL; +- goto cleanup; +- } ++ /* ++ * Tall-tree handling: we might have to split multiple index blocks ++ * all the way up to tree root. 
Tricky point here is error handling: ++ * to avoid complicated undo/rollback we ++ * ++ * - first allocate all necessary blocks ++ * ++ * - insert pointers into them atomically. ++ */ ++ ++ /* ++ * Locking: leaf is already locked. htree-locks are acquired on all ++ * index nodes that require split bottom-to-top, on the "safe" node, ++ * and on all new nodes ++ */ + +- /* Block full, should compress but for now just split */ + dxtrace(printk("using %u of %u node entries\n", + dx_get_count(entries), dx_get_limit(entries))); +- /* Need to split index? */ +- if (dx_get_count(entries) == dx_get_limit(entries)) { +- u32 newblock; +- unsigned icount = dx_get_count(entries); +- int levels = frame - frames; +- struct dx_entry *entries2; +- struct dx_node *node2; +- struct buffer_head *bh2; + +- if (levels && (dx_get_count(frames->entries) == +- dx_get_limit(frames->entries))) { +- ext3_warning(sb, __FUNCTION__, +- "Directory index full!"); ++ /* What levels need split? */ ++ for (nr_splet = 0; frame >= path->ip_frames && ++ dx_get_count(frame->entries) == dx_get_limit(frame->entries); ++ --frame, ++nr_splet) { ++ do_corr(schedule()); ++ if (nr_splet == DX_MAX_TREE_HEIGHT) { ++ ext3_warning(dir->i_sb, __FUNCTION__, ++ "Directory index full!\n"); + err = -ENOSPC; + goto cleanup; + } +- bh2 = ext3_append (handle, dir, &newblock, &err); +- if (!(bh2)) ++ } ++ ++ safe = frame; ++ ++ /* ++ * Lock all nodes, bottom to top. ++ */ ++ for (frame = path->ip_frame, i = nr_splet; i >= 0; --i, --frame) { ++ do_corr(schedule()); ++ lock[i] = dx_lock_htree(dir, frame->curidx, DLT_WRITE); ++ if (lock[i] == NULL) { ++ err = -ENOMEM; ++ goto cleanup; ++ } ++ } ++ ++ /* ++ * Check for concurrent index modification. ++ */ ++ err = dx_check_full_path(path, 1); ++ if (err) ++ goto cleanup; ++ /* ++ * And check that the same number of nodes is to be split. ++ */ ++ for (i = 0, frame = path->ip_frame; frame >= path->ip_frames && ++ dx_get_count(frame->entries) == dx_get_limit(frame->entries); ++ --frame, ++i) { ++ ; ++ } ++ if (i != nr_splet) { ++ err = -EAGAIN; ++ goto cleanup; ++ } ++ ++ /* Go back down, allocating blocks, locking them, and adding into ++ * transaction... 
*/ ++ for (frame = safe + 1, i = 0; i < nr_splet; ++i, ++frame) { ++ bh_new[i] = ext3_append (handle, dir, &newblock[i], &err); ++ do_corr(schedule()); ++ if (!bh_new[i] || ++ descr->id_ops->id_node_init(path->ip_container, ++ bh_new[i], 0) != 0) ++ goto cleanup; ++ new_lock[i] = dx_lock_htree(dir, newblock[i], DLT_WRITE); ++ if (new_lock[i] == NULL) { ++ err = -ENOMEM; + goto cleanup; +- node2 = (struct dx_node *)(bh2->b_data); +- entries2 = node2->entries; +- node2->fake.rec_len = cpu_to_le16(sb->s_blocksize); +- node2->fake.inode = 0; ++ } ++ do_corr(schedule()); + BUFFER_TRACE(frame->bh, "get_write_access"); + err = ext3_journal_get_write_access(handle, frame->bh); + if (err) + goto journal_error; +- if (levels) { +- unsigned icount1 = icount/2, icount2 = icount - icount1; +- unsigned hash2 = dx_get_hash(entries + icount1); +- dxtrace(printk("Split index %i/%i\n", icount1, icount2)); +- +- BUFFER_TRACE(frame->bh, "get_write_access"); /* index root */ +- err = ext3_journal_get_write_access(handle, +- frames[0].bh); ++ } ++ /* Add "safe" node to transaction too */ ++ if (safe + 1 != path->ip_frames) { ++ do_corr(schedule()); ++ err = ext3_journal_get_write_access(handle, safe->bh); ++ if (err) ++ goto journal_error; ++ } ++ ++ /* Go through nodes once more, inserting pointers */ ++ for (frame = safe + 1, i = 0; i < nr_splet; ++i, ++frame) { ++ unsigned count; ++ int idx; ++ struct buffer_head *bh2; ++ struct buffer_head *bh; ++ ++ entries = frame->entries; ++ count = dx_get_count(entries); ++ idx = iam_entry_diff(path, frame->at, entries); ++ ++ bh2 = bh_new[i]; ++ entries2 = dx_get_entries(path, bh2->b_data, 0); ++ ++ bh = frame->bh; ++ if (frame == path->ip_frames) { ++ /* splitting root node. Tricky point: ++ * ++ * In the "normal" B-tree we'd split root *and* add ++ * new root to the tree with pointers to the old root ++ * and its sibling (thus introducing two new nodes). ++ * ++ * In htree it's enough to add one node, because ++ * capacity of the root node is smaller than that of ++ * non-root one. ++ */ ++ struct iam_frame *frames; ++ struct iam_entry *next; ++ ++ assert_corr(i == 0); ++ ++ do_corr(schedule()); ++ ++ frames = path->ip_frames; ++ memcpy((char *) entries2, (char *) entries, ++ count * iam_entry_size(path)); ++ dx_set_limit(entries2, dx_node_limit(path)); ++ ++ /* Set up root */ ++ dx_lock_bh(frame->bh); ++ next = descr->id_ops->id_root_inc(path->ip_container, ++ path, frame); ++ dx_set_block(path, next, newblock[0]); ++ dx_unlock_bh(frame->bh); ++ ++ do_corr(schedule()); ++ /* Shift frames in the path */ ++ memmove(frames + 2, frames + 1, ++ (sizeof path->ip_frames) - 2 * sizeof frames[0]); ++ /* Add new access path frame */ ++ frames[1].at = iam_entry_shift(path, entries2, idx); ++ frames[1].entries = entries = entries2; ++ frames[1].bh = bh2; ++ assert_inv(dx_node_check(path, frame)); ++ ++ path->ip_frame; ++ ++ frame; ++ assert_inv(dx_node_check(path, frame)); ++ bh_new[0] = NULL; /* buffer head is "consumed" */ ++ err = ext3_journal_get_write_access(handle, bh2); + if (err) + goto journal_error; ++ do_corr(schedule()); ++ } else { ++ /* splitting non-root index node. 
*/ ++ struct iam_frame *parent = frame - 1; + +- memcpy ((char *) entries2, (char *) (entries + icount1), +- icount2 * sizeof(struct dx_entry)); +- dx_set_count (entries, icount1); +- dx_set_count (entries2, icount2); +- dx_set_limit (entries2, dx_node_limit(dir)); +- ++ do_corr(schedule()); ++ count = shift_entries(path, frame, count, ++ entries, entries2, newblock[i]); + /* Which index block gets the new entry? */ +- if (at - entries >= icount1) { +- frame->at = at = at - entries - icount1 + entries2; ++ if (idx >= count) { ++ int d = dx_index_is_compat(path) ? 0 : +1; ++ ++ frame->at = iam_entry_shift(path, entries2, ++ idx - count + d); + frame->entries = entries = entries2; ++ frame->curidx = newblock[i]; + swap(frame->bh, bh2); ++ assert_corr(lock[i + 1] != NULL); ++ assert_corr(new_lock[i] != NULL); ++ swap(lock[i + 1], new_lock[i]); ++ bh_new[i] = bh2; ++ parent->at = iam_entry_shift(path, ++ parent->at, +1); + } +- dx_insert_block (frames + 0, hash2, newblock); +- dxtrace(dx_show_index ("node", frames[1].entries)); ++ assert_inv(dx_node_check(path, frame)); ++ assert_inv(dx_node_check(path, parent)); ++ dxtrace(dx_show_index ("node", frame->entries)); + dxtrace(dx_show_index ("node", + ((struct dx_node *) bh2->b_data)->entries)); + err = ext3_journal_dirty_metadata(handle, bh2); + if (err) + goto journal_error; +- brelse (bh2); +- } else { +- dxtrace(printk("Creating second level index...\n")); +- memcpy((char *) entries2, (char *) entries, +- icount * sizeof(struct dx_entry)); +- dx_set_limit(entries2, dx_node_limit(dir)); +- +- /* Set up root */ +- dx_set_count(entries, 1); +- dx_set_block(entries + 0, newblock); +- ((struct dx_root *) frames[0].bh->b_data)->info.indirect_levels = 1; +- +- /* Add new access path frame */ +- frame = frames + 1; +- frame->at = at = at - entries + entries2; +- frame->entries = entries = entries2; +- frame->bh = bh2; +- err = ext3_journal_get_write_access(handle, +- frame->bh); ++ do_corr(schedule()); ++ err = ext3_journal_dirty_metadata(handle, parent->bh); + if (err) + goto journal_error; + } +- ext3_journal_dirty_metadata(handle, frames[0].bh); ++ do_corr(schedule()); ++ err = ext3_journal_dirty_metadata(handle, bh); ++ if (err) ++ goto journal_error; ++ } ++ /* ++ * This function was called to make insertion of new leaf ++ * possible. Check that it fulfilled its obligations. ++ */ ++ assert_corr(dx_get_count(path->ip_frame->entries) < ++ dx_get_limit(path->ip_frame->entries)); ++ assert_corr(lock[nr_splet] != NULL); ++ *lh = lock[nr_splet]; ++ lock[nr_splet] = NULL; ++ if (nr_splet > 0) { ++ /* ++ * Log ->i_size modification. 
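++ *
++ * ext3_append() grew the directory while the new index blocks were
++ * allocated above, so the enlarged i_size has to reach the journal
++ * within this transaction as well.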
++ */ ++ err = ext3_mark_inode_dirty(handle, dir); ++ if (err) ++ goto journal_error; ++ } ++ goto cleanup; ++journal_error: ++ ext3_std_error(dir->i_sb, err); ++ ++cleanup: ++ dx_unlock_array(dir, lock); ++ dx_unlock_array(dir, new_lock); ++ ++ assert_corr(err || iam_frame_is_locked(path, path->ip_frame)); ++ ++ do_corr(schedule()); ++ for (i = 0; i < ARRAY_SIZE(bh_new); ++i) { ++ if (bh_new[i] != NULL) ++ brelse(bh_new[i]); ++ } ++ return err; ++} ++ ++/* ++ * Returns 0 for success, or a negative error value ++ */ ++static int ext3_dx_add_entry(handle_t *handle, struct dentry *dentry, ++ struct inode *inode) ++{ ++ struct iam_path_compat cpath; ++ struct iam_path *path = &cpath.ipc_path; ++ struct iam_descr *param; ++ struct iam_frame *frame; ++ struct dx_hash_info hinfo; ++ struct buffer_head * bh = NULL; ++ struct inode *dir = dentry->d_parent->d_inode; ++ struct ext3_dir_entry_2 *de; ++ struct dynlock_handle *dummy = NULL; ++ int err; ++ size_t isize; ++ ++ iam_path_compat_init(&cpath, dir); ++ param = iam_path_descr(path); ++ ++ err = dx_probe(&dentry->d_name, NULL, &hinfo, path); ++ if (err != 0) ++ return err; ++ frame = path->ip_frame; ++ ++ isize = dir->i_size; ++ ++ err = param->id_ops->id_node_read(path->ip_container, ++ (iam_ptr_t)dx_get_block(path, frame->at), ++ handle, &bh); ++ if (err != 0) ++ goto cleanup; ++ ++ BUFFER_TRACE(bh, "get_write_access"); ++ err = ext3_journal_get_write_access(handle, bh); ++ if (err) ++ goto journal_error; ++ ++ err = add_dirent_to_buf(handle, dentry, inode, NULL, bh); ++ if (err != -ENOSPC) { ++ bh = NULL; ++ goto cleanup; + } +- de = do_split(handle, dir, &bh, frame, &hinfo, &err); ++ ++ err = split_index_node(handle, path, &dummy); ++ if (err) ++ goto cleanup; ++ ++ /*copy split inode too*/ ++ de = do_split(handle, path, &bh, path->ip_frame, &hinfo, &err); + if (!de) + goto cleanup; ++ ++ assert_inv(dx_node_check(path, frame)); + err = add_dirent_to_buf(handle, dentry, inode, de, bh); +- bh = NULL; +- goto cleanup; ++ goto cleanup2; + + journal_error: + ext3_std_error(dir->i_sb, err); + cleanup: + if (bh) + brelse(bh); +- dx_release(frames); ++cleanup2: ++ dx_unlock_htree(dir, dummy); ++ if (err) ++ inode->i_size = isize; ++ iam_path_fini(path); + return err; + } + #endif +@@ -1678,6 +2309,26 @@ + return ext3_new_inode(handle, dir, mode, inum); + } + ++struct inode *ext3_create_inode(handle_t *handle, struct inode * dir, int mode) ++{ ++ struct inode *inode; ++ ++ inode = ext3_new_inode(handle, dir, mode, 0); ++ if (!IS_ERR(inode)) { ++ if (S_ISCHR(mode) || S_ISBLK(mode) || S_ISFIFO(mode)) { ++#ifdef CONFIG_LDISKFS_FS_XATTR ++ inode->i_op = &ext3_special_inode_operations; ++#endif ++ } else { ++ inode->i_op = &ext3_file_inode_operations; ++ inode->i_fop = &ext3_file_operations; ++ ext3_set_aops(inode); ++ } ++ } ++ return inode; ++} ++EXPORT_SYMBOL(ext3_create_inode); ++ + /* + * By the time this is called, we already have created + * the directory cache entry for the new file, but it +Index: linux-stage/fs/ext3/ioctl.c +=================================================================== +--- linux-stage.orig/fs/ext3/ioctl.c 2007-11-26 23:09:03.000000000 +0300 ++++ linux-stage/fs/ext3/ioctl.c 2007-11-26 23:09:06.000000000 +0300 +@@ -16,6 +16,7 @@ + + #include + #include ++#include + + int ext3_ioctl (struct inode * inode, struct file * filp, unsigned int cmd, + unsigned long arg) +@@ -275,6 +276,6 @@ + + + default: +- return -ENOTTY; ++ return iam_uapi_ioctl(inode, filp, cmd, arg); + } + } +Index: linux-stage/fs/ext3/file.c 
+=================================================================== +--- linux-stage.orig/fs/ext3/file.c 2007-11-26 23:08:59.000000000 +0300 ++++ linux-stage/fs/ext3/file.c 2007-11-26 23:09:06.000000000 +0300 +@@ -23,6 +23,7 @@ + #include + #include + #include ++#include + #include "xattr.h" + #include "acl.h" + +@@ -41,8 +42,12 @@ + ext3_discard_reservation(inode); + mutex_unlock(&EXT3_I(inode)->truncate_mutex); + } +- if (is_dx(inode) && filp->private_data) +- ext3_htree_free_dir_info(filp->private_data); ++ if (is_dx(inode) && filp->private_data) { ++ if (S_ISDIR(inode->i_mode)) ++ ext3_htree_free_dir_info(filp->private_data); ++ else ++ ext3_iam_release(filp, inode); ++ } + + return 0; + } +Index: linux-stage/fs/ext3/hash.c +=================================================================== +--- linux-stage.orig/fs/ext3/hash.c 2007-11-26 23:08:59.000000000 +0300 ++++ linux-stage/fs/ext3/hash.c 2007-11-26 23:09:06.000000000 +0300 +@@ -49,6 +49,23 @@ + return (hash0 << 1); + } + ++static __u32 dx_r5_hash(const signed char *msg, int len) ++{ ++ __u32 a = 0; ++ while (len--) { ++ a += *msg << 4; ++ a += *msg >> 4; ++ a *= 11; ++ msg++; ++ } ++ return a; ++} ++ ++static __u32 dx_same_hash(const signed char *msg, int len) ++{ ++ return 0xcafebabeUL; ++} ++ + static void str2hashbuf(const char *msg, int len, __u32 *buf, int num) + { + __u32 pad, val; +@@ -139,6 +156,12 @@ + hash = buf[0]; + minor_hash = buf[1]; + break; ++ case DX_HASH_R5: ++ hash = dx_r5_hash(name, len); ++ break; ++ case DX_HASH_SAME: ++ hash = dx_same_hash(name, len); ++ break; + default: + hinfo->hash = 0; + return -1; +Index: linux-stage/fs/ext3/Makefile +=================================================================== +--- linux-stage.orig/fs/ext3/Makefile 2007-11-26 23:09:05.000000000 +0300 ++++ linux-stage/fs/ext3/Makefile 2007-11-26 23:09:06.000000000 +0300 +@@ -6,7 +6,8 @@ + + ext3-y := balloc.o bitmap.o dir.o file.o fsync.o ialloc.o inode.o iopen.o \ + ioctl.o namei.o super.o symlink.o hash.o resize.o ext3_jbd.o \ +- extents.o mballoc.o dynlocks.o ++ extents.o mballoc.o dynlocks.o \ ++ iam.o iam_lfix.o iam_lvar.o iam_htree.o iam_uapi.o + + ext3-$(CONFIG_EXT3_FS_XATTR) += xattr.o xattr_user.o xattr_trusted.o + ext3-$(CONFIG_EXT3_FS_POSIX_ACL) += acl.o +Index: linux-stage/fs/ext3/dir.c +=================================================================== +--- linux-stage.orig/fs/ext3/dir.c 2007-11-26 23:09:04.000000000 +0300 ++++ linux-stage/fs/ext3/dir.c 2007-11-26 23:09:06.000000000 +0300 +@@ -28,6 +28,7 @@ + #include + #include + #include ++#include + + static unsigned char ext3_filetype_table[] = { + DT_UNKNOWN, DT_REG, DT_DIR, DT_CHR, DT_BLK, DT_FIFO, DT_SOCK, DT_LNK +@@ -61,6 +62,7 @@ + } + + ++#if EXT3_INVARIANT_ON + int ext3_check_dir_entry (const char * function, struct inode * dir, + struct ext3_dir_entry_2 * de, + struct buffer_head * bh, +@@ -90,6 +92,7 @@ + rlen, de->name_len); + return error_msg == NULL ? 
1 : 0; + } ++#endif + + static int ext3_readdir(struct file * filp, + void * dirent, filldir_t filldir) +@@ -304,12 +307,14 @@ + root->rb_node = NULL; + } + ++extern struct iam_private_info *ext3_iam_alloc_info(int flags); ++extern void ext3_iam_release_info(struct iam_private_info *info); + + static struct dir_private_info *create_dir_info(loff_t pos) + { + struct dir_private_info *p; + +- p = kmalloc(sizeof(struct dir_private_info), GFP_KERNEL); ++ p = (void *)ext3_iam_alloc_info(GFP_KERNEL); + if (!p) + return NULL; + p->root.rb_node = NULL; +@@ -325,6 +330,7 @@ + void ext3_htree_free_dir_info(struct dir_private_info *p) + { + free_rb_tree_fname(&p->root); ++ ext3_iam_release_info((void *)p); + kfree(p); + } + +Index: linux-stage/include/linux/lustre_iam.h +=================================================================== +--- linux-stage.orig/include/linux/lustre_iam.h ++++ linux-stage/include/linux/lustre_iam.h +@@ -5111,6 +5111,7 @@ + { + u32 hash; + u32 offs; ++ u16 size; + }; + + struct fake_dirent { diff --git a/ldiskfs/kernel_patches/patches/ext3-mballoc3-2.6.22.patch b/ldiskfs/kernel_patches/patches/ext3-mballoc3-2.6.22.patch new file mode 100644 index 0000000..ddc64d8 --- /dev/null +++ b/ldiskfs/kernel_patches/patches/ext3-mballoc3-2.6.22.patch @@ -0,0 +1,612 @@ +Index: linux-2.6.18.8/include/linux/ext3_fs_i.h +=================================================================== +--- linux-2.6.18.8.orig/include/linux/ext3_fs_i.h 2007-07-17 09:18:14.000000000 +0200 ++++ linux-2.6.18.8/include/linux/ext3_fs_i.h 2007-07-17 09:18:53.000000000 +0200 +@@ -154,6 +154,10 @@ struct ext3_inode_info { + struct inode vfs_inode; + + struct ext3_ext_cache i_cached_extent; ++ ++ /* mballoc */ ++ struct list_head i_prealloc_list; ++ spinlock_t i_prealloc_lock; + }; + + #endif /* _LINUX_EXT3_FS_I */ +Index: linux-2.6.18.8/include/linux/ext3_fs_sb.h +=================================================================== +--- linux-2.6.18.8.orig/include/linux/ext3_fs_sb.h 2007-07-17 09:18:14.000000000 +0200 ++++ linux-2.6.18.8/include/linux/ext3_fs_sb.h 2007-07-17 09:18:53.000000000 +0200 +@@ -21,8 +21,15 @@ + #include + #include + #include ++#include + #endif + #include ++#include ++ ++struct ext3_buddy_group_blocks; ++struct ext3_locality_group; ++struct ext3_mb_history; ++#define EXT3_BB_MAX_BLOCKS + + /* + * third extended-fs super-block data in memory +Index: linux-2.6.18.8/include/linux/ext3_fs.h +=================================================================== +--- linux-2.6.18.8.orig/include/linux/ext3_fs.h 2007-07-17 09:18:14.000000000 +0200 ++++ linux-2.6.18.8/include/linux/ext3_fs.h 2007-07-17 09:18:53.000000000 +0200 +@@ -17,6 +17,7 @@ + #define _LINUX_EXT3_FS_H + + #include ++#include + #include + + /* +@@ -67,12 +68,12 @@ + + struct ext3_allocation_request { + struct inode *inode; /* target inode for block we're allocating */ +- unsigned long logical; /* logical block in target inode */ +- unsigned long goal; /* phys. target (a hint) */ +- unsigned long lleft; /* the closest logical allocated block to the left */ +- unsigned long pleft; /* phys. block for ^^^ */ +- unsigned long lright; /* the closest logical allocated block to the right */ +- unsigned long pright; /* phys. block for ^^^ */ ++ ext3_fsblk_t logical; /* logical block in target inode */ ++ ext3_fsblk_t goal; /* phys. target (a hint) */ ++ ext3_fsblk_t lleft; /* the closest logical allocated block to the left */ ++ ext3_fsblk_t pleft; /* phys. 
block for ^^^ */ ++ ext3_fsblk_t lright; /* the closest logical allocated block to the right */ ++ ext3_fsblk_t pright; /* phys. block for ^^^ */ + unsigned long len; /* how many blocks we want to allocate */ + unsigned long flags; /* flags. see above EXT3_MB_HINT_* */ + }; +@@ -400,6 +401,7 @@ struct ext3_inode { + #define EXT3_MOUNT_IOPEN_NOPRIV 0x800000/* Make iopen world-readable */ + #define EXT3_MOUNT_EXTENTS 0x2000000/* Extents support */ + #define EXT3_MOUNT_EXTDEBUG 0x4000000/* Extents debug */ ++#define EXT3_MOUNT_MBALLOC 0x8000000/* Buddy allocation support */ + + /* Compatibility, for having both ext2_fs.h and ext3_fs.h included at once */ + #ifndef clear_opt +@@ -787,12 +789,12 @@ ext3_group_first_block_no(struct super_b + /* balloc.c */ + extern int ext3_bg_has_super(struct super_block *sb, int group); + extern unsigned long ext3_bg_num_gdb(struct super_block *sb, int group); +-extern ext3_fsblk_t ext3_new_block (handle_t *handle, struct inode *inode, ++extern ext3_fsblk_t ext3_new_block_old (handle_t *handle, struct inode *inode, + ext3_fsblk_t goal, int *errp); +-extern ext3_fsblk_t ext3_new_blocks (handle_t *handle, struct inode *inode, ++extern ext3_fsblk_t ext3_new_blocks_old (handle_t *handle, struct inode *inode, + ext3_fsblk_t goal, unsigned long *count, int *errp); + extern void ext3_free_blocks (handle_t *handle, struct inode *inode, +- ext3_fsblk_t block, unsigned long count); ++ ext3_fsblk_t block, unsigned long count, int metadata); + extern void ext3_free_blocks_sb (handle_t *handle, struct super_block *sb, + ext3_fsblk_t block, unsigned long count, + unsigned long *pdquot_freed_blocks); +@@ -836,15 +838,45 @@ extern long ext3_mb_stats; + extern long ext3_mb_max_to_scan; + extern int ext3_mb_init(struct super_block *, int); + extern int ext3_mb_release(struct super_block *); +-extern unsigned long ext3_mb_new_blocks(handle_t *, struct ext3_allocation_request *, int *); ++extern ext3_fsblk_t ext3_new_block(handle_t *handle, struct inode *inode, ++ ext3_fsblk_t goal, int *errp); ++extern ext3_fsblk_t ext3_mb_new_blocks(handle_t *, ++ struct ext3_allocation_request *, int *); + extern int ext3_mb_reserve_blocks(struct super_block *, int); + extern void ext3_mb_release_blocks(struct super_block *, int); + extern void ext3_mb_release_blocks(struct super_block *, int); + extern void ext3_mb_discard_inode_preallocations(struct inode *); + extern int __init init_ext3_proc(void); + extern void exit_ext3_proc(void); +-extern void ext3_mb_free_blocks(handle_t *, struct inode *, unsigned long, unsigned long, int, int *); ++extern void ext3_mb_free_blocks(handle_t *, struct inode *, unsigned long, ++ unsigned long, int, unsigned long *); ++ ++static inline ext3_fsblk_t ext3_new_blocks(handle_t *handle, ++ struct inode *inode, ++ ext3_fsblk_t goal, ++ unsigned long *count, int *errp) ++{ ++ struct ext3_allocation_request ar; ++ ext3_fsblk_t ret; + ++ if (!test_opt(inode->i_sb, MBALLOC)) { ++ ret = ext3_new_blocks_old(handle, inode, goal, count, errp); ++ return ret; ++ } ++ ++ ar.inode = inode; ++ ar.goal = goal; ++ ar.len = *count; ++ ar.logical = 0; ++ ar.lleft = 0; ++ ar.pleft = 0; ++ ar.lright = 0; ++ ar.pright = 0; ++ ar.flags = 0; ++ ret = ext3_mb_new_blocks(handle, &ar, errp); ++ *count = ar.len; ++ return ret; ++} + + /* inode.c */ + int ext3_forget(handle_t *handle, int is_metadata, struct inode *inode, +Index: linux-2.6.18.8/fs/ext3/super.c +=================================================================== +--- linux-2.6.18.8.orig/fs/ext3/super.c 2007-07-17 
09:18:14.000000000 +0200 ++++ linux-2.6.18.8/fs/ext3/super.c 2007-07-17 09:18:53.000000000 +0200 +@@ -688,6 +688,7 @@ enum { + Opt_iopen, Opt_noiopen, Opt_iopen_nopriv, + Opt_grpquota, + Opt_extents, Opt_noextents, Opt_extdebug, ++ Opt_mballoc, Opt_nomballoc, Opt_stripe, + }; + + static match_table_t tokens = { +@@ -743,6 +744,9 @@ static match_table_t tokens = { + {Opt_extents, "extents"}, + {Opt_noextents, "noextents"}, + {Opt_extdebug, "extdebug"}, ++ {Opt_mballoc, "mballoc"}, ++ {Opt_nomballoc, "nomballoc"}, ++ {Opt_stripe, "stripe=%u"}, + {Opt_err, NULL}, + {Opt_resize, "resize"}, + }; +@@ -1096,6 +1100,19 @@ clear_qf_name: + case Opt_extdebug: + set_opt (sbi->s_mount_opt, EXTDEBUG); + break; ++ case Opt_mballoc: ++ set_opt(sbi->s_mount_opt, MBALLOC); ++ break; ++ case Opt_nomballoc: ++ clear_opt(sbi->s_mount_opt, MBALLOC); ++ break; ++ case Opt_stripe: ++ if (match_int(&args[0], &option)) ++ return 0; ++ if (option < 0) ++ return 0; ++ sbi->s_stripe = option; ++ break; + default: + printk (KERN_ERR + "EXT3-fs: Unrecognized mount option \"%s\" " +@@ -1826,6 +1843,7 @@ static int ext3_fill_super (struct super + "writeback"); + + ext3_ext_init(sb); ++ ext3_mb_init(sb, needs_recovery); + + lock_kernel(); + return 0; +Index: linux-2.6.18.8/fs/ext3/extents.c +=================================================================== +--- linux-2.6.18.8.orig/fs/ext3/extents.c 2007-07-17 09:18:14.000000000 +0200 ++++ linux-2.6.18.8/fs/ext3/extents.c 2007-07-17 09:18:53.000000000 +0200 +@@ -795,7 +795,7 @@ cleanup: + for (i = 0; i < depth; i++) { + if (!ablocks[i]) + continue; +- ext3_free_blocks(handle, inode, ablocks[i], 1); ++ ext3_free_blocks(handle, inode, ablocks[i], 1, 1); + } + } + kfree(ablocks); +@@ -1613,7 +1613,7 @@ int ext3_ext_rm_idx(handle_t *handle, st + ext_debug(inode, "index is empty, remove it, free block %lu\n", leaf); + bh = sb_find_get_block(inode->i_sb, leaf); + ext3_forget(handle, 1, inode, bh, leaf); +- ext3_free_blocks(handle, inode, leaf, 1); ++ ext3_free_blocks(handle, inode, leaf, 1, 1); + return err; + } + +@@ -1672,7 +1672,7 @@ static int ext3_remove_blocks(handle_t * + unsigned long from, unsigned long to) + { + struct buffer_head *bh; +- int i; ++ int i, metadata = 0; + + #ifdef EXTENTS_STATS + { +@@ -1690,6 +1690,8 @@ static int ext3_remove_blocks(handle_t * + spin_unlock(&sbi->s_ext_stats_lock); + } + #endif ++ if (S_ISDIR(inode->i_mode) || S_ISLNK(inode->i_mode)) ++ metadata = 1; + if (from >= le32_to_cpu(ex->ee_block) + && to == le32_to_cpu(ex->ee_block) + le16_to_cpu(ex->ee_len) - 1) { + /* tail removal */ +@@ -1701,7 +1703,7 @@ static int ext3_remove_blocks(handle_t * + bh = sb_find_get_block(inode->i_sb, start + i); + ext3_forget(handle, 0, inode, bh, start + i); + } +- ext3_free_blocks(handle, inode, start, num); ++ ext3_free_blocks(handle, inode, start, num, metadata); + } else if (from == le32_to_cpu(ex->ee_block) + && to <= le32_to_cpu(ex->ee_block) + le16_to_cpu(ex->ee_len) - 1) { + printk("strange request: removal %lu-%lu from %u:%u\n", +@@ -2034,7 +2036,7 @@ int ext3_ext_get_blocks(handle_t *handle + struct ext3_extent newex, *ex; + int goal, newblock, err = 0, depth; + unsigned long allocated = 0; +- unsigned long next; ++ struct ext3_allocation_request ar; + + __clear_bit(BH_New, &bh_result->b_state); + ext_debug(inode, "blocks %d/%lu requested for inode %u\n", (int) iblock, +@@ -2116,18 +2118,33 @@ int ext3_ext_get_blocks(handle_t *handle + if (S_ISREG(inode->i_mode) && (!EXT3_I(inode)->i_block_alloc_info)) + ext3_init_block_alloc_info(inode); + ++ 
/* find neighbour allocated blocks */ ++ ar.lleft = iblock; ++ err = ext3_ext_search_left(inode, path, &ar.lleft, &ar.pleft); ++ if (err) ++ goto out2; ++ ar.lright = iblock; ++ err = ext3_ext_search_right(inode, path, &ar.lright, &ar.pright); ++ if (err) ++ goto out2; ++ + /* find next allocated block so that we know how many + * blocks we can allocate without ovelapping next extent */ +- BUG_ON(iblock < le32_to_cpu(ex->ee_block) + le16_to_cpu(ex->ee_len)); +- next = ext3_ext_next_allocated_block(path); +- BUG_ON(next <= iblock); +- allocated = next - iblock; ++ BUG_ON(ar.pright != 0 && ar.lright <= iblock); ++ if (ar.pright == 0) ++ allocated = EXT_MAX_BLOCK - iblock; ++ else ++ allocated = ar.lright - iblock; + if (allocated > max_blocks) + allocated = max_blocks; + + /* allocate new block */ +- goal = ext3_ext_find_goal(inode, path, iblock); +- newblock = ext3_new_blocks(handle, inode, goal, &allocated, &err); ++ ar.inode = inode; ++ ar.goal = ext3_ext_find_goal(inode, path, iblock); ++ ar.logical = iblock; ++ ar.len = allocated; ++ ar.flags = EXT3_MB_HINT_DATA; ++ newblock = ext3_mb_new_blocks(handle, &ar, &err); + if (!newblock) + goto out2; + ext_debug(inode, "allocate new block: goal %d, found %d/%lu\n", +@@ -2137,12 +2154,16 @@ int ext3_ext_get_blocks(handle_t *handle + newex.ee_block = cpu_to_le32(iblock); + newex.ee_start = cpu_to_le32(newblock); + newex.ee_start_hi = 0; +- newex.ee_len = cpu_to_le16(allocated); ++ newex.ee_len = cpu_to_le16(ar.len); + err = ext3_ext_insert_extent(handle, inode, path, &newex); + if (err) { + /* free data blocks we just allocated */ +- ext3_free_blocks(handle, inode, le32_to_cpu(newex.ee_start), +- le16_to_cpu(newex.ee_len)); ++ /* not a good idea to call discard here directly, ++ * but otherwise we'd need to call it every free() */ ++ ext3_mb_discard_inode_preallocations(inode); ++ ext3_free_blocks(handle, inode, newex.ee_start, ++ newex.ee_len, 0); ++ + goto out2; + } + +@@ -2151,6 +2172,7 @@ int ext3_ext_get_blocks(handle_t *handle + + /* previous routine could use block we allocated */ + newblock = le32_to_cpu(newex.ee_start); ++ allocated = le16_to_cpu(newex.ee_len); + __set_bit(BH_New, &bh_result->b_state); + + ext3_ext_put_in_cache(inode, iblock, allocated, newblock, +@@ -2202,6 +2224,9 @@ void ext3_ext_truncate(struct inode * in + mutex_lock(&EXT3_I(inode)->truncate_mutex); + ext3_ext_invalidate_cache(inode); + ++ /* it's important to discard preallocations under truncate_mutex */ ++ ext3_mb_discard_inode_preallocations(inode); ++ + /* + * TODO: optimization is possible here + * probably we need not scaning at all, +Index: linux-2.6.18.8/fs/ext3/Makefile +=================================================================== +--- linux-2.6.18.8.orig/fs/ext3/Makefile 2007-07-17 09:18:14.000000000 +0200 ++++ linux-2.6.18.8/fs/ext3/Makefile 2007-07-17 09:18:53.000000000 +0200 +@@ -5,7 +5,7 @@ + + ext3-y := balloc.o bitmap.o dir.o file.o fsync.o ialloc.o inode.o iopen.o \ + ioctl.o namei.o super.o symlink.o hash.o resize.o ext3_jbd.o \ +- extents.o ++ extents.o mballoc.o + + ext3-$(CONFIG_EXT3_FS_XATTR) += xattr.o xattr_user.o xattr_trusted.o + ext3-$(CONFIG_EXT3_FS_POSIX_ACL) += acl.o +Index: linux-2.6.18.8/fs/ext3/xattr.c +=================================================================== +--- linux-2.6.18.8.orig/fs/ext3/xattr.c 2007-02-24 00:52:30.000000000 +0100 ++++ linux-2.6.18.8/fs/ext3/xattr.c 2007-07-17 09:18:53.000000000 +0200 +@@ -484,7 +484,7 @@ ext3_xattr_release_block(handle_t *handl + ea_bdebug(bh, "refcount now=0; freeing"); + 
if (ce) + mb_cache_entry_free(ce); +- ext3_free_blocks(handle, inode, bh->b_blocknr, 1); ++ ext3_free_blocks(handle, inode, bh->b_blocknr, 1, 1); + get_bh(bh); + ext3_forget(handle, 1, inode, bh, bh->b_blocknr); + } else { +@@ -805,7 +805,7 @@ inserted: + new_bh = sb_getblk(sb, block); + if (!new_bh) { + getblk_failed: +- ext3_free_blocks(handle, inode, block, 1); ++ ext3_free_blocks(handle, inode, block, 1, 1); + error = -EIO; + goto cleanup; + } +Index: linux-2.6.18.8/fs/ext3/balloc.c +=================================================================== +--- linux-2.6.18.8.orig/fs/ext3/balloc.c 2007-02-24 00:52:30.000000000 +0100 ++++ linux-2.6.18.8/fs/ext3/balloc.c 2007-07-17 09:18:53.000000000 +0200 +@@ -79,7 +79,7 @@ struct ext3_group_desc * ext3_get_group_ + * + * Return buffer_head on success or NULL in case of failure. + */ +-static struct buffer_head * ++struct buffer_head * + read_block_bitmap(struct super_block *sb, unsigned int block_group) + { + struct ext3_group_desc * desc; +@@ -294,6 +294,8 @@ void ext3_discard_reservation(struct ino + struct ext3_reserve_window_node *rsv; + spinlock_t *rsv_lock = &EXT3_SB(inode->i_sb)->s_rsv_window_lock; + ++ ext3_mb_discard_inode_preallocations(inode); ++ + if (!block_i) + return; + +@@ -490,19 +492,24 @@ + * @count: number of blocks to count + */ + void ext3_free_blocks(handle_t *handle, struct inode *inode, +- ext3_fsblk_t block, unsigned long count) ++ ext3_fsblk_t block, unsigned long count, int metadata) + { +- struct super_block * sb; +- unsigned long dquot_freed_blocks; ++ struct super_block *sb; ++ unsigned long freed; ++ ++ /* this isn't the right place to decide whether block is metadata ++ * inode.c/extents.c knows better, but for safety ... */ ++ if (S_ISDIR(inode->i_mode) || S_ISLNK(inode->i_mode) || ++ ext3_should_journal_data(inode)) ++ metadata = 1; + + sb = inode->i_sb; +- if (!sb) { +- printk ("ext3_free_blocks: nonexistent device"); +- return; +- } +- ext3_free_blocks_sb(handle, sb, block, count, &dquot_freed_blocks); +- if (dquot_freed_blocks) +- DQUOT_FREE_BLOCK(inode, dquot_freed_blocks); ++ if (!test_opt(sb, MBALLOC) || !EXT3_SB(sb)->s_group_info) ++ ext3_free_blocks_sb(handle, sb, block, count, &freed); ++ else ++ ext3_mb_free_blocks(handle, inode, block, count, metadata, &freed); ++ if (freed) ++ DQUOT_FREE_BLOCK(inode, freed); + return; + } + +@@ -1199,7 +1205,7 @@ int ext3_should_retry_alloc(struct super + * bitmap, and then for any free bit if that fails. + * This function also updates quota and i_blocks field. 
+ */ +-ext3_fsblk_t ext3_new_blocks(handle_t *handle, struct inode *inode, ++ext3_fsblk_t ext3_new_blocks_old(handle_t *handle, struct inode *inode, + ext3_fsblk_t goal, unsigned long *count, int *errp) + { + struct buffer_head *bitmap_bh = NULL; +@@ -1463,7 +1469,7 @@ out: + return 0; + } + +-ext3_fsblk_t ext3_new_block(handle_t *handle, struct inode *inode, ++ext3_fsblk_t ext3_new_block_old(handle_t *handle, struct inode *inode, + ext3_fsblk_t goal, int *errp) + { + unsigned long count = 1; +Index: linux-2.6.18.8/fs/ext3/inode.c +=================================================================== +--- linux-2.6.18.8.orig/fs/ext3/inode.c 2007-07-17 09:18:14.000000000 +0200 ++++ linux-2.6.18.8/fs/ext3/inode.c 2007-07-17 09:18:53.000000000 +0200 +@@ -560,7 +560,7 @@ static int ext3_alloc_blocks(handle_t *h + return ret; + failed_out: + for (i = 0; i bb_state); + } + +-unsigned long ext3_grp_offs_to_block(struct super_block *sb, ++ext3_fsblk_t ext3_grp_offs_to_block(struct super_block *sb, + struct ext3_free_extent *fex) + { +- unsigned long block; ++ ext3_fsblk_t block; + +- block = (unsigned long) fex->fe_group * EXT3_BLOCKS_PER_GROUP(sb) ++ block = (ext3_fsblk_t) fex->fe_group * EXT3_BLOCKS_PER_GROUP(sb) + + fex->fe_start + + le32_to_cpu(EXT3_SB(sb)->s_es->s_first_data_block); + return block; +@@ -3174,7 +3174,7 @@ void ext3_mb_collect_stats(struct ext3_a + void ext3_mb_use_inode_pa(struct ext3_allocation_context *ac, + struct ext3_prealloc_space *pa) + { +- unsigned long start, len; ++ ext3_fsblk_t start, len; + + /* found preallocated blocks, use them */ + start = pa->pa_pstart + (ac->ac_o_ex.fe_logical - pa->pa_lstart); +@@ -4027,13 +4027,13 @@ int ext3_mb_discard_preallocations(struc + * it tries to use preallocation first, then falls back + * to usual allocation + */ +-unsigned long ext3_mb_new_blocks(handle_t *handle, ++ext3_fsblk_t ext3_mb_new_blocks(handle_t *handle, + struct ext3_allocation_request *ar, int *errp) + { + struct ext3_allocation_context ac; + struct ext3_sb_info *sbi; + struct super_block *sb; +- unsigned long block = 0; ++ ext3_fsblk_t block = 0; + int freed, inquota; + + sb = ar->inode->i_sb; +@@ -4044,8 +4044,8 @@ unsigned long ext3_mb_new_blocks(handle_ + if (ext3_mballoc_warning++ == 0) + printk(KERN_ERR "EXT3-fs: multiblock request with " + "mballoc disabled!\n"); +- ar->len = 1; +- block = ext3_new_block_old(handle, ar->inode, ar->goal, errp); ++ block = ext3_new_blocks_old(handle, ar->inode, ar->goal, ++ &ar->len, errp); + return block; + } + +@@ -4109,11 +4109,11 @@ out: + } + EXPORT_SYMBOL(ext3_mb_new_blocks); + +-int ext3_new_block(handle_t *handle, struct inode *inode, +- unsigned long goal, int *errp) ++ext3_fsblk_t ext3_new_block(handle_t *handle, struct inode *inode, ++ ext3_fsblk_t goal, int *errp) + { + struct ext3_allocation_request ar; +- unsigned long ret; ++ ext3_fsblk_t ret; + + if (!test_opt(inode->i_sb, MBALLOC)) { + ret = ext3_new_block_old(handle, inode, goal, errp); +@@ -4228,8 +4228,8 @@ int ext3_mb_free_metadata(handle_t *hand + * Main entry point into mballoc to free blocks + */ + void ext3_mb_free_blocks(handle_t *handle, struct inode *inode, +- unsigned long block, unsigned long count, +- int metadata, int *freed) ++ ext3_fsblk_t block, unsigned long count, ++ int metadata, unsigned long *freed) + { + struct buffer_head *bitmap_bh = NULL; + struct super_block *sb = inode->i_sb; diff --git a/ldiskfs/kernel_patches/patches/ext3-mmp-2.6.22-vanilla.patch b/ldiskfs/kernel_patches/patches/ext3-mmp-2.6.22-vanilla.patch new file mode 100644 
index 0000000..afb34e1 --- /dev/null +++ b/ldiskfs/kernel_patches/patches/ext3-mmp-2.6.22-vanilla.patch @@ -0,0 +1,463 @@ +Index: linux-2.6.18/fs/ext3/super.c +=================================================================== +--- linux-2.6.18.orig/fs/ext3/super.c ++++ linux-2.6.18/fs/ext3/super.c +@@ -35,6 +35,8 @@ + #include + #include + #include ++#include ++#include + + #include + +@@ -435,6 +437,9 @@ static void ext3_put_super (struct super + invalidate_bdev(sbi->journal_bdev, 0); + ext3_blkdev_remove(sbi); + } ++ if (sbi->s_mmp_tsk) ++ kthread_stop(sbi->s_mmp_tsk); ++ + sb->s_fs_info = NULL; + kfree(sbi); + return; +@@ -1528,6 +1533,313 @@ static ext3_fsblk_t descriptor_loc(struc + return (has_super + ext3_group_first_block_no(sb, bg)); + } + ++/* ++ * Write the MMP block using WRITE_SYNC to try to get the block on-disk ++ * faster. ++ */ ++static int write_mmp_block(struct buffer_head *bh) ++{ ++ mark_buffer_dirty(bh); ++ lock_buffer(bh); ++ bh->b_end_io = end_buffer_write_sync; ++ get_bh(bh); ++ submit_bh(WRITE_SYNC, bh); ++ wait_on_buffer(bh); ++ if (unlikely(!buffer_uptodate(bh))) ++ return 1; ++ ++ return 0; ++} ++ ++/* ++ * Read the MMP block. It _must_ be read from disk and hence we clear the ++ * uptodate flag on the buffer. ++ */ ++static int read_mmp_block(struct super_block *sb, struct buffer_head **bh, ++ unsigned long mmp_block) ++{ ++ struct mmp_struct *mmp; ++ ++ if (*bh) ++ clear_buffer_uptodate(*bh); ++ ++ brelse(*bh); ++ ++ *bh = sb_bread(sb, mmp_block); ++ if (!*bh) { ++ ext3_warning(sb, __FUNCTION__, ++ "Error while reading MMP block %lu", mmp_block); ++ return -EIO; ++ } ++ ++ mmp = (struct mmp_struct *)((*bh)->b_data); ++ if (le32_to_cpu(mmp->mmp_magic) != EXT3_MMP_MAGIC) ++ return -EINVAL; ++ ++ return 0; ++} ++ ++/* ++ * Dump as much information as possible to help the admin. ++ */ ++static void dump_mmp_msg(struct super_block *sb, struct mmp_struct *mmp, ++ const char *function, const char *msg) ++{ ++ ext3_warning(sb, function, msg); ++ ext3_warning(sb, function, "MMP failure info: last update time: %llu, " ++ "last update node: %s, last update device: %s\n", ++ le64_to_cpu(mmp->mmp_time), mmp->mmp_nodename, ++ mmp->mmp_bdevname); ++} ++ ++/* ++ * kmmpd will update the MMP sequence every s_mmp_update_interval seconds ++ */ ++static int kmmpd(void *data) ++{ ++ struct super_block *sb = (struct super_block *) data; ++ struct ext3_super_block *es = EXT3_SB(sb)->s_es; ++ struct buffer_head *bh = NULL; ++ struct mmp_struct *mmp; ++ unsigned long mmp_block; ++ u32 seq = 0; ++ unsigned long failed_writes = 0; ++ int mmp_update_interval = le16_to_cpu(es->s_mmp_update_interval); ++ unsigned mmp_check_interval; ++ unsigned long last_update_time; ++ unsigned long diff; ++ int retval; ++ ++ mmp_block = le64_to_cpu(es->s_mmp_block); ++ retval = read_mmp_block(sb, &bh, mmp_block); ++ if (retval) ++ goto failed; ++ ++ mmp = (struct mmp_struct *)(bh->b_data); ++ mmp->mmp_time = cpu_to_le64(get_seconds()); ++ /* ++ * Start with the higher mmp_check_interval and reduce it if ++ * the MMP block is being updated on time. 
++ */ ++ mmp_check_interval = max(5 * mmp_update_interval, ++ EXT3_MMP_MIN_CHECK_INTERVAL); ++ mmp->mmp_check_interval = cpu_to_le16(mmp_check_interval); ++ bdevname(bh->b_bdev, mmp->mmp_bdevname); ++ ++ down_read(&uts_sem); ++ memcpy(mmp->mmp_nodename, utsname()->nodename, ++ sizeof(mmp->mmp_nodename)); ++ up_read(&uts_sem); ++ ++ while (!kthread_should_stop()) { ++ if (++seq > EXT3_MMP_SEQ_MAX) ++ seq = 1; ++ ++ mmp->mmp_seq = cpu_to_le32(seq); ++ mmp->mmp_time = cpu_to_le64(get_seconds()); ++ last_update_time = jiffies; ++ ++ retval = write_mmp_block(bh); ++ /* ++ * Don't spew too many error messages. Print one every ++ * (s_mmp_update_interval * 60) seconds. ++ */ ++ if (retval && (failed_writes % 60) == 0) { ++ ext3_error(sb, __FUNCTION__, ++ "Error writing to MMP block"); ++ failed_writes++; ++ } ++ ++ if (!(le32_to_cpu(es->s_feature_incompat) & ++ EXT3_FEATURE_INCOMPAT_MMP)) { ++ ext3_warning(sb, __FUNCTION__, "kmmpd being stopped " ++ "since MMP feature has been disabled."); ++ EXT3_SB(sb)->s_mmp_tsk = 0; ++ goto failed; ++ } ++ ++ if (sb->s_flags & MS_RDONLY) { ++ ext3_warning(sb, __FUNCTION__, "kmmpd being stopped " ++ "since filesystem has been remounted as " ++ "readonly."); ++ EXT3_SB(sb)->s_mmp_tsk = 0; ++ goto failed; ++ } ++ ++ diff = jiffies - last_update_time; ++ if (diff < mmp_update_interval * HZ) ++ schedule_timeout_interruptible(EXT3_MMP_UPDATE_INTERVAL* ++ HZ - diff); ++ ++ /* ++ * We need to make sure that more than mmp_check_interval ++ * seconds have not passed since writing. If that has happened ++ * we need to check if the MMP block is as we left it. ++ */ ++ diff = jiffies - last_update_time; ++ if (diff > mmp_check_interval * HZ) { ++ struct buffer_head *bh_check = NULL; ++ struct mmp_struct *mmp_check; ++ ++ retval = read_mmp_block(sb, &bh_check, mmp_block); ++ if (retval) { ++ EXT3_SB(sb)->s_mmp_tsk = 0; ++ goto failed; ++ } ++ ++ mmp_check = (struct mmp_struct *)(bh_check->b_data); ++ if (mmp->mmp_time != mmp_check->mmp_time || ++ memcmp(mmp->mmp_nodename, mmp_check->mmp_nodename, ++ sizeof(mmp->mmp_nodename))) ++ dump_mmp_msg(sb, mmp_check, __FUNCTION__, ++ "Error while updating MMP info. " ++ "The filesystem seems to have " ++ "been multiply mounted."); ++ ++ put_bh(bh_check); ++ } ++ ++ /* ++ * Adjust the mmp_check_interval depending on how much time ++ * it took for the MMP block to be written. ++ */ ++ mmp_check_interval = max(5 * diff / HZ, ++ (unsigned long) EXT3_MMP_MIN_CHECK_INTERVAL); ++ mmp->mmp_check_interval = cpu_to_le16(mmp_check_interval); ++ } ++ ++ /* ++ * Unmount seems to be clean. ++ */ ++ mmp->mmp_seq = cpu_to_le32(EXT3_MMP_SEQ_CLEAN); ++ mmp->mmp_time = cpu_to_le64(get_seconds()); ++ ++ retval = write_mmp_block(bh); ++ ++failed: ++ brelse(bh); ++ return retval; ++} ++ ++/* ++ * Get a random new sequence number but make sure it is not greater than ++ * EXT3_MMP_SEQ_MAX. ++ */ ++static unsigned int mmp_new_seq(void) ++{ ++ u32 new_seq; ++ ++ do { ++ get_random_bytes(&new_seq, sizeof(u32)); ++ } while (new_seq > EXT3_MMP_SEQ_MAX); ++ ++ return new_seq; ++} ++ ++/* ++ * Protect the filesystem from being mounted more than once. 
++ */ ++static int ext3_multi_mount_protect(struct super_block *sb, ++ unsigned long mmp_block) ++{ ++ struct ext3_super_block *es = EXT3_SB(sb)->s_es; ++ struct buffer_head *bh = NULL; ++ struct mmp_struct *mmp = NULL; ++ u32 seq; ++ unsigned int mmp_check_interval = le16_to_cpu(es->s_mmp_update_interval); ++ int retval; ++ ++ if (mmp_block < le32_to_cpu(es->s_first_data_block) || ++ mmp_block >= le32_to_cpu(es->s_blocks_count)) { ++ ext3_warning(sb, __FUNCTION__, ++ "Invalid MMP block in superblock"); ++ goto failed; ++ } ++ ++ retval = read_mmp_block(sb, &bh, mmp_block); ++ if (retval) ++ goto failed; ++ ++ mmp = (struct mmp_struct *)(bh->b_data); ++ ++ if (mmp_check_interval < EXT3_MMP_MIN_CHECK_INTERVAL) ++ mmp_check_interval = EXT3_MMP_MIN_CHECK_INTERVAL; ++ ++ /* ++ * If check_interval in MMP block is larger, use that instead of ++ * update_interval from the superblock. ++ */ ++ if (mmp->mmp_check_interval > mmp_check_interval) ++ mmp_check_interval = mmp->mmp_check_interval; ++ ++ seq = le32_to_cpu(mmp->mmp_seq); ++ if (seq == EXT3_MMP_SEQ_CLEAN) ++ goto skip; ++ ++ if (seq == EXT3_MMP_SEQ_FSCK) { ++ dump_mmp_msg(sb, mmp, __FUNCTION__, ++ "fsck is running on the filesystem"); ++ goto failed; ++ } ++ ++ schedule_timeout_uninterruptible(HZ * (2 * mmp_check_interval + 1)); ++ ++ retval = read_mmp_block(sb, &bh, mmp_block); ++ if (retval) ++ goto failed; ++ mmp = (struct mmp_struct *)(bh->b_data); ++ if (seq != le32_to_cpu(mmp->mmp_seq)) { ++ dump_mmp_msg(sb, mmp, __FUNCTION__, ++ "Device is already active on another node."); ++ goto failed; ++ } ++ ++skip: ++ /* ++ * write a new random sequence number. ++ */ ++ mmp->mmp_seq = seq = cpu_to_le32(mmp_new_seq()); ++ ++ retval = write_mmp_block(bh); ++ if (retval) ++ goto failed; ++ ++ /* ++ * wait for MMP interval and check mmp_seq. ++ */ ++ schedule_timeout_uninterruptible(HZ * (2 * mmp_check_interval + 1)); ++ ++ retval = read_mmp_block(sb, &bh, mmp_block); ++ if (retval) ++ goto failed; ++ mmp = (struct mmp_struct *)(bh->b_data); ++ if (seq != le32_to_cpu(mmp->mmp_seq)) { ++ dump_mmp_msg(sb, mmp, __FUNCTION__, ++ "Device is already active on another node."); ++ goto failed; ++ } ++ ++ /* ++ * Start a kernel thread to update the MMP block periodically. ++ */ ++ EXT3_SB(sb)->s_mmp_tsk = kthread_run(kmmpd, sb, "kmmpd-%02x:%02x", ++ MAJOR(sb->s_dev), ++ MINOR(sb->s_dev)); ++ if (IS_ERR(EXT3_SB(sb)->s_mmp_tsk)) { ++ EXT3_SB(sb)->s_mmp_tsk = 0; ++ ext3_warning(sb, __FUNCTION__, "Unable to create kmmpd thread " ++ "for %s.", sb->s_id); ++ goto failed; ++ } ++ ++ brelse(bh); ++ return 0; ++ ++failed: ++ brelse(bh); ++ return 1; ++} ++ + + static int ext3_fill_super (struct super_block *sb, void *data, int silent) + { +@@ -1844,6 +2156,11 @@ static int ext3_fill_super (struct super + EXT3_HAS_INCOMPAT_FEATURE(sb, + EXT3_FEATURE_INCOMPAT_RECOVER)); + ++ if (EXT3_HAS_INCOMPAT_FEATURE(sb, EXT3_FEATURE_INCOMPAT_MMP) && ++ !(sb->s_flags & MS_RDONLY)) ++ if (ext3_multi_mount_protect(sb, le64_to_cpu(es->s_mmp_block))) ++ goto failed_mount3; ++ + /* + * The first inode we look at is the journal inode. Don't try + * root first: it may be modified in the journal! 
+@@ -2446,7 +2763,7 @@ static int ext3_remount (struct super_bl + ext3_fsblk_t n_blocks_count = 0; + unsigned long old_sb_flags; + struct ext3_mount_options old_opts; +- int err; ++ int err = 0; + #ifdef CONFIG_QUOTA + int i; + #endif +@@ -2530,6 +2847,11 @@ static int ext3_remount (struct super_bl + } + if (!ext3_setup_super (sb, es, 0)) + sb->s_flags &= ~MS_RDONLY; ++ if (EXT3_HAS_INCOMPAT_FEATURE(sb, ++ EXT3_FEATURE_INCOMPAT_MMP)) ++ if (ext3_multi_mount_protect(sb, ++ le64_to_cpu(es->s_mmp_block))) ++ goto restore_opts; + } + } + #ifdef CONFIG_QUOTA +Index: linux-2.6.18/include/linux/ext3_fs.h +=================================================================== +--- linux-2.6.18.orig/include/linux/ext3_fs.h ++++ linux-2.6.18/include/linux/ext3_fs.h +@@ -593,13 +593,17 @@ struct ext3_super_block { + __le32 s_first_meta_bg; /* First metablock block group */ + __le32 s_mkfs_time; /* When the filesystem was created */ + __le32 s_jnl_blocks[17]; /* Backup of the journal inode */ +- __le32 s_blocks_count_hi; /* Blocks count high 32 bits */ ++/*150*/ __le32 s_blocks_count_hi; /* Blocks count high 32 bits */ + __le32 s_r_blocks_count_hi; /* Reserved blocks count high 32 bits*/ + __le32 s_free_blocks_count_hi; /* Free blocks count high 32 bits */ + __le16 s_min_extra_isize; /* All inodes have at least # bytes */ + __le16 s_want_extra_isize; /* New inodes should reserve # bytes */ +- __le32 s_flags; /* Miscellaneous flags */ +- __u32 s_reserved[167]; /* Padding to the end of the block */ ++/*160*/ __le32 s_flags; /* Miscellaneous flags */ ++ __le16 s_raid_stride; /* RAID stride */ ++ __le16 s_mmp_update_interval; /* # seconds to wait in MMP checking */ ++ __le64 s_mmp_block; /* Block for multi-mount protection */ ++/*170*/ __le32 s_raid_stripe_width; /* blocks on all data disks (N*stride)*/ ++ __le32 s_reserved[163]; /* Padding to the end of the block */ + }; + + #ifdef __KERNEL__ +@@ -702,12 +706,14 @@ static inline int ext3_valid_inum(struct + #define EXT3_FEATURE_INCOMPAT_JOURNAL_DEV 0x0008 /* Journal device */ + #define EXT3_FEATURE_INCOMPAT_META_BG 0x0010 + #define EXT3_FEATURE_INCOMPAT_EXTENTS 0x0040 /* extents support */ ++#define EXT3_FEATURE_INCOMPAT_MMP 0x0100 + + #define EXT3_FEATURE_COMPAT_SUPP EXT2_FEATURE_COMPAT_EXT_ATTR + #define EXT3_FEATURE_INCOMPAT_SUPP (EXT3_FEATURE_INCOMPAT_FILETYPE| \ + EXT3_FEATURE_INCOMPAT_RECOVER| \ + EXT3_FEATURE_INCOMPAT_META_BG| \ +- EXT3_FEATURE_INCOMPAT_EXTENTS) ++ EXT3_FEATURE_INCOMPAT_EXTENTS| \ ++ EXT3_FEATURE_INCOMPAT_MMP) + #define EXT3_FEATURE_RO_COMPAT_SUPP (EXT3_FEATURE_RO_COMPAT_SPARSE_SUPER| \ + EXT3_FEATURE_RO_COMPAT_LARGE_FILE| \ + EXT4_FEATURE_RO_COMPAT_GDT_CSUM| \ +@@ -870,6 +876,39 @@ ext3_group_first_block_no(struct super_b + #define ERR_BAD_DX_DIR -75000 + + /* ++ * This structure will be used for multiple mount protection. It will be ++ * written into the block number saved in the s_mmp_block field in the ++ * superblock. Programs that check MMP should assume that if ++ * SEQ_FSCK (or any unknown code above SEQ_MAX) is present then it is NOT safe ++ * to use the filesystem, regardless of how old the timestamp is. 
++ */ ++#define EXT3_MMP_MAGIC 0x004D4D50U /* ASCII for MMP */ ++#define EXT3_MMP_SEQ_CLEAN 0xFF4D4D50U /* mmp_seq value for clean unmount */ ++#define EXT3_MMP_SEQ_FSCK 0xE24D4D50U /* mmp_seq value when being fscked */ ++#define EXT3_MMP_SEQ_MAX 0xE24D4D4FU /* maximum valid mmp_seq value */ ++ ++struct mmp_struct { ++ __le32 mmp_magic; ++ __le32 mmp_seq; ++ __le64 mmp_time; ++ char mmp_nodename[64]; ++ char mmp_bdevname[32]; ++ __le16 mmp_check_interval; ++ __le16 mmp_pad1; ++ __le32 mmp_pad2[227]; ++}; ++ ++/* ++ * Default interval in seconds to update the MMP sequence number. ++ */ ++#define EXT3_MMP_UPDATE_INTERVAL 1 ++ ++/* ++ * Minimum interval for MMP checking in seconds. ++ */ ++#define EXT3_MMP_MIN_CHECK_INTERVAL 5 ++ ++/* + * Function prototypes + */ + +Index: linux-2.6.18/include/linux/ext3_fs_sb.h +=================================================================== +--- linux-2.6.18.orig/include/linux/ext3_fs_sb.h ++++ linux-2.6.18/include/linux/ext3_fs_sb.h +@@ -151,6 +151,7 @@ struct ext3_sb_info { + /* locality groups */ + struct ext3_locality_group *s_locality_groups; + ++ struct task_struct *s_mmp_tsk; /* Kernel thread for multiple mount protection */ + }; + + #define EXT3_GROUP_INFO(sb, group) \ diff --git a/ldiskfs/kernel_patches/patches/ext3-nanosecond-2.6.22-vanilla.patch b/ldiskfs/kernel_patches/patches/ext3-nanosecond-2.6.22-vanilla.patch new file mode 100644 index 0000000..1347a83 --- /dev/null +++ b/ldiskfs/kernel_patches/patches/ext3-nanosecond-2.6.22-vanilla.patch @@ -0,0 +1,407 @@ +Index: linux-2.6.18.8/fs/ext3/ialloc.c +=================================================================== +--- linux-2.6.18.8.orig/fs/ext3/ialloc.c 2007-06-20 18:54:59.000000000 +0200 ++++ linux-2.6.18.8/fs/ext3/ialloc.c 2007-06-20 18:54:59.000000000 +0200 +@@ -729,7 +729,8 @@ got: + /* This is the optimal IO size (for stat), not the fs block size */ + inode->i_blksize = PAGE_SIZE; + inode->i_blocks = 0; +- inode->i_mtime = inode->i_atime = inode->i_ctime = CURRENT_TIME_SEC; ++ inode->i_mtime = inode->i_atime = inode->i_ctime = ei->i_crtime = ++ ext3_current_time(inode); + + memset(ei->i_data, 0, sizeof(ei->i_data)); + ei->i_dir_start_lookup = 0; +@@ -761,9 +762,8 @@ got: + spin_unlock(&sbi->s_next_gen_lock); + + ei->i_state = EXT3_STATE_NEW; +- ei->i_extra_isize = +- (EXT3_INODE_SIZE(inode->i_sb) > EXT3_GOOD_OLD_INODE_SIZE) ? +- sizeof(struct ext3_inode) - EXT3_GOOD_OLD_INODE_SIZE : 0; ++ ++ ei->i_extra_isize = EXT3_SB(sb)->s_want_extra_isize; + + ret = inode; + if(DQUOT_ALLOC_INODE(inode)) { +Index: linux-2.6.18.8/fs/ext3/inode.c +=================================================================== +--- linux-2.6.18.8.orig/fs/ext3/inode.c 2007-06-20 18:54:52.000000000 +0200 ++++ linux-2.6.18.8/fs/ext3/inode.c 2007-06-20 18:54:59.000000000 +0200 +@@ -727,7 +727,7 @@ static int ext3_splice_branch(handle_t * + + /* We are done with atomic stuff, now do the rest of housekeeping */ + +- inode->i_ctime = CURRENT_TIME_SEC; ++ inode->i_ctime = ext3_current_time(inode); + ext3_mark_inode_dirty(handle, inode); + + /* had we spliced it onto indirect block? 
*/ +@@ -2375,7 +2375,7 @@ do_indirects: + ext3_discard_reservation(inode); + + mutex_unlock(&ei->truncate_mutex); +- inode->i_mtime = inode->i_ctime = CURRENT_TIME_SEC; ++ inode->i_mtime = inode->i_ctime = ext3_current_time(inode); + ext3_mark_inode_dirty(handle, inode); + + /* +@@ -2611,10 +2611,6 @@ void ext3_read_inode(struct inode * inod + } + inode->i_nlink = le16_to_cpu(raw_inode->i_links_count); + inode->i_size = le32_to_cpu(raw_inode->i_size); +- inode->i_atime.tv_sec = (signed)le32_to_cpu(raw_inode->i_atime); +- inode->i_ctime.tv_sec = (signed)le32_to_cpu(raw_inode->i_ctime); +- inode->i_mtime.tv_sec = (signed)le32_to_cpu(raw_inode->i_mtime); +- inode->i_atime.tv_nsec = inode->i_ctime.tv_nsec = inode->i_mtime.tv_nsec = 0; + + ei->i_state = 0; + ei->i_dir_start_lookup = 0; +@@ -2689,6 +2685,11 @@ void ext3_read_inode(struct inode * inod + } else + ei->i_extra_isize = 0; + ++ EXT3_INODE_GET_XTIME(i_ctime, inode, raw_inode); ++ EXT3_INODE_GET_XTIME(i_mtime, inode, raw_inode); ++ EXT3_INODE_GET_XTIME(i_atime, inode, raw_inode); ++ EXT3_EINODE_GET_XTIME(i_crtime, ei, raw_inode); ++ + if (S_ISREG(inode->i_mode)) { + inode->i_op = &ext3_file_inode_operations; + inode->i_fop = &ext3_file_operations; +@@ -2769,9 +2770,12 @@ static int ext3_do_update_inode(handle_t + } + raw_inode->i_links_count = cpu_to_le16(inode->i_nlink); + raw_inode->i_size = cpu_to_le32(ei->i_disksize); +- raw_inode->i_atime = cpu_to_le32(inode->i_atime.tv_sec); +- raw_inode->i_ctime = cpu_to_le32(inode->i_ctime.tv_sec); +- raw_inode->i_mtime = cpu_to_le32(inode->i_mtime.tv_sec); ++ ++ EXT3_INODE_SET_XTIME(i_ctime, inode, raw_inode); ++ EXT3_INODE_SET_XTIME(i_mtime, inode, raw_inode); ++ EXT3_INODE_SET_XTIME(i_atime, inode, raw_inode); ++ EXT3_EINODE_SET_XTIME(i_crtime, ei, raw_inode); ++ + raw_inode->i_blocks = cpu_to_le32(inode->i_blocks); + raw_inode->i_dtime = cpu_to_le32(ei->i_dtime); + raw_inode->i_flags = cpu_to_le32(ei->i_flags); +Index: linux-2.6.18.8/fs/ext3/ioctl.c +=================================================================== +--- linux-2.6.18.8.orig/fs/ext3/ioctl.c 2007-06-20 18:42:05.000000000 +0200 ++++ linux-2.6.18.8/fs/ext3/ioctl.c 2007-06-20 18:54:59.000000000 +0200 +@@ -120,7 +120,7 @@ int ext3_ioctl (struct inode * inode, st + ei->i_flags = flags; + + ext3_set_inode_flags(inode); +- inode->i_ctime = CURRENT_TIME_SEC; ++ inode->i_ctime = ext3_current_time(inode); + + err = ext3_mark_iloc_dirty(handle, inode, &iloc); + flags_err: +@@ -157,7 +157,7 @@ flags_err: + return PTR_ERR(handle); + err = ext3_reserve_inode_write(handle, inode, &iloc); + if (err == 0) { +- inode->i_ctime = CURRENT_TIME_SEC; ++ inode->i_ctime = ext3_current_time(inode); + inode->i_generation = generation; + err = ext3_mark_iloc_dirty(handle, inode, &iloc); + } +Index: linux-2.6.18.8/fs/ext3/namei.c +=================================================================== +--- linux-2.6.18.8.orig/fs/ext3/namei.c 2007-06-20 18:54:53.000000000 +0200 ++++ linux-2.6.18.8/fs/ext3/namei.c 2007-06-20 18:54:59.000000000 +0200 +@@ -1287,7 +1287,7 @@ static int add_dirent_to_buf(handle_t *h + * happen is that the times are slightly out of date + * and/or different from the directory change time. 
+ */ +- dir->i_mtime = dir->i_ctime = CURRENT_TIME_SEC; ++ dir->i_mtime = dir->i_ctime = ext3_current_time(dir); + ext3_update_dx_flag(dir); + dir->i_version++; + ext3_mark_inode_dirty(handle, dir); +@@ -2079,7 +2079,7 @@ static int ext3_rmdir (struct inode * di + inode->i_version++; + inode->i_nlink = 0; + ext3_orphan_add(handle, inode); +- inode->i_ctime = dir->i_ctime = dir->i_mtime = CURRENT_TIME_SEC; ++ inode->i_ctime = dir->i_ctime = dir->i_mtime = ext3_current_time(inode); + ext3_mark_inode_dirty(handle, inode); + ext3_dec_count(handle, dir); + ext3_update_dx_flag(dir); +@@ -2129,13 +2129,13 @@ static int ext3_unlink(struct inode * di + retval = ext3_delete_entry(handle, dir, de, bh); + if (retval) + goto end_unlink; +- dir->i_ctime = dir->i_mtime = CURRENT_TIME_SEC; ++ dir->i_ctime = dir->i_mtime = ext3_current_time(dir); + ext3_update_dx_flag(dir); + ext3_mark_inode_dirty(handle, dir); + ext3_dec_count(handle, inode); + if (!inode->i_nlink) + ext3_orphan_add(handle, inode); +- inode->i_ctime = dir->i_ctime; ++ inode->i_ctime = ext3_current_time(inode); + ext3_mark_inode_dirty(handle, inode); + retval = 0; + +@@ -2237,8 +2237,8 @@ retry: + if (IS_DIRSYNC(dir)) + handle->h_sync = 1; + +- inode->i_ctime = CURRENT_TIME_SEC; ++ inode->i_ctime = ext3_current_time(inode); +- inc_nlink(inode); ++ ext3_inc_count(handle, inode); + atomic_inc(&inode->i_count); + + err = ext3_add_link(handle, dentry, inode); +@@ -2340,7 +2340,7 @@ static int ext3_rename (struct inode * o + * Like most other Unix systems, set the ctime for inodes on a + * rename. + */ +- old_inode->i_ctime = CURRENT_TIME_SEC; ++ old_inode->i_ctime = ext3_current_time(old_inode); + ext3_mark_inode_dirty(handle, old_inode); + + /* +@@ -2373,9 +2373,9 @@ static int ext3_rename (struct inode * o + + if (new_inode) { + ext3_dec_count(handle, new_inode); +- new_inode->i_ctime = CURRENT_TIME_SEC; ++ new_inode->i_ctime = ext3_current_time(new_inode); + } +- old_dir->i_ctime = old_dir->i_mtime = CURRENT_TIME_SEC; ++ old_dir->i_ctime = old_dir->i_mtime = ext3_current_time(old_dir); + ext3_update_dx_flag(old_dir); + if (dir_bh) { + BUFFER_TRACE(dir_bh, "get_write_access"); +Index: linux-2.6.18.8/fs/ext3/super.c +=================================================================== +--- linux-2.6.18.8.orig/fs/ext3/super.c 2007-06-20 18:54:59.000000000 +0200 ++++ linux-2.6.18.8/fs/ext3/super.c 2007-06-20 18:54:59.000000000 +0200 +@@ -1713,6 +1713,8 @@ static int ext3_fill_super (struct super + sbi->s_inode_size); + goto failed_mount; + } ++ if (sbi->s_inode_size > EXT3_GOOD_OLD_INODE_SIZE) ++ sb->s_time_gran = 1 << (EXT3_EPOCH_BITS - 2); + } + sbi->s_frag_size = EXT3_MIN_FRAG_SIZE << + le32_to_cpu(es->s_log_frag_size); +@@ -1917,6 +1919,32 @@ static int ext3_fill_super (struct super + } + + ext3_setup_super (sb, es, sb->s_flags & MS_RDONLY); ++ ++ /* determine the minimum size of new large inodes, if present */ ++ if (sbi->s_inode_size > EXT3_GOOD_OLD_INODE_SIZE) { ++ sbi->s_want_extra_isize = sizeof(struct ext3_inode) - ++ EXT3_GOOD_OLD_INODE_SIZE; ++ if (EXT3_HAS_RO_COMPAT_FEATURE(sb, ++ EXT4_FEATURE_RO_COMPAT_EXTRA_ISIZE)) { ++ if (sbi->s_want_extra_isize < ++ le16_to_cpu(es->s_want_extra_isize)) ++ sbi->s_want_extra_isize = ++ le16_to_cpu(es->s_want_extra_isize); ++ if (sbi->s_want_extra_isize < ++ le16_to_cpu(es->s_min_extra_isize)) ++ sbi->s_want_extra_isize = ++ le16_to_cpu(es->s_min_extra_isize); ++ } ++ } ++ /* Check if enough inode space is available */ ++ if (EXT3_GOOD_OLD_INODE_SIZE + sbi->s_want_extra_isize > ++ 
sbi->s_inode_size) { ++ sbi->s_want_extra_isize = sizeof(struct ext3_inode) - ++ EXT3_GOOD_OLD_INODE_SIZE; ++ printk(KERN_INFO "EXT3-fs: required extra inode space not" ++ "available.\n"); ++ } ++ + /* + * akpm: core read_super() calls in here with the superblock locked. + * That deadlocks, because orphan cleanup needs to lock the superblock +Index: linux-2.6.18.8/fs/ext3/xattr.c +=================================================================== +--- linux-2.6.18.8.orig/fs/ext3/xattr.c 2007-06-20 18:54:52.000000000 +0200 ++++ linux-2.6.18.8/fs/ext3/xattr.c 2007-06-20 18:54:59.000000000 +0200 +@@ -1007,7 +1007,7 @@ ext3_xattr_set_handle(handle_t *handle, + } + if (!error) { + ext3_xattr_update_super_block(handle, inode->i_sb); +- inode->i_ctime = CURRENT_TIME_SEC; ++ inode->i_ctime = ext3_current_time(inode); + error = ext3_mark_iloc_dirty(handle, inode, &is.iloc); + /* + * The bh is consumed by ext3_mark_iloc_dirty, even with +Index: linux-2.6.18.8/include/linux/ext3_fs.h +=================================================================== +--- linux-2.6.18.8.orig/include/linux/ext3_fs.h 2007-06-20 18:54:59.000000000 +0200 ++++ linux-2.6.18.8/include/linux/ext3_fs.h 2007-06-20 18:54:59.000000000 +0200 +@@ -288,7 +288,7 @@ struct ext3_inode { + __le16 i_uid; /* Low 16 bits of Owner Uid */ + __le32 i_size; /* Size in bytes */ + __le32 i_atime; /* Access time */ +- __le32 i_ctime; /* Creation time */ ++ __le32 i_ctime; /* Inode Change time */ + __le32 i_mtime; /* Modification time */ + __le32 i_dtime; /* Deletion Time */ + __le16 i_gid; /* Low 16 bits of Group Id */ +@@ -337,10 +337,73 @@ struct ext3_inode { + } osd2; /* OS dependent 2 */ + __le16 i_extra_isize; + __le16 i_pad1; ++ __le32 i_ctime_extra; /* extra Change time (nsec << 2 | epoch) */ ++ __le32 i_mtime_extra; /* extra Modification time(nsec << 2 | epoch) */ ++ __le32 i_atime_extra; /* extra Access time (nsec << 2 | epoch) */ ++ __le32 i_crtime; /* File Creation time */ ++ __le32 i_crtime_extra; /* extra File Creation time (nsec << 2 | epoch) */ + }; + + #define i_size_high i_dir_acl + ++#define EXT3_EPOCH_BITS 2 ++#define EXT3_EPOCH_MASK ((1 << EXT3_EPOCH_BITS) - 1) ++#define EXT3_NSEC_MASK (~0UL << EXT3_EPOCH_BITS) ++ ++#define EXT3_FITS_IN_INODE(ext3_inode, einode, field) \ ++ ((offsetof(typeof(*ext3_inode), field) + \ ++ sizeof((ext3_inode)->field)) \ ++ <= (EXT3_GOOD_OLD_INODE_SIZE + \ ++ (einode)->i_extra_isize)) \ ++ ++static inline __le32 ext3_encode_extra_time(struct timespec *time) ++{ ++ return cpu_to_le32((sizeof(time->tv_sec) > 4 ? 
++ time->tv_sec >> 32 : 0) | ++ ((time->tv_nsec << 2) & EXT3_NSEC_MASK)); ++} ++ ++static inline void ext3_decode_extra_time(struct timespec *time, __le32 extra) { ++ if (sizeof(time->tv_sec) > 4) ++ time->tv_sec |= (__u64)(le32_to_cpu(extra) & EXT3_EPOCH_MASK) ++ << 32; ++ time->tv_nsec = (le32_to_cpu(extra) & EXT3_NSEC_MASK) >> 2; ++} ++ ++#define EXT3_INODE_SET_XTIME(xtime, inode, raw_inode) \ ++do { \ ++ (raw_inode)->xtime = cpu_to_le32((inode)->xtime.tv_sec); \ ++ if (EXT3_FITS_IN_INODE(raw_inode, EXT3_I(inode), xtime ## _extra)) \ ++ (raw_inode)->xtime ## _extra = \ ++ ext3_encode_extra_time(&(inode)->xtime); \ ++} while (0) ++ ++#define EXT3_EINODE_SET_XTIME(xtime, einode, raw_inode)\ ++do { \ ++ if (EXT3_FITS_IN_INODE(raw_inode, einode, xtime)) \ ++ (raw_inode)->xtime = cpu_to_le32((einode)->xtime.tv_sec); \ ++ if (EXT3_FITS_IN_INODE(raw_inode, einode, xtime ## _extra)) \ ++ (raw_inode)->xtime ## _extra = \ ++ ext3_encode_extra_time(&(einode)->xtime); \ ++} while (0) ++ ++#define EXT3_INODE_GET_XTIME(xtime, inode, raw_inode) \ ++do { \ ++ (inode)->xtime.tv_sec = (signed)le32_to_cpu((raw_inode)->xtime); \ ++ if (EXT3_FITS_IN_INODE(raw_inode, EXT3_I(inode), xtime ## _extra)) \ ++ ext3_decode_extra_time(&(inode)->xtime, \ ++ raw_inode->xtime ## _extra); \ ++} while (0) ++ ++#define EXT3_EINODE_GET_XTIME(xtime, einode, raw_inode) \ ++do { \ ++ if (EXT3_FITS_IN_INODE(raw_inode, einode, xtime)) \ ++ (einode)->xtime.tv_sec = (signed)le32_to_cpu((raw_inode)->xtime); \ ++ if (EXT3_FITS_IN_INODE(raw_inode, einode, xtime ## _extra)) \ ++ ext3_decode_extra_time(&(einode)->xtime, \ ++ raw_inode->xtime ## _extra); \ ++} while (0) ++ + #if defined(__KERNEL__) || defined(__linux__) + #define i_reserved1 osd1.linux1.l_i_reserved1 + #define i_frag osd2.linux2.l_i_frag +@@ -520,11 +583,19 @@ struct ext3_super_block { + __le32 s_last_orphan; /* start of list of inodes to delete */ + __le32 s_hash_seed[4]; /* HTREE hash seed */ + __u8 s_def_hash_version; /* Default hash version to use */ +- __u8 s_reserved_char_pad; +- __u16 s_reserved_word_pad; ++ __u8 s_jnl_backup_type; /* Default type of journal backup */ ++ __le16 s_desc_size; /* Group desc. size: INCOMPAT_64BIT */ + __le32 s_default_mount_opts; +- __le32 s_first_meta_bg; /* First metablock block group */ +- __u32 s_reserved[190]; /* Padding to the end of the block */ ++ __le32 s_first_meta_bg; /* First metablock block group */ ++ __le32 s_mkfs_time; /* When the filesystem was created */ ++ __le32 s_jnl_blocks[17]; /* Backup of the journal inode */ ++ __le32 s_blocks_count_hi; /* Blocks count high 32 bits */ ++ __le32 s_r_blocks_count_hi; /* Reserved blocks count high 32 bits*/ ++ __le32 s_free_blocks_count_hi; /* Free blocks count high 32 bits */ ++ __le16 s_min_extra_isize; /* All inodes have at least # bytes */ ++ __le16 s_want_extra_isize; /* New inodes should reserve # bytes */ ++ __le32 s_flags; /* Miscellaneous flags */ ++ __u32 s_reserved[167]; /* Padding to the end of the block */ + }; + + #ifdef __KERNEL__ +@@ -539,6 +610,13 @@ static inline struct ext3_inode_info *EX + return container_of(inode, struct ext3_inode_info, vfs_inode); + } + ++static inline struct timespec ext3_current_time(struct inode *inode) ++{ ++ return (inode->i_sb->s_time_gran < NSEC_PER_SEC) ? 
++ current_fs_time(inode->i_sb) : CURRENT_TIME_SEC; ++} ++ ++ + static inline int ext3_valid_inum(struct super_block *sb, unsigned long ino) + { + return ino == EXT3_ROOT_INO || +@@ -611,6 +689,8 @@ static inline int ext3_valid_inum(struct + #define EXT3_FEATURE_RO_COMPAT_BTREE_DIR 0x0004 + #define EXT4_FEATURE_RO_COMPAT_GDT_CSUM 0x0010 + #define EXT4_FEATURE_RO_COMPAT_DIR_NLINK 0x0020 ++#define EXT4_FEATURE_RO_COMPAT_EXTRA_ISIZE 0x0040 ++ + + #define EXT3_FEATURE_INCOMPAT_COMPRESSION 0x0001 + #define EXT3_FEATURE_INCOMPAT_FILETYPE 0x0002 +@@ -628,6 +708,7 @@ static inline int ext3_valid_inum(struct + EXT3_FEATURE_RO_COMPAT_LARGE_FILE| \ + EXT4_FEATURE_RO_COMPAT_GDT_CSUM| \ + EXT4_FEATURE_RO_COMPAT_DIR_NLINK| \ ++ EXT4_FEATURE_RO_COMPAT_EXTRA_ISIZE| \ + EXT3_FEATURE_RO_COMPAT_BTREE_DIR) + + /* +Index: linux-2.6.18.8/include/linux/ext3_fs_sb.h +=================================================================== +--- linux-2.6.18.8.orig/include/linux/ext3_fs_sb.h 2007-06-20 18:54:54.000000000 +0200 ++++ linux-2.6.18.8/include/linux/ext3_fs_sb.h 2007-06-20 18:54:59.000000000 +0200 +@@ -68,6 +68,9 @@ struct ext3_sb_info { + /* Last group used to allocate inode */ + int s_last_alloc_group; + ++ /* New inodes should reserve # bytes */ ++ unsigned int s_want_extra_isize; ++ + /* root of the per fs reservation window tree */ + spinlock_t s_rsv_window_lock; + struct rb_root s_rsv_window_root; +Index: linux-2.6.18.8/include/linux/ext3_fs_i.h +=================================================================== +--- linux-2.6.18.8.orig/include/linux/ext3_fs_i.h 2007-06-20 18:54:57.000000000 +0200 ++++ linux-2.6.18.8/include/linux/ext3_fs_i.h 2007-06-20 18:54:59.000000000 +0200 +@@ -140,6 +140,8 @@ struct ext3_inode_info { + /* on-disk additional length */ + __u16 i_extra_isize; + ++ struct timespec i_crtime; ++ + /* + * truncate_mutex is for serialising ext3_truncate() against + * ext3_getblock(). 
In the 2.4 ext2 design, great chunks of inode's diff --git a/ldiskfs/kernel_patches/patches/ext3-nlinks-2.6.22-vanilla.patch b/ldiskfs/kernel_patches/patches/ext3-nlinks-2.6.22-vanilla.patch new file mode 100644 index 0000000..0f775ee --- /dev/null +++ b/ldiskfs/kernel_patches/patches/ext3-nlinks-2.6.22-vanilla.patch @@ -0,0 +1,171 @@ +Index: linux-2.6.12/fs/ext3/namei.c +=================================================================== +--- linux-2.6.12.orig/fs/ext3/namei.c ++++ linux-2.6.12/fs/ext3/namei.c +@@ -1600,6 +1600,22 @@ + return -ENOENT; + } + ++static inline void ext3_inc_count(handle_t * handle, struct inode *inode) ++{ ++ inc_nlink(inode); ++ if (is_dx(inode) && inode->i_nlink > 1) { ++ /* limit is 16-bit i_links_count */ ++ if (inode->i_nlink >= EXT3_LINK_MAX || inode->i_nlink == 2) ++ inode->i_nlink = 1; ++ } ++} ++ ++static inline void ext3_dec_count(handle_t * handle, struct inode *inode) ++{ ++ if (!S_ISDIR(inode->i_mode) || inode->i_nlink > 2) ++ drop_nlink(inode); ++} ++ + static int ext3_add_nondir(handle_t *handle, + struct dentry *dentry, struct inode *inode) + { +@@ -1659,7 +1659,7 @@ static int ext3_add_nondir(handle_t *han + d_instantiate(dentry, inode); + return 0; + } +- drop_nlink(inode); ++ ext3_dec_count(handle, inode); + iput(inode); + return err; + } +@@ -1703,7 +1709,7 @@ static int ext3_mkdir(struct inode * dir + struct ext3_dir_entry_2 * de; + int err, retries = 0; + +- if (dir->i_nlink >= EXT3_LINK_MAX) ++ if (EXT3_DIR_LINK_MAX(dir)) + return -EMLINK; + + retry: +@@ -1758,7 +1764,7 @@ retry: + iput (inode); + goto out_stop; + } +- inc_nlink(dir); ++ ext3_inc_count(handle, dir); + ext3_update_dx_flag(dir); + ext3_mark_inode_dirty(handle, dir); + d_instantiate(dentry, inode); +@@ -2023,10 +2029,10 @@ static int ext3_rmdir (struct inode * di + retval = ext3_delete_entry(handle, dir, de, bh); + if (retval) + goto end_rmdir; +- if (inode->i_nlink != 2) +- ext3_warning (inode->i_sb, "ext3_rmdir", +- "empty directory has nlink!=2 (%d)", +- inode->i_nlink); ++ if (!EXT3_DIR_LINK_EMPTY(inode)) ++ ext3_warning(inode->i_sb, "ext3_rmdir", ++ "empty directory has too many links (%d)", ++ inode->i_nlink); + inode->i_version++; + inode->i_nlink = 0; + /* There's no need to set i_disksize: the fact that i_nlink is +@@ -2036,7 +2042,7 @@ static int ext3_rmdir (struct inode * di + ext3_orphan_add(handle, inode); + inode->i_ctime = dir->i_ctime = dir->i_mtime = CURRENT_TIME_SEC; + ext3_mark_inode_dirty(handle, inode); +- drop_nlink(dir); ++ ext3_dec_count(handle, dir); + ext3_update_dx_flag(dir); + ext3_mark_inode_dirty(handle, dir); + +@@ -2087,7 +2093,7 @@ static int ext3_unlink(struct inode * di + dir->i_ctime = dir->i_mtime = CURRENT_TIME_SEC; + ext3_update_dx_flag(dir); + ext3_mark_inode_dirty(handle, dir); +- drop_nlink(inode); ++ ext3_dec_count(handle, inode); + if (!inode->i_nlink) + ext3_orphan_add(handle, inode); + inode->i_ctime = dir->i_ctime; +@@ -2160,7 +2190,7 @@ retry: + err = __page_symlink(inode, symname, l, + mapping_gfp_mask(inode->i_mapping) & ~__GFP_FS); + if (err) { +- drop_nlink(inode); ++ ext3_dec_count(handle, inode); + ext3_mark_inode_dirty(handle, inode); + iput (inode); + goto out_stop; +@@ -2162,7 +2168,7 @@ static int ext3_link (struct dentry * ol + struct inode *inode = old_dentry->d_inode; + int err, retries = 0; + +- if (inode->i_nlink >= EXT3_LINK_MAX) ++ if (EXT3_DIR_LINK_MAX(inode)) + return -EMLINK; + + retry: +@@ -2249,8 +2255,8 @@ static int ext3_rename (struct inode * o + if (le32_to_cpu(PARENT_INO(dir_bh->b_data)) != 
old_dir->i_ino) + goto end_rename; + retval = -EMLINK; +- if (!new_inode && new_dir!=old_dir && +- new_dir->i_nlink >= EXT3_LINK_MAX) ++ if (!new_inode && new_dir != old_dir && ++ EXT3_DIR_LINK_MAX(new_dir)) + goto end_rename; + } + if (!new_bh) { +@@ -2307,7 +2313,7 @@ static int ext3_rename (struct inode * o + } + + if (new_inode) { +- drop_nlink(new_inode); ++ ext3_dec_count(handle, new_inode); + new_inode->i_ctime = CURRENT_TIME_SEC; + } + old_dir->i_ctime = old_dir->i_mtime = CURRENT_TIME_SEC; +@@ -2318,11 +2324,13 @@ static int ext3_rename (struct inode * o + PARENT_INO(dir_bh->b_data) = cpu_to_le32(new_dir->i_ino); + BUFFER_TRACE(dir_bh, "call ext3_journal_dirty_metadata"); + ext3_journal_dirty_metadata(handle, dir_bh); +- drop_nlink(old_dir); ++ ext3_dec_count(handle, old_dir); + if (new_inode) { +- drop_nlink(new_inode); ++ /* checked empty_dir above, can't have another parent, ++ * ext3_dec_count() won't work for many-linked dirs */ ++ new_inode->i_nlink = 0; + } else { +- inc_nlink(new_dir); ++ ext3_inc_count(handle, new_dir); + ext3_update_dx_flag(new_dir); + ext3_mark_inode_dirty(handle, new_dir); + } +Index: linux-2.6.12/include/linux/ext3_fs.h +=================================================================== +--- linux-2.6.12.orig/include/linux/ext3_fs.h ++++ linux-2.6.12/include/linux/ext3_fs.h +@@ -78,7 +78,7 @@ struct statfs; + /* + * Maximal count of links to a file + */ +-#define EXT3_LINK_MAX 32000 ++#define EXT3_LINK_MAX 65000 + + /* + * Macro-instructions used to manage several block sizes +@@ -539,6 +539,7 @@ static inline struct ext3_inode_info *EX + #define EXT3_FEATURE_RO_COMPAT_SPARSE_SUPER 0x0001 + #define EXT3_FEATURE_RO_COMPAT_LARGE_FILE 0x0002 + #define EXT3_FEATURE_RO_COMPAT_BTREE_DIR 0x0004 ++#define EXT4_FEATURE_RO_COMPAT_DIR_NLINK 0x0020 + + #define EXT3_FEATURE_INCOMPAT_COMPRESSION 0x0001 + #define EXT3_FEATURE_INCOMPAT_FILETYPE 0x0002 +@@ -552,6 +553,7 @@ static inline struct ext3_inode_info *EX + EXT3_FEATURE_INCOMPAT_META_BG) + #define EXT3_FEATURE_RO_COMPAT_SUPP (EXT3_FEATURE_RO_COMPAT_SPARSE_SUPER| \ + EXT3_FEATURE_RO_COMPAT_LARGE_FILE| \ ++ EXT4_FEATURE_RO_COMPAT_DIR_NLINK| \ + EXT3_FEATURE_RO_COMPAT_BTREE_DIR) + + /* diff --git a/ldiskfs/kernel_patches/patches/ext3-statfs-2.6.22.patch b/ldiskfs/kernel_patches/patches/ext3-statfs-2.6.22.patch new file mode 100644 index 0000000..19afc51 --- /dev/null +++ b/ldiskfs/kernel_patches/patches/ext3-statfs-2.6.22.patch @@ -0,0 +1,71 @@ +Index: linux-2.6.18.8/fs/ext3/super.c +=================================================================== +--- linux-2.6.18.8.orig/fs/ext3/super.c 2007-07-20 16:51:14.000000000 +0200 ++++ linux-2.6.18.8/fs/ext3/super.c 2007-07-20 16:54:17.000000000 +0200 +@@ -2572,19 +2572,19 @@ static int ext3_statfs (struct dentry * + struct super_block *sb = dentry->d_sb; + struct ext3_sb_info *sbi = EXT3_SB(sb); + struct ext3_super_block *es = sbi->s_es; +- ext3_fsblk_t overhead; +- int i; + u64 fsid; + +- if (test_opt (sb, MINIX_DF)) +- overhead = 0; +- else { +- unsigned long ngroups; +- ngroups = EXT3_SB(sb)->s_groups_count; ++ if (test_opt(sb, MINIX_DF)) { ++ sbi->s_overhead_last = 0; ++ } else if (sbi->s_blocks_last != le32_to_cpu(es->s_blocks_count)) { ++ unsigned long ngroups = sbi->s_groups_count, i; ++ ext3_fsblk_t overhead = 0; + smp_rmb(); + + /* +- * Compute the overhead (FS structures) ++ * Compute the overhead (FS structures). This is constant ++ * for a given filesystem unless the number of block groups ++ * changes so we cache the previous value until it does. 
+ */ + + /* +@@ -2605,18 +2605,23 @@ static int ext3_statfs (struct dentry * + * Every block group has an inode bitmap, a block + * bitmap, and an inode table. + */ +- overhead += (ngroups * (2 + EXT3_SB(sb)->s_itb_per_group)); ++ overhead += ngroups * (2 + sbi->s_itb_per_group); ++ sbi->s_overhead_last = overhead; ++ smp_wmb(); ++ sbi->s_blocks_last = le32_to_cpu(es->s_blocks_count); + } + + buf->f_type = EXT3_SUPER_MAGIC; + buf->f_bsize = sb->s_blocksize; +- buf->f_blocks = le32_to_cpu(es->s_blocks_count) - overhead; ++ buf->f_blocks = le32_to_cpu(es->s_blocks_count) - sbi->s_overhead_last; + buf->f_bfree = percpu_counter_sum(&sbi->s_freeblocks_counter); ++ es->s_free_blocks_count = cpu_to_le32(buf->f_bfree); + buf->f_bavail = buf->f_bfree - le32_to_cpu(es->s_r_blocks_count); + if (buf->f_bfree < le32_to_cpu(es->s_r_blocks_count)) + buf->f_bavail = 0; + buf->f_files = le32_to_cpu(es->s_inodes_count); + buf->f_ffree = percpu_counter_sum(&sbi->s_freeinodes_counter); ++ es->s_free_inodes_count = cpu_to_le32(buf->f_ffree); + buf->f_namelen = EXT3_NAME_LEN; + return 0; + } +Index: linux-2.6.18.8/include/linux/ext3_fs_sb.h +=================================================================== +--- linux-2.6.18.8.orig/include/linux/ext3_fs_sb.h 2007-07-20 16:51:23.000000000 +0200 ++++ linux-2.6.18.8/include/linux/ext3_fs_sb.h 2007-07-20 16:51:43.000000000 +0200 +@@ -45,6 +45,8 @@ struct ext3_sb_info { + unsigned long s_gdb_count; /* Number of group descriptor blocks */ + unsigned long s_desc_per_block; /* Number of group descriptors per block */ + unsigned long s_groups_count; /* Number of groups in the fs */ ++ unsigned long s_overhead_last; /* Last calculated overhead */ ++ unsigned long s_blocks_last; /* Last seen block count */ + struct buffer_head * s_sbh; /* Buffer containing the super block */ + struct ext3_super_block * s_es; /* Pointer to the super block in the buffer */ + struct buffer_head ** s_group_desc; diff --git a/ldiskfs/kernel_patches/patches/ext3-uninit-2.6.22-vanilla.patch b/ldiskfs/kernel_patches/patches/ext3-uninit-2.6.22-vanilla.patch new file mode 100644 index 0000000..fb63542 --- /dev/null +++ b/ldiskfs/kernel_patches/patches/ext3-uninit-2.6.22-vanilla.patch @@ -0,0 +1,664 @@ +Add support for the uninit_groups feature to the kernel. + +Keep a high water mark of used inodes for each group to improve e2fsck time. +Block and inode bitmaps can be uninitialized on disk via a flag in the +group descriptor to avoid reading or scanning them at e2fsck time. +A checksum of each group descriptor is used to ensure that corruption in +the group descriptor's bit flags does not cause incorrect operation. 
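+
+Note (illustrative, not part of the applied patch): the checksum described
+above is a CRC-16 over the filesystem UUID, the little-endian group number,
+and the descriptor contents up to (but not including) the bg_checksum field,
+matching the ext3_group_desc_csum() helper this patch adds to super.c. Below
+is a minimal user-space sketch of the same computation; struct gd and
+gd_csum_ok() are hypothetical names, struct gd mirrors the patched
+ext3_group_desc layout, crc16() is a bitwise equivalent of the table-driven
+version added to super.c, and a little-endian host is assumed (no byte
+swapping is done).
+
+  #include <stdint.h>
+  #include <stddef.h>
+
+  /* CRC-16, poly 0x8005 (bit-reflected 0xA001); byte-at-a-time bitwise
+   * equivalent of the crc16_table[] version added to fs/ext3/super.c. */
+  static uint16_t crc16(uint16_t crc, const uint8_t *buf, size_t len)
+  {
+          while (len--) {
+                  int i;
+                  crc ^= *buf++;
+                  for (i = 0; i < 8; i++)
+                          crc = (crc >> 1) ^ ((crc & 1) ? 0xA001 : 0);
+          }
+          return crc;
+  }
+
+  struct gd {                     /* mirrors the patched ext3_group_desc */
+          uint32_t bg_block_bitmap, bg_inode_bitmap, bg_inode_table;
+          uint16_t bg_free_blocks_count, bg_free_inodes_count;
+          uint16_t bg_used_dirs_count, bg_flags;
+          uint32_t bg_reserved[2];
+          uint16_t bg_itable_unused, bg_checksum;
+  };
+
+  static int gd_csum_ok(const uint8_t uuid[16], uint32_t group,
+                        const struct gd *gd)
+  {
+          uint32_t le_group = group;      /* already LE on an LE host */
+          uint16_t crc = crc16(~0, uuid, 16);
+
+          /* CRC runs over the UUID, the LE group number, and the
+           * descriptor up to the checksum field, as in super.c below. */
+          crc = crc16(crc, (const uint8_t *)&le_group, sizeof(le_group));
+          crc = crc16(crc, (const uint8_t *)gd,
+                      offsetof(struct gd, bg_checksum));
+          return crc == gd->bg_checksum;
+  }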
+
+Index: linux-rhel5/include/linux/ext3_fs.h
+===================================================================
+--- linux-rhel5.orig/include/linux/ext3_fs.h	2007-07-18 17:32:04.000000000 +0200
++++ linux-rhel5/include/linux/ext3_fs.h	2007-07-18 17:32:15.000000000 +0200
+@@ -150,16 +150,22 @@ struct ext3_allocation_request {
+  */
+ struct ext3_group_desc
+ {
+-	__le32	bg_block_bitmap;	/* Blocks bitmap block */
+-	__le32	bg_inode_bitmap;	/* Inodes bitmap block */
++	__le32	bg_block_bitmap;	/* Blocks bitmap block */
++	__le32	bg_inode_bitmap;	/* Inodes bitmap block */
+ 	__le32	bg_inode_table;		/* Inodes table block */
+ 	__le16	bg_free_blocks_count;	/* Free blocks count */
+ 	__le16	bg_free_inodes_count;	/* Free inodes count */
+ 	__le16	bg_used_dirs_count;	/* Directories count */
+-	__u16	bg_pad;
+-	__le32	bg_reserved[3];
++	__le16	bg_flags;		/* EXT3_BG_flags (UNINIT, etc) */
++	__le32	bg_reserved[2];		/* Likely block/inode bitmap checksum */
++	__le16	bg_itable_unused;	/* Unused inodes count */
++	__le16	bg_checksum;		/* crc16(sb_uuid+group+desc) */
+ };
+ 
++#define EXT3_BG_INODE_UNINIT	0x0001 /* Inode table/bitmap not in use */
++#define EXT3_BG_BLOCK_UNINIT	0x0002 /* Block bitmap not in use */
++#define EXT3_BG_INODE_ZEROED	0x0004 /* On-disk itable initialized to zero */
++
+ /*
+  * Macro-instructions used to manage group descriptors
+  */
+@@ -603,6 +609,7 @@ static inline int ext3_valid_inum(struct
+ #define EXT3_FEATURE_RO_COMPAT_SPARSE_SUPER	0x0001
+ #define EXT3_FEATURE_RO_COMPAT_LARGE_FILE	0x0002
+ #define EXT3_FEATURE_RO_COMPAT_BTREE_DIR	0x0004
++#define EXT4_FEATURE_RO_COMPAT_GDT_CSUM		0x0010
+ #define EXT4_FEATURE_RO_COMPAT_DIR_NLINK	0x0020
+ 
+ #define EXT3_FEATURE_INCOMPAT_COMPRESSION	0x0001
+@@ -619,6 +626,7 @@ static inline int ext3_valid_inum(struct
+ 					 EXT3_FEATURE_INCOMPAT_EXTENTS)
+ #define EXT3_FEATURE_RO_COMPAT_SUPP	(EXT3_FEATURE_RO_COMPAT_SPARSE_SUPER| \
+ 					 EXT3_FEATURE_RO_COMPAT_LARGE_FILE| \
++					 EXT4_FEATURE_RO_COMPAT_GDT_CSUM| \
+ 					 EXT4_FEATURE_RO_COMPAT_DIR_NLINK| \
+ 					 EXT3_FEATURE_RO_COMPAT_BTREE_DIR)
+ 
+Index: linux-rhel5/fs/ext3/resize.c
+===================================================================
+--- linux-rhel5.orig/fs/ext3/resize.c	2007-07-15 09:36:00.000000000 +0200
++++ linux-rhel5/fs/ext3/resize.c	2007-07-18 17:32:15.000000000 +0200
+@@ -18,6 +18,7 @@
+ #include
+ #include
+ 
++#include "group.h"
+ 
+ #define outside(b, first, last)	((b) < (first) || (b) >= (last))
+ #define inside(b, first, last)	((b) >= (first) && (b) < (last))
+@@ -834,6 +835,7 @@ int ext3_group_add(struct super_block *s
+ 	gdp->bg_inode_table = cpu_to_le32(input->inode_table);
+ 	gdp->bg_free_blocks_count = cpu_to_le16(input->free_blocks_count);
+ 	gdp->bg_free_inodes_count = cpu_to_le16(EXT3_INODES_PER_GROUP(sb));
++	gdp->bg_checksum = ext3_group_desc_csum(sbi, input->group, gdp);
+ 
+ 	/*
+ 	 * Make the new blocks and inodes valid next.  We do this before
+Index: linux-rhel5/fs/ext3/super.c
+===================================================================
+--- linux-rhel5.orig/fs/ext3/super.c	2007-07-18 17:32:06.000000000 +0200
++++ linux-rhel5/fs/ext3/super.c	2007-07-18 17:35:03.000000000 +0200
+@@ -41,6 +41,7 @@
+ #include "xattr.h"
+ #include "acl.h"
+ #include "namei.h"
++#include "group.h"
+ 
+ static int ext3_load_journal(struct super_block *, struct ext3_super_block *,
+ 			     unsigned long journal_devnum);
+@@ -1225,6 +1226,91 @@ static int ext3_setup_super(struct super
+ 	return res;
+ }
+ 
++#if !defined(CONFIG_CRC16) && !defined(CONFIG_CRC16_MODULE)
++/** CRC table for the CRC-16. The poly is 0x8005 (x^16 + x^15 + x^2 + 1) */
++__u16 const crc16_table[256] = {
++	0x0000, 0xC0C1, 0xC181, 0x0140, 0xC301, 0x03C0, 0x0280, 0xC241,
++	0xC601, 0x06C0, 0x0780, 0xC741, 0x0500, 0xC5C1, 0xC481, 0x0440,
++	0xCC01, 0x0CC0, 0x0D80, 0xCD41, 0x0F00, 0xCFC1, 0xCE81, 0x0E40,
++	0x0A00, 0xCAC1, 0xCB81, 0x0B40, 0xC901, 0x09C0, 0x0880, 0xC841,
++	0xD801, 0x18C0, 0x1980, 0xD941, 0x1B00, 0xDBC1, 0xDA81, 0x1A40,
++	0x1E00, 0xDEC1, 0xDF81, 0x1F40, 0xDD01, 0x1DC0, 0x1C80, 0xDC41,
++	0x1400, 0xD4C1, 0xD581, 0x1540, 0xD701, 0x17C0, 0x1680, 0xD641,
++	0xD201, 0x12C0, 0x1380, 0xD341, 0x1100, 0xD1C1, 0xD081, 0x1040,
++	0xF001, 0x30C0, 0x3180, 0xF141, 0x3300, 0xF3C1, 0xF281, 0x3240,
++	0x3600, 0xF6C1, 0xF781, 0x3740, 0xF501, 0x35C0, 0x3480, 0xF441,
++	0x3C00, 0xFCC1, 0xFD81, 0x3D40, 0xFF01, 0x3FC0, 0x3E80, 0xFE41,
++	0xFA01, 0x3AC0, 0x3B80, 0xFB41, 0x3900, 0xF9C1, 0xF881, 0x3840,
++	0x2800, 0xE8C1, 0xE981, 0x2940, 0xEB01, 0x2BC0, 0x2A80, 0xEA41,
++	0xEE01, 0x2EC0, 0x2F80, 0xEF41, 0x2D00, 0xEDC1, 0xEC81, 0x2C40,
++	0xE401, 0x24C0, 0x2580, 0xE541, 0x2700, 0xE7C1, 0xE681, 0x2640,
++	0x2200, 0xE2C1, 0xE381, 0x2340, 0xE101, 0x21C0, 0x2080, 0xE041,
++	0xA001, 0x60C0, 0x6180, 0xA141, 0x6300, 0xA3C1, 0xA281, 0x6240,
++	0x6600, 0xA6C1, 0xA781, 0x6740, 0xA501, 0x65C0, 0x6480, 0xA441,
++	0x6C00, 0xACC1, 0xAD81, 0x6D40, 0xAF01, 0x6FC0, 0x6E80, 0xAE41,
++	0xAA01, 0x6AC0, 0x6B80, 0xAB41, 0x6900, 0xA9C1, 0xA881, 0x6840,
++	0x7800, 0xB8C1, 0xB981, 0x7940, 0xBB01, 0x7BC0, 0x7A80, 0xBA41,
++	0xBE01, 0x7EC0, 0x7F80, 0xBF41, 0x7D00, 0xBDC1, 0xBC81, 0x7C40,
++	0xB401, 0x74C0, 0x7580, 0xB541, 0x7700, 0xB7C1, 0xB681, 0x7640,
++	0x7200, 0xB2C1, 0xB381, 0x7340, 0xB101, 0x71C0, 0x7080, 0xB041,
++	0x5000, 0x90C1, 0x9181, 0x5140, 0x9301, 0x53C0, 0x5280, 0x9241,
++	0x9601, 0x56C0, 0x5780, 0x9741, 0x5500, 0x95C1, 0x9481, 0x5440,
++	0x9C01, 0x5CC0, 0x5D80, 0x9D41, 0x5F00, 0x9FC1, 0x9E81, 0x5E40,
++	0x5A00, 0x9AC1, 0x9B81, 0x5B40, 0x9901, 0x59C0, 0x5880, 0x9841,
++	0x8801, 0x48C0, 0x4980, 0x8941, 0x4B00, 0x8BC1, 0x8A81, 0x4A40,
++	0x4E00, 0x8EC1, 0x8F81, 0x4F40, 0x8D01, 0x4DC0, 0x4C80, 0x8C41,
++	0x4400, 0x84C1, 0x8581, 0x4540, 0x8701, 0x47C0, 0x4680, 0x8641,
++	0x8201, 0x42C0, 0x4380, 0x8341, 0x4100, 0x81C1, 0x8081, 0x4040
++};
++
++static inline __u16 crc16_byte(__u16 crc, const __u8 data)
++{
++	return (crc >> 8) ^ crc16_table[(crc ^ data) & 0xff];
++}
++
++__u16 crc16(__u16 crc, __u8 const *buffer, size_t len)
++{
++	while (len--)
++		crc = crc16_byte(crc, *buffer++);
++	return crc;
++}
++#endif
++
++__le16 ext3_group_desc_csum(struct ext3_sb_info *sbi, __u32 block_group,
++			    struct ext3_group_desc *gdp)
++{
++	__u16 crc = 0;
++
++	if (sbi->s_es->s_feature_ro_compat &
++	    cpu_to_le32(EXT4_FEATURE_RO_COMPAT_GDT_CSUM)) {
++		int offset = offsetof(struct ext3_group_desc, bg_checksum);
++		__le32 le_group = cpu_to_le32(block_group);
++
++		crc = crc16(~0, sbi->s_es->s_uuid, sizeof(sbi->s_es->s_uuid));
++		crc = crc16(crc, (__u8 *)&le_group, sizeof(le_group));
++		crc = crc16(crc, (__u8 *)gdp, offset);
++		offset += sizeof(gdp->bg_checksum); /* skip checksum */
++		BUG_ON(offset != sizeof(*gdp)); /* XXX handle s_desc_size */
++		/* for checksum of struct ext4_group_desc do the rest...
++		if ((sbi->s_es->s_feature_incompat &
++		     cpu_to_le32(EXT4_FEATURE_INCOMPAT_64BIT)) &&
++		    offset < le16_to_cpu(sbi->s_es->s_desc_size)) {
++			crc = crc16(crc, (__u8 *)gdp + offset,
++				    le16_to_cpu(sbi->s_es->s_desc_size) -
++				    offset);
++		*/
++	}
++
++	return cpu_to_le16(crc);
++}
++
++int ext3_group_desc_csum_verify(struct ext3_sb_info *sbi, __u32 block_group,
++				struct ext3_group_desc *gdp)
++{
++	return (gdp->bg_checksum ==
++		ext3_group_desc_csum(sbi, block_group, gdp));
++}
++
+ /* Called at mount-time, super-block is locked */
+ static int ext3_check_descriptors (struct super_block * sb)
+ {
+@@ -1279,6 +1365,13 @@ static int ext3_check_descriptors (struc
+ 			   le32_to_cpu(gdp->bg_inode_table));
+ 		return 0;
+ 	}
++	if (!ext3_group_desc_csum_verify(sbi, i, gdp)) {
++		ext3_error(sb, __FUNCTION__,
++			   "Checksum for group %d failed (%u!=%u)\n", i,
++			   le16_to_cpu(ext3_group_desc_csum(sbi,i,gdp)),
++			   le16_to_cpu(gdp->bg_checksum));
++		return 0;
++	}
+ 	first_block += EXT3_BLOCKS_PER_GROUP(sb);
+ 	gdp++;
+ }
+Index: linux-rhel5/fs/ext3/group.h
+===================================================================
+--- /dev/null	1970-01-01 00:00:00.000000000 +0000
++++ linux-rhel5/fs/ext3/group.h	2007-07-18 17:32:15.000000000 +0200
+@@ -0,0 +1,29 @@
++/*
++ * linux/fs/ext3/group.h
++ *
++ * Copyright (C) 2007 Cluster File Systems, Inc
++ *
++ * Author: Andreas Dilger
++ */
++
++#ifndef _LINUX_EXT3_GROUP_H
++#define _LINUX_EXT3_GROUP_H
++#if defined(CONFIG_CRC16) || defined(CONFIG_CRC16_MODULE)
++#include
++#endif
++
++extern __le16 ext3_group_desc_csum(struct ext3_sb_info *sbi, __u32 group,
++				   struct ext3_group_desc *gdp);
++extern int ext3_group_desc_csum_verify(struct ext3_sb_info *sbi, __u32 group,
++				       struct ext3_group_desc *gdp);
++struct buffer_head *read_block_bitmap(struct super_block *sb,
++				      unsigned int block_group);
++extern unsigned ext3_init_block_bitmap(struct super_block *sb,
++				       struct buffer_head *bh, int group,
++				       struct ext3_group_desc *desc);
++#define ext3_free_blocks_after_init(sb, group, desc) \
++	ext3_init_block_bitmap(sb, NULL, group, desc)
++extern unsigned ext3_init_inode_bitmap(struct super_block *sb,
++				       struct buffer_head *bh, int group,
++				       struct ext3_group_desc *desc);
++#endif /* _LINUX_EXT3_GROUP_H */
+Index: linux-rhel5/fs/ext3/ialloc.c
+===================================================================
+--- linux-rhel5.orig/fs/ext3/ialloc.c	2007-07-18 17:32:05.000000000 +0200
++++ linux-rhel5/fs/ext3/ialloc.c	2007-07-18 17:32:15.000000000 +0200
+@@ -28,6 +28,7 @@
+ 
+ #include "xattr.h"
+ #include "acl.h"
++#include "group.h"
+ 
+ /*
+  * ialloc.c contains the inodes allocation and deallocation routines
+@@ -43,6 +44,52 @@
+  * the free blocks count in the block.
+  */
+ 
++/*
++ * To avoid calling the atomic setbit hundreds or thousands of times, we only
++ * need to use it within a single byte (to ensure we get endianness right).
++ * We can use memset for the rest of the bitmap as there are no other users.
++ */
++static void mark_bitmap_end(int start_bit, int end_bit, char *bitmap)
++{
++	int i;
++
++	if (start_bit >= end_bit)
++		return;
++
++	ext3_debug("mark end bits +%d through +%d used\n", start_bit, end_bit);
++	for (i = start_bit; i < ((start_bit + 7) & ~7UL); i++)
++		ext3_set_bit(i, bitmap);
++	if (i < end_bit)
++		memset(bitmap + (i >> 3), 0xff, (end_bit - i) >> 3);
++}
++
++/* Initializes an uninitialized inode bitmap */
++unsigned ext3_init_inode_bitmap(struct super_block *sb,
++				struct buffer_head *bh, int block_group,
++				struct ext3_group_desc *gdp)
++{
++	struct ext3_sb_info *sbi = EXT3_SB(sb);
++
++	J_ASSERT_BH(bh, buffer_locked(bh));
++
++	/* If checksum is bad mark all blocks and inodes used to prevent
++	 * allocation, essentially implementing a per-group read-only flag. */
++	if (!ext3_group_desc_csum_verify(sbi, block_group, gdp)) {
++		ext3_error(sb, __FUNCTION__, "Checksum bad for group %u\n",
++			   block_group);
++		gdp->bg_free_blocks_count = 0;
++		gdp->bg_free_inodes_count = 0;
++		gdp->bg_itable_unused = 0;
++		memset(bh->b_data, 0xff, sb->s_blocksize);
++		return 0;
++	}
++
++	memset(bh->b_data, 0, (EXT3_INODES_PER_GROUP(sb) + 7) / 8);
++	mark_bitmap_end(EXT3_INODES_PER_GROUP(sb), EXT3_BLOCKS_PER_GROUP(sb),
++			bh->b_data);
++
++	return EXT3_INODES_PER_GROUP(sb);
++}
+ 
+ /*
+  * Read the inode allocation bitmap for a given block_group, reading
+@@ -59,8 +106,19 @@ read_inode_bitmap(struct super_block * s
+ 	desc = ext3_get_group_desc(sb, block_group, NULL);
+ 	if (!desc)
+ 		goto error_out;
+-
+-	bh = sb_bread(sb, le32_to_cpu(desc->bg_inode_bitmap));
++	if (desc->bg_flags & cpu_to_le16(EXT3_BG_INODE_UNINIT)) {
++		bh = sb_getblk(sb, le32_to_cpu(desc->bg_inode_bitmap));
++		if (!buffer_uptodate(bh)) {
++			lock_buffer(bh);
++			if (!buffer_uptodate(bh)) {
++				ext3_init_inode_bitmap(sb, bh,block_group,desc);
++				set_buffer_uptodate(bh);
++			}
++			unlock_buffer(bh);
++		}
++	} else {
++		bh = sb_bread(sb, le32_to_cpu(desc->bg_inode_bitmap));
++	}
+ 	if (!bh)
+ 		ext3_error(sb, "read_inode_bitmap",
+ 			   "Cannot read inode bitmap - "
+@@ -169,6 +227,8 @@ void ext3_free_inode (handle_t *handle,
+ 		if (is_directory)
+ 			gdp->bg_used_dirs_count = cpu_to_le16(
+ 				le16_to_cpu(gdp->bg_used_dirs_count) - 1);
++		gdp->bg_checksum = ext3_group_desc_csum(sbi,block_group,
++							gdp);
+ 		spin_unlock(sb_bgl_lock(sbi, block_group));
+ 		percpu_counter_inc(&sbi->s_freeinodes_counter);
+ 		if (is_directory)
+@@ -454,7 +514,7 @@ struct inode *ext3_new_inode(handle_t *h
+ 	struct ext3_sb_info *sbi;
+ 	int err = 0;
+ 	struct inode *ret;
+-	int i;
++	int i, free = 0;
+ 
+ 	/* Cannot create files in a deleted directory */
+ 	if (!dir || !dir->i_nlink)
+@@ -571,11 +631,13 @@ repeat_in_this_group:
+ 		goto out;
+ 
+ got:
+-	ino += group * EXT3_INODES_PER_GROUP(sb) + 1;
+-	if (ino < EXT3_FIRST_INO(sb) || ino > le32_to_cpu(es->s_inodes_count)) {
+-		ext3_error (sb, "ext3_new_inode",
+-			    "reserved inode or inode > inodes count - "
+-			    "block_group = %d, inode=%lu", group, ino);
++	ino++;
++	if ((group == 0 && ino < EXT3_FIRST_INO(sb)) ||
++	    ino > EXT3_INODES_PER_GROUP(sb)) {
++		ext3_error(sb, __FUNCTION__,
++			   "reserved inode or inode > inodes count - "
++			   "block_group = %d, inode=%lu", group,
++			   ino + group * EXT3_INODES_PER_GROUP(sb));
+ 		err = -EIO;
+ 		goto fail;
+ 	}
+@@ -583,13 +645,64 @@ got:
+ 	BUFFER_TRACE(bh2, "get_write_access");
+ 	err = ext3_journal_get_write_access(handle, bh2);
+ 	if (err) goto fail;
++
++	/* We may have to initialize the block bitmap if it isn't already */
++	if (EXT3_HAS_RO_COMPAT_FEATURE(sb, EXT4_FEATURE_RO_COMPAT_GDT_CSUM) &&
++	    gdp->bg_flags & cpu_to_le16(EXT3_BG_BLOCK_UNINIT)) {
++		struct buffer_head *block_bh = read_block_bitmap(sb, group);
++
++		BUFFER_TRACE(block_bh, "get block bitmap access");
++		err = ext3_journal_get_write_access(handle, block_bh);
++		if (err) {
++			brelse(block_bh);
++			goto fail;
++		}
++
++		free = 0;
++		spin_lock(sb_bgl_lock(sbi, group));
++		/* recheck and clear flag under lock if we still need to */
++		if (gdp->bg_flags & cpu_to_le16(EXT3_BG_BLOCK_UNINIT)) {
++			gdp->bg_flags &= cpu_to_le16(~EXT3_BG_BLOCK_UNINIT);
++			free = ext3_free_blocks_after_init(sb, group, gdp);
++			gdp->bg_free_blocks_count = cpu_to_le16(free);
++		}
++		spin_unlock(sb_bgl_lock(sbi, group));
++
++		/* Don't need to dirty bitmap block if we didn't change it */
++		if (free) {
++			BUFFER_TRACE(block_bh, "dirty block bitmap");
++			err = ext3_journal_dirty_metadata(handle, block_bh);
++		}
++
++		brelse(block_bh);
++		if (err)
++			goto fail;
++	}
++
+ 	spin_lock(sb_bgl_lock(sbi, group));
++	/* If we didn't allocate from within the initialized part of the inode
++	 * table then we need to initialize up to this inode. */
++	if (EXT3_HAS_RO_COMPAT_FEATURE(sb, EXT4_FEATURE_RO_COMPAT_GDT_CSUM)) {
++		if (gdp->bg_flags & cpu_to_le16(EXT3_BG_INODE_UNINIT)) {
++			gdp->bg_flags &= cpu_to_le16(~EXT3_BG_INODE_UNINIT);
++			free = 0;
++		} else {
++			free = EXT3_INODES_PER_GROUP(sb) -
++			       le16_to_cpu(gdp->bg_itable_unused);
++		}
++
++		if (ino > free)
++			gdp->bg_itable_unused =
++				cpu_to_le16(EXT3_INODES_PER_GROUP(sb) - ino);
++	}
++
+ 	gdp->bg_free_inodes_count =
+ 		cpu_to_le16(le16_to_cpu(gdp->bg_free_inodes_count) - 1);
+ 	if (S_ISDIR(mode)) {
+ 		gdp->bg_used_dirs_count =
+ 			cpu_to_le16(le16_to_cpu(gdp->bg_used_dirs_count) + 1);
+ 	}
++	gdp->bg_checksum = ext3_group_desc_csum(sbi, group, gdp);
+ 	spin_unlock(sb_bgl_lock(sbi, group));
+ 	BUFFER_TRACE(bh2, "call ext3_journal_dirty_metadata");
+ 	err = ext3_journal_dirty_metadata(handle, bh2);
+@@ -611,7 +724,7 @@ got:
+ 	inode->i_gid = current->fsgid;
+ 	inode->i_mode = mode;
+ 
+-	inode->i_ino = ino;
++	inode->i_ino = ino + group * EXT3_INODES_PER_GROUP(sb);
+ 	/* This is the optimal IO size (for stat), not the fs block size */
+ 	inode->i_blocks = 0;
+ 	inode->i_mtime = inode->i_atime = inode->i_ctime = CURRENT_TIME_SEC;
+Index: linux-rhel5/fs/ext3/mballoc.c
+===================================================================
+--- linux-rhel5.orig/fs/ext3/mballoc.c	2007-07-18 17:32:04.000000000 +0200
++++ linux-rhel5/fs/ext3/mballoc.c	2007-07-18 17:32:15.000000000 +0200
+@@ -36,6 +36,8 @@
+ #include
+ #include
+ 
++#include "group.h"
++
+ /*
+  * MUSTDO:
+  *   - test ext3_ext_search_left() and ext3_ext_search_right()
+@@ -323,6 +325,7 @@ struct ext3_group_info {
+ 	unsigned long	bb_state;
+ 	unsigned long	bb_tid;
+ 	struct ext3_free_metadata *bb_md_cur;
++	struct ext3_group_desc *bb_gdp;
+ 	unsigned short	bb_first_free;
+ 	unsigned short	bb_free;
+ 	unsigned short	bb_fragments;
+@@ -943,10 +946,7 @@ static int ext3_mb_init_cache(struct pag
+ 		if (first_group + i >= EXT3_SB(sb)->s_groups_count)
+ 			break;
+ 
+-		err = -EIO;
+-		desc = ext3_get_group_desc(sb, first_group + i, NULL);
+-		if (desc == NULL)
+-			goto out;
++		desc = EXT3_GROUP_INFO(sb, first_group + i)->bb_gdp;
+ 
+ 		err = -ENOMEM;
+ 		bh[i] = sb_getblk(sb, le32_to_cpu(desc->bg_block_bitmap));
+@@ -961,7 +961,12 @@ static int ext3_mb_init_cache(struct pag
+ 			unlock_buffer(bh[i]);
+ 			continue;
+ 		}
+-
++		if (desc->bg_flags & cpu_to_le16(EXT3_BG_BLOCK_UNINIT)) {
++			ext3_init_block_bitmap(sb, bh[i], first_group + i,desc);
++			set_buffer_uptodate(bh[i]);
++			unlock_buffer(bh[i]);
++			continue;
++		}
+ 		get_bh(bh[i]);
+ 		bh[i]->b_end_io = end_buffer_read_sync;
+ 		submit_bh(READ, bh[i]);
+@@ -1732,6 +1737,10 @@ static int ext3_mb_good_group(struct ext
+ 	switch (cr) {
+ 		case 0:
+ 			BUG_ON(ac->ac_2order == 0);
++			/* If this group is uninitialized, skip it initially */
++			if (grp->bb_gdp->bg_flags &
++			    cpu_to_le16(EXT3_BG_BLOCK_UNINIT))
++				return 0;
+ 			bits = ac->ac_sb->s_blocksize_bits + 1;
+ 			for (i = ac->ac_2order; i <= bits; i++)
+ 				if (grp->bb_counters[i] > 0)
+@@ -1825,7 +1834,9 @@ repeat:
+ 			}
+ 
+ 			ac->ac_groups_scanned++;
+-			if (cr == 0)
++			if (cr == 0 || (e3b.bd_info->bb_gdp->bg_flags &
++					cpu_to_le16(EXT3_BG_BLOCK_UNINIT) &&
++					ac->ac_2order != 0))
+ 				ext3_mb_simple_scan_group(ac, &e3b);
+ 			else if (cr == 1 && ac->ac_g_ex.fe_len == sbi->s_stripe)
+ 				ext3_mb_scan_aligned(ac, &e3b);
+@@ -2304,12 +2315,13 @@ int ext3_mb_init_backend(struct super_bl
+ 			i--;
+ 			goto err_freebuddy;
+ 		}
++		memset(meta_group_info[j], 0, len);
+ 		desc = ext3_get_group_desc(sb, i, NULL);
++		meta_group_info[j]->bb_gdp = desc;
+ 		if (desc == NULL) {
+ 			printk(KERN_ERR"EXT3-fs: can't read descriptor %u\n",i);
+ 			goto err_freebuddy;
+ 		}
+-		memset(meta_group_info[j], 0, len);
+ 		set_bit(EXT3_GROUP_INFO_NEED_INIT_BIT,
+ 			&meta_group_info[j]->bb_state);
+ 
+@@ -2958,9 +2970,17 @@ int ext3_mb_mark_diskspace_used(struct e
+ 	mb_set_bits(bitmap_bh->b_data, ac->ac_b_ex.fe_start, ac->ac_b_ex.fe_len);
+ 
+ 	spin_lock(sb_bgl_lock(sbi, ac->ac_b_ex.fe_group));
++	if (gdp->bg_flags & cpu_to_le16(EXT3_BG_BLOCK_UNINIT)) {
++		gdp->bg_flags &= cpu_to_le16(~EXT3_BG_BLOCK_UNINIT);
++		gdp->bg_free_blocks_count =
++			cpu_to_le16(ext3_free_blocks_after_init(sb,
++							ac->ac_b_ex.fe_group,
++							gdp));
++	}
+ 	gdp->bg_free_blocks_count =
+ 		cpu_to_le16(le16_to_cpu(gdp->bg_free_blocks_count)
+ 				- ac->ac_b_ex.fe_len);
++	gdp->bg_checksum = ext3_group_desc_csum(sbi, ac->ac_b_ex.fe_group, gdp);
+ 	spin_unlock(sb_bgl_lock(sbi, ac->ac_b_ex.fe_group));
+ 	percpu_counter_mod(&sbi->s_freeblocks_counter, - ac->ac_b_ex.fe_len);
+ 
+@@ -4346,6 +4366,7 @@ do_more:
+ 	spin_lock(sb_bgl_lock(sbi, block_group));
+ 	gdp->bg_free_blocks_count =
+ 		cpu_to_le16(le16_to_cpu(gdp->bg_free_blocks_count) + count);
++	gdp->bg_checksum = ext3_group_desc_csum(sbi, block_group, gdp);
+ 	spin_unlock(sb_bgl_lock(sbi, block_group));
+ 	percpu_counter_mod(&sbi->s_freeblocks_counter, count);
+ 
+Index: linux-rhel5/fs/ext3/balloc.c
+===================================================================
+--- linux-rhel5.orig/fs/ext3/balloc.c	2007-07-18 17:32:04.000000000 +0200
++++ linux-rhel5/fs/ext3/balloc.c	2007-07-18 17:32:15.000000000 +0200
+@@ -20,6 +20,7 @@
+ #include
+ #include
+ 
++#include "group.h"
+ /*
+  * balloc.c contains the blocks allocation and deallocation routines
+  */
+@@ -73,6 +74,75 @@ struct ext3_group_desc * ext3_get_group_
+ 	return desc + offset;
+ }
+ 
++/* Initializes an uninitialized block bitmap if given, and returns the
++ * number of blocks free in the group. */
++unsigned ext3_init_block_bitmap(struct super_block *sb, struct buffer_head *bh,
++				int block_group, struct ext3_group_desc *gdp)
++{
++	unsigned long start;
++	int bit, bit_max;
++	unsigned free_blocks;
++	struct ext3_sb_info *sbi = EXT3_SB(sb);
++
++	if (bh) {
++		J_ASSERT_BH(bh, buffer_locked(bh));
++
++		/* If checksum is bad mark all blocks used to prevent
++		 * allocation, essentially implementing a per-group
++		 * read-only flag. */
++		if (!ext3_group_desc_csum_verify(sbi, block_group, gdp)) {
++			ext3_error(sb, __FUNCTION__,
++				   "Checksum bad for group %u\n", block_group);
++			gdp->bg_free_blocks_count = 0;
++			gdp->bg_free_inodes_count = 0;
++			gdp->bg_itable_unused = 0;
++			memset(bh->b_data, 0xff, sb->s_blocksize);
++			return 0;
++		}
++		memset(bh->b_data, 0, sb->s_blocksize);
++	}
++
++	/* Check for superblock and gdt backups in this group */
++	bit_max = ext3_bg_has_super(sb, block_group);
++
++	if (!EXT3_HAS_INCOMPAT_FEATURE(sb, EXT3_FEATURE_INCOMPAT_META_BG) ||
++	    block_group < le32_to_cpu(sbi->s_es->s_first_meta_bg) *
++			  sbi->s_desc_per_block) {
++		if (bit_max) {
++			bit_max += ext3_bg_num_gdb(sb, block_group);
++			bit_max +=le16_to_cpu(sbi->s_es->s_reserved_gdt_blocks);
++		}
++	} else { /* For META_BG_BLOCK_GROUPS */
++		int group_rel = (block_group -
++				 le32_to_cpu(sbi->s_es->s_first_meta_bg)) %
++				EXT3_DESC_PER_BLOCK(sb);
++		if (group_rel == 0 || group_rel == 1 ||
++		    (group_rel == EXT3_DESC_PER_BLOCK(sb) - 1))
++			bit_max += 1;
++	}
++
++	/* Last and first groups are always initialized */
++	free_blocks = EXT3_BLOCKS_PER_GROUP(sb) - bit_max;
++
++	if (bh) {
++		for (bit = 0; bit < bit_max; bit++)
++			ext3_set_bit(bit, bh->b_data);
++
++		start = block_group * EXT3_BLOCKS_PER_GROUP(sb) +
++			le32_to_cpu(sbi->s_es->s_first_data_block);
++
++		/* Set bits for block and inode bitmaps, and inode table */
++		ext3_set_bit(le32_to_cpu(gdp->bg_block_bitmap) - start,
++			     bh->b_data);
++		ext3_set_bit(le32_to_cpu(gdp->bg_inode_bitmap) - start,
++			     bh->b_data);
++		for (bit = le32_to_cpu(gdp->bg_inode_table) - start,
++		     bit_max = bit + sbi->s_itb_per_group; bit < bit_max; bit++)
++			ext3_set_bit(bit, bh->b_data);
++	}
++
++	return free_blocks - sbi->s_itb_per_group - 2;
++}
++
+ /*
+  * Read the bitmap for a given block_group, reading into the specified
+  * slot in the superblock's bitmap cache.
+@@ -88,7 +158,19 @@ read_block_bitmap(struct super_block *sb
+ 	desc = ext3_get_group_desc (sb, block_group, NULL);
+ 	if (!desc)
+ 		goto error_out;
+-	bh = sb_bread(sb, le32_to_cpu(desc->bg_block_bitmap));
++	if (desc->bg_flags & cpu_to_le16(EXT3_BG_BLOCK_UNINIT)) {
++		bh = sb_getblk(sb, le32_to_cpu(desc->bg_block_bitmap));
++		if (!buffer_uptodate(bh)) {
++			lock_buffer(bh);
++			if (!buffer_uptodate(bh)) {
++				ext3_init_block_bitmap(sb, bh,block_group,desc);
++				set_buffer_uptodate(bh);
++			}
++			unlock_buffer(bh);
++		}
++	} else {
++		bh = sb_bread(sb, le32_to_cpu(desc->bg_block_bitmap));
++	}
+ 	if (!bh)
+ 		ext3_error (sb, "read_block_bitmap",
+ 			    "Cannot read block bitmap - "
+@@ -467,6 +549,7 @@ do_more:
+ 	desc->bg_free_blocks_count =
+ 		cpu_to_le16(le16_to_cpu(desc->bg_free_blocks_count) +
+ 			group_freed);
++	desc->bg_checksum = ext3_group_desc_csum(sbi, block_group, desc);
+ 	spin_unlock(sb_bgl_lock(sbi, block_group));
+ 	percpu_counter_mod(&sbi->s_freeblocks_counter, count);
+ 
+@@ -1434,8 +1517,11 @@ allocated:
+ 			ret_block, goal_hits, goal_attempts);
+ 
+ 	spin_lock(sb_bgl_lock(sbi, group_no));
++	if (gdp->bg_flags & cpu_to_le16(EXT3_BG_BLOCK_UNINIT))
++		gdp->bg_flags &= cpu_to_le16(~EXT3_BG_BLOCK_UNINIT);
+ 	gdp->bg_free_blocks_count =
+ 		cpu_to_le16(le16_to_cpu(gdp->bg_free_blocks_count)-num);
++	gdp->bg_checksum = ext3_group_desc_csum(sbi, group_no, gdp);
+ 	spin_unlock(sb_bgl_lock(sbi, group_no));
+ 	percpu_counter_mod(&sbi->s_freeblocks_counter, -num);
+ 
diff --git a/ldiskfs/kernel_patches/patches/iopen-2.6.22-vanilla.patch b/ldiskfs/kernel_patches/patches/iopen-2.6.22-vanilla.patch
new file mode 100644
index 0000000..9b73784
--- /dev/null
+++ b/ldiskfs/kernel_patches/patches/iopen-2.6.22-vanilla.patch
@@ -0,0 +1,450 @@
+Index: linux-2.6.22.15/fs/ext3/iopen.c
+===================================================================
+--- /dev/null
++++ linux-2.6.22.15/fs/ext3/iopen.c
+@@ -0,0 +1,256 @@
++/*
++ * linux/fs/ext3/iopen.c
++ *
++ * Special support for open by inode number
++ *
++ * Copyright (C) 2001 by Theodore Ts'o (tytso@alum.mit.edu).
++ *
++ * This file may be redistributed under the terms of the GNU General
++ * Public License.
++ *
++ *
++ * Invariants:
++ *   - there is only ever a single DCACHE_NFSD_DISCONNECTED dentry alias
++ *     for an inode at one time.
++ *   - there are never both connected and DCACHE_NFSD_DISCONNECTED dentry
++ *     aliases on an inode at the same time.
++ *
++ * If we have any connected dentry aliases for an inode, use one of those
++ * in iopen_lookup().  Otherwise, we instantiate a single NFSD_DISCONNECTED
++ * dentry for this inode, which thereafter will be found by the dcache
++ * when looking up this inode number in __iopen__, so we don't return here
++ * until it is gone.
++ *
++ * If we get an inode via a regular name lookup, then we "rename" the
++ * NFSD_DISCONNECTED dentry to the proper name and parent.  This ensures
++ * existing users of the disconnected dentry will continue to use the same
++ * dentry as the connected users, and there will never be both kinds of
++ * dentry aliases at one time.
++ */
++
++#include
++#include
++#include
++#include
++#include
++#include
++#include
++#include
++#include "iopen.h"
++
++#ifndef assert
++#define assert(test) J_ASSERT(test)
++#endif
++
++#define IOPEN_NAME_LEN	32
++
++/*
++ * This implements looking up an inode by number.
++ */
++static struct dentry *iopen_lookup(struct inode * dir, struct dentry *dentry,
++				   struct nameidata *nd)
++{
++	struct inode *inode;
++	unsigned long ino;
++	struct list_head *lp;
++	struct dentry *alternate;
++	char buf[IOPEN_NAME_LEN];
++
++	if (dentry->d_name.len >= IOPEN_NAME_LEN)
++		return ERR_PTR(-ENAMETOOLONG);
++
++	memcpy(buf, dentry->d_name.name, dentry->d_name.len);
++	buf[dentry->d_name.len] = 0;
++
++	if (strcmp(buf, ".") == 0)
++		ino = dir->i_ino;
++	else if (strcmp(buf, "..") == 0)
++		ino = EXT3_ROOT_INO;
++	else
++		ino = simple_strtoul(buf, 0, 0);
++
++	if ((ino != EXT3_ROOT_INO &&
++	     //ino != EXT3_ACL_IDX_INO &&
++	     //ino != EXT3_ACL_DATA_INO &&
++	     ino < EXT3_FIRST_INO(dir->i_sb)) ||
++	    ino > le32_to_cpu(EXT3_SB(dir->i_sb)->s_es->s_inodes_count))
++		return ERR_PTR(-ENOENT);
++
++	inode = iget(dir->i_sb, ino);
++	if (!inode)
++		return ERR_PTR(-EACCES);
++	if (is_bad_inode(inode)) {
++		iput(inode);
++		return ERR_PTR(-ENOENT);
++	}
++
++	assert(list_empty(&dentry->d_alias));		/* d_instantiate */
++	assert(d_unhashed(dentry));			/* d_rehash */
++
++	/* preferably return a connected dentry */
++	spin_lock(&dcache_lock);
++	list_for_each(lp, &inode->i_dentry) {
++		alternate = list_entry(lp, struct dentry, d_alias);
++		assert(!(alternate->d_flags & DCACHE_DISCONNECTED));
++	}
++
++	if (!list_empty(&inode->i_dentry)) {
++		alternate = list_entry(inode->i_dentry.next,
++				       struct dentry, d_alias);
++		dget_locked(alternate);
++		spin_lock(&alternate->d_lock);
++		alternate->d_flags |= DCACHE_REFERENCED;
++		spin_unlock(&alternate->d_lock);
++		iput(inode);
++		spin_unlock(&dcache_lock);
++		return alternate;
++	}
++	dentry->d_flags |= DCACHE_DISCONNECTED;
++
++	/* d_add(), but don't drop dcache_lock before adding dentry to inode */
++	list_add(&dentry->d_alias, &inode->i_dentry);	/* d_instantiate */
++	dentry->d_inode = inode;
++
++	d_rehash_cond(dentry, 0);
++	spin_unlock(&dcache_lock);
++
++	return NULL;
++}
++
++/* This function is spliced into ext3_lookup and does the move of a
++ * disconnected dentry (if it exists) to a connected dentry.
++ */
++struct dentry *iopen_connect_dentry(struct dentry *dentry, struct inode *inode,
++				    int rehash)
++{
++	struct dentry *tmp, *goal = NULL;
++	struct list_head *lp;
++
++	/* verify this dentry is really new */
++	assert(dentry->d_inode == NULL);
++	assert(list_empty(&dentry->d_alias));		/* d_instantiate */
++	if (rehash)
++		assert(d_unhashed(dentry));		/* d_rehash */
++	assert(list_empty(&dentry->d_subdirs));
++
++	spin_lock(&dcache_lock);
++	if (!inode)
++		goto do_rehash;
++
++	if (!test_opt(inode->i_sb, IOPEN))
++		goto do_instantiate;
++
++	/* preferably return a connected dentry */
++	list_for_each(lp, &inode->i_dentry) {
++		tmp = list_entry(lp, struct dentry, d_alias);
++		if (tmp->d_flags & DCACHE_DISCONNECTED) {
++			assert(tmp->d_alias.next == &inode->i_dentry);
++			assert(tmp->d_alias.prev == &inode->i_dentry);
++			goal = tmp;
++			dget_locked(goal);
++			break;
++		}
++	}
++
++	if (!goal)
++		goto do_instantiate;
++
++	/* Move the goal to the de hash queue */
++	goal->d_flags &= ~DCACHE_DISCONNECTED;
++	security_d_instantiate(goal, inode);
++	__d_drop(dentry);
++	d_rehash_cond(dentry, 0);
++	d_move_locked(goal, dentry);
++	spin_unlock(&dcache_lock);
++	iput(inode);
++
++	return goal;
++
++	/* d_add(), but don't drop dcache_lock before adding dentry to inode */
++do_instantiate:
++	list_add(&dentry->d_alias, &inode->i_dentry);	/* d_instantiate */
++	dentry->d_inode = inode;
++do_rehash:
++	if (rehash)
++		d_rehash_cond(dentry, 0);
++	spin_unlock(&dcache_lock);
++
++	return NULL;
++}
++
++/*
++ * These are the special structures for the iopen pseudo directory.
++ */
++
++static struct inode_operations iopen_inode_operations = {
++	lookup:		iopen_lookup,		/* BKL held */
++};
++
++static struct file_operations iopen_file_operations = {
++	read:		generic_read_dir,
++};
++
++static int match_dentry(struct dentry *dentry, const char *name)
++{
++	int len;
++
++	len = strlen(name);
++	if (dentry->d_name.len != len)
++		return 0;
++	if (strncmp(dentry->d_name.name, name, len))
++		return 0;
++	return 1;
++}
++
++/*
++ * This function is spliced into ext3_lookup and returns 1 if the file
++ * name is __iopen__ and dentry has been filled in appropriately.
++ */
++int ext3_check_for_iopen(struct inode *dir, struct dentry *dentry)
++{
++	struct inode *inode;
++
++	if (dir->i_ino != EXT3_ROOT_INO ||
++	    !test_opt(dir->i_sb, IOPEN) ||
++	    !match_dentry(dentry, "__iopen__"))
++		return 0;
++
++	inode = iget(dir->i_sb, EXT3_BAD_INO);
++
++	if (!inode)
++		return 0;
++	d_add(dentry, inode);
++	return 1;
++}
++
++/*
++ * This function is spliced into read_inode; it returns 1 if inode
++ * number is the one for /__iopen__, in which case the inode is filled
++ * in appropriately.  Otherwise, this function returns 0.
++ */
++int ext3_iopen_get_inode(struct inode *inode)
++{
++	if (inode->i_ino != EXT3_BAD_INO)
++		return 0;
++
++	inode->i_mode = S_IFDIR | S_IRUSR | S_IXUSR;
++	if (test_opt(inode->i_sb, IOPEN_NOPRIV))
++		inode->i_mode |= 0777;
++	inode->i_uid = 0;
++	inode->i_gid = 0;
++	inode->i_nlink = 1;
++	inode->i_size = 4096;
++	inode->i_atime = CURRENT_TIME;
++	inode->i_ctime = CURRENT_TIME;
++	inode->i_mtime = CURRENT_TIME;
++	EXT3_I(inode)->i_dtime = 0;
++	inode->i_blocks = 0;
++	inode->i_version = 1;
++	inode->i_generation = 0;
++
++	inode->i_op = &iopen_inode_operations;
++	inode->i_fop = &iopen_file_operations;
++	inode->i_mapping->a_ops = 0;
++
++	return 1;
++}
+Index: linux-2.6.22.15/fs/ext3/iopen.h
+===================================================================
+--- /dev/null
++++ linux-2.6.22.15/fs/ext3/iopen.h
+@@ -0,0 +1,19 @@
++/*
++ * iopen.h
++ *
++ * Special support for opening files by inode number.
++ *
++ * Copyright (C) 2001 by Theodore Ts'o (tytso@alum.mit.edu).
++ *
++ * This file may be redistributed under the terms of the GNU General
++ * Public License.
++ */
++
++extern int ext3_check_for_iopen(struct inode *dir, struct dentry *dentry);
++extern int ext3_iopen_get_inode(struct inode *inode);
++extern struct dentry *iopen_connect_dentry(struct dentry *dentry,
++					   struct inode *inode, int rehash);
++
++#if !defined(HAVE_D_MOVE_LOCKED) && defined(HAVE___D_MOVE)
++#define d_move_locked(dentry, target) __d_move(dentry, target)
++#endif
+Index: linux-2.6.22.15/fs/ext3/inode.c
+===================================================================
+--- linux-2.6.22.15.orig/fs/ext3/inode.c
++++ linux-2.6.22.15/fs/ext3/inode.c
+@@ -37,6 +37,7 @@
+ #include
+ #include
+ #include "xattr.h"
++#include "iopen.h"
+ #include "acl.h"
+ 
+ static int ext3_writepage_trans_blocks(struct inode *inode);
+@@ -2604,6 +2605,8 @@ void ext3_read_inode(struct inode * inod
+ 	ei->i_default_acl = EXT3_ACL_NOT_CACHED;
+ #endif
+ 	ei->i_block_alloc_info = NULL;
++	if (ext3_iopen_get_inode(inode))
++		return;
+ 
+ 	if (__ext3_get_inode_loc(inode, &iloc, 0))
+ 		goto bad_inode;
+Index: linux-2.6.22.15/fs/ext3/super.c
+===================================================================
+--- linux-2.6.22.15.orig/fs/ext3/super.c
++++ linux-2.6.22.15/fs/ext3/super.c
+@@ -674,6 +674,7 @@ enum {
+ 	Opt_usrjquota, Opt_grpjquota, Opt_offusrjquota, Opt_offgrpjquota,
+ 	Opt_jqfmt_vfsold, Opt_jqfmt_vfsv0, Opt_quota, Opt_noquota,
+ 	Opt_ignore, Opt_barrier, Opt_err, Opt_resize, Opt_usrquota,
++	Opt_iopen, Opt_noiopen, Opt_iopen_nopriv,
+ 	Opt_grpquota
+ };
+ 
+@@ -723,6 +724,9 @@ static match_table_t tokens = {
+ 	{Opt_noquota, "noquota"},
+ 	{Opt_quota, "quota"},
+ 	{Opt_usrquota, "usrquota"},
++	{Opt_iopen, "iopen"},
++	{Opt_noiopen, "noiopen"},
++	{Opt_iopen_nopriv, "iopen_nopriv"},
+ 	{Opt_barrier, "barrier=%u"},
+ 	{Opt_err, NULL},
+ 	{Opt_resize, "resize"},
+@@ -1038,6 +1042,18 @@ clear_qf_name:
+ 			else
+ 				clear_opt(sbi->s_mount_opt, BARRIER);
+ 			break;
++		case Opt_iopen:
++			set_opt (sbi->s_mount_opt, IOPEN);
++			clear_opt (sbi->s_mount_opt, IOPEN_NOPRIV);
++			break;
++		case Opt_noiopen:
++			clear_opt (sbi->s_mount_opt, IOPEN);
++			clear_opt (sbi->s_mount_opt, IOPEN_NOPRIV);
++			break;
++		case Opt_iopen_nopriv:
++			set_opt (sbi->s_mount_opt, IOPEN);
++			set_opt (sbi->s_mount_opt, IOPEN_NOPRIV);
++			break;
+ 		case Opt_ignore:
+ 			break;
+ 		case Opt_resize:
+Index: linux-2.6.22.15/fs/ext3/namei.c
+===================================================================
+--- linux-2.6.22.15.orig/fs/ext3/namei.c
++++ linux-2.6.22.15/fs/ext3/namei.c
+@@ -39,6 +39,7 @@
+ 
+ #include "namei.h"
+ #include "xattr.h"
++#include "iopen.h"
+ #include "acl.h"
+ 
+ /*
+@@ -1047,6 +1048,9 @@ static struct dentry *ext3_lookup(struct
+ 	if (dentry->d_name.len > EXT3_NAME_LEN)
+ 		return ERR_PTR(-ENAMETOOLONG);
+ 
++	if (ext3_check_for_iopen(dir, dentry))
++		return NULL;
++
+ 	bh = ext3_find_entry(dentry, &de);
+ 	inode = NULL;
+ 	if (bh) {
+@@ -1062,7 +1066,8 @@ static struct dentry *ext3_lookup(struct
+ 		if (!inode)
+ 			return ERR_PTR(-EACCES);
+ 	}
+-	return d_splice_alias(inode, dentry);
++
++	return iopen_connect_dentry(dentry, inode, 1);
+ }
+ 
+ 
+@@ -2123,10 +2128,6 @@ static int ext3_rmdir (struct inode * di
+ 			      inode->i_nlink);
+ 	inode->i_version++;
+ 	clear_nlink(inode);
+-	/* There's no need to set i_disksize: the fact that i_nlink is
+-	 * zero will ensure that the right thing happens during any
+-	 * recovery. */
+-	inode->i_size = 0;
+ 	ext3_orphan_add(handle, inode);
+ 	inode->i_ctime = dir->i_ctime = dir->i_mtime = CURRENT_TIME_SEC;
+ 	ext3_mark_inode_dirty(handle, inode);
+@@ -2250,6 +2251,23 @@ out_stop:
+ 	return err;
+ }
+ 
++/* Like ext3_add_nondir() except for call to iopen_connect_dentry */
++static int ext3_add_link(handle_t *handle, struct dentry *dentry,
++			 struct inode *inode)
++{
++	int err = ext3_add_entry(handle, dentry, inode);
++	if (!err) {
++		err = ext3_mark_inode_dirty(handle, inode);
++		if (err == 0) {
++			dput(iopen_connect_dentry(dentry, inode, 0));
++			return 0;
++		}
++	}
++	ext3_dec_count(handle, inode);
++	iput(inode);
++	return err;
++}
++
+ static int ext3_link (struct dentry * old_dentry,
+ 		      struct inode * dir, struct dentry *dentry)
+ {
+@@ -2279,7 +2297,8 @@ retry:
+ 	inc_nlink(inode);
+ 	atomic_inc(&inode->i_count);
+ 
+-	err = ext3_add_nondir(handle, dentry, inode);
++	err = ext3_add_link(handle, dentry, inode);
++	ext3_orphan_del(handle, inode);
+ 	ext3_journal_stop(handle);
+ 	if (err == -ENOSPC && ext3_should_retry_alloc(dir->i_sb, &retries))
+ 		goto retry;
+Index: linux-2.6.22.15/fs/ext3/Makefile
+===================================================================
+--- linux-2.6.22.15.orig/fs/ext3/Makefile
++++ linux-2.6.22.15/fs/ext3/Makefile
+@@ -4,7 +4,7 @@
+ 
+ obj-$(CONFIG_EXT3_FS) += ext3.o
+ 
+-ext3-y	:= balloc.o bitmap.o dir.o file.o fsync.o ialloc.o inode.o \
++ext3-y	:= balloc.o bitmap.o dir.o file.o fsync.o ialloc.o inode.o iopen.o \
+ 	   ioctl.o namei.o super.o symlink.o hash.o resize.o ext3_jbd.o
+ 
+ ext3-$(CONFIG_EXT3_FS_XATTR)	 += xattr.o xattr_user.o xattr_trusted.o
+Index: linux-2.6.22.15/include/linux/ext3_fs.h
+===================================================================
+--- linux-2.6.22.15.orig/include/linux/ext3_fs.h
++++ linux-2.6.22.15/include/linux/ext3_fs.h
+@@ -384,6 +384,8 @@ struct ext3_inode {
+ #define EXT3_MOUNT_QUOTA		0x80000 /* Some quota option set */
+ #define EXT3_MOUNT_USRQUOTA		0x100000 /* "old" user quota */
+ #define EXT3_MOUNT_GRPQUOTA		0x200000 /* "old" group quota */
++#define EXT3_MOUNT_IOPEN		0x400000 /* Allow access via iopen */
++#define EXT3_MOUNT_IOPEN_NOPRIV		0x800000 /* Make iopen world-readable */
+ 
+ /* Compatibility, for having both ext2_fs.h and ext3_fs.h included at once */
+ #ifndef _LINUX_EXT2_FS_H
diff --git a/ldiskfs/kernel_patches/series/ldiskfs-2.6.22-vanilla.series b/ldiskfs/kernel_patches/series/ldiskfs-2.6.22-vanilla.series
new file mode 100644
index 0000000..179eb4a
--- /dev/null
+++ b/ldiskfs/kernel_patches/series/ldiskfs-2.6.22-vanilla.series
@@ -0,0 +1,25 @@
+ext3-wantedi-2.6-rhel4.patch
+iopen-2.6.22-vanilla.patch
+ext3-map_inode_page-2.6.18.patch
+export-ext3-2.6-rhel4.patch
+ext3-include-fixes-2.6-rhel4.patch
+ext3-extents-2.6.22-vanilla.patch
+ext3-mballoc3-core.patch
+ext3-mballoc3-2.6.22.patch
+ext3-nlinks-2.6.22-vanilla.patch
+ext3-ialloc-2.6.22-vanilla.patch
+ext3-remove-cond_resched-calls-2.6.12.patch
+ext3-filterdata-sles10.patch
+ext3-uninit-2.6.22-vanilla.patch
+ext3-nanosecond-2.6.22-vanilla.patch
+ext3-inode-version-2.6.18-vanilla.patch
+ext3-mmp-2.6.22-vanilla.patch
+#ext3-fiemap-2.6.22-vanilla.patch
+ext3-statfs-2.6.22.patch
+ext3-dynlocks-common.patch
+ext3-dynlocks-2.6.22-vanilla.patch
+ext3-iam-common.patch
+ext3-iam-2.6.22-vanilla.patch
+ext3-lookup-dotdot-2.6.9.patch
+ext3-orphans-delay.patch
+ext3-export-journal-api.patch
-- 
1.8.3.1